 70 files changed, 491 insertions(+), 344 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index f216db847022..e9c7b50c612d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4049,6 +4049,12 @@ W:	http://www.pharscape.org
 S:	Maintained
 F:	drivers/net/usb/hso.c
 
+HSR NETWORK PROTOCOL
+M:	Arvid Brodin <arvid.brodin@alten.se>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	net/hsr/
+
 HTCPEN TOUCHSCREEN DRIVER
 M:	Pau Oliva Fora <pof@eslack.org>
 L:	linux-input@vger.kernel.org
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index ef5356cd280a..850246206b12 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
 	struct hash_ctx *ctx = ask->private;
 	int err;
 
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		flags |= MSG_MORE;
+
 	lock_sock(sk);
 	sg_init_table(ctx->sgl.sg, 1);
 	sg_set_page(ctx->sgl.sg, page, size, offset);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 6a6dfc062d2a..a19c027b29bd 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
 	struct skcipher_sg_list *sgl;
 	int err = -EINVAL;
 
+	if (flags & MSG_SENDPAGE_NOTLAST)
+		flags |= MSG_MORE;
+
 	lock_sock(sk);
 	if (!ctx->more && ctx->used)
 		goto unlock;
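Both crypto hunks above apply the same guard. As a rough illustrative sketch (the handler name below is hypothetical, not from the patch), a sendpage path that honours sendfile()/splice() semantics treats the kernel-internal MSG_SENDPAGE_NOTLAST flag as an implicit MSG_MORE, so the request is not finalized while more pages are still on the way:

	/* Illustrative only: example_sendpage() is a made-up handler. */
	static ssize_t example_sendpage(struct socket *sock, struct page *page,
					int offset, size_t size, int flags)
	{
		if (flags & MSG_SENDPAGE_NOTLAST)	/* more pages will follow */
			flags |= MSG_MORE;		/* so keep the request open */

		/* ... queue the page; only complete the hash/cipher operation
		 * once neither MSG_MORE nor MSG_SENDPAGE_NOTLAST is set ...
		 */
		return size;
	}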
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4dd5ee2a34cc..36eab0c4fb33 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4110,7 +4110,7 @@ static int bond_check_params(struct bond_params *params)
 		if (!miimon) {
 			pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
 			pr_warning("Forcing miimon to 100msec\n");
-			miimon = 100;
+			miimon = BOND_DEFAULT_MIIMON;
 		}
 	}
 
@@ -4147,7 +4147,7 @@ static int bond_check_params(struct bond_params *params)
 		if (!miimon) {
 			pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
 			pr_warning("Forcing miimon to 100msec\n");
-			miimon = 100;
+			miimon = BOND_DEFAULT_MIIMON;
 		}
 	}
 
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 9a5223c7b4d1..ea6f640782b7 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -45,10 +45,15 @@ int bond_option_mode_set(struct bonding *bond, int mode)
 		return -EPERM;
 	}
 
-	if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) {
-		pr_err("%s: %s mode is incompatible with arp monitoring.\n",
+	if (BOND_NO_USES_ARP(mode) && bond->params.arp_interval) {
+		pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
 		       bond->dev->name, bond_mode_tbl[mode].modename);
-		return -EINVAL;
+		/* disable arp monitoring */
+		bond->params.arp_interval = 0;
+		/* set miimon to default value */
+		bond->params.miimon = BOND_DEFAULT_MIIMON;
+		pr_info("%s: Setting MII monitoring interval to %d.\n",
+			bond->dev->name, bond->params.miimon);
 	}
 
 	/* don't cache arp_validate between modes */
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 0ec2a7e8c8a9..abf5e106edc5 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -523,9 +523,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
 		ret = -EINVAL;
 		goto out;
 	}
-	if (bond->params.mode == BOND_MODE_ALB ||
-	    bond->params.mode == BOND_MODE_TLB ||
-	    bond->params.mode == BOND_MODE_8023AD) {
+	if (BOND_NO_USES_ARP(bond->params.mode)) {
 		pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
 			bond->dev->name, bond->dev->name);
 		ret = -EINVAL;
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index ca31286aa028..a9f4f9f4d8ce 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -35,6 +35,8 @@
 
 #define BOND_MAX_ARP_TARGETS	16
 
+#define BOND_DEFAULT_MIIMON	100
+
 #define IS_UP(dev)					   \
 	      ((((dev)->flags & IFF_UP) == IFF_UP) && \
 	       netif_running(dev) && \
@@ -55,6 +57,11 @@
 		((mode) == BOND_MODE_TLB)          ||	\
 		((mode) == BOND_MODE_ALB))
 
+#define BOND_NO_USES_ARP(mode)				\
+		(((mode) == BOND_MODE_8023AD)	||	\
+		 ((mode) == BOND_MODE_TLB)	||	\
+		 ((mode) == BOND_MODE_ALB))
+
 #define TX_QUEUE_OVERRIDE(mode)				\
 			(((mode) == BOND_MODE_ACTIVEBACKUP) ||	\
 			((mode) == BOND_MODE_ROUNDROBIN))
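The new BOND_NO_USES_ARP() macro centralizes the "these modes cannot use ARP monitoring" test that bond_options.c and bond_sysfs.c previously open-coded. A minimal sketch of how a caller might use it (the helper below is hypothetical, for illustration only):

	/* Illustrative only. */
	static bool example_arp_monitor_allowed(int mode, int arp_interval)
	{
		/* 802.3ad, TLB and ALB must fall back to MII monitoring */
		if (BOND_NO_USES_ARP(mode) && arp_interval)
			return false;
		return true;
	}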
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e3fc07cf2f62..77061eebb034 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -712,22 +712,31 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
 	return 0;
 }
 
-static int c_can_get_berr_counter(const struct net_device *dev,
-				  struct can_berr_counter *bec)
+static int __c_can_get_berr_counter(const struct net_device *dev,
+				    struct can_berr_counter *bec)
 {
 	unsigned int reg_err_counter;
 	struct c_can_priv *priv = netdev_priv(dev);
 
-	c_can_pm_runtime_get_sync(priv);
-
 	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
 	bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
 				ERR_CNT_REC_SHIFT;
 	bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
 
+	return 0;
+}
+
+static int c_can_get_berr_counter(const struct net_device *dev,
+				  struct can_berr_counter *bec)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+	int err;
+
+	c_can_pm_runtime_get_sync(priv);
+	err = __c_can_get_berr_counter(dev, bec);
 	c_can_pm_runtime_put_sync(priv);
 
-	return 0;
+	return err;
 }
 
 /*
@@ -754,6 +763,7 @@ static void c_can_do_tx(struct net_device *dev)
 		if (!(val & (1 << (msg_obj_no - 1)))) {
 			can_get_echo_skb(dev,
 					msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+			c_can_object_get(dev, 0, msg_obj_no, IF_COMM_ALL);
 			stats->tx_bytes += priv->read_reg(priv,
 					C_CAN_IFACE(MSGCTRL_REG, 0))
 					& IF_MCONT_DLC_MASK;
@@ -872,7 +882,7 @@ static int c_can_handle_state_change(struct net_device *dev,
 	if (unlikely(!skb))
 		return 0;
 
-	c_can_get_berr_counter(dev, &bec);
+	__c_can_get_berr_counter(dev, &bec);
 	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
 	rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
 				ERR_CNT_RP_SHIFT;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ae08cf129ebb..aaed97bee471 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1020,13 +1020,13 @@ static int flexcan_probe(struct platform_device *pdev)
 			dev_err(&pdev->dev, "no ipg clock defined\n");
 			return PTR_ERR(clk_ipg);
 		}
-		clock_freq = clk_get_rate(clk_ipg);
 
 		clk_per = devm_clk_get(&pdev->dev, "per");
 		if (IS_ERR(clk_per)) {
 			dev_err(&pdev->dev, "no per clock defined\n");
 			return PTR_ERR(clk_per);
 		}
+		clock_freq = clk_get_rate(clk_per);
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 7164a999f50f..f17c3018b7c7 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -494,20 +494,20 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
 	uint8_t isrc, status;
 	int n = 0;
 
-	/* Shared interrupts and IRQ off? */
-	if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
-		return IRQ_NONE;
-
 	if (priv->pre_irq)
 		priv->pre_irq(priv);
 
+	/* Shared interrupts and IRQ off? */
+	if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
+		goto out;
+
 	while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
 	       (n < SJA1000_MAX_IRQ)) {
-		n++;
+
 		status = priv->read_reg(priv, SJA1000_SR);
 		/* check for absent controller due to hw unplug */
 		if (status == 0xFF && sja1000_is_absent(priv))
-			return IRQ_NONE;
+			goto out;
 
 		if (isrc & IRQ_WUI)
 			netdev_warn(dev, "wakeup interrupt\n");
@@ -535,7 +535,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
 			status = priv->read_reg(priv, SJA1000_SR);
 			/* check for absent controller */
 			if (status == 0xFF && sja1000_is_absent(priv))
-				return IRQ_NONE;
+				goto out;
 		}
 	}
 	if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
@@ -543,8 +543,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
 		if (sja1000_err(dev, isrc, status))
 			break;
 		}
+		n++;
 	}
-
+out:
 	if (priv->post_irq)
 		priv->post_irq(priv);
 
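The sja1000 change funnels every early exit in the interrupt handler through the new out: label, so a board with a pre_irq hook always gets the matching post_irq call even when the interrupt turns out not to be ours. A heavily simplified sketch of the resulting control flow (not the driver code itself):

	/* Illustrative only: condensed ISR skeleton. */
	static irqreturn_t example_isr(struct sja1000_priv *priv)
	{
		int n = 0;

		if (priv->pre_irq)
			priv->pre_irq(priv);

		if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
			goto out;	/* shared IRQ line, not ours */

		/* ... service interrupt sources, incrementing n per pass ... */
	out:
		if (priv->post_irq)
			priv->post_irq(priv);
		return n ? IRQ_HANDLED : IRQ_NONE;
	}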
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a9e068423ba0..369b736dde05 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10629,10 +10629,8 @@ static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
 static ssize_t tg3_show_temp(struct device *dev,
 			     struct device_attribute *devattr, char *buf)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct tg3 *tp = netdev_priv(netdev);
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct tg3 *tp = dev_get_drvdata(dev);
 	u32 temperature;
 
 	spin_lock_bh(&tp->lock);
@@ -10650,29 +10648,25 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
 			  TG3_TEMP_MAX_OFFSET);
 
-static struct attribute *tg3_attributes[] = {
+static struct attribute *tg3_attrs[] = {
 	&sensor_dev_attr_temp1_input.dev_attr.attr,
 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
 	&sensor_dev_attr_temp1_max.dev_attr.attr,
 	NULL
 };
-
-static const struct attribute_group tg3_group = {
-	.attrs = tg3_attributes,
-};
+ATTRIBUTE_GROUPS(tg3);
 
 static void tg3_hwmon_close(struct tg3 *tp)
 {
 	if (tp->hwmon_dev) {
 		hwmon_device_unregister(tp->hwmon_dev);
 		tp->hwmon_dev = NULL;
-		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
 	}
 }
 
 static void tg3_hwmon_open(struct tg3 *tp)
 {
-	int i, err;
+	int i;
 	u32 size = 0;
 	struct pci_dev *pdev = tp->pdev;
 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
@@ -10690,18 +10684,11 @@ static void tg3_hwmon_open(struct tg3 *tp)
 	if (!size)
 		return;
 
-	/* Register hwmon sysfs hooks */
-	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
-		return;
-	}
-
-	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
+	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
+							  tp, tg3_groups);
 	if (IS_ERR(tp->hwmon_dev)) {
 		tp->hwmon_dev = NULL;
 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
-		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
 	}
 }
 
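The tg3 conversion follows the generic hwmon_device_register_with_groups() pattern: ATTRIBUTE_GROUPS(tg3) turns tg3_attrs[] into a tg3_groups array, and the hwmon core then creates and removes the sysfs attributes (including the mandatory name attribute) itself. A hedged sketch of the pattern in a generic driver, with foo_* names as placeholders:

	/* Illustrative only: attribute array plus generated group. */
	static struct attribute *foo_attrs[] = {
		&sensor_dev_attr_temp1_input.dev_attr.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(foo);			/* emits foo_groups[] */

	/* Registration: no manual sysfs_create_group()/sysfs_remove_group(). */
	hwmon_dev = hwmon_device_register_with_groups(parent, "foo",
						      drvdata, foo_groups);
	if (IS_ERR(hwmon_dev))
		return PTR_ERR(hwmon_dev);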
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index f4825db5d179..5878df619b53 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -503,6 +503,7 @@ struct be_adapter {
 };
 
 #define be_physfn(adapter)		(!adapter->virtfn)
+#define be_virtfn(adapter)		(adapter->virtfn)
 #define sriov_enabled(adapter)		(adapter->num_vfs > 0)
 #define sriov_want(adapter)             (be_physfn(adapter) &&	\
 					 (num_vfs || pci_num_vf(adapter->pdev)))
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index dbcd5262c016..e0e8bc1ef14c 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1032,6 +1032,13 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
 	} else {
 		req->hdr.version = 2;
 		req->page_size = 1; /* 1 for 4K */
+
+		/* coalesce-wm field in this cmd is not relevant to Lancer.
+		 * Lancer uses COMMON_MODIFY_CQ to set this field
+		 */
+		if (!lancer_chip(adapter))
+			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+				      ctxt, coalesce_wm);
 		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
 			      no_delay);
 		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index abde97471636..fee64bf10446 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2658,8 +2658,8 @@ static int be_close(struct net_device *netdev)
 
 	be_roce_dev_close(adapter);
 
-	for_all_evt_queues(adapter, eqo, i) {
-		if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+		for_all_evt_queues(adapter, eqo, i) {
 			napi_disable(&eqo->napi);
 			be_disable_busy_poll(eqo);
 		}
@@ -3253,12 +3253,10 @@ static int be_mac_setup(struct be_adapter *adapter)
 		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
 	}
 
-	/* On BE3 VFs this cmd may fail due to lack of privilege.
-	 * Ignore the failure as in this case pmac_id is fetched
-	 * in the IFACE_CREATE cmd.
-	 */
-	be_cmd_pmac_add(adapter, mac, adapter->if_handle,
-			&adapter->pmac_id[0], 0);
+	/* For BE3-R VFs, the PF programs the initial MAC address */
+	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
+		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+				&adapter->pmac_id[0], 0);
 	return 0;
 }
 
@@ -4599,6 +4597,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
 	if (adapter->wol)
 		be_setup_wol(adapter, true);
 
+	be_intr_set(adapter, false);
 	cancel_delayed_work_sync(&adapter->func_recovery_work);
 
 	netif_device_detach(netdev);
@@ -4634,6 +4633,7 @@ static int be_resume(struct pci_dev *pdev)
 	if (status)
 		return status;
 
+	be_intr_set(adapter, true);
 	/* tell fw we're ready to fire cmds */
 	status = be_cmd_fw_init(adapter);
 	if (status)
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 58c147271a36..f9313b36c887 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -83,6 +83,11 @@ struct e1000_adapter;
 
 #define E1000_MAX_INTR			10
 
+/*
+ * Count for polling __E1000_RESET condition every 10-20msec.
+ */
+#define E1000_CHECK_RESET_COUNT	50
+
 /* TX/RX descriptor defines */
 #define E1000_DEFAULT_TXD		256
 #define E1000_MAX_TXD			256
@@ -312,8 +317,6 @@ struct e1000_adapter {
 	struct delayed_work watchdog_task;
 	struct delayed_work fifo_stall_task;
 	struct delayed_work phy_info_task;
-
-	struct mutex mutex;
 };
 
 enum e1000_state_t {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index e38622825fa7..46e6544ed1b7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -494,13 +494,20 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
 {
 	set_bit(__E1000_DOWN, &adapter->flags);
 
-	/* Only kill reset task if adapter is not resetting */
-	if (!test_bit(__E1000_RESETTING, &adapter->flags))
-		cancel_work_sync(&adapter->reset_task);
-
 	cancel_delayed_work_sync(&adapter->watchdog_task);
+
+	/*
+	 * Since the watchdog task can reschedule other tasks, we should cancel
+	 * it first, otherwise we can run into the situation when a work is
+	 * still running after the adapter has been turned down.
+	 */
+
 	cancel_delayed_work_sync(&adapter->phy_info_task);
 	cancel_delayed_work_sync(&adapter->fifo_stall_task);
+
+	/* Only kill reset task if adapter is not resetting */
+	if (!test_bit(__E1000_RESETTING, &adapter->flags))
+		cancel_work_sync(&adapter->reset_task);
 }
 
 void e1000_down(struct e1000_adapter *adapter)
@@ -544,21 +551,8 @@ void e1000_down(struct e1000_adapter *adapter)
 	e1000_clean_all_rx_rings(adapter);
 }
 
-static void e1000_reinit_safe(struct e1000_adapter *adapter)
-{
-	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
-		msleep(1);
-	mutex_lock(&adapter->mutex);
-	e1000_down(adapter);
-	e1000_up(adapter);
-	mutex_unlock(&adapter->mutex);
-	clear_bit(__E1000_RESETTING, &adapter->flags);
-}
-
 void e1000_reinit_locked(struct e1000_adapter *adapter)
 {
-	/* if rtnl_lock is not held the call path is bogus */
-	ASSERT_RTNL();
 	WARN_ON(in_interrupt());
 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 		msleep(1);
@@ -1316,7 +1310,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
 	e1000_irq_disable(adapter);
 
 	spin_lock_init(&adapter->stats_lock);
-	mutex_init(&adapter->mutex);
 
 	set_bit(__E1000_DOWN, &adapter->flags);
 
@@ -1440,6 +1433,10 @@ static int e1000_close(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	int count = E1000_CHECK_RESET_COUNT;
+
+	while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+		usleep_range(10000, 20000);
 
 	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
 	e1000_down(adapter);
@@ -2325,11 +2322,8 @@ static void e1000_update_phy_info_task(struct work_struct *work)
 	struct e1000_adapter *adapter = container_of(work,
 						     struct e1000_adapter,
 						     phy_info_task.work);
-	if (test_bit(__E1000_DOWN, &adapter->flags))
-		return;
-	mutex_lock(&adapter->mutex);
+
 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
-	mutex_unlock(&adapter->mutex);
 }
 
 /**
@@ -2345,9 +2339,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
 	struct net_device *netdev = adapter->netdev;
 	u32 tctl;
 
-	if (test_bit(__E1000_DOWN, &adapter->flags))
-		return;
-	mutex_lock(&adapter->mutex);
 	if (atomic_read(&adapter->tx_fifo_stall)) {
 		if ((er32(TDT) == er32(TDH)) &&
 		    (er32(TDFT) == er32(TDFH)) &&
@@ -2368,7 +2359,6 @@ static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
 		}
 	}
-	mutex_unlock(&adapter->mutex);
 }
 
 bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2422,10 +2412,6 @@ static void e1000_watchdog(struct work_struct *work)
 	struct e1000_tx_ring *txdr = adapter->tx_ring;
 	u32 link, tctl;
 
-	if (test_bit(__E1000_DOWN, &adapter->flags))
-		return;
-
-	mutex_lock(&adapter->mutex);
 	link = e1000_has_link(adapter);
 	if ((netif_carrier_ok(netdev)) && link)
 		goto link_up;
@@ -2516,7 +2502,7 @@ link_up:
 			adapter->tx_timeout_count++;
 			schedule_work(&adapter->reset_task);
 			/* exit immediately since reset is imminent */
-			goto unlock;
+			return;
 		}
 	}
 
@@ -2544,9 +2530,6 @@ link_up:
 	/* Reschedule the task */
 	if (!test_bit(__E1000_DOWN, &adapter->flags))
 		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
-
-unlock:
-	mutex_unlock(&adapter->mutex);
 }
 
 enum latency_range {
@@ -3495,10 +3478,8 @@ static void e1000_reset_task(struct work_struct *work)
 	struct e1000_adapter *adapter =
 		container_of(work, struct e1000_adapter, reset_task);
 
-	if (test_bit(__E1000_DOWN, &adapter->flags))
-		return;
 	e_err(drv, "Reset adapter\n");
-	e1000_reinit_safe(adapter);
+	e1000_reinit_locked(adapter);
 }
 
 /**
@@ -4963,6 +4944,11 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	netif_device_detach(netdev);
 
 	if (netif_running(netdev)) {
+		int count = E1000_CHECK_RESET_COUNT;
+
+		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
+			usleep_range(10000, 20000);
+
 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
 		e1000_down(adapter);
 	}
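The e1000 changes replace the driver-private mutex with bit-flag polling: close and shutdown now wait for an in-flight reset to finish before tearing the adapter down. With E1000_CHECK_RESET_COUNT = 50 and a 10-20 ms sleep per iteration, that is roughly 0.5-1 second of grace before the existing WARN_ON fires. A condensed sketch of the wait (the helper name is hypothetical, not in the patch):

	/* Illustrative only. */
	static void example_wait_for_reset(struct e1000_adapter *adapter)
	{
		int count = E1000_CHECK_RESET_COUNT;

		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
			usleep_range(10000, 20000);	/* 10-20 ms per try */
	}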
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index b0f3666b1d7f..c3143da497c8 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2062,14 +2062,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
-	wol->supported = WAKE_UCAST | WAKE_MCAST |
-			 WAKE_BCAST | WAKE_MAGIC |
-			 WAKE_PHY;
 	wol->wolopts = 0;
 
 	if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
 		return;
 
+	wol->supported = WAKE_UCAST | WAKE_MCAST |
+			 WAKE_BCAST | WAKE_MAGIC |
+			 WAKE_PHY;
+
 	/* apply any specific unsupported masks here */
 	switch (adapter->hw.device_id) {
 	default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0c55079ebee3..cc06854296a3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4251,8 +4251,8 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
 	rx_ring->l2_accel_priv = NULL;
 }
 
-int ixgbe_fwd_ring_down(struct net_device *vdev,
-			struct ixgbe_fwd_adapter *accel)
+static int ixgbe_fwd_ring_down(struct net_device *vdev,
+			       struct ixgbe_fwd_adapter *accel)
 {
 	struct ixgbe_adapter *adapter = accel->real_adapter;
 	unsigned int rxbase = accel->rx_base_queue;
@@ -7986,10 +7986,9 @@ skip_sriov:
 			   NETIF_F_TSO |
 			   NETIF_F_TSO6 |
 			   NETIF_F_RXHASH |
-			   NETIF_F_RXCSUM |
-			   NETIF_F_HW_L2FW_DOFFLOAD;
+			   NETIF_F_RXCSUM;
 
-	netdev->hw_features = netdev->features;
+	netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
 
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_82599EB:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e4c676006be9..39217e5ff7dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -46,6 +46,7 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl);
 static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
 
 /**
  *  ixgbe_identify_phy_generic - Get physical layer module
@@ -1164,7 +1165,7 @@ err_read_i2c_eeprom:
  *
  * Searches for and identifies the QSFP module and assigns appropriate PHY type
  **/
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
 {
 	struct ixgbe_adapter *adapter = hw->back;
 	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index aae900a256da..fffcbdd2bf0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -145,7 +145,6 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
 s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
 					 u16 *list_offset,
 					 u16 *data_offset);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 40626690e8a8..c11d063473e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -140,7 +140,6 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
-	struct mlx4_en_tx_ring *tx_ring;
 	int i, carrier_ok;
 
 	memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -150,16 +149,10 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
 	carrier_ok = netif_carrier_ok(dev);
 
 	netif_carrier_off(dev);
-retry_tx:
 	/* Wait until all tx queues are empty.
 	 * there should not be any additional incoming traffic
 	 * since we turned the carrier off */
 	msleep(200);
-	for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
-		tx_ring = priv->tx_ring[i];
-		if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
-			goto retry_tx;
-	}
 
 	if (priv->mdev->dev->caps.flags &
 			MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index f2a2128165dd..737c1a881f78 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp)
 				 le32_to_cpu(txd->opts1) & 0xffff,
 				 PCI_DMA_TODEVICE);
 
-		bytes_compl += skb->len;
-		pkts_compl++;
-
 		if (status & LastFrag) {
 			if (status & (TxError | TxFIFOUnder)) {
 				netif_dbg(cp, tx_err, cp->dev,
@@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp)
 				netif_dbg(cp, tx_done, cp->dev,
 					  "tx done, slot %d\n", tx_tail);
 			}
+			bytes_compl += skb->len;
+			pkts_compl++;
 			dev_kfree_skb_irq(skb);
 		}
 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 799387570766..c737f0ea5de7 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3465,6 +3465,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
 	rtl_writephy(tp, 0x14, 0x9065);
 	rtl_writephy(tp, 0x14, 0x1065);
 
+	/* Check ALDPS bit, disable it if enabled */
+	rtl_writephy(tp, 0x1f, 0x0a43);
+	if (rtl_readphy(tp, 0x10) & 0x0004)
+		rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
+
 	rtl_writephy(tp, 0x1f, 0x0000);
 }
 
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 656a3277c2b2..15816cacb548 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -75,6 +75,8 @@ struct efx_mcdi_mon {
 	unsigned long last_update;
 	struct device *device;
 	struct efx_mcdi_mon_attribute *attrs;
+	struct attribute_group group;
+	const struct attribute_group *groups[2];
 	unsigned int n_attrs;
 };
 
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 4cc5d95b2a5a..d72ad4fc3617 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -139,17 +139,10 @@ static int efx_mcdi_mon_update(struct efx_nic *efx)
 	return rc;
 }
 
-static ssize_t efx_mcdi_mon_show_name(struct device *dev,
-				      struct device_attribute *attr,
-				      char *buf)
-{
-	return sprintf(buf, "%s\n", KBUILD_MODNAME);
-}
-
 static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
 				  efx_dword_t *entry)
 {
-	struct efx_nic *efx = dev_get_drvdata(dev);
+	struct efx_nic *efx = dev_get_drvdata(dev->parent);
 	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
 	int rc;
 
@@ -263,7 +256,7 @@ static ssize_t efx_mcdi_mon_show_label(struct device *dev,
 		       efx_mcdi_sensor_type[mon_attr->type].label);
 }
 
-static int
+static void
 efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
 		      ssize_t (*reader)(struct device *,
 					struct device_attribute *, char *),
@@ -272,7 +265,6 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
 {
 	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
 	struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
-	int rc;
 
 	strlcpy(attr->name, name, sizeof(attr->name));
 	attr->index = index;
@@ -286,10 +278,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
 	attr->dev_attr.attr.name = attr->name;
 	attr->dev_attr.attr.mode = S_IRUGO;
 	attr->dev_attr.show = reader;
-	rc = device_create_file(&efx->pci_dev->dev, &attr->dev_attr);
-	if (rc == 0)
-		++hwmon->n_attrs;
-	return rc;
+	hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
 }
 
 int efx_mcdi_mon_probe(struct efx_nic *efx)
@@ -338,26 +327,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
 	efx_mcdi_mon_update(efx);
 
 	/* Allocate space for the maximum possible number of
-	 * attributes for this set of sensors: name of the driver plus
+	 * attributes for this set of sensors:
 	 * value, min, max, crit, alarm and label for each sensor.
 	 */
-	n_attrs = 1 + 6 * n_sensors;
+	n_attrs = 6 * n_sensors;
 	hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
 	if (!hwmon->attrs) {
 		rc = -ENOMEM;
 		goto fail;
 	}
-
-	hwmon->device = hwmon_device_register(&efx->pci_dev->dev);
-	if (IS_ERR(hwmon->device)) {
-		rc = PTR_ERR(hwmon->device);
+	hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
+				     GFP_KERNEL);
+	if (!hwmon->group.attrs) {
+		rc = -ENOMEM;
 		goto fail;
 	}
 
-	rc = efx_mcdi_mon_add_attr(efx, "name", efx_mcdi_mon_show_name, 0, 0, 0);
-	if (rc)
-		goto fail;
-
 	for (i = 0, j = -1, type = -1; ; i++) {
 		enum efx_hwmon_type hwmon_type;
 		const char *hwmon_prefix;
@@ -372,7 +357,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
 			page = type / 32;
 			j = -1;
 			if (page == n_pages)
-				return 0;
+				goto hwmon_register;
 
 			MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
 				       page);
@@ -453,28 +438,22 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
 		if (min1 != max1) {
 			snprintf(name, sizeof(name), "%s%u_input",
 				 hwmon_prefix, hwmon_index);
-			rc = efx_mcdi_mon_add_attr(
+			efx_mcdi_mon_add_attr(
 				efx, name, efx_mcdi_mon_show_value, i, type, 0);
-			if (rc)
-				goto fail;
 
 			if (hwmon_type != EFX_HWMON_POWER) {
 				snprintf(name, sizeof(name), "%s%u_min",
 					 hwmon_prefix, hwmon_index);
-				rc = efx_mcdi_mon_add_attr(
+				efx_mcdi_mon_add_attr(
 					efx, name, efx_mcdi_mon_show_limit,
 					i, type, min1);
-				if (rc)
-					goto fail;
 			}
 
 			snprintf(name, sizeof(name), "%s%u_max",
 				 hwmon_prefix, hwmon_index);
-			rc = efx_mcdi_mon_add_attr(
+			efx_mcdi_mon_add_attr(
 				efx, name, efx_mcdi_mon_show_limit,
 				i, type, max1);
-			if (rc)
-				goto fail;
 
 			if (min2 != max2) {
 				/* Assume max2 is critical value.
@@ -482,32 +461,38 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
 				 */
 				snprintf(name, sizeof(name), "%s%u_crit",
 					 hwmon_prefix, hwmon_index);
-				rc = efx_mcdi_mon_add_attr(
+				efx_mcdi_mon_add_attr(
 					efx, name, efx_mcdi_mon_show_limit,
 					i, type, max2);
-				if (rc)
-					goto fail;
 			}
 		}
 
 		snprintf(name, sizeof(name), "%s%u_alarm",
 			 hwmon_prefix, hwmon_index);
-		rc = efx_mcdi_mon_add_attr(
+		efx_mcdi_mon_add_attr(
 			efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
-		if (rc)
-			goto fail;
 
 		if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
 		    efx_mcdi_sensor_type[type].label) {
 			snprintf(name, sizeof(name), "%s%u_label",
 				 hwmon_prefix, hwmon_index);
-			rc = efx_mcdi_mon_add_attr(
+			efx_mcdi_mon_add_attr(
 				efx, name, efx_mcdi_mon_show_label, i, type, 0);
-			if (rc)
-				goto fail;
 		}
 	}
 
+hwmon_register:
+	hwmon->groups[0] = &hwmon->group;
+	hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
+							   KBUILD_MODNAME, NULL,
+							   hwmon->groups);
+	if (IS_ERR(hwmon->device)) {
+		rc = PTR_ERR(hwmon->device);
+		goto fail;
+	}
+
+	return 0;
+
 fail:
 	efx_mcdi_mon_remove(efx);
 	return rc;
@@ -516,14 +501,11 @@ fail:
 void efx_mcdi_mon_remove(struct efx_nic *efx)
 {
 	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
-	unsigned int i;
 
-	for (i = 0; i < hwmon->n_attrs; i++)
-		device_remove_file(&efx->pci_dev->dev,
-				   &hwmon->attrs[i].dev_attr);
-	kfree(hwmon->attrs);
 	if (hwmon->device)
 		hwmon_device_unregister(hwmon->device);
+	kfree(hwmon->attrs);
+	kfree(hwmon->group.attrs);
 	efx_nic_free_buffer(efx, &hwmon->dma_buf);
 }
 
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index c9d4c872e81d..749654b976bc 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -46,7 +46,8 @@
 	defined(CONFIG_MACH_LITTLETON) ||\
 	defined(CONFIG_MACH_ZYLONITE2) ||\
 	defined(CONFIG_ARCH_VIPER) ||\
-	defined(CONFIG_MACH_STARGATE2)
+	defined(CONFIG_MACH_STARGATE2) ||\
+	defined(CONFIG_ARCH_VERSATILE)
 
 #include <asm/mach-types.h>
 
@@ -154,6 +155,8 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 #define SMC_outl(v, a, r)	writel(v, (a) + (r))
 #define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
 #define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
+#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
 #define SMC_IRQ_FLAGS		(-1)	/* from resource */
 
 /* We actually can't write halfwords properly if not word aligned */
@@ -206,23 +209,6 @@
 #define RPC_LSA_DEFAULT		RPC_LED_TX_RX
 #define RPC_LSB_DEFAULT		RPC_LED_100_10
 
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	1
-#define SMC_NOWAIT		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_inl(a, r)		readl((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_outl(v, a, r)	writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
-#define SMC_IRQ_FLAGS		(-1)	/* from resource */
-
 #elif defined(CONFIG_MN10300)
 
 /*
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d022bf936572..ad61d26a44f3 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2172,16 +2172,13 @@ static int velocity_poll(struct napi_struct *napi, int budget)
 	unsigned int rx_done;
 	unsigned long flags;
 
-	spin_lock_irqsave(&vptr->lock, flags);
 	/*
 	 * Do rx and tx twice for performance (taken from the VIA
 	 * out-of-tree driver).
 	 */
-	rx_done = velocity_rx_srv(vptr, budget / 2);
-	velocity_tx_srv(vptr);
-	rx_done += velocity_rx_srv(vptr, budget - rx_done);
+	rx_done = velocity_rx_srv(vptr, budget);
+	spin_lock_irqsave(&vptr->lock, flags);
 	velocity_tx_srv(vptr);
-
 	/* If budget not fully consumed, exit the polling mode */
 	if (rx_done < budget) {
 		napi_complete(napi);
@@ -2342,6 +2339,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 	if (ret < 0)
 		goto out_free_tmp_vptr_1;
 
+	napi_disable(&vptr->napi);
+
 	spin_lock_irqsave(&vptr->lock, flags);
 
 	netif_stop_queue(dev);
@@ -2362,6 +2361,8 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
 
 	velocity_give_many_rx_descs(vptr);
 
+	napi_enable(&vptr->napi);
+
 	mac_enable_int(vptr->mac_regs);
 	netif_start_queue(dev);
 
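velocity_change_mtu() now brackets the ring reconfiguration with napi_disable()/napi_enable(), so the poll handler cannot run while the rx/tx rings are being swapped. The shape of the fix, heavily condensed and for illustration only:

	/* Illustrative only: simplified reconfiguration sequence. */
	napi_disable(&vptr->napi);	/* quiesce the poll handler */
	/* ... stop the queue, swap in the resized rings, repost rx descriptors ... */
	napi_enable(&vptr->napi);	/* resume polling on the new rings */
	mac_enable_int(vptr->mac_regs);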
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index dc76670c2f2a..9093004f9b63 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -744,7 +744,7 @@ err:
 	rcu_read_lock();
 	vlan = rcu_dereference(q->vlan);
 	if (vlan)
-		vlan->dev->stats.tx_dropped++;
+		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
 	rcu_read_unlock();
 
 	return err;
@@ -767,7 +767,6 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 				const struct sk_buff *skb,
 				const struct iovec *iv, int len)
 {
-	struct macvlan_dev *vlan;
 	int ret;
 	int vnet_hdr_len = 0;
 	int vlan_offset = 0;
@@ -821,15 +820,6 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 	copied += len;
 
 done:
-	rcu_read_lock();
-	vlan = rcu_dereference(q->vlan);
-	if (vlan) {
-		preempt_disable();
-		macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
-		preempt_enable();
-	}
-	rcu_read_unlock();
-
 	return ret ? ret : copied;
 }
 
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 508e4359338b..14372c65a7e8 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c | |||
@@ -64,6 +64,7 @@ | |||
64 | 64 | ||
65 | #define PHY_ID_VSC8234 0x000fc620 | 65 | #define PHY_ID_VSC8234 0x000fc620 |
66 | #define PHY_ID_VSC8244 0x000fc6c0 | 66 | #define PHY_ID_VSC8244 0x000fc6c0 |
67 | #define PHY_ID_VSC8514 0x00070670 | ||
67 | #define PHY_ID_VSC8574 0x000704a0 | 68 | #define PHY_ID_VSC8574 0x000704a0 |
68 | #define PHY_ID_VSC8662 0x00070660 | 69 | #define PHY_ID_VSC8662 0x00070660 |
69 | #define PHY_ID_VSC8221 0x000fc550 | 70 | #define PHY_ID_VSC8221 0x000fc550 |
@@ -131,6 +132,7 @@ static int vsc82xx_config_intr(struct phy_device *phydev) | |||
131 | err = phy_write(phydev, MII_VSC8244_IMASK, | 132 | err = phy_write(phydev, MII_VSC8244_IMASK, |
132 | (phydev->drv->phy_id == PHY_ID_VSC8234 || | 133 | (phydev->drv->phy_id == PHY_ID_VSC8234 || |
133 | phydev->drv->phy_id == PHY_ID_VSC8244 || | 134 | phydev->drv->phy_id == PHY_ID_VSC8244 || |
135 | phydev->drv->phy_id == PHY_ID_VSC8514 || | ||
134 | phydev->drv->phy_id == PHY_ID_VSC8574) ? | 136 | phydev->drv->phy_id == PHY_ID_VSC8574) ? |
135 | MII_VSC8244_IMASK_MASK : | 137 | MII_VSC8244_IMASK_MASK : |
136 | MII_VSC8221_IMASK_MASK); | 138 | MII_VSC8221_IMASK_MASK); |
@@ -246,6 +248,18 @@ static struct phy_driver vsc82xx_driver[] = { | |||
246 | .config_intr = &vsc82xx_config_intr, | 248 | .config_intr = &vsc82xx_config_intr, |
247 | .driver = { .owner = THIS_MODULE,}, | 249 | .driver = { .owner = THIS_MODULE,}, |
248 | }, { | 250 | }, { |
251 | .phy_id = PHY_ID_VSC8514, | ||
252 | .name = "Vitesse VSC8514", | ||
253 | .phy_id_mask = 0x000ffff0, | ||
254 | .features = PHY_GBIT_FEATURES, | ||
255 | .flags = PHY_HAS_INTERRUPT, | ||
256 | .config_init = &vsc824x_config_init, | ||
257 | .config_aneg = &vsc82x4_config_aneg, | ||
258 | .read_status = &genphy_read_status, | ||
259 | .ack_interrupt = &vsc824x_ack_interrupt, | ||
260 | .config_intr = &vsc82xx_config_intr, | ||
261 | .driver = { .owner = THIS_MODULE,}, | ||
262 | }, { | ||
249 | .phy_id = PHY_ID_VSC8574, | 263 | .phy_id = PHY_ID_VSC8574, |
250 | .name = "Vitesse VSC8574", | 264 | .name = "Vitesse VSC8574", |
251 | .phy_id_mask = 0x000ffff0, | 265 | .phy_id_mask = 0x000ffff0, |
@@ -315,6 +329,7 @@ module_exit(vsc82xx_exit); | |||
315 | static struct mdio_device_id __maybe_unused vitesse_tbl[] = { | 329 | static struct mdio_device_id __maybe_unused vitesse_tbl[] = { |
316 | { PHY_ID_VSC8234, 0x000ffff0 }, | 330 | { PHY_ID_VSC8234, 0x000ffff0 }, |
317 | { PHY_ID_VSC8244, 0x000fffc0 }, | 331 | { PHY_ID_VSC8244, 0x000fffc0 }, |
332 | { PHY_ID_VSC8514, 0x000ffff0 }, | ||
318 | { PHY_ID_VSC8574, 0x000ffff0 }, | 333 | { PHY_ID_VSC8574, 0x000ffff0 }, |
319 | { PHY_ID_VSC8662, 0x000ffff0 }, | 334 | { PHY_ID_VSC8662, 0x000ffff0 }, |
320 | { PHY_ID_VSC8221, 0x000ffff0 }, | 335 | { PHY_ID_VSC8221, 0x000ffff0 }, |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 34b0de09d881..736050d6b451 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -1366,6 +1366,8 @@ static int team_user_linkup_option_get(struct team *team, | |||
1366 | return 0; | 1366 | return 0; |
1367 | } | 1367 | } |
1368 | 1368 | ||
1369 | static void __team_carrier_check(struct team *team); | ||
1370 | |||
1369 | static int team_user_linkup_option_set(struct team *team, | 1371 | static int team_user_linkup_option_set(struct team *team, |
1370 | struct team_gsetter_ctx *ctx) | 1372 | struct team_gsetter_ctx *ctx) |
1371 | { | 1373 | { |
@@ -1373,6 +1375,7 @@ static int team_user_linkup_option_set(struct team *team, | |||
1373 | 1375 | ||
1374 | port->user.linkup = ctx->data.bool_val; | 1376 | port->user.linkup = ctx->data.bool_val; |
1375 | team_refresh_port_linkup(port); | 1377 | team_refresh_port_linkup(port); |
1378 | __team_carrier_check(port->team); | ||
1376 | return 0; | 1379 | return 0; |
1377 | } | 1380 | } |
1378 | 1381 | ||
@@ -1392,6 +1395,7 @@ static int team_user_linkup_en_option_set(struct team *team, | |||
1392 | 1395 | ||
1393 | port->user.linkup_enabled = ctx->data.bool_val; | 1396 | port->user.linkup_enabled = ctx->data.bool_val; |
1394 | team_refresh_port_linkup(port); | 1397 | team_refresh_port_linkup(port); |
1398 | __team_carrier_check(port->team); | ||
1395 | return 0; | 1399 | return 0; |
1396 | } | 1400 | } |
1397 | 1401 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7bab4de658a9..916241d16c67 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -299,35 +299,76 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq, | |||
299 | return skb; | 299 | return skb; |
300 | } | 300 | } |
301 | 301 | ||
302 | static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) | 302 | static struct sk_buff *receive_small(void *buf, unsigned int len) |
303 | { | 303 | { |
304 | struct skb_vnet_hdr *hdr = skb_vnet_hdr(head_skb); | 304 | struct sk_buff * skb = buf; |
305 | |||
306 | len -= sizeof(struct virtio_net_hdr); | ||
307 | skb_trim(skb, len); | ||
308 | |||
309 | return skb; | ||
310 | } | ||
311 | |||
312 | static struct sk_buff *receive_big(struct net_device *dev, | ||
313 | struct receive_queue *rq, | ||
314 | void *buf, | ||
315 | unsigned int len) | ||
316 | { | ||
317 | struct page *page = buf; | ||
318 | struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE); | ||
319 | |||
320 | if (unlikely(!skb)) | ||
321 | goto err; | ||
322 | |||
323 | return skb; | ||
324 | |||
325 | err: | ||
326 | dev->stats.rx_dropped++; | ||
327 | give_pages(rq, page); | ||
328 | return NULL; | ||
329 | } | ||
330 | |||
331 | static struct sk_buff *receive_mergeable(struct net_device *dev, | ||
332 | struct receive_queue *rq, | ||
333 | void *buf, | ||
334 | unsigned int len) | ||
335 | { | ||
336 | struct skb_vnet_hdr *hdr = buf; | ||
337 | int num_buf = hdr->mhdr.num_buffers; | ||
338 | struct page *page = virt_to_head_page(buf); | ||
339 | int offset = buf - page_address(page); | ||
340 | struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, | ||
341 | MERGE_BUFFER_LEN); | ||
305 | struct sk_buff *curr_skb = head_skb; | 342 | struct sk_buff *curr_skb = head_skb; |
306 | char *buf; | ||
307 | struct page *page; | ||
308 | int num_buf, len, offset; | ||
309 | 343 | ||
310 | num_buf = hdr->mhdr.num_buffers; | 344 | if (unlikely(!curr_skb)) |
345 | goto err_skb; | ||
346 | |||
311 | while (--num_buf) { | 347 | while (--num_buf) { |
312 | int num_skb_frags = skb_shinfo(curr_skb)->nr_frags; | 348 | int num_skb_frags; |
349 | |||
313 | buf = virtqueue_get_buf(rq->vq, &len); | 350 | buf = virtqueue_get_buf(rq->vq, &len); |
314 | if (unlikely(!buf)) { | 351 | if (unlikely(!buf)) { |
315 | pr_debug("%s: rx error: %d buffers missing\n", | 352 | pr_debug("%s: rx error: %d buffers out of %d missing\n", |
316 | head_skb->dev->name, hdr->mhdr.num_buffers); | 353 | dev->name, num_buf, hdr->mhdr.num_buffers); |
317 | head_skb->dev->stats.rx_length_errors++; | 354 | dev->stats.rx_length_errors++; |
318 | return -EINVAL; | 355 | goto err_buf; |
319 | } | 356 | } |
320 | if (unlikely(len > MERGE_BUFFER_LEN)) { | 357 | if (unlikely(len > MERGE_BUFFER_LEN)) { |
321 | pr_debug("%s: rx error: merge buffer too long\n", | 358 | pr_debug("%s: rx error: merge buffer too long\n", |
322 | head_skb->dev->name); | 359 | dev->name); |
323 | len = MERGE_BUFFER_LEN; | 360 | len = MERGE_BUFFER_LEN; |
324 | } | 361 | } |
362 | |||
363 | page = virt_to_head_page(buf); | ||
364 | --rq->num; | ||
365 | |||
366 | num_skb_frags = skb_shinfo(curr_skb)->nr_frags; | ||
325 | if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { | 367 | if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { |
326 | struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); | 368 | struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); |
327 | if (unlikely(!nskb)) { | 369 | |
328 | head_skb->dev->stats.rx_dropped++; | 370 | if (unlikely(!nskb)) |
329 | return -ENOMEM; | 371 | goto err_skb; |
330 | } | ||
331 | if (curr_skb == head_skb) | 372 | if (curr_skb == head_skb) |
332 | skb_shinfo(curr_skb)->frag_list = nskb; | 373 | skb_shinfo(curr_skb)->frag_list = nskb; |
333 | else | 374 | else |
@@ -341,8 +382,7 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) | |||
341 | head_skb->len += len; | 382 | head_skb->len += len; |
342 | head_skb->truesize += MERGE_BUFFER_LEN; | 383 | head_skb->truesize += MERGE_BUFFER_LEN; |
343 | } | 384 | } |
344 | page = virt_to_head_page(buf); | 385 | offset = buf - page_address(page); |
345 | offset = buf - (char *)page_address(page); | ||
346 | if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { | 386 | if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { |
347 | put_page(page); | 387 | put_page(page); |
348 | skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, | 388 | skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, |
@@ -351,9 +391,28 @@ static int receive_mergeable(struct receive_queue *rq, struct sk_buff *head_skb) | |||
351 | skb_add_rx_frag(curr_skb, num_skb_frags, page, | 391 | skb_add_rx_frag(curr_skb, num_skb_frags, page, |
352 | offset, len, MERGE_BUFFER_LEN); | 392 | offset, len, MERGE_BUFFER_LEN); |
353 | } | 393 | } |
394 | } | ||
395 | |||
396 | return head_skb; | ||
397 | |||
398 | err_skb: | ||
399 | put_page(page); | ||
400 | while (--num_buf) { | ||
401 | buf = virtqueue_get_buf(rq->vq, &len); | ||
402 | if (unlikely(!buf)) { | ||
403 | pr_debug("%s: rx error: %d buffers missing\n", | ||
404 | dev->name, num_buf); | ||
405 | dev->stats.rx_length_errors++; | ||
406 | break; | ||
407 | } | ||
408 | page = virt_to_head_page(buf); | ||
409 | put_page(page); | ||
354 | --rq->num; | 410 | --rq->num; |
355 | } | 411 | } |
356 | return 0; | 412 | err_buf: |
413 | dev->stats.rx_dropped++; | ||
414 | dev_kfree_skb(head_skb); | ||
415 | return NULL; | ||
357 | } | 416 | } |
358 | 417 | ||
359 | static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) | 418 | static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) |
@@ -362,7 +421,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) | |||
362 | struct net_device *dev = vi->dev; | 421 | struct net_device *dev = vi->dev; |
363 | struct virtnet_stats *stats = this_cpu_ptr(vi->stats); | 422 | struct virtnet_stats *stats = this_cpu_ptr(vi->stats); |
364 | struct sk_buff *skb; | 423 | struct sk_buff *skb; |
365 | struct page *page; | ||
366 | struct skb_vnet_hdr *hdr; | 424 | struct skb_vnet_hdr *hdr; |
367 | 425 | ||
368 | if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { | 426 | if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { |
@@ -377,33 +435,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) | |||
377 | return; | 435 | return; |
378 | } | 436 | } |
379 | 437 | ||
380 | if (!vi->mergeable_rx_bufs && !vi->big_packets) { | 438 | if (vi->mergeable_rx_bufs) |
381 | skb = buf; | 439 | skb = receive_mergeable(dev, rq, buf, len); |
382 | len -= sizeof(struct virtio_net_hdr); | 440 | else if (vi->big_packets) |
383 | skb_trim(skb, len); | 441 | skb = receive_big(dev, rq, buf, len); |
384 | } else if (vi->mergeable_rx_bufs) { | 442 | else |
385 | struct page *page = virt_to_head_page(buf); | 443 | skb = receive_small(buf, len); |
386 | skb = page_to_skb(rq, page, | 444 | |
387 | (char *)buf - (char *)page_address(page), | 445 | if (unlikely(!skb)) |
388 | len, MERGE_BUFFER_LEN); | 446 | return; |
389 | if (unlikely(!skb)) { | ||
390 | dev->stats.rx_dropped++; | ||
391 | put_page(page); | ||
392 | return; | ||
393 | } | ||
394 | if (receive_mergeable(rq, skb)) { | ||
395 | dev_kfree_skb(skb); | ||
396 | return; | ||
397 | } | ||
398 | } else { | ||
399 | page = buf; | ||
400 | skb = page_to_skb(rq, page, 0, len, PAGE_SIZE); | ||
401 | if (unlikely(!skb)) { | ||
402 | dev->stats.rx_dropped++; | ||
403 | give_pages(rq, page); | ||
404 | return; | ||
405 | } | ||
406 | } | ||
407 | 447 | ||
408 | hdr = skb_vnet_hdr(skb); | 448 | hdr = skb_vnet_hdr(skb); |
409 | 449 | ||
@@ -1084,7 +1124,7 @@ static void virtnet_set_rx_mode(struct net_device *dev) | |||
1084 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | 1124 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
1085 | VIRTIO_NET_CTRL_MAC_TABLE_SET, | 1125 | VIRTIO_NET_CTRL_MAC_TABLE_SET, |
1086 | sg, NULL)) | 1126 | sg, NULL)) |
1087 | dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); | 1127 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
1088 | 1128 | ||
1089 | kfree(buf); | 1129 | kfree(buf); |
1090 | } | 1130 | } |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 919b6509455c..64f0e0d18b81 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/udp.h> | 39 | #include <linux/udp.h> |
40 | 40 | ||
41 | #include <net/tcp.h> | 41 | #include <net/tcp.h> |
42 | #include <net/ip6_checksum.h> | ||
42 | 43 | ||
43 | #include <xen/xen.h> | 44 | #include <xen/xen.h> |
44 | #include <xen/events.h> | 45 | #include <xen/events.h> |
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index bd6f743d87a7..892ea6161376 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
@@ -1404,11 +1404,22 @@ enum { | |||
1404 | }; | 1404 | }; |
1405 | #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) | 1405 | #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) |
1406 | 1406 | ||
1407 | static struct genl_multicast_group pmcraid_mcgrps[] = { | ||
1408 | { .name = "events", /* not really used - see ID discussion below */ }, | ||
1409 | }; | ||
1410 | |||
1407 | static struct genl_family pmcraid_event_family = { | 1411 | static struct genl_family pmcraid_event_family = { |
1408 | .id = GENL_ID_GENERATE, | 1412 | /* |
1413 | * Due to prior multicast group abuse (the code having assumed that | ||
1414 | * the family ID can be used as a multicast group ID) we need to | ||
1415 | * statically allocate a family (and thus group) ID. | ||
1416 | */ | ||
1417 | .id = GENL_ID_PMCRAID, | ||
1409 | .name = "pmcraid", | 1418 | .name = "pmcraid", |
1410 | .version = 1, | 1419 | .version = 1, |
1411 | .maxattr = PMCRAID_AEN_ATTR_MAX | 1420 | .maxattr = PMCRAID_AEN_ATTR_MAX, |
1421 | .mcgrps = pmcraid_mcgrps, | ||
1422 | .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps), | ||
1412 | }; | 1423 | }; |
1413 | 1424 | ||
1414 | /** | 1425 | /** |
@@ -1511,9 +1522,8 @@ static int pmcraid_notify_aen( | |||
1511 | return result; | 1522 | return result; |
1512 | } | 1523 | } |
1513 | 1524 | ||
1514 | result = | 1525 | result = genlmsg_multicast(&pmcraid_event_family, skb, |
1515 | genlmsg_multicast(&pmcraid_event_family, skb, 0, | 1526 | 0, 0, GFP_ATOMIC); |
1516 | pmcraid_event_family.id, GFP_ATOMIC); | ||
1517 | 1527 | ||
1518 | /* If there are no listeners, genlmsg_multicast may return non-zero | 1528 | /* If there are no listeners, genlmsg_multicast may return non-zero |
1519 | * value. | 1529 | * value. |
diff --git a/include/net/ip.h b/include/net/ip.h index 217bc5bfc6c6..5a25f36fe3a7 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
@@ -473,7 +473,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, | |||
473 | int ip_ra_control(struct sock *sk, unsigned char on, | 473 | int ip_ra_control(struct sock *sk, unsigned char on, |
474 | void (*destructor)(struct sock *)); | 474 | void (*destructor)(struct sock *)); |
475 | 475 | ||
476 | int ip_recv_error(struct sock *sk, struct msghdr *msg, int len); | 476 | int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len); |
477 | void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, | 477 | void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, |
478 | u32 info, u8 *payload); | 478 | u32 info, u8 *payload); |
479 | void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, | 479 | void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport, |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 2a5f668cd683..eb198acaac1d 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -776,8 +776,10 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
776 | 776 | ||
777 | int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); | 777 | int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); |
778 | 778 | ||
779 | int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len); | 779 | int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, |
780 | int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len); | 780 | int *addr_len); |
781 | int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, | ||
782 | int *addr_len); | ||
781 | void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, | 783 | void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, |
782 | u32 info, u8 *payload); | 784 | u32 info, u8 *payload); |
783 | void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); | 785 | void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); |
diff --git a/include/net/ping.h b/include/net/ping.h index 3f67704f3747..90f48417b03d 100644 --- a/include/net/ping.h +++ b/include/net/ping.h | |||
@@ -31,7 +31,8 @@ | |||
31 | 31 | ||
32 | /* Compatibility glue so we can support IPv6 when it's compiled as a module */ | 32 | /* Compatibility glue so we can support IPv6 when it's compiled as a module */ |
33 | struct pingv6_ops { | 33 | struct pingv6_ops { |
34 | int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len); | 34 | int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len, |
35 | int *addr_len); | ||
35 | int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg, | 36 | int (*ip6_datagram_recv_ctl)(struct sock *sk, struct msghdr *msg, |
36 | struct sk_buff *skb); | 37 | struct sk_buff *skb); |
37 | int (*icmpv6_err_convert)(u8 type, u8 code, int *err); | 38 | int (*icmpv6_err_convert)(u8 type, u8 code, int *err); |
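The addr_len out-parameter added to ip_recv_error(), ipv6_recv_error() and ipv6_recv_rxpmtu() above lets the MSG_ERRQUEUE path report how much of msg_name it filled in, the same way an ordinary receive does, so the socket layer does not copy a stale msg_namelen back to userspace. A sketch of the caller-side contract follows; the handler name is illustrative, not a kernel symbol.

/* Illustrative only: a datagram recvmsg() handler forwards its addr_len
 * pointer so the error-queue path sets msg_namelen just like the normal
 * receive path does.
 */
static int example_recvmsg(struct kiocb *iocb, struct sock *sk,
			   struct msghdr *msg, size_t len, int noblock,
			   int flags, int *addr_len)
{
	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

	/* ... normal receive path fills *addr_len when msg_name is used ... */
	return 0;
}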
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 2174d8da0770..ea0ca5f6e629 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -629,6 +629,7 @@ struct sctp_chunk { | |||
629 | #define SCTP_NEED_FRTX 0x1 | 629 | #define SCTP_NEED_FRTX 0x1 |
630 | #define SCTP_DONT_FRTX 0x2 | 630 | #define SCTP_DONT_FRTX 0x2 |
631 | __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */ | 631 | __u16 rtt_in_progress:1, /* This chunk used for RTT calc? */ |
632 | resent:1, /* Has this chunk ever been resent. */ | ||
632 | has_tsn:1, /* Does this chunk have a TSN yet? */ | 633 | has_tsn:1, /* Does this chunk have a TSN yet? */ |
633 | has_ssn:1, /* Does this chunk have a SSN yet? */ | 634 | has_ssn:1, /* Does this chunk have a SSN yet? */ |
634 | singleton:1, /* Only chunk in the packet? */ | 635 | singleton:1, /* Only chunk in the packet? */ |
diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h index 1af72d8228e0..c3363ba1ae05 100644 --- a/include/uapi/linux/genetlink.h +++ b/include/uapi/linux/genetlink.h | |||
@@ -28,6 +28,7 @@ struct genlmsghdr { | |||
28 | #define GENL_ID_GENERATE 0 | 28 | #define GENL_ID_GENERATE 0 |
29 | #define GENL_ID_CTRL NLMSG_MIN_TYPE | 29 | #define GENL_ID_CTRL NLMSG_MIN_TYPE |
30 | #define GENL_ID_VFS_DQUOT (NLMSG_MIN_TYPE + 1) | 30 | #define GENL_ID_VFS_DQUOT (NLMSG_MIN_TYPE + 1) |
31 | #define GENL_ID_PMCRAID (NLMSG_MIN_TYPE + 2) | ||
31 | 32 | ||
32 | /************************************************************************** | 33 | /************************************************************************** |
33 | * Controller | 34 | * Controller |
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index b78566f59aba..6db460121f84 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h | |||
@@ -488,7 +488,9 @@ enum { | |||
488 | IFLA_HSR_UNSPEC, | 488 | IFLA_HSR_UNSPEC, |
489 | IFLA_HSR_SLAVE1, | 489 | IFLA_HSR_SLAVE1, |
490 | IFLA_HSR_SLAVE2, | 490 | IFLA_HSR_SLAVE2, |
491 | IFLA_HSR_MULTICAST_SPEC, | 491 | IFLA_HSR_MULTICAST_SPEC, /* Last byte of supervision addr */ |
492 | IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ | ||
493 | IFLA_HSR_SEQ_NR, | ||
492 | __IFLA_HSR_MAX, | 494 | __IFLA_HSR_MAX, |
493 | }; | 495 | }; |
494 | 496 | ||
diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h index 4e31db4eea41..f2159d30d1f5 100644 --- a/include/uapi/linux/netlink_diag.h +++ b/include/uapi/linux/netlink_diag.h | |||
@@ -33,6 +33,7 @@ struct netlink_diag_ring { | |||
33 | }; | 33 | }; |
34 | 34 | ||
35 | enum { | 35 | enum { |
36 | /* NETLINK_DIAG_NONE, standard nl API requires this attribute! */ | ||
36 | NETLINK_DIAG_MEMINFO, | 37 | NETLINK_DIAG_MEMINFO, |
37 | NETLINK_DIAG_GROUPS, | 38 | NETLINK_DIAG_GROUPS, |
38 | NETLINK_DIAG_RX_RING, | 39 | NETLINK_DIAG_RX_RING, |
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h index b2cc0cd9c4d9..d08c63f3dd6f 100644 --- a/include/uapi/linux/packet_diag.h +++ b/include/uapi/linux/packet_diag.h | |||
@@ -29,6 +29,7 @@ struct packet_diag_msg { | |||
29 | }; | 29 | }; |
30 | 30 | ||
31 | enum { | 31 | enum { |
32 | /* PACKET_DIAG_NONE, standard nl API requires this attribute! */ | ||
32 | PACKET_DIAG_INFO, | 33 | PACKET_DIAG_INFO, |
33 | PACKET_DIAG_MCLIST, | 34 | PACKET_DIAG_MCLIST, |
34 | PACKET_DIAG_RX_RING, | 35 | PACKET_DIAG_RX_RING, |
diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h index b9e2a6a7446f..1eb0b8dd1830 100644 --- a/include/uapi/linux/unix_diag.h +++ b/include/uapi/linux/unix_diag.h | |||
@@ -31,6 +31,7 @@ struct unix_diag_msg { | |||
31 | }; | 31 | }; |
32 | 32 | ||
33 | enum { | 33 | enum { |
34 | /* UNIX_DIAG_NONE, standard nl API requires this attribute! */ | ||
34 | UNIX_DIAG_NAME, | 35 | UNIX_DIAG_NAME, |
35 | UNIX_DIAG_VFS, | 36 | UNIX_DIAG_VFS, |
36 | UNIX_DIAG_PEER, | 37 | UNIX_DIAG_PEER, |
diff --git a/net/compat.c b/net/compat.c index 618c6a8a911b..dd32e34c1e2c 100644 --- a/net/compat.c +++ b/net/compat.c | |||
@@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) | |||
72 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) | 72 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) |
73 | return -EFAULT; | 73 | return -EFAULT; |
74 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) | 74 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) |
75 | return -EINVAL; | 75 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); |
76 | kmsg->msg_name = compat_ptr(tmp1); | 76 | kmsg->msg_name = compat_ptr(tmp1); |
77 | kmsg->msg_iov = compat_ptr(tmp2); | 77 | kmsg->msg_iov = compat_ptr(tmp2); |
78 | kmsg->msg_control = compat_ptr(tmp3); | 78 | kmsg->msg_control = compat_ptr(tmp3); |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 261357a66300..a797fff7f222 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -2527,6 +2527,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev, | |||
2527 | if (x) { | 2527 | if (x) { |
2528 | int ret; | 2528 | int ret; |
2529 | __u8 *eth; | 2529 | __u8 *eth; |
2530 | struct iphdr *iph; | ||
2531 | |||
2530 | nhead = x->props.header_len - skb_headroom(skb); | 2532 | nhead = x->props.header_len - skb_headroom(skb); |
2531 | if (nhead > 0) { | 2533 | if (nhead > 0) { |
2532 | ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); | 2534 | ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); |
@@ -2548,6 +2550,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev, | |||
2548 | eth = (__u8 *) skb_push(skb, ETH_HLEN); | 2550 | eth = (__u8 *) skb_push(skb, ETH_HLEN); |
2549 | memcpy(eth, pkt_dev->hh, 12); | 2551 | memcpy(eth, pkt_dev->hh, 12); |
2550 | *(u16 *) &eth[12] = protocol; | 2552 | *(u16 *) &eth[12] = protocol; |
2553 | |||
2554 | /* Update IPv4 header len as well as checksum value */ | ||
2555 | iph = ip_hdr(skb); | ||
2556 | iph->tot_len = htons(skb->len - ETH_HLEN); | ||
2557 | ip_send_check(iph); | ||
2551 | } | 2558 | } |
2552 | } | 2559 | } |
2553 | return 1; | 2560 | return 1; |
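After pktgen rewrites tot_len for the ESP-encapsulated packet, the IPv4 header checksum must be refreshed, which is what the added ip_send_check() call does. For reference, it is equivalent to the following sketch (shown here only to make the added call self-explanatory):

/* Recompute the IPv4 header checksum over the header words only. */
static inline void ip_send_check_sketch(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}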
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 003f5bb3acd2..4bdab1521878 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c | |||
@@ -288,7 +288,8 @@ void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr, | |||
288 | static bool seq_nr_after(u16 a, u16 b) | 288 | static bool seq_nr_after(u16 a, u16 b) |
289 | { | 289 | { |
290 | /* Remove inconsistency where | 290 | /* Remove inconsistency where |
291 | * seq_nr_after(a, b) == seq_nr_before(a, b) */ | 291 | * seq_nr_after(a, b) == seq_nr_before(a, b) |
292 | */ | ||
292 | if ((int) b - a == 32768) | 293 | if ((int) b - a == 32768) |
293 | return false; | 294 | return false; |
294 | 295 | ||
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 5325af85eea6..01a5261ac7a5 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c | |||
@@ -23,6 +23,8 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = { | |||
23 | [IFLA_HSR_SLAVE1] = { .type = NLA_U32 }, | 23 | [IFLA_HSR_SLAVE1] = { .type = NLA_U32 }, |
24 | [IFLA_HSR_SLAVE2] = { .type = NLA_U32 }, | 24 | [IFLA_HSR_SLAVE2] = { .type = NLA_U32 }, |
25 | [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 }, | 25 | [IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 }, |
26 | [IFLA_HSR_SUPERVISION_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN }, | ||
27 | [IFLA_HSR_SEQ_NR] = { .type = NLA_U16 }, | ||
26 | }; | 28 | }; |
27 | 29 | ||
28 | 30 | ||
@@ -59,6 +61,31 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, | |||
59 | return hsr_dev_finalize(dev, link, multicast_spec); | 61 | return hsr_dev_finalize(dev, link, multicast_spec); |
60 | } | 62 | } |
61 | 63 | ||
64 | static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) | ||
65 | { | ||
66 | struct hsr_priv *hsr_priv; | ||
67 | |||
68 | hsr_priv = netdev_priv(dev); | ||
69 | |||
70 | if (hsr_priv->slave[0]) | ||
71 | if (nla_put_u32(skb, IFLA_HSR_SLAVE1, hsr_priv->slave[0]->ifindex)) | ||
72 | goto nla_put_failure; | ||
73 | |||
74 | if (hsr_priv->slave[1]) | ||
75 | if (nla_put_u32(skb, IFLA_HSR_SLAVE2, hsr_priv->slave[1]->ifindex)) | ||
76 | goto nla_put_failure; | ||
77 | |||
78 | if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN, | ||
79 | hsr_priv->sup_multicast_addr) || | ||
80 | nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr_priv->sequence_nr)) | ||
81 | goto nla_put_failure; | ||
82 | |||
83 | return 0; | ||
84 | |||
85 | nla_put_failure: | ||
86 | return -EMSGSIZE; | ||
87 | } | ||
88 | |||
62 | static struct rtnl_link_ops hsr_link_ops __read_mostly = { | 89 | static struct rtnl_link_ops hsr_link_ops __read_mostly = { |
63 | .kind = "hsr", | 90 | .kind = "hsr", |
64 | .maxtype = IFLA_HSR_MAX, | 91 | .maxtype = IFLA_HSR_MAX, |
@@ -66,6 +93,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = { | |||
66 | .priv_size = sizeof(struct hsr_priv), | 93 | .priv_size = sizeof(struct hsr_priv), |
67 | .setup = hsr_dev_setup, | 94 | .setup = hsr_dev_setup, |
68 | .newlink = hsr_newlink, | 95 | .newlink = hsr_newlink, |
96 | .fill_info = hsr_fill_info, | ||
69 | }; | 97 | }; |
70 | 98 | ||
71 | 99 | ||
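hsr_fill_info() above exposes the slave ifindexes, the supervision address and the current sequence number over rtnetlink. A matching .get_size callback would reserve room for exactly those attributes; the sketch below is illustrative only (the patch adds fill_info, the callback here and its name are assumptions):

static size_t hsr_get_size_sketch(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_HSR_SLAVE1 */
	       nla_total_size(sizeof(u32)) +	/* IFLA_HSR_SLAVE2 */
	       nla_total_size(ETH_ALEN) +	/* IFLA_HSR_SUPERVISION_ADDR */
	       nla_total_size(sizeof(u16));	/* IFLA_HSR_SEQ_NR */
}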
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 3f858266fa7e..ddf32a6bc415 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c | |||
@@ -386,7 +386,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf | |||
386 | /* | 386 | /* |
387 | * Handle MSG_ERRQUEUE | 387 | * Handle MSG_ERRQUEUE |
388 | */ | 388 | */ |
389 | int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) | 389 | int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) |
390 | { | 390 | { |
391 | struct sock_exterr_skb *serr; | 391 | struct sock_exterr_skb *serr; |
392 | struct sk_buff *skb, *skb2; | 392 | struct sk_buff *skb, *skb2; |
@@ -423,6 +423,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
423 | serr->addr_offset); | 423 | serr->addr_offset); |
424 | sin->sin_port = serr->port; | 424 | sin->sin_port = serr->port; |
425 | memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); | 425 | memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); |
426 | *addr_len = sizeof(*sin); | ||
426 | } | 427 | } |
427 | 428 | ||
428 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); | 429 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); |
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 876c6ca2d8f9..242e7f4ed6f4 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -772,7 +772,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
772 | err = PTR_ERR(rt); | 772 | err = PTR_ERR(rt); |
773 | rt = NULL; | 773 | rt = NULL; |
774 | if (err == -ENETUNREACH) | 774 | if (err == -ENETUNREACH) |
775 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); | 775 | IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); |
776 | goto out; | 776 | goto out; |
777 | } | 777 | } |
778 | 778 | ||
@@ -841,10 +841,11 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
841 | 841 | ||
842 | if (flags & MSG_ERRQUEUE) { | 842 | if (flags & MSG_ERRQUEUE) { |
843 | if (family == AF_INET) { | 843 | if (family == AF_INET) { |
844 | return ip_recv_error(sk, msg, len); | 844 | return ip_recv_error(sk, msg, len, addr_len); |
845 | #if IS_ENABLED(CONFIG_IPV6) | 845 | #if IS_ENABLED(CONFIG_IPV6) |
846 | } else if (family == AF_INET6) { | 846 | } else if (family == AF_INET6) { |
847 | return pingv6_ops.ipv6_recv_error(sk, msg, len); | 847 | return pingv6_ops.ipv6_recv_error(sk, msg, len, |
848 | addr_len); | ||
848 | #endif | 849 | #endif |
849 | } | 850 | } |
850 | } | 851 | } |
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c index ce848461acbb..46d6a1c923a8 100644 --- a/net/ipv4/protocol.c +++ b/net/ipv4/protocol.c | |||
@@ -31,10 +31,6 @@ | |||
31 | const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; | 31 | const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; |
32 | const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly; | 32 | const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly; |
33 | 33 | ||
34 | /* | ||
35 | * Add a protocol handler to the hash tables | ||
36 | */ | ||
37 | |||
38 | int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) | 34 | int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) |
39 | { | 35 | { |
40 | if (!prot->netns_ok) { | 36 | if (!prot->netns_ok) { |
@@ -55,10 +51,6 @@ int inet_add_offload(const struct net_offload *prot, unsigned char protocol) | |||
55 | } | 51 | } |
56 | EXPORT_SYMBOL(inet_add_offload); | 52 | EXPORT_SYMBOL(inet_add_offload); |
57 | 53 | ||
58 | /* | ||
59 | * Remove a protocol from the hash tables. | ||
60 | */ | ||
61 | |||
62 | int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) | 54 | int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) |
63 | { | 55 | { |
64 | int ret; | 56 | int ret; |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 5cb8ddb505ee..23c3e5b5bb53 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -697,7 +697,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
697 | goto out; | 697 | goto out; |
698 | 698 | ||
699 | if (flags & MSG_ERRQUEUE) { | 699 | if (flags & MSG_ERRQUEUE) { |
700 | err = ip_recv_error(sk, msg, len); | 700 | err = ip_recv_error(sk, msg, len, addr_len); |
701 | goto out; | 701 | goto out; |
702 | } | 702 | } |
703 | 703 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 59a6f8b90cd9..067213924751 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -177,7 +177,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
177 | if (IS_ERR(rt)) { | 177 | if (IS_ERR(rt)) { |
178 | err = PTR_ERR(rt); | 178 | err = PTR_ERR(rt); |
179 | if (err == -ENETUNREACH) | 179 | if (err == -ENETUNREACH) |
180 | IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); | 180 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); |
181 | return err; | 181 | return err; |
182 | } | 182 | } |
183 | 183 | ||
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c index 03e9154f7e68..269a89ecd2f4 100644 --- a/net/ipv4/tcp_memcontrol.c +++ b/net/ipv4/tcp_memcontrol.c | |||
@@ -60,7 +60,6 @@ EXPORT_SYMBOL(tcp_destroy_cgroup); | |||
60 | static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) | 60 | static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) |
61 | { | 61 | { |
62 | struct cg_proto *cg_proto; | 62 | struct cg_proto *cg_proto; |
63 | u64 old_lim; | ||
64 | int i; | 63 | int i; |
65 | int ret; | 64 | int ret; |
66 | 65 | ||
@@ -71,7 +70,6 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) | |||
71 | if (val > RES_COUNTER_MAX) | 70 | if (val > RES_COUNTER_MAX) |
72 | val = RES_COUNTER_MAX; | 71 | val = RES_COUNTER_MAX; |
73 | 72 | ||
74 | old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT); | ||
75 | ret = res_counter_set_limit(&cg_proto->memory_allocated, val); | 73 | ret = res_counter_set_limit(&cg_proto->memory_allocated, val); |
76 | if (ret) | 74 | if (ret) |
77 | return ret; | 75 | return ret; |
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index a2b68a108eae..05606353c7e7 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c | |||
@@ -274,33 +274,32 @@ static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff * | |||
274 | { | 274 | { |
275 | const struct iphdr *iph = skb_gro_network_header(skb); | 275 | const struct iphdr *iph = skb_gro_network_header(skb); |
276 | __wsum wsum; | 276 | __wsum wsum; |
277 | __sum16 sum; | 277 | |
278 | /* Don't bother verifying checksum if we're going to flush anyway. */ | ||
279 | if (NAPI_GRO_CB(skb)->flush) | ||
280 | goto skip_csum; | ||
281 | |||
282 | wsum = skb->csum; | ||
278 | 283 | ||
279 | switch (skb->ip_summed) { | 284 | switch (skb->ip_summed) { |
285 | case CHECKSUM_NONE: | ||
286 | wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), | ||
287 | 0); | ||
288 | |||
289 | /* fall through */ | ||
290 | |||
280 | case CHECKSUM_COMPLETE: | 291 | case CHECKSUM_COMPLETE: |
281 | if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, | 292 | if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, |
282 | skb->csum)) { | 293 | wsum)) { |
283 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 294 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
284 | break; | 295 | break; |
285 | } | 296 | } |
286 | flush: | 297 | |
287 | NAPI_GRO_CB(skb)->flush = 1; | 298 | NAPI_GRO_CB(skb)->flush = 1; |
288 | return NULL; | 299 | return NULL; |
289 | |||
290 | case CHECKSUM_NONE: | ||
291 | wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr, | ||
292 | skb_gro_len(skb), IPPROTO_TCP, 0); | ||
293 | sum = csum_fold(skb_checksum(skb, | ||
294 | skb_gro_offset(skb), | ||
295 | skb_gro_len(skb), | ||
296 | wsum)); | ||
297 | if (sum) | ||
298 | goto flush; | ||
299 | |||
300 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
301 | break; | ||
302 | } | 300 | } |
303 | 301 | ||
302 | skip_csum: | ||
304 | return tcp_gro_receive(head, skb); | 303 | return tcp_gro_receive(head, skb); |
305 | } | 304 | } |
306 | 305 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 5944d7d668dd..44f6a20fa29d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -999,7 +999,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
999 | err = PTR_ERR(rt); | 999 | err = PTR_ERR(rt); |
1000 | rt = NULL; | 1000 | rt = NULL; |
1001 | if (err == -ENETUNREACH) | 1001 | if (err == -ENETUNREACH) |
1002 | IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); | 1002 | IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); |
1003 | goto out; | 1003 | goto out; |
1004 | } | 1004 | } |
1005 | 1005 | ||
@@ -1098,6 +1098,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset, | |||
1098 | struct udp_sock *up = udp_sk(sk); | 1098 | struct udp_sock *up = udp_sk(sk); |
1099 | int ret; | 1099 | int ret; |
1100 | 1100 | ||
1101 | if (flags & MSG_SENDPAGE_NOTLAST) | ||
1102 | flags |= MSG_MORE; | ||
1103 | |||
1101 | if (!up->pending) { | 1104 | if (!up->pending) { |
1102 | struct msghdr msg = { .msg_flags = flags|MSG_MORE }; | 1105 | struct msghdr msg = { .msg_flags = flags|MSG_MORE }; |
1103 | 1106 | ||
@@ -1236,7 +1239,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1236 | bool slow; | 1239 | bool slow; |
1237 | 1240 | ||
1238 | if (flags & MSG_ERRQUEUE) | 1241 | if (flags & MSG_ERRQUEUE) |
1239 | return ip_recv_error(sk, msg, len); | 1242 | return ip_recv_error(sk, msg, len, addr_len); |
1240 | 1243 | ||
1241 | try_again: | 1244 | try_again: |
1242 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), | 1245 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), |
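The MSG_SENDPAGE_NOTLAST handling added to udp_sendpage() above follows the splice()/sendfile() convention: every page except the last is pushed with MSG_SENDPAGE_NOTLAST set, and the handler translates that into MSG_MORE so the datagram is not flushed until the final chunk arrives. A minimal sketch of that convention; the handler name is illustrative, not a kernel symbol.

static ssize_t example_sendpage(struct sock *sk, struct page *page,
				int offset, size_t size, int flags)
{
	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;	/* keep corking until the last page */

	/* ... append the page; transmit once MSG_MORE is no longer set ... */
	return size;
}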
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index a454b0ff57c7..8dfe1f4d3c1a 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -318,7 +318,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) | |||
318 | /* | 318 | /* |
319 | * Handle MSG_ERRQUEUE | 319 | * Handle MSG_ERRQUEUE |
320 | */ | 320 | */ |
321 | int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | 321 | int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) |
322 | { | 322 | { |
323 | struct ipv6_pinfo *np = inet6_sk(sk); | 323 | struct ipv6_pinfo *np = inet6_sk(sk); |
324 | struct sock_exterr_skb *serr; | 324 | struct sock_exterr_skb *serr; |
@@ -369,6 +369,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
369 | &sin->sin6_addr); | 369 | &sin->sin6_addr); |
370 | sin->sin6_scope_id = 0; | 370 | sin->sin6_scope_id = 0; |
371 | } | 371 | } |
372 | *addr_len = sizeof(*sin); | ||
372 | } | 373 | } |
373 | 374 | ||
374 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); | 375 | memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); |
@@ -377,6 +378,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
377 | if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) { | 378 | if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) { |
378 | sin->sin6_family = AF_INET6; | 379 | sin->sin6_family = AF_INET6; |
379 | sin->sin6_flowinfo = 0; | 380 | sin->sin6_flowinfo = 0; |
381 | sin->sin6_port = 0; | ||
380 | if (skb->protocol == htons(ETH_P_IPV6)) { | 382 | if (skb->protocol == htons(ETH_P_IPV6)) { |
381 | sin->sin6_addr = ipv6_hdr(skb)->saddr; | 383 | sin->sin6_addr = ipv6_hdr(skb)->saddr; |
382 | if (np->rxopt.all) | 384 | if (np->rxopt.all) |
@@ -423,7 +425,8 @@ EXPORT_SYMBOL_GPL(ipv6_recv_error); | |||
423 | /* | 425 | /* |
424 | * Handle IPV6_RECVPATHMTU | 426 | * Handle IPV6_RECVPATHMTU |
425 | */ | 427 | */ |
426 | int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len) | 428 | int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, |
429 | int *addr_len) | ||
427 | { | 430 | { |
428 | struct ipv6_pinfo *np = inet6_sk(sk); | 431 | struct ipv6_pinfo *np = inet6_sk(sk); |
429 | struct sk_buff *skb; | 432 | struct sk_buff *skb; |
@@ -457,6 +460,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len) | |||
457 | sin->sin6_port = 0; | 460 | sin->sin6_port = 0; |
458 | sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id; | 461 | sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id; |
459 | sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr; | 462 | sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr; |
463 | *addr_len = sizeof(*sin); | ||
460 | } | 464 | } |
461 | 465 | ||
462 | put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info); | 466 | put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 59df872e2f4d..4acdb63495db 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -116,8 +116,8 @@ static int ip6_finish_output2(struct sk_buff *skb) | |||
116 | } | 116 | } |
117 | rcu_read_unlock_bh(); | 117 | rcu_read_unlock_bh(); |
118 | 118 | ||
119 | IP6_INC_STATS_BH(dev_net(dst->dev), | 119 | IP6_INC_STATS(dev_net(dst->dev), |
120 | ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); | 120 | ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); |
121 | kfree_skb(skb); | 121 | kfree_skb(skb); |
122 | return -EINVAL; | 122 | return -EINVAL; |
123 | } | 123 | } |
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 8815e31a87fe..a83243c3d656 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c | |||
@@ -57,7 +57,8 @@ static struct inet_protosw pingv6_protosw = { | |||
57 | 57 | ||
58 | 58 | ||
59 | /* Compatibility glue so we can support IPv6 when it's compiled as a module */ | 59 | /* Compatibility glue so we can support IPv6 when it's compiled as a module */ |
60 | static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) | 60 | static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, |
61 | int *addr_len) | ||
61 | { | 62 | { |
62 | return -EAFNOSUPPORT; | 63 | return -EAFNOSUPPORT; |
63 | } | 64 | } |
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c index 22d1bd4670da..e048cf1bb6a2 100644 --- a/net/ipv6/protocol.c +++ b/net/ipv6/protocol.c | |||
@@ -36,10 +36,6 @@ int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol | |||
36 | } | 36 | } |
37 | EXPORT_SYMBOL(inet6_add_protocol); | 37 | EXPORT_SYMBOL(inet6_add_protocol); |
38 | 38 | ||
39 | /* | ||
40 | * Remove a protocol from the hash tables. | ||
41 | */ | ||
42 | |||
43 | int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol) | 39 | int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol) |
44 | { | 40 | { |
45 | int ret; | 41 | int ret; |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index e24ff1df0401..7fb4e14c467f 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -466,10 +466,10 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
466 | return -EOPNOTSUPP; | 466 | return -EOPNOTSUPP; |
467 | 467 | ||
468 | if (flags & MSG_ERRQUEUE) | 468 | if (flags & MSG_ERRQUEUE) |
469 | return ipv6_recv_error(sk, msg, len); | 469 | return ipv6_recv_error(sk, msg, len, addr_len); |
470 | 470 | ||
471 | if (np->rxpmtu && np->rxopt.bits.rxpmtu) | 471 | if (np->rxpmtu && np->rxopt.bits.rxpmtu) |
472 | return ipv6_recv_rxpmtu(sk, msg, len); | 472 | return ipv6_recv_rxpmtu(sk, msg, len, addr_len); |
473 | 473 | ||
474 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 474 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
475 | if (!skb) | 475 | if (!skb) |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1b4a4a953675..366fbba3359a 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -478,14 +478,44 @@ static void ipip6_tunnel_uninit(struct net_device *dev) | |||
478 | dev_put(dev); | 478 | dev_put(dev); |
479 | } | 479 | } |
480 | 480 | ||
481 | /* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH | ||
482 | * if sufficient data bytes are available | ||
483 | */ | ||
484 | static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb) | ||
485 | { | ||
486 | const struct iphdr *iph = (const struct iphdr *) skb->data; | ||
487 | struct rt6_info *rt; | ||
488 | struct sk_buff *skb2; | ||
489 | |||
490 | if (!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct ipv6hdr) + 8)) | ||
491 | return 1; | ||
492 | |||
493 | skb2 = skb_clone(skb, GFP_ATOMIC); | ||
494 | |||
495 | if (!skb2) | ||
496 | return 1; | ||
497 | |||
498 | skb_dst_drop(skb2); | ||
499 | skb_pull(skb2, iph->ihl * 4); | ||
500 | skb_reset_network_header(skb2); | ||
501 | |||
502 | rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0); | ||
503 | |||
504 | if (rt && rt->dst.dev) | ||
505 | skb2->dev = rt->dst.dev; | ||
506 | |||
507 | icmpv6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); | ||
508 | |||
509 | if (rt) | ||
510 | ip6_rt_put(rt); | ||
511 | |||
512 | kfree_skb(skb2); | ||
513 | |||
514 | return 0; | ||
515 | } | ||
481 | 516 | ||
482 | static int ipip6_err(struct sk_buff *skb, u32 info) | 517 | static int ipip6_err(struct sk_buff *skb, u32 info) |
483 | { | 518 | { |
484 | |||
485 | /* All the routers (except for Linux) return only | ||
486 | 8 bytes of packet payload. It means, that precise relaying of | ||
487 | ICMP in the real Internet is absolutely infeasible. | ||
488 | */ | ||
489 | const struct iphdr *iph = (const struct iphdr *)skb->data; | 519 | const struct iphdr *iph = (const struct iphdr *)skb->data; |
490 | const int type = icmp_hdr(skb)->type; | 520 | const int type = icmp_hdr(skb)->type; |
491 | const int code = icmp_hdr(skb)->code; | 521 | const int code = icmp_hdr(skb)->code; |
@@ -500,7 +530,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
500 | case ICMP_DEST_UNREACH: | 530 | case ICMP_DEST_UNREACH: |
501 | switch (code) { | 531 | switch (code) { |
502 | case ICMP_SR_FAILED: | 532 | case ICMP_SR_FAILED: |
503 | case ICMP_PORT_UNREACH: | ||
504 | /* Impossible event. */ | 533 | /* Impossible event. */ |
505 | return 0; | 534 | return 0; |
506 | default: | 535 | default: |
@@ -545,6 +574,9 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
545 | goto out; | 574 | goto out; |
546 | 575 | ||
547 | err = 0; | 576 | err = 0; |
577 | if (!ipip6_err_gen_icmpv6_unreach(skb)) | ||
578 | goto out; | ||
579 | |||
548 | if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) | 580 | if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) |
549 | goto out; | 581 | goto out; |
550 | 582 | ||
@@ -919,7 +951,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
919 | if (!new_skb) { | 951 | if (!new_skb) { |
920 | ip_rt_put(rt); | 952 | ip_rt_put(rt); |
921 | dev->stats.tx_dropped++; | 953 | dev->stats.tx_dropped++; |
922 | dev_kfree_skb(skb); | 954 | kfree_skb(skb); |
923 | return NETDEV_TX_OK; | 955 | return NETDEV_TX_OK; |
924 | } | 956 | } |
925 | if (skb->sk) | 957 | if (skb->sk) |
@@ -945,7 +977,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, | |||
945 | tx_error_icmp: | 977 | tx_error_icmp: |
946 | dst_link_failure(skb); | 978 | dst_link_failure(skb); |
947 | tx_error: | 979 | tx_error: |
948 | dev_kfree_skb(skb); | 980 | kfree_skb(skb); |
949 | out: | 981 | out: |
950 | dev->stats.tx_errors++; | 982 | dev->stats.tx_errors++; |
951 | return NETDEV_TX_OK; | 983 | return NETDEV_TX_OK; |
@@ -985,7 +1017,7 @@ static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb, | |||
985 | 1017 | ||
986 | tx_err: | 1018 | tx_err: |
987 | dev->stats.tx_errors++; | 1019 | dev->stats.tx_errors++; |
988 | dev_kfree_skb(skb); | 1020 | kfree_skb(skb); |
989 | return NETDEV_TX_OK; | 1021 | return NETDEV_TX_OK; |
990 | 1022 | ||
991 | } | 1023 | } |
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index c1097c798900..6d18157dc32c 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c | |||
@@ -37,34 +37,32 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, | |||
37 | { | 37 | { |
38 | const struct ipv6hdr *iph = skb_gro_network_header(skb); | 38 | const struct ipv6hdr *iph = skb_gro_network_header(skb); |
39 | __wsum wsum; | 39 | __wsum wsum; |
40 | __sum16 sum; | 40 | |
41 | /* Don't bother verifying checksum if we're going to flush anyway. */ | ||
42 | if (NAPI_GRO_CB(skb)->flush) | ||
43 | goto skip_csum; | ||
44 | |||
45 | wsum = skb->csum; | ||
41 | 46 | ||
42 | switch (skb->ip_summed) { | 47 | switch (skb->ip_summed) { |
48 | case CHECKSUM_NONE: | ||
49 | wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), | ||
50 | wsum); | ||
51 | |||
52 | /* fall through */ | ||
53 | |||
43 | case CHECKSUM_COMPLETE: | 54 | case CHECKSUM_COMPLETE: |
44 | if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr, | 55 | if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr, |
45 | skb->csum)) { | 56 | wsum)) { |
46 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 57 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
47 | break; | 58 | break; |
48 | } | 59 | } |
49 | flush: | 60 | |
50 | NAPI_GRO_CB(skb)->flush = 1; | 61 | NAPI_GRO_CB(skb)->flush = 1; |
51 | return NULL; | 62 | return NULL; |
52 | |||
53 | case CHECKSUM_NONE: | ||
54 | wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr, | ||
55 | skb_gro_len(skb), | ||
56 | IPPROTO_TCP, 0)); | ||
57 | sum = csum_fold(skb_checksum(skb, | ||
58 | skb_gro_offset(skb), | ||
59 | skb_gro_len(skb), | ||
60 | wsum)); | ||
61 | if (sum) | ||
62 | goto flush; | ||
63 | |||
64 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
65 | break; | ||
66 | } | 63 | } |
67 | 64 | ||
65 | skip_csum: | ||
68 | return tcp_gro_receive(head, skb); | 66 | return tcp_gro_receive(head, skb); |
69 | } | 67 | } |
70 | 68 | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 81eb8cf8389b..bcd5699313c3 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -393,10 +393,10 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
393 | bool slow; | 393 | bool slow; |
394 | 394 | ||
395 | if (flags & MSG_ERRQUEUE) | 395 | if (flags & MSG_ERRQUEUE) |
396 | return ipv6_recv_error(sk, msg, len); | 396 | return ipv6_recv_error(sk, msg, len, addr_len); |
397 | 397 | ||
398 | if (np->rxpmtu && np->rxopt.bits.rxpmtu) | 398 | if (np->rxpmtu && np->rxopt.bits.rxpmtu) |
399 | return ipv6_recv_rxpmtu(sk, msg, len); | 399 | return ipv6_recv_rxpmtu(sk, msg, len, addr_len); |
400 | 400 | ||
401 | try_again: | 401 | try_again: |
402 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), | 402 | skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index cfd65304be60..d9b437e55007 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -665,7 +665,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
665 | *addr_len = sizeof(*lsa); | 665 | *addr_len = sizeof(*lsa); |
666 | 666 | ||
667 | if (flags & MSG_ERRQUEUE) | 667 | if (flags & MSG_ERRQUEUE) |
668 | return ipv6_recv_error(sk, msg, len); | 668 | return ipv6_recv_error(sk, msg, len, addr_len); |
669 | 669 | ||
670 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 670 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
671 | if (!skb) | 671 | if (!skb) |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 4518a57aa5fe..713671ae45af 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -74,9 +74,12 @@ static struct list_head family_ht[GENL_FAM_TAB_SIZE]; | |||
74 | * Bit 17 is marked as already used since the VFS quota code | 74 | * Bit 17 is marked as already used since the VFS quota code |
75 | * also abused this API and relied on family == group ID, we | 75 | * also abused this API and relied on family == group ID, we |
76 | * cater to that by giving it a static family and group ID. | 76 | * cater to that by giving it a static family and group ID. |
77 | * Bit 18 is marked as already used since the PMCRAID driver | ||
78 | * did the same thing as the VFS quota code (maybe copied?) | ||
77 | */ | 79 | */ |
78 | static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) | | 80 | static unsigned long mc_group_start = 0x3 | BIT(GENL_ID_CTRL) | |
79 | BIT(GENL_ID_VFS_DQUOT); | 81 | BIT(GENL_ID_VFS_DQUOT) | |
82 | BIT(GENL_ID_PMCRAID); | ||
80 | static unsigned long *mc_groups = &mc_group_start; | 83 | static unsigned long *mc_groups = &mc_group_start; |
81 | static unsigned long mc_groups_longs = 1; | 84 | static unsigned long mc_groups_longs = 1; |
82 | 85 | ||
@@ -139,6 +142,7 @@ static u16 genl_generate_id(void) | |||
139 | 142 | ||
140 | for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) { | 143 | for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) { |
141 | if (id_gen_idx != GENL_ID_VFS_DQUOT && | 144 | if (id_gen_idx != GENL_ID_VFS_DQUOT && |
145 | id_gen_idx != GENL_ID_PMCRAID && | ||
142 | !genl_family_find_byid(id_gen_idx)) | 146 | !genl_family_find_byid(id_gen_idx)) |
143 | return id_gen_idx; | 147 | return id_gen_idx; |
144 | if (++id_gen_idx > GENL_MAX_ID) | 148 | if (++id_gen_idx > GENL_MAX_ID) |
@@ -214,7 +218,7 @@ static int genl_validate_assign_mc_groups(struct genl_family *family) | |||
214 | { | 218 | { |
215 | int first_id; | 219 | int first_id; |
216 | int n_groups = family->n_mcgrps; | 220 | int n_groups = family->n_mcgrps; |
217 | int err, i; | 221 | int err = 0, i; |
218 | bool groups_allocated = false; | 222 | bool groups_allocated = false; |
219 | 223 | ||
220 | if (!n_groups) | 224 | if (!n_groups) |
@@ -236,9 +240,12 @@ static int genl_validate_assign_mc_groups(struct genl_family *family) | |||
236 | } else if (strcmp(family->name, "NET_DM") == 0) { | 240 | } else if (strcmp(family->name, "NET_DM") == 0) { |
237 | first_id = 1; | 241 | first_id = 1; |
238 | BUG_ON(n_groups != 1); | 242 | BUG_ON(n_groups != 1); |
239 | } else if (strcmp(family->name, "VFS_DQUOT") == 0) { | 243 | } else if (family->id == GENL_ID_VFS_DQUOT) { |
240 | first_id = GENL_ID_VFS_DQUOT; | 244 | first_id = GENL_ID_VFS_DQUOT; |
241 | BUG_ON(n_groups != 1); | 245 | BUG_ON(n_groups != 1); |
246 | } else if (family->id == GENL_ID_PMCRAID) { | ||
247 | first_id = GENL_ID_PMCRAID; | ||
248 | BUG_ON(n_groups != 1); | ||
242 | } else { | 249 | } else { |
243 | groups_allocated = true; | 250 | groups_allocated = true; |
244 | err = genl_allocate_reserve_groups(n_groups, &first_id); | 251 | err = genl_allocate_reserve_groups(n_groups, &first_id); |
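With GENL_ID_PMCRAID reserved above, the pmcraid family keeps its historical numeric ID while still going through the normal group-assignment path. The sketch below shows how such a family is registered and how events reach its single multicast group, which is now addressed by its index within the family rather than by the raw family ID; error handling is elided and the function name is illustrative.

static int example_register_and_notify(struct sk_buff *skb)
{
	int err;

	err = genl_register_family(&pmcraid_event_family);
	if (err)
		return err;

	/* the group is identified by its index within the family (0 here),
	 * not by the family ID as the old code assumed */
	return genlmsg_multicast(&pmcraid_event_family, skb, 0, 0, GFP_ATOMIC);
}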
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ac27c86ef6d1..ba2548bd85bf 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -439,9 +439,9 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po, | |||
439 | 439 | ||
440 | pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc; | 440 | pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc; |
441 | 441 | ||
442 | spin_lock(&rb_queue->lock); | 442 | spin_lock_bh(&rb_queue->lock); |
443 | pkc->delete_blk_timer = 1; | 443 | pkc->delete_blk_timer = 1; |
444 | spin_unlock(&rb_queue->lock); | 444 | spin_unlock_bh(&rb_queue->lock); |
445 | 445 | ||
446 | prb_del_retire_blk_timer(pkc); | 446 | prb_del_retire_blk_timer(pkc); |
447 | } | 447 | } |
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 75c94e59a3bd..bccd52b36e97 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -215,10 +215,10 @@ static bool loss_4state(struct netem_sched_data *q) | |||
215 | if (rnd < clg->a4) { | 215 | if (rnd < clg->a4) { |
216 | clg->state = 4; | 216 | clg->state = 4; |
217 | return true; | 217 | return true; |
218 | } else if (clg->a4 < rnd && rnd < clg->a1) { | 218 | } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) { |
219 | clg->state = 3; | 219 | clg->state = 3; |
220 | return true; | 220 | return true; |
221 | } else if (clg->a1 < rnd) | 221 | } else if (clg->a1 + clg->a4 < rnd) |
222 | clg->state = 1; | 222 | clg->state = 1; |
223 | 223 | ||
224 | break; | 224 | break; |
@@ -268,10 +268,11 @@ static bool loss_gilb_ell(struct netem_sched_data *q) | |||
268 | clg->state = 2; | 268 | clg->state = 2; |
269 | if (net_random() < clg->a4) | 269 | if (net_random() < clg->a4) |
270 | return true; | 270 | return true; |
271 | break; | ||
271 | case 2: | 272 | case 2: |
272 | if (net_random() < clg->a2) | 273 | if (net_random() < clg->a2) |
273 | clg->state = 1; | 274 | clg->state = 1; |
274 | if (clg->a3 > net_random()) | 275 | if (net_random() > clg->a3) |
275 | return true; | 276 | return true; |
276 | } | 277 | } |
277 | 278 | ||
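
The Gilbert-Elliott hunk fixes two separate bugs: state 1 fell through into state 2 because of a missing break, and the bad-state loss test was inverted, dropping with probability a3 instead of 1 - a3 (a3 being, roughly, the chance of a packet getting through while in the bad state). A hedged userspace sketch of the corrected two-state model, again with a scaled rand() standing in for net_random():

#include <stdio.h>
#include <stdlib.h>

/* Two-state Gilbert-Elliott channel.  a1: good->bad, a2: bad->good,
 * a3: probability of successful transmission in the bad state,
 * a4: loss probability in the good state.  All in [0,1) here. */
struct ge_model {
	int state;			/* 1 = good, 2 = bad */
	double a1, a2, a3, a4;
};

static double frand(void)
{
	return (double)rand() / RAND_MAX;
}

/* Returns 1 when the packet should be dropped. */
static int ge_step(struct ge_model *m)
{
	switch (m->state) {
	case 1:
		if (frand() < m->a1)
			m->state = 2;
		if (frand() < m->a4)
			return 1;
		break;			/* the missing break: no fall-through */
	case 2:
		if (frand() < m->a2)
			m->state = 1;
		if (frand() > m->a3)	/* drop with probability 1 - a3 */
			return 1;
	}
	return 0;
}

int main(void)
{
	struct ge_model m = { .state = 1,
			      .a1 = 0.01, .a2 = 0.2, .a3 = 0.7, .a4 = 0.001 };
	int lost = 0, i;

	for (i = 0; i < 1000000; i++)
		lost += ge_step(&m);
	printf("overall loss rate ~ %.4f\n", lost / 1000000.0);
	return 0;
}
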
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 68f98595819c..a6090051c5db 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <net/netlink.h> | 21 | #include <net/netlink.h> |
22 | #include <net/sch_generic.h> | 22 | #include <net/sch_generic.h> |
23 | #include <net/pkt_sched.h> | 23 | #include <net/pkt_sched.h> |
24 | #include <net/tcp.h> | ||
24 | 25 | ||
25 | 26 | ||
26 | /* Simple Token Bucket Filter. | 27 | /* Simple Token Bucket Filter. |
@@ -117,6 +118,22 @@ struct tbf_sched_data { | |||
117 | }; | 118 | }; |
118 | 119 | ||
119 | 120 | ||
121 | /* | ||
122 | * Return length of individual segments of a gso packet, | ||
123 | * including all headers (MAC, IP, TCP/UDP) | ||
124 | */ | ||
125 | static unsigned int skb_gso_seglen(const struct sk_buff *skb) | ||
126 | { | ||
127 | unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); | ||
128 | const struct skb_shared_info *shinfo = skb_shinfo(skb); | ||
129 | |||
130 | if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) | ||
131 | hdr_len += tcp_hdrlen(skb); | ||
132 | else | ||
133 | hdr_len += sizeof(struct udphdr); | ||
134 | return hdr_len + shinfo->gso_size; | ||
135 | } | ||
136 | |||
120 | /* GSO packet is too big, segment it so that tbf can transmit | 137 | /* GSO packet is too big, segment it so that tbf can transmit |
121 | * each segment in time | 138 | * each segment in time |
122 | */ | 139 | */ |
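
The new skb_gso_seglen() helper estimates the wire length of each segment a GSO packet will produce: the MAC and IP header bytes in front of the transport header, plus the TCP header (or a UDP header for non-TCP GSO), plus gso_size bytes of payload. A back-of-the-envelope sketch of the same arithmetic, with plain integers standing in for the skb fields:

#include <stdio.h>

/* Hypothetical stand-ins for the skb fields used by skb_gso_seglen(). */
struct fake_gso_pkt {
	unsigned int mac_to_transport;	/* MAC + IP header bytes */
	unsigned int transport_hdr;	/* TCP header, or a UDP header */
	unsigned int gso_size;		/* payload bytes per segment */
};

static unsigned int gso_seglen(const struct fake_gso_pkt *p)
{
	return p->mac_to_transport + p->transport_hdr + p->gso_size;
}

int main(void)
{
	/* Ethernet (14) + IPv4 (20) + TCP (20) + 1448-byte MSS = 1502. */
	struct fake_gso_pkt p = { 14 + 20, 20, 1448 };

	printf("per-segment length: %u bytes\n", gso_seglen(&p));
	return 0;
}
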
@@ -136,12 +153,8 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch) | |||
136 | while (segs) { | 153 | while (segs) { |
137 | nskb = segs->next; | 154 | nskb = segs->next; |
138 | segs->next = NULL; | 155 | segs->next = NULL; |
139 | if (likely(segs->len <= q->max_size)) { | 156 | qdisc_skb_cb(segs)->pkt_len = segs->len; |
140 | qdisc_skb_cb(segs)->pkt_len = segs->len; | 157 | ret = qdisc_enqueue(segs, q->qdisc); |
141 | ret = qdisc_enqueue(segs, q->qdisc); | ||
142 | } else { | ||
143 | ret = qdisc_reshape_fail(skb, sch); | ||
144 | } | ||
145 | if (ret != NET_XMIT_SUCCESS) { | 158 | if (ret != NET_XMIT_SUCCESS) { |
146 | if (net_xmit_drop_count(ret)) | 159 | if (net_xmit_drop_count(ret)) |
147 | sch->qstats.drops++; | 160 | sch->qstats.drops++; |
@@ -163,7 +176,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
163 | int ret; | 176 | int ret; |
164 | 177 | ||
165 | if (qdisc_pkt_len(skb) > q->max_size) { | 178 | if (qdisc_pkt_len(skb) > q->max_size) { |
166 | if (skb_is_gso(skb)) | 179 | if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size) |
167 | return tbf_segment(skb, sch); | 180 | return tbf_segment(skb, sch); |
168 | return qdisc_reshape_fail(skb, sch); | 181 | return qdisc_reshape_fail(skb, sch); |
169 | } | 182 | } |
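
With that helper, tbf_enqueue() now segments an oversized GSO packet only when each resulting segment would itself fit under max_size; otherwise it falls back to qdisc_reshape_fail() as before, which is also why the earlier hunk could drop the per-segment length check inside tbf_segment(). A toy sketch of the decision (the names and burst limit are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define MAX_SIZE 1600			/* hypothetical token bucket burst limit */

/* Hypothetical packet descriptor for this sketch. */
struct pkt {
	unsigned int len;		/* total length */
	bool is_gso;
	unsigned int seg_len;		/* per-segment length if is_gso */
};

enum verdict { ENQUEUE, SEGMENT, DROP };

static enum verdict tbf_decide(const struct pkt *p)
{
	if (p->len > MAX_SIZE) {
		/* Segment only if every segment would fit on its own. */
		if (p->is_gso && p->seg_len <= MAX_SIZE)
			return SEGMENT;
		return DROP;		/* qdisc_reshape_fail() in the kernel */
	}
	return ENQUEUE;
}

int main(void)
{
	struct pkt big_gso   = { 65000, true, 1502 };
	struct pkt big_plain = { 9000, false, 0 };
	struct pkt small     = { 1500, false, 0 };

	printf("%d %d %d\n", tbf_decide(&big_gso),
	       tbf_decide(&big_plain), tbf_decide(&small));
	return 0;
}
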
@@ -319,6 +332,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt) | |||
319 | if (max_size < 0) | 332 | if (max_size < 0) |
320 | goto done; | 333 | goto done; |
321 | 334 | ||
335 | if (max_size < psched_mtu(qdisc_dev(sch))) | ||
336 | pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n", | ||
337 | max_size, qdisc_dev(sch)->name, | ||
338 | psched_mtu(qdisc_dev(sch))); | ||
339 | |||
322 | if (q->qdisc != &noop_qdisc) { | 340 | if (q->qdisc != &noop_qdisc) { |
323 | err = fifo_set_limit(q->qdisc, qopt->limit); | 341 | err = fifo_set_limit(q->qdisc, qopt->limit); |
324 | if (err) | 342 | if (err) |
diff --git a/net/sctp/output.c b/net/sctp/output.c index e650978daf27..0e2644d0a773 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -474,10 +474,11 @@ int sctp_packet_transmit(struct sctp_packet *packet) | |||
474 | * for a given destination transport address. | 474 | * for a given destination transport address. |
475 | */ | 475 | */ |
476 | 476 | ||
477 | if (!tp->rto_pending) { | 477 | if (!chunk->resent && !tp->rto_pending) { |
478 | chunk->rtt_in_progress = 1; | 478 | chunk->rtt_in_progress = 1; |
479 | tp->rto_pending = 1; | 479 | tp->rto_pending = 1; |
480 | } | 480 | } |
481 | |||
481 | has_data = 1; | 482 | has_data = 1; |
482 | } | 483 | } |
483 | 484 | ||
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 94df75877869..f51ba985a36e 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -446,6 +446,8 @@ void sctp_retransmit_mark(struct sctp_outq *q, | |||
446 | transport->rto_pending = 0; | 446 | transport->rto_pending = 0; |
447 | } | 447 | } |
448 | 448 | ||
449 | chunk->resent = 1; | ||
450 | |||
449 | /* Move the chunk to the retransmit queue. The chunks | 451 | /* Move the chunk to the retransmit queue. The chunks |
450 | * on the retransmit queue are always kept in order. | 452 | * on the retransmit queue are always kept in order. |
451 | */ | 453 | */ |
@@ -1375,6 +1377,7 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1375 | * instance). | 1377 | * instance). |
1376 | */ | 1378 | */ |
1377 | if (!tchunk->tsn_gap_acked && | 1379 | if (!tchunk->tsn_gap_acked && |
1380 | !tchunk->resent && | ||
1378 | tchunk->rtt_in_progress) { | 1381 | tchunk->rtt_in_progress) { |
1379 | tchunk->rtt_in_progress = 0; | 1382 | tchunk->rtt_in_progress = 0; |
1380 | rtt = jiffies - tchunk->sent_at; | 1383 | rtt = jiffies - tchunk->sent_at; |
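
Taken together, the SCTP hunks apply Karn's rule to RTT sampling: once a chunk has been marked for retransmission (chunk->resent), it can neither start a new measurement when it is sent again nor contribute a sample when it is finally SACKed, because the acknowledgement is ambiguous about which transmission it answers. A userspace sketch of that bookkeeping, with toy structures in place of sctp_chunk and sctp_transport:

#include <stdbool.h>
#include <stdio.h>

struct transport { bool rto_pending; };

struct chunk {
	bool resent;			/* ever queued for retransmission */
	bool rtt_in_progress;		/* carries the current RTT sample */
	long sent_at;			/* timestamp of the last transmission */
};

/* Transmit (or retransmit): only a fresh chunk may start a sample. */
static void transmit(struct transport *t, struct chunk *c, long now)
{
	c->sent_at = now;
	if (!c->resent && !t->rto_pending) {
		c->rtt_in_progress = true;
		t->rto_pending = true;
	}
}

/* Mark for retransmission: cancel any sample the chunk was carrying. */
static void mark_retransmit(struct transport *t, struct chunk *c)
{
	if (c->rtt_in_progress) {
		c->rtt_in_progress = false;
		t->rto_pending = false;
	}
	c->resent = true;
}

/* SACK arrival: use the sample only for never-retransmitted chunks. */
static void acked(struct transport *t, struct chunk *c, long now)
{
	if (!c->resent && c->rtt_in_progress) {
		printf("RTT sample: %ld\n", now - c->sent_at);
		c->rtt_in_progress = false;
		t->rto_pending = false;
	}
}

int main(void)
{
	struct transport t = { false };
	struct chunk a = {0}, b = {0};

	transmit(&t, &a, 0);		/* a starts a measurement */
	mark_retransmit(&t, &a);	/* loss suspected: sample cancelled */
	transmit(&t, &a, 10);		/* retransmission: no new sample */
	acked(&t, &a, 15);		/* ambiguous ACK: nothing printed */

	transmit(&t, &b, 20);		/* a fresh chunk starts a clean sample */
	acked(&t, &b, 27);		/* prints "RTT sample: 7" */
	return 0;
}
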
@@ -1391,7 +1394,8 @@ static void sctp_check_transmitted(struct sctp_outq *q, | |||
1391 | */ | 1394 | */ |
1392 | if (!tchunk->tsn_gap_acked) { | 1395 | if (!tchunk->tsn_gap_acked) { |
1393 | tchunk->tsn_gap_acked = 1; | 1396 | tchunk->tsn_gap_acked = 1; |
1394 | *highest_new_tsn_in_sack = tsn; | 1397 | if (TSN_lt(*highest_new_tsn_in_sack, tsn)) |
1398 | *highest_new_tsn_in_sack = tsn; | ||
1395 | bytes_acked += sctp_data_size(tchunk); | 1399 | bytes_acked += sctp_data_size(tchunk); |
1396 | if (!tchunk->transport) | 1400 | if (!tchunk->transport) |
1397 | migrate_bytes += sctp_data_size(tchunk); | 1401 | migrate_bytes += sctp_data_size(tchunk); |
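
The last outqueue.c hunk keeps highest_new_tsn_in_sack from moving backwards: it is only advanced when the newly acked TSN is genuinely greater, and "greater" is decided with TSN_lt(), i.e. wraparound-safe serial-number arithmetic on the 32-bit TSN space. A minimal sketch of that comparison and update using the usual signed-difference idiom:

#include <stdint.h>
#include <stdio.h>

/* Serial-number comparison for 32-bit TSNs (wraparound-safe):
 * a < b iff the signed difference (a - b) is negative. */
static int tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* Advance the running maximum only if tsn is genuinely newer. */
static void note_newly_acked(uint32_t *highest_new_tsn, uint32_t tsn)
{
	if (tsn_lt(*highest_new_tsn, tsn))
		*highest_new_tsn = tsn;
}

int main(void)
{
	uint32_t highest = 0xfffffff0u;		/* close to the wrap point */

	note_newly_acked(&highest, 0xffffffffu);
	note_newly_acked(&highest, 5);		/* wrapped, still "greater" */
	note_newly_acked(&highest, 0xfffffff8u);/* older: must not regress */
	printf("highest new TSN: %u\n", highest);/* prints 5 */
	return 0;
}
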
diff --git a/net/socket.c b/net/socket.c index 0b18693f2be6..e83c416708af 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -1973,7 +1973,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, | |||
1973 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) | 1973 | if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) |
1974 | return -EFAULT; | 1974 | return -EFAULT; |
1975 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) | 1975 | if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) |
1976 | return -EINVAL; | 1976 | kmsg->msg_namelen = sizeof(struct sockaddr_storage); |
1977 | return 0; | 1977 | return 0; |
1978 | } | 1978 | } |
1979 | 1979 | ||
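
The final hunk makes copy_msghdr_from_user() clamp an oversized msg_namelen to sizeof(struct sockaddr_storage) instead of failing the whole call with -EINVAL; the kernel never copies back more than that many address bytes anyway, and the hard error had reportedly broken existing callers that pass a large buffer length. A minimal sketch of the clamp, with a toy msghdr:

#include <stdio.h>
#include <sys/socket.h>		/* struct sockaddr_storage */

/* Toy copy of the two fields the check looks at. */
struct toy_msghdr {
	void  *msg_name;
	size_t msg_namelen;
};

/* Clamp instead of rejecting: an address can never exceed
 * sizeof(struct sockaddr_storage), so a larger claimed length is harmless. */
static int sanitize_msghdr(struct toy_msghdr *m)
{
	if (m->msg_namelen > sizeof(struct sockaddr_storage))
		m->msg_namelen = sizeof(struct sockaddr_storage);
	return 0;
}

int main(void)
{
	struct toy_msghdr m = { .msg_name = NULL, .msg_namelen = 4096 };

	sanitize_msghdr(&m);
	printf("clamped msg_namelen: %zu\n", m.msg_namelen);
	return 0;
}
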