author		David S. Miller <davem@davemloft.net>	2015-04-02 16:16:53 -0400
committer	David S. Miller <davem@davemloft.net>	2015-04-02 16:16:53 -0400
commit		9f0d34bc344889c2e6c593bd949d7ab821f0f4a5 (patch)
tree		e5bfc776a09315afa4dbcae97ac04f2cca239c96 /drivers/net
parent		e4a924f5768c55002c02ceba9b9f86824c35f956 (diff)
parent		0a4812798fae4f6bfcaab51e31b3898ff5ea3108 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/usb/asix_common.c
drivers/net/usb/sr9800.c
drivers/net/usb/usbnet.c
include/linux/usb/usbnet.h
net/ipv4/tcp_ipv4.c
net/ipv6/tcp_ipv6.c
The TCP conflicts were overlapping changes. In 'net' we added a
READ_ONCE() to the socket cached RX route read, whilst in 'net-next'
Eric Dumazet touched the surrounding code dealing with how mini
sockets are handled.
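A rough sketch of the 'net' side of that TCP conflict (not the exact tcp_ipv4.c/tcp_ipv6.c hunk; the field and validity check follow the usual sk_rx_dst users): READ_ONCE() turns the lockless read of the cached RX route into a single, non-torn load that the compiler cannot re-issue:

	/* sketch only - illustrates the READ_ONCE() pattern referred to above */
	struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

	if (dst && !dst->ops->check(dst, 0)) {
		dst_release(dst);
		sk->sk_rx_dst = NULL;
	}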
With USB, it's a case of the same bug fix first going into 'net-next'
and then being cherry-picked back into 'net'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
48 files changed, 661 insertions, 344 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 7b4684ccdb3f..78dde56ae6e6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3881,7 +3881,8 @@ static inline int bond_slave_override(struct bonding *bond,
 	/* Find out if any slaves have the same mapping as this skb. */
 	bond_for_each_slave_rcu(bond, slave, iter) {
 		if (slave->queue_id == skb->queue_mapping) {
-			if (bond_slave_can_tx(slave)) {
+			if (bond_slave_is_up(slave) &&
+			    slave->link == BOND_LINK_UP) {
 				bond_dev_queue_xmit(bond, skb, slave->dev);
 				return 0;
 			}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 80c46ad4cee4..ad0a7e8c2c2b 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -592,13 +592,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
 		rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
 			   CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
 		new_state = max(tx_state, rx_state);
-	} else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) {
+	} else {
 		__flexcan_get_berr_counter(dev, &bec);
-		new_state = CAN_STATE_ERROR_PASSIVE;
+		new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
+			    CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
 		rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
 		tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
-	} else {
-		new_state = CAN_STATE_BUS_OFF;
 	}
 
 	/* state hasn't changed */
@@ -1158,12 +1157,19 @@ static int flexcan_probe(struct platform_device *pdev)
 	const struct flexcan_devtype_data *devtype_data;
 	struct net_device *dev;
 	struct flexcan_priv *priv;
+	struct regulator *reg_xceiver;
 	struct resource *mem;
 	struct clk *clk_ipg = NULL, *clk_per = NULL;
 	void __iomem *base;
 	int err, irq;
 	u32 clock_freq = 0;
 
+	reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+	if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+	else if (IS_ERR(reg_xceiver))
+		reg_xceiver = NULL;
+
 	if (pdev->dev.of_node)
 		of_property_read_u32(pdev->dev.of_node,
 				     "clock-frequency", &clock_freq);
@@ -1224,9 +1230,7 @@ static int flexcan_probe(struct platform_device *pdev)
 	priv->pdata = dev_get_platdata(&pdev->dev);
 	priv->devtype_data = devtype_data;
 
-	priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
-	if (IS_ERR(priv->reg_xceiver))
-		priv->reg_xceiver = NULL;
+	priv->reg_xceiver = reg_xceiver;
 
 	netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
 
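The flexcan_probe() change above is the standard deferred-probe ordering for an optional supply: request the transceiver regulator before any other setup, propagate -EPROBE_DEFER so the probe is retried once the regulator driver appears, and treat any other error as "no transceiver present". A minimal sketch of the pattern (hypothetical example_probe(), not the full flexcan_probe()):

	static int example_probe(struct platform_device *pdev)
	{
		struct regulator *reg_xceiver;

		/* request the optional supply first, before allocating anything */
		reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
		if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* regulator not ready yet: retry probe later */
		else if (IS_ERR(reg_xceiver))
			reg_xceiver = NULL;	/* supply absent: run without transceiver control */

		/* ... allocate the candev and register it as before ... */
		return 0;
	}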
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 009acc8641fc..8b4d3e6875eb 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -901,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
 	}
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
 	init_usb_anchor(&dev->rx_submitted);
 
 	atomic_set(&dev->active_channels, 0);
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index d269ae0b072a..4643914859b2 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -25,7 +25,6 @@
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 
-#define MAX_TX_URBS 16
 #define MAX_RX_URBS 4
 #define START_TIMEOUT 1000 /* msecs */
 #define STOP_TIMEOUT 1000 /* msecs */
@@ -443,6 +442,7 @@ struct kvaser_usb_error_summary {
 	};
 };
 
+/* Context for an outstanding, not yet ACKed, transmission */
 struct kvaser_usb_tx_urb_context {
 	struct kvaser_usb_net_priv *priv;
 	u32 echo_index;
@@ -456,8 +456,13 @@ struct kvaser_usb {
 	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
 	struct usb_anchor rx_submitted;
 
+	/* @max_tx_urbs: Firmware-reported maximum number of oustanding,
+	 * not yet ACKed, transmissions on this device. This value is
+	 * also used as a sentinel for marking free tx contexts.
+	 */
 	u32 fw_version;
 	unsigned int nchannels;
+	unsigned int max_tx_urbs;
 	enum kvaser_usb_family family;
 
 	bool rxinitdone;
@@ -467,19 +472,18 @@
 
 struct kvaser_usb_net_priv {
 	struct can_priv can;
-
-	spinlock_t tx_contexts_lock;
-	int active_tx_contexts;
-	struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
-
-	struct usb_anchor tx_submitted;
-	struct completion start_comp, stop_comp;
+	struct can_berr_counter bec;
 
 	struct kvaser_usb *dev;
 	struct net_device *netdev;
 	int channel;
 
-	struct can_berr_counter bec;
+	struct completion start_comp, stop_comp;
+	struct usb_anchor tx_submitted;
+
+	spinlock_t tx_contexts_lock;
+	int active_tx_contexts;
+	struct kvaser_usb_tx_urb_context tx_contexts[];
 };
 
 static const struct usb_device_id kvaser_usb_table[] = {
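The struct rework above turns the fixed tx_contexts[MAX_TX_URBS] array into a C99 flexible array member, so each device can size it from the firmware-reported max_tx_urbs. A hedged sketch of how the two halves fit together (hypothetical example_* names; the real allocation is the alloc_candev() call in kvaser_usb_init_one() further down):

	struct example_priv {				/* stands in for kvaser_usb_net_priv */
		unsigned int max_tx_urbs;
		struct kvaser_usb_tx_urb_context tx_contexts[];	/* sized at alloc time */
	};

	static struct net_device *example_alloc(unsigned int max_tx_urbs)
	{
		struct example_priv *priv;
		struct net_device *netdev;

		/* one allocation carries the private struct plus max_tx_urbs contexts */
		netdev = alloc_candev(sizeof(*priv) +
				      max_tx_urbs * sizeof(priv->tx_contexts[0]),
				      max_tx_urbs);
		if (!netdev)
			return NULL;

		priv = netdev_priv(netdev);
		priv->max_tx_urbs = max_tx_urbs;
		return netdev;
	}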
@@ -592,8 +596,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
 		 * for further details.
 		 */
 		if (tmp->len == 0) {
-			pos = round_up(pos,
-				       dev->bulk_in->wMaxPacketSize);
+			pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+							wMaxPacketSize));
 			continue;
 		}
 
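wMaxPacketSize is a little-endian __le16 field in the USB endpoint descriptor, so it has to pass through le16_to_cpu() before being used as a CPU integer; the same fix repeats in kvaser_usb_read_bulk_callback() below. A small stand-alone sketch of the corrected step (hypothetical helper name):

	static int example_align_to_packet(const struct usb_endpoint_descriptor *ep, int pos)
	{
		/* convert the descriptor field to CPU byte order before doing arithmetic */
		return round_up(pos, le16_to_cpu(ep->wMaxPacketSize));
	}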
@@ -657,9 +661,13 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
 	switch (dev->family) {
 	case KVASER_LEAF:
 		dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
+		dev->max_tx_urbs =
+			le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
 		break;
 	case KVASER_USBCAN:
 		dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
+		dev->max_tx_urbs =
+			le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
 		break;
 	}
 
@@ -715,7 +723,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
 
 	stats = &priv->netdev->stats;
 
-	context = &priv->tx_contexts[tid % MAX_TX_URBS];
+	context = &priv->tx_contexts[tid % dev->max_tx_urbs];
 
 	/* Sometimes the state change doesn't come after a bus-off event */
 	if (priv->can.restart_ms &&
@@ -744,7 +752,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
 	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
 	can_get_echo_skb(priv->netdev, context->echo_index);
-	context->echo_index = MAX_TX_URBS;
+	context->echo_index = dev->max_tx_urbs;
 	--priv->active_tx_contexts;
 	netif_wake_queue(priv->netdev);
 
@@ -1329,7 +1337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
 		 * number of events in case of a heavy rx load on the bus.
 		 */
 		if (msg->len == 0) {
-			pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
+			pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+							wMaxPacketSize));
 			continue;
 		}
 
@@ -1512,11 +1521,13 @@ error:
 
 static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
 {
-	int i;
+	int i, max_tx_urbs;
+
+	max_tx_urbs = priv->dev->max_tx_urbs;
 
 	priv->active_tx_contexts = 0;
-	for (i = 0; i < MAX_TX_URBS; i++)
-		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+	for (i = 0; i < max_tx_urbs; i++)
+		priv->tx_contexts[i].echo_index = max_tx_urbs;
 }
 
 /* This method might sleep. Do not call it in the atomic context
@@ -1702,14 +1713,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 		*msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
 
 	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
-	for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
-		if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+	for (i = 0; i < dev->max_tx_urbs; i++) {
+		if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
 			context = &priv->tx_contexts[i];
 
 			context->echo_index = i;
 			can_put_echo_skb(skb, netdev, context->echo_index);
 			++priv->active_tx_contexts;
-			if (priv->active_tx_contexts >= MAX_TX_URBS)
+			if (priv->active_tx_contexts >= dev->max_tx_urbs)
 				netif_stop_queue(netdev);
 
 			break;
@@ -1743,7 +1754,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 		spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
 		can_free_echo_skb(netdev, context->echo_index);
-		context->echo_index = MAX_TX_URBS;
+		context->echo_index = dev->max_tx_urbs;
 		--priv->active_tx_contexts;
 		netif_wake_queue(netdev);
 
@@ -1881,7 +1892,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
 	if (err)
 		return err;
 
-	netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+	netdev = alloc_candev(sizeof(*priv) +
+			      dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+			      dev->max_tx_urbs);
 	if (!netdev) {
 		dev_err(&intf->dev, "Cannot alloc candev\n");
 		return -ENOMEM;
@@ -2009,6 +2022,13 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 		return err;
 	}
 
+	dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+		((dev->fw_version >> 24) & 0xff),
+		((dev->fw_version >> 16) & 0xff),
+		(dev->fw_version & 0xffff));
+
+	dev_dbg(&intf->dev, "Max oustanding tx = %d URBs\n", dev->max_tx_urbs);
+
 	err = kvaser_usb_get_card_info(dev);
 	if (err) {
 		dev_err(&intf->dev,
@@ -2016,11 +2036,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 		return err;
 	}
 
-	dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
-		((dev->fw_version >> 24) & 0xff),
-		((dev->fw_version >> 16) & 0xff),
-		(dev->fw_version & 0xffff));
-
 	for (i = 0; i < dev->nchannels; i++) {
 		err = kvaser_usb_init_one(intf, id, i);
 		if (err) {
diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h
index 1ba7c25002e1..e8fc4952c6b0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_ucan.h
+++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h
@@ -26,8 +26,8 @@
 #define PUCAN_CMD_FILTER_STD 0x008
 #define PUCAN_CMD_TX_ABORT 0x009
 #define PUCAN_CMD_WR_ERR_CNT 0x00a
-#define PUCAN_CMD_RX_FRAME_ENABLE 0x00b
-#define PUCAN_CMD_RX_FRAME_DISABLE 0x00c
+#define PUCAN_CMD_SET_EN_OPTION 0x00b
+#define PUCAN_CMD_CLR_DIS_OPTION 0x00c
 #define PUCAN_CMD_END_OF_COLLECTION 0x3ff
 
 /* uCAN received messages list */
@@ -101,14 +101,15 @@ struct __packed pucan_wr_err_cnt {
 	u16 unused;
 };
 
-/* uCAN RX_FRAME_ENABLE command fields */
-#define PUCAN_FLTEXT_ERROR 0x0001
-#define PUCAN_FLTEXT_BUSLOAD 0x0002
+/* uCAN SET_EN/CLR_DIS _OPTION command fields */
+#define PUCAN_OPTION_ERROR 0x0001
+#define PUCAN_OPTION_BUSLOAD 0x0002
+#define PUCAN_OPTION_CANDFDISO 0x0004
 
-struct __packed pucan_filter_ext {
+struct __packed pucan_options {
 	__le16 opcode_channel;
 
-	__le16 ext_mask;
+	__le16 options;
 	u32 unused;
 };
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 00481eeb9924..09d14e70abd7 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -110,13 +110,13 @@ struct __packed pcan_ufd_led {
 	u8 unused[5];
 };
 
-/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */
+/* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */
 #define PCAN_UFD_FLTEXT_CALIBRATION 0x8000
 
-struct __packed pcan_ufd_filter_ext {
+struct __packed pcan_ufd_options {
 	__le16 opcode_channel;
 
-	__le16 ext_mask;
+	__le16 ucan_mask;
 	u16 unused;
 	__le16 usb_mask;
 };
@@ -251,6 +251,27 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
 	/* moves the pointer forward */
 	pc += sizeof(struct pucan_wr_err_cnt);
 
+	/* add command to switch from ISO to non-ISO mode, if fw allows it */
+	if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) {
+		struct pucan_options *puo = (struct pucan_options *)pc;
+
+		puo->opcode_channel =
+			(dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ?
+			pucan_cmd_opcode_channel(dev,
+						 PUCAN_CMD_CLR_DIS_OPTION) :
+			pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION);
+
+		puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO);
+
+		/* to be sure that no other extended bits will be taken into
+		 * account
+		 */
+		puo->unused = 0;
+
+		/* moves the pointer forward */
+		pc += sizeof(struct pucan_options);
+	}
+
 	/* next, go back to operational mode */
 	cmd = (struct pucan_command *)pc;
 	cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
@@ -321,21 +342,21 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
 	return pcan_usb_fd_send_cmd(dev, cmd);
 }
 
-/* set/unset notifications filter:
+/* set/unset options
  *
- * onoff	sets(1)/unset(0) notifications
- * mask		each bit defines a kind of notification to set/unset
+ * onoff	set(1)/unset(0) options
+ * mask		each bit defines a kind of options to set/unset
  */
-static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev,
-				      bool onoff, u16 ext_mask, u16 usb_mask)
+static int pcan_usb_fd_set_options(struct peak_usb_device *dev,
+				   bool onoff, u16 ucan_mask, u16 usb_mask)
 {
-	struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev);
+	struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev);
 
 	cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
-					(onoff) ? PUCAN_CMD_RX_FRAME_ENABLE :
-						  PUCAN_CMD_RX_FRAME_DISABLE);
+					(onoff) ? PUCAN_CMD_SET_EN_OPTION :
+						  PUCAN_CMD_CLR_DIS_OPTION);
 
-	cmd->ext_mask = cpu_to_le16(ext_mask);
+	cmd->ucan_mask = cpu_to_le16(ucan_mask);
 	cmd->usb_mask = cpu_to_le16(usb_mask);
 
 	/* send the command */
@@ -770,9 +791,9 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
 					  &pcan_usb_pro_fd);
 
 		/* enable USB calibration messages */
-		err = pcan_usb_fd_set_filter_ext(dev, 1,
-						 PUCAN_FLTEXT_ERROR,
-						 PCAN_UFD_FLTEXT_CALIBRATION);
+		err = pcan_usb_fd_set_options(dev, 1,
+					      PUCAN_OPTION_ERROR,
+					      PCAN_UFD_FLTEXT_CALIBRATION);
 	}
 
 	pdev->usb_if->dev_opened_count++;
@@ -806,9 +827,9 @@ static int pcan_usb_fd_stop(struct peak_usb_device *dev)
 
 	/* turn off special msgs for that interface if no other dev opened */
 	if (pdev->usb_if->dev_opened_count == 1)
-		pcan_usb_fd_set_filter_ext(dev, 0,
-					   PUCAN_FLTEXT_ERROR,
-					   PCAN_UFD_FLTEXT_CALIBRATION);
+		pcan_usb_fd_set_options(dev, 0,
+					PUCAN_OPTION_ERROR,
+					PCAN_UFD_FLTEXT_CALIBRATION);
 	pdev->usb_if->dev_opened_count--;
 
 	return 0;
@@ -860,8 +881,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
 		 pdev->usb_if->fw_info.fw_version[2],
 		 dev->adapter->ctrl_count);
 
-	/* the currently supported hw is non-ISO */
-	dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+	/* check for ability to switch between ISO/non-ISO modes */
+	if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
+		/* firmware >= 2.x supports ISO/non-ISO switching */
+		dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
+	} else {
+		/* firmware < 2.x only supports fixed(!) non-ISO */
+		dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
+	}
 
 	/* tell the hardware the can driver is running */
 	err = pcan_usb_fd_drv_loaded(dev, 1);
@@ -937,9 +964,9 @@ static void pcan_usb_fd_exit(struct peak_usb_device *dev)
 	if (dev->ctrl_idx == 0) {
 		/* turn off calibration message if any device were opened */
 		if (pdev->usb_if->dev_opened_count > 0)
-			pcan_usb_fd_set_filter_ext(dev, 0,
-						   PUCAN_FLTEXT_ERROR,
-						   PCAN_UFD_FLTEXT_CALIBRATION);
+			pcan_usb_fd_set_options(dev, 0,
+						PUCAN_OPTION_ERROR,
+						PCAN_UFD_FLTEXT_CALIBRATION);
 
 		/* tell USB adapter that the driver is being unloaded */
 		pcan_usb_fd_drv_loaded(dev, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 756053c028be..4085c4b31047 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1811,7 +1811,7 @@ struct bnx2x {
 	int stats_state;
 
 	/* used for synchronization of concurrent threads statistics handling */
-	spinlock_t stats_lock;
+	struct mutex stats_lock;
 
 	/* used by dmae command loader */
 	struct dmae_command stats_dmae;
@@ -1935,8 +1935,6 @@ struct bnx2x {
 
 	int fp_array_size;
 	u32 dump_preset_idx;
-	bool stats_started;
-	struct semaphore stats_sema;
 
 	u8 phys_port_id[ETH_ALEN];
 
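The lock changes type because the reworked statistics code sleeps while holding it: bnx2x_stats_safe_exec() in the bnx2x_stats.c hunk below waits with usleep_range() for pending statistics, which is legal under a mutex but not under a spinlock. A hedged sketch of that waiting pattern (hypothetical helper; the real code is in bnx2x_stats_safe_exec()):

	static int example_wait_stats_idle(struct bnx2x *bp)
	{
		int cnt = 10;

		mutex_lock(&bp->stats_lock);	/* sleeping lock, so the sleep below is allowed */
		while (bp->stats_pending && cnt--)
			usleep_range(1000, 2000);
		mutex_unlock(&bp->stats_lock);

		return bp->stats_pending ? -EBUSY : 0;
	}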
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 177cb0e722e7..b9f85fccb419 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -129,8 +129,8 @@ struct bnx2x_mac_vals {
 	u32 xmac_val;
 	u32 emac_addr;
 	u32 emac_val;
-	u32 umac_addr;
-	u32 umac_val;
+	u32 umac_addr[2];
+	u32 umac_val[2];
 	u32 bmac_addr;
 	u32 bmac_val[2];
 };
@@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
 	return 0;
 }
 
+/* previous driver DMAE transaction may have occurred when pre-boot stage ended
+ * and boot began, or when kdump kernel was loaded. Either case would invalidate
+ * the addresses of the transaction, resulting in was-error bit set in the pci
+ * causing all hw-to-host pcie transactions to timeout. If this happened we want
+ * to clear the interrupt which detected this from the pglueb and the was done
+ * bit
+ */
+static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
+{
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+		       1 << BP_ABS_FUNC(bp));
+}
+
 static int bnx2x_init_hw_func(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
@@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 
 	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
 
-	if (!CHIP_IS_E1x(bp))
-		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+	bnx2x_clean_pglue_errors(bp);
 
 	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
 	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
@@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
 	return base + (BP_ABS_FUNC(bp)) * stride;
 }
 
+static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
+					 u8 port, u32 reset_reg,
+					 struct bnx2x_mac_vals *vals)
+{
+	u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+	u32 base_addr;
+
+	if (!(mask & reset_reg))
+		return false;
+
+	BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
+	base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+	vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
+	vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
+	REG_WR(bp, vals->umac_addr[port], 0);
+
+	return true;
+}
+
 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 					struct bnx2x_mac_vals *vals)
 {
@@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 	u8 port = BP_PORT(bp);
 
 	/* reset addresses as they also mark which values were changed */
-	vals->bmac_addr = 0;
-	vals->umac_addr = 0;
-	vals->xmac_addr = 0;
-	vals->emac_addr = 0;
+	memset(vals, 0, sizeof(*vals));
 
 	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
 
@@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 			REG_WR(bp, vals->xmac_addr, 0);
 			mac_stopped = true;
 		}
-		mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
-		if (mask & reset_reg) {
-			BNX2X_DEV_INFO("Disable umac Rx\n");
-			base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
-			vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
-			vals->umac_val = REG_RD(bp, vals->umac_addr);
-			REG_WR(bp, vals->umac_addr, 0);
-			mac_stopped = true;
-		}
+
+		mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
+							    reset_reg, vals);
+		mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
+							    reset_reg, vals);
 	}
 
 	if (mac_stopped)
@@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 	/* Close the MAC Rx to prevent BRB from filling up */
 	bnx2x_prev_unload_close_mac(bp, &mac_vals);
 
-	/* close LLH filters towards the BRB */
+	/* close LLH filters for both ports towards the BRB */
 	bnx2x_set_rx_filter(&bp->link_params, 0);
+	bp->link_params.port ^= 1;
+	bnx2x_set_rx_filter(&bp->link_params, 0);
+	bp->link_params.port ^= 1;
 
 	/* Check if the UNDI driver was previously loaded */
 	if (bnx2x_prev_is_after_undi(bp)) {
@@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 
 	if (mac_vals.xmac_addr)
 		REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
-	if (mac_vals.umac_addr)
-		REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+	if (mac_vals.umac_addr[0])
+		REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
+	if (mac_vals.umac_addr[1])
+		REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
 	if (mac_vals.emac_addr)
 		REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
 	if (mac_vals.bmac_addr) {
@@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 	return bnx2x_prev_mcp_done(bp);
 }
 
-/* previous driver DMAE transaction may have occurred when pre-boot stage ended
- * and boot began, or when kdump kernel was loaded. Either case would invalidate
- * the addresses of the transaction, resulting in was-error bit set in the pci
- * causing all hw-to-host pcie transactions to timeout. If this happened we want
- * to clear the interrupt which detected this from the pglueb and the was done
- * bit
- */
-static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
-{
-	if (!CHIP_IS_E1x(bp)) {
-		u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
-		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
-			DP(BNX2X_MSG_SP,
-			   "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
-			REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
-			       1 << BP_FUNC(bp));
-		}
-	}
-}
-
 static int bnx2x_prev_unload(struct bnx2x *bp)
 {
 	int time_counter = 10;
@@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
 	/* clear hw from errors which may have resulted from an interrupted
 	 * dmae transaction.
 	 */
-	bnx2x_prev_interrupted_dmae(bp);
+	bnx2x_clean_pglue_errors(bp);
 
 	/* Release previously held locks */
 	hw_lock_reg = (BP_FUNC(bp) <= 5) ?
@@ -12044,9 +12054,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 	mutex_init(&bp->port.phy_mutex);
 	mutex_init(&bp->fw_mb_mutex);
 	mutex_init(&bp->drv_info_mutex);
+	mutex_init(&bp->stats_lock);
 	bp->drv_info_mng_owner = false;
-	spin_lock_init(&bp->stats_lock);
-	sema_init(&bp->stats_sema, 1);
 
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -13673,9 +13682,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 	cancel_delayed_work_sync(&bp->sp_task);
 	cancel_delayed_work_sync(&bp->period_task);
 
-	spin_lock_bh(&bp->stats_lock);
+	mutex_lock(&bp->stats_lock);
 	bp->stats_state = STATS_STATE_DISABLED;
-	spin_unlock_bh(&bp->stats_lock);
+	mutex_unlock(&bp->stats_lock);
 
 	bnx2x_save_statistics(bp);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 8638d6c97caa..d95f7b4e19e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 		cookie.vf = vf;
 		cookie.state = VF_ACQUIRED;
-		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+		rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+		if (rc)
+			goto op_err;
 	}
 
 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 612cafb5df53..266b055c2360 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -123,36 +123,28 @@ static void bnx2x_dp_stats(struct bnx2x *bp)
  */
 static void bnx2x_storm_stats_post(struct bnx2x *bp)
 {
-	if (!bp->stats_pending) {
-		int rc;
+	int rc;
 
-		spin_lock_bh(&bp->stats_lock);
-
-		if (bp->stats_pending) {
-			spin_unlock_bh(&bp->stats_lock);
-			return;
-		}
-
-		bp->fw_stats_req->hdr.drv_stats_counter =
-			cpu_to_le16(bp->stats_counter++);
+	if (bp->stats_pending)
+		return;
 
-		DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
-		   le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
+	bp->fw_stats_req->hdr.drv_stats_counter =
+		cpu_to_le16(bp->stats_counter++);
 
-		/* adjust the ramrod to include VF queues statistics */
-		bnx2x_iov_adjust_stats_req(bp);
-		bnx2x_dp_stats(bp);
+	DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
+	   le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
 
-		/* send FW stats ramrod */
-		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
-				   U64_HI(bp->fw_stats_req_mapping),
-				   U64_LO(bp->fw_stats_req_mapping),
-				   NONE_CONNECTION_TYPE);
-		if (rc == 0)
-			bp->stats_pending = 1;
+	/* adjust the ramrod to include VF queues statistics */
+	bnx2x_iov_adjust_stats_req(bp);
+	bnx2x_dp_stats(bp);
 
-		spin_unlock_bh(&bp->stats_lock);
-	}
+	/* send FW stats ramrod */
+	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
+			   U64_HI(bp->fw_stats_req_mapping),
+			   U64_LO(bp->fw_stats_req_mapping),
+			   NONE_CONNECTION_TYPE);
+	if (rc == 0)
+		bp->stats_pending = 1;
 }
 
 static void bnx2x_hw_stats_post(struct bnx2x *bp)
@@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp)
  */
 
 /* should be called under stats_sema */
-static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
 {
 	struct dmae_command *dmae;
 	u32 opcode;
@@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
 }
 
 /* should be called under stats_sema */
-static void __bnx2x_stats_start(struct bnx2x *bp)
+static void bnx2x_stats_start(struct bnx2x *bp)
 {
 	if (IS_PF(bp)) {
 		if (bp->port.pmf)
@@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp)
 		bnx2x_hw_stats_post(bp);
 		bnx2x_storm_stats_post(bp);
 	}
-
-	bp->stats_started = true;
-}
-
-static void bnx2x_stats_start(struct bnx2x *bp)
-{
-	if (down_timeout(&bp->stats_sema, HZ/10))
-		BNX2X_ERR("Unable to acquire stats lock\n");
-	__bnx2x_stats_start(bp);
-	up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
 {
-	if (down_timeout(&bp->stats_sema, HZ/10))
-		BNX2X_ERR("Unable to acquire stats lock\n");
 	bnx2x_stats_comp(bp);
-	__bnx2x_stats_pmf_update(bp);
-	__bnx2x_stats_start(bp);
-	up(&bp->stats_sema);
-}
-
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
-{
-	if (down_timeout(&bp->stats_sema, HZ/10))
-		BNX2X_ERR("Unable to acquire stats lock\n");
-	__bnx2x_stats_pmf_update(bp);
-	up(&bp->stats_sema);
+	bnx2x_stats_pmf_update(bp);
+	bnx2x_stats_start(bp);
 }
 
 static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
 	 */
 	if (IS_VF(bp))
 		return;
-	if (down_timeout(&bp->stats_sema, HZ/10))
-		BNX2X_ERR("Unable to acquire stats lock\n");
+
 	bnx2x_stats_comp(bp);
-	__bnx2x_stats_start(bp);
-	up(&bp->stats_sema);
+	bnx2x_stats_start(bp);
 }
 
 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 {
 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 
-	/* we run update from timer context, so give up
-	 * if somebody is in the middle of transition
-	 */
-	if (down_trylock(&bp->stats_sema))
+	if (bnx2x_edebug_stats_stopped(bp))
 		return;
 
-	if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
-		goto out;
-
 	if (IS_PF(bp)) {
 		if (*stats_comp != DMAE_COMP_VAL)
-			goto out;
+			return;
 
 		if (bp->port.pmf)
 			bnx2x_hw_stats_update(bp);
@@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 			BNX2X_ERR("storm stats were not updated for 3 times\n");
 			bnx2x_panic();
 		}
-		goto out;
+		return;
 	}
 	} else {
 		/* vf doesn't collect HW statistics, and doesn't get completions
@@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
 	/* vf is done */
 	if (IS_VF(bp))
-		goto out;
+		return;
 
 	if (netif_msg_timer(bp)) {
 		struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
 	bnx2x_hw_stats_post(bp);
 	bnx2x_storm_stats_post(bp);
-
-out:
-	up(&bp->stats_sema);
 }
 
 static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
 
 static void bnx2x_stats_stop(struct bnx2x *bp)
 {
-	int update = 0;
-
-	if (down_timeout(&bp->stats_sema, HZ/10))
-		BNX2X_ERR("Unable to acquire stats lock\n");
-
-	bp->stats_started = false;
+	bool update = false;
 
 	bnx2x_stats_comp(bp);
 
@@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
 		bnx2x_hw_stats_post(bp);
 		bnx2x_stats_comp(bp);
 	}
-
-	up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1410,18 +1363,28 @@ static const struct {
 
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
-	enum bnx2x_stats_state state;
-	void (*action)(struct bnx2x *bp);
+	enum bnx2x_stats_state state = bp->stats_state;
+
 	if (unlikely(bp->panic))
 		return;
 
-	spin_lock_bh(&bp->stats_lock);
-	state = bp->stats_state;
+	/* Statistics update run from timer context, and we don't want to stop
+	 * that context in case someone is in the middle of a transition.
+	 * For other events, wait a bit until lock is taken.
+	 */
+	if (!mutex_trylock(&bp->stats_lock)) {
+		if (event == STATS_EVENT_UPDATE)
+			return;
+
+		DP(BNX2X_MSG_STATS,
+		   "Unlikely stats' lock contention [event %d]\n", event);
+		mutex_lock(&bp->stats_lock);
+	}
+
+	bnx2x_stats_stm[state][event].action(bp);
 	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
-	action = bnx2x_stats_stm[state][event].action;
-	spin_unlock_bh(&bp->stats_lock);
 
-	action(bp);
+	mutex_unlock(&bp->stats_lock);
 
 	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
 		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
 	}
 }
 
-void bnx2x_stats_safe_exec(struct bnx2x *bp,
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
 			   void (func_to_exec)(void *cookie),
-			   void *cookie){
-	if (down_timeout(&bp->stats_sema, HZ/10))
-		BNX2X_ERR("Unable to acquire stats lock\n");
+			   void *cookie)
+{
+	int cnt = 10, rc = 0;
+
+	/* Wait for statistics to end [while blocking further requests],
+	 * then run supplied function 'safely'.
+	 */
+	mutex_lock(&bp->stats_lock);
+
 	bnx2x_stats_comp(bp);
+	while (bp->stats_pending && cnt--)
+		if (bnx2x_storm_stats_update(bp))
+			usleep_range(1000, 2000);
+	if (bp->stats_pending) {
+		BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
+		rc = -EBUSY;
+		goto out;
+	}
+
 	func_to_exec(cookie);
-	__bnx2x_stats_start(bp);
-	up(&bp->stats_sema);
+
+out:
+	/* No need to restart statistics - if they're enabled, the timer
+	 * will restart the statistics.
+	 */
+	mutex_unlock(&bp->stats_lock);
+
+	return rc;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 2beceaefdeea..965539a9dabe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -539,9 +539,9 @@ struct bnx2x;
 void bnx2x_memset_stats(struct bnx2x *bp);
 void bnx2x_stats_init(struct bnx2x *bp);
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
-void bnx2x_stats_safe_exec(struct bnx2x *bp,
-			   void (func_to_exec)(void *cookie),
-			   void *cookie);
+int bnx2x_stats_safe_exec(struct bnx2x *bp,
+			  void (func_to_exec)(void *cookie),
+			  void *cookie);
 
 /**
  * bnx2x_save_statistics - save statistics when unloading.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index bf46ca935e2a..6c80eb2e61f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -376,8 +376,6 @@ enum {
 enum {
 	INGQ_EXTRAS = 2,        /* firmware event queue and */
 				/* forwarded interrupts */
-	MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
-		   + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
 	MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
 		   + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
 };
@@ -623,11 +621,13 @@ struct sge {
 	unsigned int idma_qid[2];      /* SGE IDMA Hung Ingress Queue ID */
 
 	unsigned int egr_start;
+	unsigned int egr_sz;
 	unsigned int ingr_start;
-	void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
-	struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
-	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
-	DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
+	unsigned int ingr_sz;
+	void **egr_map;                /* qid->queue egress queue map */
+	struct sge_rspq **ingr_map;    /* qid->queue ingress queue map */
+	unsigned long *starving_fl;
+	unsigned long *txq_maperr;
 	struct timer_list rx_timer; /* refills starving FLs */
 	struct timer_list tx_timer; /* checks Tx queues */
 };
@@ -1143,6 +1143,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
 
 unsigned int qtimer_val(const struct adapter *adap,
 			const struct sge_rspq *q);
+
+int t4_init_devlog_params(struct adapter *adapter);
 int t4_init_sge_params(struct adapter *adapter);
 int t4_init_tp_params(struct adapter *adap);
 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
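With MAX_EGRQ gone, egr_map, ingr_map and the two bitmaps become pointers sized at runtime from egr_sz/ingr_sz. The allocation itself is not part of this hunk; a hedged sketch of how such maps are typically set up (hypothetical helper and error handling):

	static int example_alloc_sge_maps(struct adapter *adap)
	{
		struct sge *s = &adap->sge;

		/* qid->queue map plus the two egress-queue bitmaps, all sized by egr_sz */
		s->egr_map = kcalloc(s->egr_sz, sizeof(*s->egr_map), GFP_KERNEL);
		s->starving_fl = kcalloc(BITS_TO_LONGS(s->egr_sz), sizeof(long), GFP_KERNEL);
		s->txq_maperr = kcalloc(BITS_TO_LONGS(s->egr_sz), sizeof(long), GFP_KERNEL);
		if (!s->egr_map || !s->starving_fl || !s->txq_maperr)
			return -ENOMEM;	/* caller is assumed to free whatever was allocated */

		return 0;
	}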
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0918c16bb154..f0285bcbe598 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
 		"0.9375" };
 
 	int i;
-	u16 incr[NMTUS][NCCTRL_WIN];
+	u16 (*incr)[NCCTRL_WIN];
 	struct adapter *adap = seq->private;
 
+	incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);
+	if (!incr)
+		return -ENOMEM;
+
 	t4_read_cong_tbl(adap, incr);
 
 	for (i = 0; i < NCCTRL_WIN; ++i) {
@@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
 			   adap->params.a_wnd[i],
 			   dec_fac[adap->params.b_wnd[i]]);
 	}
+
+	kfree(incr);
 	return 0;
 }
 
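The cctrl_tbl_show() change moves the NMTUS x NCCTRL_WIN u16 table off the stack; the u16 (*incr)[NCCTRL_WIN] declaration is a pointer to rows of NCCTRL_WIN entries, so incr[i][j] indexes the heap buffer exactly like the old automatic 2-D array did. A minimal sketch of the idiom with generic sizes (not the driver's constants):

	#define EXAMPLE_ROWS 16			/* plays the role of NMTUS */
	#define EXAMPLE_COLS 32			/* plays the role of NCCTRL_WIN */

	static int example_heap_table(void)
	{
		u16 (*tbl)[EXAMPLE_COLS];	/* pointer to rows of EXAMPLE_COLS u16s */
		int i, j;

		tbl = kmalloc(sizeof(*tbl) * EXAMPLE_ROWS, GFP_KERNEL);
		if (!tbl)
			return -ENOMEM;

		for (i = 0; i < EXAMPLE_ROWS; i++)
			for (j = 0; j < EXAMPLE_COLS; j++)
				tbl[i][j] = i * EXAMPLE_COLS + j;	/* same indexing as u16 tbl[ROWS][COLS] */

		kfree(tbl);
		return 0;
	}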
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e40e283ff36c..58c537f16763 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -920,7 +920,7 @@ static void quiesce_rx(struct adapter *adap) | |||
920 | { | 920 | { |
921 | int i; | 921 | int i; |
922 | 922 | ||
923 | for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { | 923 | for (i = 0; i < adap->sge.ingr_sz; i++) { |
924 | struct sge_rspq *q = adap->sge.ingr_map[i]; | 924 | struct sge_rspq *q = adap->sge.ingr_map[i]; |
925 | 925 | ||
926 | if (q && q->handler) { | 926 | if (q && q->handler) { |
@@ -934,6 +934,21 @@ static void quiesce_rx(struct adapter *adap) | |||
934 | } | 934 | } |
935 | } | 935 | } |
936 | 936 | ||
937 | /* Disable interrupt and napi handler */ | ||
938 | static void disable_interrupts(struct adapter *adap) | ||
939 | { | ||
940 | if (adap->flags & FULL_INIT_DONE) { | ||
941 | t4_intr_disable(adap); | ||
942 | if (adap->flags & USING_MSIX) { | ||
943 | free_msix_queue_irqs(adap); | ||
944 | free_irq(adap->msix_info[0].vec, adap); | ||
945 | } else { | ||
946 | free_irq(adap->pdev->irq, adap); | ||
947 | } | ||
948 | quiesce_rx(adap); | ||
949 | } | ||
950 | } | ||
951 | |||
937 | /* | 952 | /* |
938 | * Enable NAPI scheduling and interrupt generation for all Rx queues. | 953 | * Enable NAPI scheduling and interrupt generation for all Rx queues. |
939 | */ | 954 | */ |
@@ -941,7 +956,7 @@ static void enable_rx(struct adapter *adap) | |||
941 | { | 956 | { |
942 | int i; | 957 | int i; |
943 | 958 | ||
944 | for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { | 959 | for (i = 0; i < adap->sge.ingr_sz; i++) { |
945 | struct sge_rspq *q = adap->sge.ingr_map[i]; | 960 | struct sge_rspq *q = adap->sge.ingr_map[i]; |
946 | 961 | ||
947 | if (!q) | 962 | if (!q) |
@@ -992,8 +1007,8 @@ static int setup_sge_queues(struct adapter *adap) | |||
992 | int err, msi_idx, i, j; | 1007 | int err, msi_idx, i, j; |
993 | struct sge *s = &adap->sge; | 1008 | struct sge *s = &adap->sge; |
994 | 1009 | ||
995 | bitmap_zero(s->starving_fl, MAX_EGRQ); | 1010 | bitmap_zero(s->starving_fl, s->egr_sz); |
996 | bitmap_zero(s->txq_maperr, MAX_EGRQ); | 1011 | bitmap_zero(s->txq_maperr, s->egr_sz); |
997 | 1012 | ||
998 | if (adap->flags & USING_MSIX) | 1013 | if (adap->flags & USING_MSIX) |
999 | msi_idx = 1; /* vector 0 is for non-queue interrupts */ | 1014 | msi_idx = 1; /* vector 0 is for non-queue interrupts */ |
@@ -1005,6 +1020,19 @@ static int setup_sge_queues(struct adapter *adap) | |||
1005 | msi_idx = -((int)s->intrq.abs_id + 1); | 1020 | msi_idx = -((int)s->intrq.abs_id + 1); |
1006 | } | 1021 | } |
1007 | 1022 | ||
1023 | /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here, | ||
1024 | * don't forget to update the following which need to be | ||
1025 | * synchronized to any changes here. | ||
1026 | * | ||
1027 | * 1. The calculations of MAX_INGQ in cxgb4.h. | ||
1028 | * | ||
1029 | * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs | ||
1030 | * to accommodate any new/deleted Ingress Queues | ||
1031 | * which need MSI-X Vectors. | ||
1032 | * | ||
1033 | * 3. Update sge_qinfo_show() to include information on the | ||
1034 | * new/deleted queues. | ||
1035 | */ | ||
1008 | err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], | 1036 | err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], |
1009 | msi_idx, NULL, fwevtq_handler); | 1037 | msi_idx, NULL, fwevtq_handler); |
1010 | if (err) { | 1038 | if (err) { |
@@ -4246,19 +4274,12 @@ static int cxgb_up(struct adapter *adap) | |||
4246 | 4274 | ||
4247 | static void cxgb_down(struct adapter *adapter) | 4275 | static void cxgb_down(struct adapter *adapter) |
4248 | { | 4276 | { |
4249 | t4_intr_disable(adapter); | ||
4250 | cancel_work_sync(&adapter->tid_release_task); | 4277 | cancel_work_sync(&adapter->tid_release_task); |
4251 | cancel_work_sync(&adapter->db_full_task); | 4278 | cancel_work_sync(&adapter->db_full_task); |
4252 | cancel_work_sync(&adapter->db_drop_task); | 4279 | cancel_work_sync(&adapter->db_drop_task); |
4253 | adapter->tid_release_task_busy = false; | 4280 | adapter->tid_release_task_busy = false; |
4254 | adapter->tid_release_head = NULL; | 4281 | adapter->tid_release_head = NULL; |
4255 | 4282 | ||
4256 | if (adapter->flags & USING_MSIX) { | ||
4257 | free_msix_queue_irqs(adapter); | ||
4258 | free_irq(adapter->msix_info[0].vec, adapter); | ||
4259 | } else | ||
4260 | free_irq(adapter->pdev->irq, adapter); | ||
4261 | quiesce_rx(adapter); | ||
4262 | t4_sge_stop(adapter); | 4283 | t4_sge_stop(adapter); |
4263 | t4_free_sge_resources(adapter); | 4284 | t4_free_sge_resources(adapter); |
4264 | adapter->flags &= ~FULL_INIT_DONE; | 4285 | adapter->flags &= ~FULL_INIT_DONE; |
@@ -4739,8 +4760,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) | |||
4739 | if (ret < 0) | 4760 | if (ret < 0) |
4740 | return ret; | 4761 | return ret; |
4741 | 4762 | ||
4742 | ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ, | 4763 | ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64, |
4743 | 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); | 4764 | MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, |
4765 | FW_CMD_CAP_PF); | ||
4744 | if (ret < 0) | 4766 | if (ret < 0) |
4745 | return ret; | 4767 | return ret; |
4746 | 4768 | ||
@@ -5094,10 +5116,15 @@ static int adap_init0(struct adapter *adap) | |||
5094 | enum dev_state state; | 5116 | enum dev_state state; |
5095 | u32 params[7], val[7]; | 5117 | u32 params[7], val[7]; |
5096 | struct fw_caps_config_cmd caps_cmd; | 5118 | struct fw_caps_config_cmd caps_cmd; |
5097 | struct fw_devlog_cmd devlog_cmd; | ||
5098 | u32 devlog_meminfo; | ||
5099 | int reset = 1; | 5119 | int reset = 1; |
5100 | 5120 | ||
5121 | /* Grab Firmware Device Log parameters as early as possible so we have | ||
5122 | * access to it for debugging, etc. | ||
5123 | */ | ||
5124 | ret = t4_init_devlog_params(adap); | ||
5125 | if (ret < 0) | ||
5126 | return ret; | ||
5127 | |||
5101 | /* Contact FW, advertising Master capability */ | 5128 | /* Contact FW, advertising Master capability */ |
5102 | ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); | 5129 | ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); |
5103 | if (ret < 0) { | 5130 | if (ret < 0) { |
@@ -5175,30 +5202,6 @@ static int adap_init0(struct adapter *adap) | |||
5175 | if (ret < 0) | 5202 | if (ret < 0) |
5176 | goto bye; | 5203 | goto bye; |
5177 | 5204 | ||
5178 | /* Read firmware device log parameters. We really need to find a way | ||
5179 | * to get these parameters initialized with some default values (which | ||
5180 | * are likely to be correct) for the case where we either don't | ||
5181 | * attache to the firmware or it's crashed when we probe the adapter. | ||
5182 | * That way we'll still be able to perform early firmware startup | ||
5183 | * debugging ... If the request to get the Firmware's Device Log | ||
5184 | * parameters fails, we'll live so we don't make that a fatal error. | ||
5185 | */ | ||
5186 | memset(&devlog_cmd, 0, sizeof(devlog_cmd)); | ||
5187 | devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) | | ||
5188 | FW_CMD_REQUEST_F | FW_CMD_READ_F); | ||
5189 | devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd)); | ||
5190 | ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd), | ||
5191 | &devlog_cmd); | ||
5192 | if (ret == 0) { | ||
5193 | devlog_meminfo = | ||
5194 | ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog); | ||
5195 | adap->params.devlog.memtype = | ||
5196 | FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo); | ||
5197 | adap->params.devlog.start = | ||
5198 | FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4; | ||
5199 | adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog); | ||
5200 | } | ||
5201 | |||
5202 | /* | 5205 | /* |
5203 | * Find out what ports are available to us. Note that we need to do | 5206 | * Find out what ports are available to us. Note that we need to do |
5204 | * this before calling adap_init0_no_config() since it needs nports | 5207 | * this before calling adap_init0_no_config() since it needs nports |
@@ -5299,6 +5302,51 @@ static int adap_init0(struct adapter *adap) | |||
5299 | adap->tids.nftids = val[4] - val[3] + 1; | 5302 | adap->tids.nftids = val[4] - val[3] + 1; |
5300 | adap->sge.ingr_start = val[5]; | 5303 | adap->sge.ingr_start = val[5]; |
5301 | 5304 | ||
5305 | /* qids (ingress/egress) returned from firmware can be anywhere | ||
5306 | * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END. | ||
5307 | * Hence driver needs to allocate memory for this range to | ||
5308 | * store the queue info. Get the highest IQFLINT/EQ index returned | ||
5309 | * in FW_EQ_*_CMD.alloc command. | ||
5310 | */ | ||
5311 | params[0] = FW_PARAM_PFVF(EQ_END); | ||
5312 | params[1] = FW_PARAM_PFVF(IQFLINT_END); | ||
5313 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); | ||
5314 | if (ret < 0) | ||
5315 | goto bye; | ||
5316 | adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; | ||
5317 | adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; | ||
5318 | |||
5319 | adap->sge.egr_map = kcalloc(adap->sge.egr_sz, | ||
5320 | sizeof(*adap->sge.egr_map), GFP_KERNEL); | ||
5321 | if (!adap->sge.egr_map) { | ||
5322 | ret = -ENOMEM; | ||
5323 | goto bye; | ||
5324 | } | ||
5325 | |||
5326 | adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, | ||
5327 | sizeof(*adap->sge.ingr_map), GFP_KERNEL); | ||
5328 | if (!adap->sge.ingr_map) { | ||
5329 | ret = -ENOMEM; | ||
5330 | goto bye; | ||
5331 | } | ||
5332 | |||
5333 | /* Allocate the memory for the various egress queue bitmaps | ||
5334 | * i.e. starving_fl and txq_maperr. | ||
5335 | */ | ||
5336 | adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), | ||
5337 | sizeof(long), GFP_KERNEL); | ||
5338 | if (!adap->sge.starving_fl) { | ||
5339 | ret = -ENOMEM; | ||
5340 | goto bye; | ||
5341 | } | ||
5342 | |||
5343 | adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), | ||
5344 | sizeof(long), GFP_KERNEL); | ||
5345 | if (!adap->sge.txq_maperr) { | ||
5346 | ret = -ENOMEM; | ||
5347 | goto bye; | ||
5348 | } | ||
5349 | |||
5302 | params[0] = FW_PARAM_PFVF(CLIP_START); | 5350 | params[0] = FW_PARAM_PFVF(CLIP_START); |
5303 | params[1] = FW_PARAM_PFVF(CLIP_END); | 5351 | params[1] = FW_PARAM_PFVF(CLIP_END); |
5304 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); | 5352 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); |
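The block above replaces the old fixed-size MAX_EGRQ/MAX_INGQ arrays with maps and bitmaps sized from the EQ_END/IQFLINT_END values the firmware reports, since queue IDs can land anywhere between the *_START and *_END parameters. A hedged userspace sketch of the sizing arithmetic follows; the start/end numbers are invented stand-ins for the FW_PARAM_PFVF query results.

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG    (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
        /* illustrative values; the driver reads these from the firmware */
        unsigned int egr_start = 64, egr_end = 1087;
        unsigned int egr_sz = egr_end - egr_start + 1;

        /* reverse map: one pointer slot per possible egress queue id */
        void **egr_map = calloc(egr_sz, sizeof(*egr_map));
        /* bitmap with one bit per egress queue, stored in longs */
        unsigned long *starving_fl = calloc(BITS_TO_LONGS(egr_sz), sizeof(long));

        if (!egr_map || !starving_fl) {
                free(egr_map);
                free(starving_fl);
                return 1;
        }

        printf("egr_sz=%u, map bytes=%zu, bitmap longs=%zu\n",
               egr_sz, egr_sz * sizeof(*egr_map), BITS_TO_LONGS(egr_sz));

        free(starving_fl);
        free(egr_map);
        return 0;
}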
@@ -5507,6 +5555,10 @@ static int adap_init0(struct adapter *adap) | |||
5507 | * happened to HW/FW, stop issuing commands. | 5555 | * happened to HW/FW, stop issuing commands. |
5508 | */ | 5556 | */ |
5509 | bye: | 5557 | bye: |
5558 | kfree(adap->sge.egr_map); | ||
5559 | kfree(adap->sge.ingr_map); | ||
5560 | kfree(adap->sge.starving_fl); | ||
5561 | kfree(adap->sge.txq_maperr); | ||
5510 | if (ret != -ETIMEDOUT && ret != -EIO) | 5562 | if (ret != -ETIMEDOUT && ret != -EIO) |
5511 | t4_fw_bye(adap, adap->mbox); | 5563 | t4_fw_bye(adap, adap->mbox); |
5512 | return ret; | 5564 | return ret; |
@@ -5534,6 +5586,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, | |||
5534 | netif_carrier_off(dev); | 5586 | netif_carrier_off(dev); |
5535 | } | 5587 | } |
5536 | spin_unlock(&adap->stats_lock); | 5588 | spin_unlock(&adap->stats_lock); |
5589 | disable_interrupts(adap); | ||
5537 | if (adap->flags & FULL_INIT_DONE) | 5590 | if (adap->flags & FULL_INIT_DONE) |
5538 | cxgb_down(adap); | 5591 | cxgb_down(adap); |
5539 | rtnl_unlock(); | 5592 | rtnl_unlock(); |
@@ -5942,6 +5995,10 @@ static void free_some_resources(struct adapter *adapter) | |||
5942 | 5995 | ||
5943 | t4_free_mem(adapter->l2t); | 5996 | t4_free_mem(adapter->l2t); |
5944 | t4_free_mem(adapter->tids.tid_tab); | 5997 | t4_free_mem(adapter->tids.tid_tab); |
5998 | kfree(adapter->sge.egr_map); | ||
5999 | kfree(adapter->sge.ingr_map); | ||
6000 | kfree(adapter->sge.starving_fl); | ||
6001 | kfree(adapter->sge.txq_maperr); | ||
5945 | disable_msi(adapter); | 6002 | disable_msi(adapter); |
5946 | 6003 | ||
5947 | for_each_port(adapter, i) | 6004 | for_each_port(adapter, i) |
@@ -6267,6 +6324,8 @@ static void remove_one(struct pci_dev *pdev) | |||
6267 | if (is_offload(adapter)) | 6324 | if (is_offload(adapter)) |
6268 | detach_ulds(adapter); | 6325 | detach_ulds(adapter); |
6269 | 6326 | ||
6327 | disable_interrupts(adapter); | ||
6328 | |||
6270 | for_each_port(adapter, i) | 6329 | for_each_port(adapter, i) |
6271 | if (adapter->port[i]->reg_state == NETREG_REGISTERED) | 6330 | if (adapter->port[i]->reg_state == NETREG_REGISTERED) |
6272 | unregister_netdev(adapter->port[i]); | 6331 | unregister_netdev(adapter->port[i]); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index c46e7a938317..c438f3895c40 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -2239,7 +2239,7 @@ static void sge_rx_timer_cb(unsigned long data) | |||
2239 | struct adapter *adap = (struct adapter *)data; | 2239 | struct adapter *adap = (struct adapter *)data; |
2240 | struct sge *s = &adap->sge; | 2240 | struct sge *s = &adap->sge; |
2241 | 2241 | ||
2242 | for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) | 2242 | for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) |
2243 | for (m = s->starving_fl[i]; m; m &= m - 1) { | 2243 | for (m = s->starving_fl[i]; m; m &= m - 1) { |
2244 | struct sge_eth_rxq *rxq; | 2244 | struct sge_eth_rxq *rxq; |
2245 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; | 2245 | unsigned int id = __ffs(m) + i * BITS_PER_LONG; |
@@ -2327,7 +2327,7 @@ static void sge_tx_timer_cb(unsigned long data) | |||
2327 | struct adapter *adap = (struct adapter *)data; | 2327 | struct adapter *adap = (struct adapter *)data; |
2328 | struct sge *s = &adap->sge; | 2328 | struct sge *s = &adap->sge; |
2329 | 2329 | ||
2330 | for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++) | 2330 | for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) |
2331 | for (m = s->txq_maperr[i]; m; m &= m - 1) { | 2331 | for (m = s->txq_maperr[i]; m; m &= m - 1) { |
2332 | unsigned long id = __ffs(m) + i * BITS_PER_LONG; | 2332 | unsigned long id = __ffs(m) + i * BITS_PER_LONG; |
2333 | struct sge_ofld_txq *txq = s->egr_map[id]; | 2333 | struct sge_ofld_txq *txq = s->egr_map[id]; |
@@ -2809,7 +2809,8 @@ void t4_free_sge_resources(struct adapter *adap) | |||
2809 | free_rspq_fl(adap, &adap->sge.intrq, NULL); | 2809 | free_rspq_fl(adap, &adap->sge.intrq, NULL); |
2810 | 2810 | ||
2811 | /* clear the reverse egress queue map */ | 2811 | /* clear the reverse egress queue map */ |
2812 | memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map)); | 2812 | memset(adap->sge.egr_map, 0, |
2813 | adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); | ||
2813 | } | 2814 | } |
2814 | 2815 | ||
2815 | void t4_sge_start(struct adapter *adap) | 2816 | void t4_sge_start(struct adapter *adap) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index afbe1682ff48..5ed8db977432 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -4459,6 +4459,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter, | |||
4459 | } | 4459 | } |
4460 | 4460 | ||
4461 | /** | 4461 | /** |
4462 | * t4_init_devlog_params - initialize adapter->params.devlog | ||
4463 | * @adap: the adapter | ||
4464 | * | ||
4465 | * Initialize various fields of the adapter's Firmware Device Log | ||
4466 | * Parameters structure. | ||
4467 | */ | ||
4468 | int t4_init_devlog_params(struct adapter *adap) | ||
4469 | { | ||
4470 | struct devlog_params *dparams = &adap->params.devlog; | ||
4471 | u32 pf_dparams; | ||
4472 | unsigned int devlog_meminfo; | ||
4473 | struct fw_devlog_cmd devlog_cmd; | ||
4474 | int ret; | ||
4475 | |||
4476 | /* If we're dealing with newer firmware, the Device Log Parameters | ||
4477 | * are stored in a designated register which allows us to access the | ||
4478 | * Device Log even if we can't talk to the firmware. | ||
4479 | */ | ||
4480 | pf_dparams = | ||
4481 | t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG)); | ||
4482 | if (pf_dparams) { | ||
4483 | unsigned int nentries, nentries128; | ||
4484 | |||
4485 | dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams); | ||
4486 | dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4; | ||
4487 | |||
4488 | nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams); | ||
4489 | nentries = (nentries128 + 1) * 128; | ||
4490 | dparams->size = nentries * sizeof(struct fw_devlog_e); | ||
4491 | |||
4492 | return 0; | ||
4493 | } | ||
4494 | |||
4495 | /* Otherwise, ask the firmware for its Device Log Parameters. | ||
4496 | */ | ||
4497 | memset(&devlog_cmd, 0, sizeof(devlog_cmd)); | ||
4498 | devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) | | ||
4499 | FW_CMD_REQUEST_F | FW_CMD_READ_F); | ||
4500 | devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd)); | ||
4501 | ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd), | ||
4502 | &devlog_cmd); | ||
4503 | if (ret) | ||
4504 | return ret; | ||
4505 | |||
4506 | devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog); | ||
4507 | dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo); | ||
4508 | dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4; | ||
4509 | dparams->size = ntohl(devlog_cmd.memsize_devlog); | ||
4510 | |||
4511 | return 0; | ||
4512 | } | ||
4513 | |||
4514 | /** | ||
4462 | * t4_init_sge_params - initialize adap->params.sge | 4515 | * t4_init_sge_params - initialize adap->params.sge |
4463 | * @adapter: the adapter | 4516 | * @adapter: the adapter |
4464 | * | 4517 | * |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 231a725f6d5d..326674b19983 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | |||
@@ -63,6 +63,8 @@ | |||
63 | #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | 63 | #define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) |
64 | #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | 64 | #define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) |
65 | 65 | ||
66 | #define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) | ||
67 | |||
66 | #define SGE_PF_KDOORBELL_A 0x0 | 68 | #define SGE_PF_KDOORBELL_A 0x0 |
67 | 69 | ||
68 | #define QID_S 15 | 70 | #define QID_S 15 |
@@ -707,6 +709,7 @@ | |||
707 | #define PFNUM_V(x) ((x) << PFNUM_S) | 709 | #define PFNUM_V(x) ((x) << PFNUM_S) |
708 | 710 | ||
709 | #define PCIE_FW_A 0x30b8 | 711 | #define PCIE_FW_A 0x30b8 |
712 | #define PCIE_FW_PF_A 0x30bc | ||
710 | 713 | ||
711 | #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908 | 714 | #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908 |
712 | 715 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index d136ca6a0c8a..03fbfd1fb3df 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | |||
@@ -101,7 +101,7 @@ enum fw_wr_opcodes { | |||
101 | FW_RI_BIND_MW_WR = 0x18, | 101 | FW_RI_BIND_MW_WR = 0x18, |
102 | FW_RI_FR_NSMR_WR = 0x19, | 102 | FW_RI_FR_NSMR_WR = 0x19, |
103 | FW_RI_INV_LSTAG_WR = 0x1a, | 103 | FW_RI_INV_LSTAG_WR = 0x1a, |
104 | FW_LASTC2E_WR = 0x40 | 104 | FW_LASTC2E_WR = 0x70 |
105 | }; | 105 | }; |
106 | 106 | ||
107 | struct fw_wr_hdr { | 107 | struct fw_wr_hdr { |
@@ -993,6 +993,7 @@ enum fw_memtype_cf { | |||
993 | FW_MEMTYPE_CF_EXTMEM = 0x2, | 993 | FW_MEMTYPE_CF_EXTMEM = 0x2, |
994 | FW_MEMTYPE_CF_FLASH = 0x4, | 994 | FW_MEMTYPE_CF_FLASH = 0x4, |
995 | FW_MEMTYPE_CF_INTERNAL = 0x5, | 995 | FW_MEMTYPE_CF_INTERNAL = 0x5, |
996 | FW_MEMTYPE_CF_EXTMEM1 = 0x6, | ||
996 | }; | 997 | }; |
997 | 998 | ||
998 | struct fw_caps_config_cmd { | 999 | struct fw_caps_config_cmd { |
@@ -1035,6 +1036,7 @@ enum fw_params_mnem { | |||
1035 | FW_PARAMS_MNEM_PFVF = 2, /* function params */ | 1036 | FW_PARAMS_MNEM_PFVF = 2, /* function params */ |
1036 | FW_PARAMS_MNEM_REG = 3, /* limited register access */ | 1037 | FW_PARAMS_MNEM_REG = 3, /* limited register access */ |
1037 | FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ | 1038 | FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ |
1039 | FW_PARAMS_MNEM_CHNET = 5, /* chnet params */ | ||
1038 | FW_PARAMS_MNEM_LAST | 1040 | FW_PARAMS_MNEM_LAST |
1039 | }; | 1041 | }; |
1040 | 1042 | ||
@@ -3102,7 +3104,8 @@ enum fw_devlog_facility { | |||
3102 | FW_DEVLOG_FACILITY_FCOE = 0x2E, | 3104 | FW_DEVLOG_FACILITY_FCOE = 0x2E, |
3103 | FW_DEVLOG_FACILITY_FOISCSI = 0x30, | 3105 | FW_DEVLOG_FACILITY_FOISCSI = 0x30, |
3104 | FW_DEVLOG_FACILITY_FOFCOE = 0x32, | 3106 | FW_DEVLOG_FACILITY_FOFCOE = 0x32, |
3105 | FW_DEVLOG_FACILITY_MAX = 0x32, | 3107 | FW_DEVLOG_FACILITY_CHNET = 0x34, |
3108 | FW_DEVLOG_FACILITY_MAX = 0x34, | ||
3106 | }; | 3109 | }; |
3107 | 3110 | ||
3108 | /* log message format */ | 3111 | /* log message format */ |
@@ -3139,4 +3142,36 @@ struct fw_devlog_cmd { | |||
3139 | (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \ | 3142 | (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \ |
3140 | FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M) | 3143 | FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M) |
3141 | 3144 | ||
3145 | /* P C I E F W P F 7 R E G I S T E R */ | ||
3146 | |||
3147 | /* PF7 stores the Firmware Device Log parameters which allow Host Drivers to | ||
3148 | * access the "devlog" without needing to contact firmware. The encoding is | ||
3149 | * mostly the same as that returned by the DEVLOG command except for the size | ||
3150 | * which is encoded as the number of entries in multiples-1 of 128 here rather | ||
3151 | * than the memory size as is done in the DEVLOG command. Thus, 0 means 128 | ||
3152 | * and 15 means 2048. This of course in turn constrains the allowed values | ||
3153 | * for the devlog size ... | ||
3154 | */ | ||
3155 | #define PCIE_FW_PF_DEVLOG 7 | ||
3156 | |||
3157 | #define PCIE_FW_PF_DEVLOG_NENTRIES128_S 28 | ||
3158 | #define PCIE_FW_PF_DEVLOG_NENTRIES128_M 0xf | ||
3159 | #define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \ | ||
3160 | ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S) | ||
3161 | #define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \ | ||
3162 | (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \ | ||
3163 | PCIE_FW_PF_DEVLOG_NENTRIES128_M) | ||
3164 | |||
3165 | #define PCIE_FW_PF_DEVLOG_ADDR16_S 4 | ||
3166 | #define PCIE_FW_PF_DEVLOG_ADDR16_M 0xffffff | ||
3167 | #define PCIE_FW_PF_DEVLOG_ADDR16_V(x) ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S) | ||
3168 | #define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \ | ||
3169 | (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M) | ||
3170 | |||
3171 | #define PCIE_FW_PF_DEVLOG_MEMTYPE_S 0 | ||
3172 | #define PCIE_FW_PF_DEVLOG_MEMTYPE_M 0xf | ||
3173 | #define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S) | ||
3174 | #define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \ | ||
3175 | (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M) | ||
3176 | |||
3142 | #endif /* _T4FW_INTERFACE_H_ */ | 3177 | #endif /* _T4FW_INTERFACE_H_ */ |
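The new PCIE_FW_PF_DEVLOG macros pack three fields into one 32-bit word: the entry count in units of 128 (stored minus one), the start address in 16-byte units, and the memory type. A small hedged decode of a made-up register value, using the shift/mask constants spelled out above:

#include <stdio.h>
#include <stdint.h>

#define DEVLOG_NENTRIES128_S 28
#define DEVLOG_NENTRIES128_M 0xfu
#define DEVLOG_ADDR16_S      4
#define DEVLOG_ADDR16_M      0xffffffu
#define DEVLOG_MEMTYPE_S     0
#define DEVLOG_MEMTYPE_M     0xfu

int main(void)
{
        /* illustrative register value, not read from real hardware */
        uint32_t pf_dparams = (3u << DEVLOG_NENTRIES128_S) |  /* (3+1)*128 entries */
                              (0x1000u << DEVLOG_ADDR16_S) |  /* start = 0x1000 << 4 */
                              (2u << DEVLOG_MEMTYPE_S);

        unsigned int nentries = (((pf_dparams >> DEVLOG_NENTRIES128_S) &
                                  DEVLOG_NENTRIES128_M) + 1) * 128;
        unsigned int start = ((pf_dparams >> DEVLOG_ADDR16_S) & DEVLOG_ADDR16_M) << 4;
        unsigned int memtype = (pf_dparams >> DEVLOG_MEMTYPE_S) & DEVLOG_MEMTYPE_M;

        printf("devlog: %u entries, start=0x%x, memtype=%u\n",
               nentries, start, memtype);
        return 0;
}

With the field stored minus one, the 4-bit NENTRIES128 value spans 128 to 2048 entries, which is exactly the constraint the comment above calls out.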
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index e2bd3f747858..b9d1cbac0eee 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | |||
@@ -36,13 +36,13 @@ | |||
36 | #define __T4FW_VERSION_H__ | 36 | #define __T4FW_VERSION_H__ |
37 | 37 | ||
38 | #define T4FW_VERSION_MAJOR 0x01 | 38 | #define T4FW_VERSION_MAJOR 0x01 |
39 | #define T4FW_VERSION_MINOR 0x0C | 39 | #define T4FW_VERSION_MINOR 0x0D |
40 | #define T4FW_VERSION_MICRO 0x19 | 40 | #define T4FW_VERSION_MICRO 0x20 |
41 | #define T4FW_VERSION_BUILD 0x00 | 41 | #define T4FW_VERSION_BUILD 0x00 |
42 | 42 | ||
43 | #define T5FW_VERSION_MAJOR 0x01 | 43 | #define T5FW_VERSION_MAJOR 0x01 |
44 | #define T5FW_VERSION_MINOR 0x0C | 44 | #define T5FW_VERSION_MINOR 0x0D |
45 | #define T5FW_VERSION_MICRO 0x19 | 45 | #define T5FW_VERSION_MICRO 0x20 |
46 | #define T5FW_VERSION_BUILD 0x00 | 46 | #define T5FW_VERSION_BUILD 0x00 |
47 | 47 | ||
48 | #endif | 48 | #endif |
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 5ba14b32c370..7715982230e5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c | |||
@@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, | |||
1004 | ? (tq->pidx - 1) | 1004 | ? (tq->pidx - 1) |
1005 | : (tq->size - 1)); | 1005 | : (tq->size - 1)); |
1006 | __be64 *src = (__be64 *)&tq->desc[index]; | 1006 | __be64 *src = (__be64 *)&tq->desc[index]; |
1007 | __be64 __iomem *dst = (__be64 *)(tq->bar2_addr + | 1007 | __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr + |
1008 | SGE_UDB_WCDOORBELL); | 1008 | SGE_UDB_WCDOORBELL); |
1009 | unsigned int count = EQ_UNIT / sizeof(__be64); | 1009 | unsigned int count = EQ_UNIT / sizeof(__be64); |
1010 | 1010 | ||
@@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq, | |||
1018 | * DMA. | 1018 | * DMA. |
1019 | */ | 1019 | */ |
1020 | while (count) { | 1020 | while (count) { |
1021 | writeq(*src, dst); | 1021 | /* the (__force u64) is because the compiler |
1022 | * doesn't understand the endian swizzling | ||
1023 | * going on | ||
1024 | */ | ||
1025 | writeq((__force u64)*src, dst); | ||
1022 | src++; | 1026 | src++; |
1023 | dst++; | 1027 | dst++; |
1024 | count--; | 1028 | count--; |
@@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1252 | BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); | 1256 | BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1); |
1253 | wr = (void *)&txq->q.desc[txq->q.pidx]; | 1257 | wr = (void *)&txq->q.desc[txq->q.pidx]; |
1254 | wr->equiq_to_len16 = cpu_to_be32(wr_mid); | 1258 | wr->equiq_to_len16 = cpu_to_be32(wr_mid); |
1255 | wr->r3[0] = cpu_to_be64(0); | 1259 | wr->r3[0] = cpu_to_be32(0); |
1256 | wr->r3[1] = cpu_to_be64(0); | 1260 | wr->r3[1] = cpu_to_be32(0); |
1257 | skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); | 1261 | skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); |
1258 | end = (u64 *)wr + flits; | 1262 | end = (u64 *)wr + flits; |
1259 | 1263 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index c21e2e954ad8..966ee900ed00 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | |||
@@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, | |||
210 | 210 | ||
211 | if (rpl) { | 211 | if (rpl) { |
212 | /* request bit in high-order BE word */ | 212 | /* request bit in high-order BE word */ |
213 | WARN_ON((be32_to_cpu(*(const u32 *)cmd) | 213 | WARN_ON((be32_to_cpu(*(const __be32 *)cmd) |
214 | & FW_CMD_REQUEST_F) == 0); | 214 | & FW_CMD_REQUEST_F) == 0); |
215 | get_mbox_rpl(adapter, rpl, size, mbox_data); | 215 | get_mbox_rpl(adapter, rpl, size, mbox_data); |
216 | WARN_ON((be32_to_cpu(*(u32 *)rpl) | 216 | WARN_ON((be32_to_cpu(*(__be32 *)rpl) |
217 | & FW_CMD_REQUEST_F) != 0); | 217 | & FW_CMD_REQUEST_F) != 0); |
218 | } | 218 | } |
219 | t4_write_reg(adapter, mbox_ctl, | 219 | t4_write_reg(adapter, mbox_ctl, |
@@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter, | |||
484 | * o The BAR2 Queue ID. | 484 | * o The BAR2 Queue ID. |
485 | * o The BAR2 Queue ID Offset into the BAR2 page. | 485 | * o The BAR2 Queue ID Offset into the BAR2 page. |
486 | */ | 486 | */ |
487 | bar2_page_offset = ((qid >> qpp_shift) << page_shift); | 487 | bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift); |
488 | bar2_qid = qid & qpp_mask; | 488 | bar2_qid = qid & qpp_mask; |
489 | bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; | 489 | bar2_qid_offset = bar2_qid * SGE_UDB_SIZE; |
490 | 490 | ||
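The one-character change above matters because (qid >> qpp_shift) << page_shift is evaluated in 32-bit unsigned arithmetic before being assigned to the 64-bit BAR2 offset, so a large queue ID combined with a big page shift silently loses its high bits. A hedged illustration with invented values chosen to make the truncation visible:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int qid = 0x12345, qpp_shift = 0, page_shift = 16;
        uint64_t wrong, right;

        /* shift happens in 32 bits, then the result is widened: too late */
        wrong = (qid >> qpp_shift) << page_shift;
        /* widen before shifting, as the patched driver now does */
        right = ((uint64_t)(qid >> qpp_shift)) << page_shift;

        printf("wrong=0x%llx right=0x%llx\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}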
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 78e1ce09b1ab..f6a3a7abd468 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -1954,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) | |||
1954 | struct fec_enet_private *fep = netdev_priv(ndev); | 1954 | struct fec_enet_private *fep = netdev_priv(ndev); |
1955 | struct device_node *node; | 1955 | struct device_node *node; |
1956 | int err = -ENXIO, i; | 1956 | int err = -ENXIO, i; |
1957 | u32 mii_speed, holdtime; | ||
1957 | 1958 | ||
1958 | /* | 1959 | /* |
1959 | * The i.MX28 dual fec interfaces are not equal. | 1960 | * The i.MX28 dual fec interfaces are not equal. |
@@ -1991,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev) | |||
1991 | * Reference Manual has an error on this, and gets fixed on i.MX6Q | 1992 | * Reference Manual has an error on this, and gets fixed on i.MX6Q |
1992 | * document. | 1993 | * document. |
1993 | */ | 1994 | */ |
1994 | fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); | 1995 | mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); |
1995 | if (fep->quirks & FEC_QUIRK_ENET_MAC) | 1996 | if (fep->quirks & FEC_QUIRK_ENET_MAC) |
1996 | fep->phy_speed--; | 1997 | mii_speed--; |
1997 | fep->phy_speed <<= 1; | 1998 | if (mii_speed > 63) { |
1999 | dev_err(&pdev->dev, | ||
2000 | "fec clock (%lu) to fast to get right mii speed\n", | ||
2001 | clk_get_rate(fep->clk_ipg)); | ||
2002 | err = -EINVAL; | ||
2003 | goto err_out; | ||
2004 | } | ||
2005 | |||
2006 | /* | ||
2007 | * The i.MX28 and i.MX6 types have another field in the MSCR (aka | ||
2008 | * MII_SPEED) register that defines the MDIO output hold time. Earlier | ||
2009 | * versions are RAZ there, so just ignore the difference and write the | ||
2010 | * register always. | ||
2011 | * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns. | ||
2012 | * HOLDTIME + 1 is the number of clk cycles the fec is holding the | ||
2013 | * output. | ||
2014 | * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). | ||
2015 | * Given that ceil(clkrate / 5000000) <= 64, the calculation for | ||
2016 | * holdtime cannot result in a value greater than 3. | ||
2017 | */ | ||
2018 | holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; | ||
2019 | |||
2020 | fep->phy_speed = mii_speed << 1 | holdtime << 8; | ||
2021 | |||
1998 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); | 2022 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); |
1999 | 2023 | ||
2000 | fep->mii_bus = mdiobus_alloc(); | 2024 | fep->mii_bus = mdiobus_alloc(); |
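The rewritten MII setup derives two MSCR bitfields from the IPG clock: MII_SPEED, which divides the clock down so MDC stays at or below 2.5 MHz, and HOLDTIME, which guarantees the 10 ns MDIO output hold the comment cites. A hedged userspace sketch of just the arithmetic, assuming a 66 MHz clock (the real rate comes from clk_get_rate, and the FEC_QUIRK_ENET_MAC adjustment is skipped here):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long clk_ipg = 66000000UL;     /* assumed i.MX IPG clock rate */
        unsigned int mii_speed, holdtime, mscr;

        /* keep MDC at or below 2.5 MHz: round the divider up */
        mii_speed = DIV_ROUND_UP(clk_ipg, 5000000);
        if (mii_speed > 63) {
                fprintf(stderr, "clock too fast for the MII_SPEED field\n");
                return 1;
        }

        /* HOLDTIME + 1 clock cycles must cover at least 10 ns */
        holdtime = DIV_ROUND_UP(clk_ipg, 100000000) - 1;

        mscr = mii_speed << 1 | holdtime << 8;
        printf("mii_speed=%u holdtime=%u mscr=0x%x\n", mii_speed, holdtime, mscr);
        return 0;
}

For 66 MHz this gives mii_speed=14 and holdtime=0; since ceil(clk/5000000) is capped at 64, holdtime can never exceed 3, matching the bound the new comment states.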
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index bfdccbd58be0..4dd40e057f40 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c | |||
@@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev) | |||
3893 | ugeth->phy_interface = phy_interface; | 3893 | ugeth->phy_interface = phy_interface; |
3894 | ugeth->max_speed = max_speed; | 3894 | ugeth->max_speed = max_speed; |
3895 | 3895 | ||
3896 | /* Carrier starts down, phylib will bring it up */ | ||
3897 | netif_carrier_off(dev); | ||
3898 | |||
3896 | err = register_netdev(dev); | 3899 | err = register_netdev(dev); |
3897 | if (err) { | 3900 | if (err) { |
3898 | if (netif_msg_probe(ugeth)) | 3901 | if (netif_msg_probe(ugeth)) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 20b3c7b21e63..08ab90a70965 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
@@ -2002,7 +2002,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, | |||
2002 | goto reset_slave; | 2002 | goto reset_slave; |
2003 | slave_state[slave].vhcr_dma = ((u64) param) << 48; | 2003 | slave_state[slave].vhcr_dma = ((u64) param) << 48; |
2004 | priv->mfunc.master.slave_state[slave].cookie = 0; | 2004 | priv->mfunc.master.slave_state[slave].cookie = 0; |
2005 | mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]); | ||
2006 | break; | 2005 | break; |
2007 | case MLX4_COMM_CMD_VHCR1: | 2006 | case MLX4_COMM_CMD_VHCR1: |
2008 | if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) | 2007 | if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0) |
@@ -2234,6 +2233,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) | |||
2234 | for (i = 0; i < dev->num_slaves; ++i) { | 2233 | for (i = 0; i < dev->num_slaves; ++i) { |
2235 | s_state = &priv->mfunc.master.slave_state[i]; | 2234 | s_state = &priv->mfunc.master.slave_state[i]; |
2236 | s_state->last_cmd = MLX4_COMM_CMD_RESET; | 2235 | s_state->last_cmd = MLX4_COMM_CMD_RESET; |
2236 | mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]); | ||
2237 | for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) | 2237 | for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j) |
2238 | s_state->event_eq[j].eqn = -1; | 2238 | s_state->event_eq[j].eqn = -1; |
2239 | __raw_writel((__force u32) 0, | 2239 | __raw_writel((__force u32) 0, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 354e254b53cf..51b20663c2a6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -2917,13 +2917,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2917 | netif_carrier_off(dev); | 2917 | netif_carrier_off(dev); |
2918 | mlx4_en_set_default_moderation(priv); | 2918 | mlx4_en_set_default_moderation(priv); |
2919 | 2919 | ||
2920 | err = register_netdev(dev); | ||
2921 | if (err) { | ||
2922 | en_err(priv, "Netdev registration failed for port %d\n", port); | ||
2923 | goto out; | ||
2924 | } | ||
2925 | priv->registered = 1; | ||
2926 | |||
2927 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); | 2920 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); |
2928 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); | 2921 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); |
2929 | 2922 | ||
@@ -2969,6 +2962,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
2969 | mdev->profile.prof[priv->port].tx_ppp, | 2962 | mdev->profile.prof[priv->port].tx_ppp, |
2970 | mdev->profile.prof[priv->port].tx_pause); | 2963 | mdev->profile.prof[priv->port].tx_pause); |
2971 | 2964 | ||
2965 | err = register_netdev(dev); | ||
2966 | if (err) { | ||
2967 | en_err(priv, "Netdev registration failed for port %d\n", port); | ||
2968 | goto out; | ||
2969 | } | ||
2970 | |||
2971 | priv->registered = 1; | ||
2972 | |||
2972 | return 0; | 2973 | return 0; |
2973 | 2974 | ||
2974 | out: | 2975 | out: |
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 264bc15c1ff2..6e70ffee8e87 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
@@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work) | |||
153 | 153 | ||
154 | /* All active slaves need to receive the event */ | 154 | /* All active slaves need to receive the event */ |
155 | if (slave == ALL_SLAVES) { | 155 | if (slave == ALL_SLAVES) { |
156 | for (i = 0; i < dev->num_slaves; i++) { | 156 | for (i = 0; i <= dev->persist->num_vfs; i++) { |
157 | if (i != dev->caps.function && | 157 | if (mlx4_GEN_EQE(dev, i, eqe)) |
158 | master->slave_state[i].active) | 158 | mlx4_warn(dev, "Failed to generate event for slave %d\n", |
159 | if (mlx4_GEN_EQE(dev, i, eqe)) | 159 | i); |
160 | mlx4_warn(dev, "Failed to generate event for slave %d\n", | ||
161 | i); | ||
162 | } | 160 | } |
163 | } else { | 161 | } else { |
164 | if (mlx4_GEN_EQE(dev, slave, eqe)) | 162 | if (mlx4_GEN_EQE(dev, slave, eqe)) |
@@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave, | |||
203 | struct mlx4_eqe *eqe) | 201 | struct mlx4_eqe *eqe) |
204 | { | 202 | { |
205 | struct mlx4_priv *priv = mlx4_priv(dev); | 203 | struct mlx4_priv *priv = mlx4_priv(dev); |
206 | struct mlx4_slave_state *s_slave = | ||
207 | &priv->mfunc.master.slave_state[slave]; | ||
208 | 204 | ||
209 | if (!s_slave->active) { | 205 | if (slave < 0 || slave > dev->persist->num_vfs || |
210 | /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/ | 206 | slave == dev->caps.function || |
207 | !priv->mfunc.master.slave_state[slave].active) | ||
211 | return; | 208 | return; |
212 | } | ||
213 | 209 | ||
214 | slave_event(dev, slave, eqe); | 210 | slave_event(dev, slave, eqe); |
215 | } | 211 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index c258f8625aac..11bcd27e218f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -3099,6 +3099,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) | |||
3099 | if (!priv->mfunc.master.slave_state) | 3099 | if (!priv->mfunc.master.slave_state) |
3100 | return -EINVAL; | 3100 | return -EINVAL; |
3101 | 3101 | ||
3102 | /* check for slave valid, slave not PF, and slave active */ | ||
3103 | if (slave < 0 || slave > dev->persist->num_vfs || | ||
3104 | slave == dev->caps.function || | ||
3105 | !priv->mfunc.master.slave_state[slave].active) | ||
3106 | return 0; | ||
3107 | |||
3102 | event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; | 3108 | event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; |
3103 | 3109 | ||
3104 | /* Create the event only if the slave is registered */ | 3110 | /* Create the event only if the slave is registered */ |
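The added check gives mlx4_GEN_EQE the same guard that mlx4_slave_event now applies: a generated event is only delivered to a slave index that is in range, is not the PF's own function, and is currently active. A hedged stand-alone sketch of that validation order (the struct and function names are illustrative, not the real mlx4 ones):

#include <stdio.h>
#include <stdbool.h>

struct slave_state { bool active; };

static bool slave_event_allowed(int slave, int num_vfs, int pf_function,
                                const struct slave_state *state)
{
        if (slave < 0 || slave > num_vfs)
                return false;            /* out of range: never index the array */
        if (slave == pf_function)
                return false;            /* don't loop the event back to the PF */
        return state[slave].active;      /* inactive slaves are silently skipped */
}

int main(void)
{
        /* slot 0 is the PF, slots 1..3 are VFs; VF 2 is inactive */
        struct slave_state state[4] = { {true}, {true}, {false}, {true} };

        for (int s = -1; s <= 4; s++)
                printf("slave %2d -> %s\n", s,
                       slave_event_allowed(s, 3, 0, state) ? "deliver" : "skip");
        return 0;
}

Doing the range check before touching the per-slave state is the point: the pre-patch mlx4_slave_event read slave_state[slave].active without first validating the slave index.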
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index c9558e6d57ad..a87b177bd723 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c | |||
@@ -4937,10 +4937,16 @@ static int rocker_port_master_changed(struct net_device *dev) | |||
4937 | struct net_device *master = netdev_master_upper_dev_get(dev); | 4937 | struct net_device *master = netdev_master_upper_dev_get(dev); |
4938 | int err = 0; | 4938 | int err = 0; |
4939 | 4939 | ||
4940 | /* There are currently three cases handled here: | ||
4941 | * 1. Joining a bridge | ||
4942 | * 2. Leaving a previously joined bridge | ||
4943 | * 3. Other, e.g. being added to or removed from a bond or openvswitch, | ||
4944 | * in which case nothing is done | ||
4945 | */ | ||
4940 | if (master && master->rtnl_link_ops && | 4946 | if (master && master->rtnl_link_ops && |
4941 | !strcmp(master->rtnl_link_ops->kind, "bridge")) | 4947 | !strcmp(master->rtnl_link_ops->kind, "bridge")) |
4942 | err = rocker_port_bridge_join(rocker_port, master); | 4948 | err = rocker_port_bridge_join(rocker_port, master); |
4943 | else | 4949 | else if (rocker_port_is_bridged(rocker_port)) |
4944 | err = rocker_port_bridge_leave(rocker_port); | 4950 | err = rocker_port_bridge_leave(rocker_port); |
4945 | 4951 | ||
4946 | return err; | 4952 | return err; |
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 924ea98bd531..54549a6223dd 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h | |||
@@ -114,7 +114,9 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr); | |||
114 | rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb); | 114 | rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb); |
115 | int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev); | 115 | int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev); |
116 | void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr); | 116 | void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr); |
117 | bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6); | 117 | struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, |
118 | const void *iaddr, bool is_v6); | ||
119 | bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); | ||
118 | struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, | 120 | struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, |
119 | const void *iaddr, bool is_v6); | 121 | const void *iaddr, bool is_v6); |
120 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); | 122 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); |
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 131bde98188d..c30b5c300c05 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c | |||
@@ -81,19 +81,20 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr) | |||
81 | hash = (addr->atype == IPVL_IPV6) ? | 81 | hash = (addr->atype == IPVL_IPV6) ? |
82 | ipvlan_get_v6_hash(&addr->ip6addr) : | 82 | ipvlan_get_v6_hash(&addr->ip6addr) : |
83 | ipvlan_get_v4_hash(&addr->ip4addr); | 83 | ipvlan_get_v4_hash(&addr->ip4addr); |
84 | hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); | 84 | if (hlist_unhashed(&addr->hlnode)) |
85 | hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); | ||
85 | } | 86 | } |
86 | 87 | ||
87 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) | 88 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) |
88 | { | 89 | { |
89 | hlist_del_rcu(&addr->hlnode); | 90 | hlist_del_init_rcu(&addr->hlnode); |
90 | if (sync) | 91 | if (sync) |
91 | synchronize_rcu(); | 92 | synchronize_rcu(); |
92 | } | 93 | } |
93 | 94 | ||
94 | bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) | 95 | struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, |
96 | const void *iaddr, bool is_v6) | ||
95 | { | 97 | { |
96 | struct ipvl_port *port = ipvlan->port; | ||
97 | struct ipvl_addr *addr; | 98 | struct ipvl_addr *addr; |
98 | 99 | ||
99 | list_for_each_entry(addr, &ipvlan->addrs, anode) { | 100 | list_for_each_entry(addr, &ipvlan->addrs, anode) { |
@@ -101,12 +102,21 @@ bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6) | |||
101 | ipv6_addr_equal(&addr->ip6addr, iaddr)) || | 102 | ipv6_addr_equal(&addr->ip6addr, iaddr)) || |
102 | (!is_v6 && addr->atype == IPVL_IPV4 && | 103 | (!is_v6 && addr->atype == IPVL_IPV4 && |
103 | addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr)) | 104 | addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr)) |
104 | return true; | 105 | return addr; |
105 | } | 106 | } |
107 | return NULL; | ||
108 | } | ||
109 | |||
110 | bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6) | ||
111 | { | ||
112 | struct ipvl_dev *ipvlan; | ||
106 | 113 | ||
107 | if (ipvlan_ht_addr_lookup(port, iaddr, is_v6)) | 114 | ASSERT_RTNL(); |
108 | return true; | ||
109 | 115 | ||
116 | list_for_each_entry(ipvlan, &port->ipvlans, pnode) { | ||
117 | if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) | ||
118 | return true; | ||
119 | } | ||
110 | return false; | 120 | return false; |
111 | } | 121 | } |
112 | 122 | ||
@@ -192,7 +202,8 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb, | |||
192 | if (skb->protocol == htons(ETH_P_PAUSE)) | 202 | if (skb->protocol == htons(ETH_P_PAUSE)) |
193 | return; | 203 | return; |
194 | 204 | ||
195 | list_for_each_entry(ipvlan, &port->ipvlans, pnode) { | 205 | rcu_read_lock(); |
206 | list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) { | ||
196 | if (local && (ipvlan == in_dev)) | 207 | if (local && (ipvlan == in_dev)) |
197 | continue; | 208 | continue; |
198 | 209 | ||
@@ -219,6 +230,7 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb, | |||
219 | mcast_acct: | 230 | mcast_acct: |
220 | ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); | 231 | ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); |
221 | } | 232 | } |
233 | rcu_read_unlock(); | ||
222 | 234 | ||
223 | /* Locally generated? ...Forward a copy to the main-device as | 235 | /* Locally generated? ...Forward a copy to the main-device as |
224 | * well. On the RX side we'll ignore it (won't give it to any | 236
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 1701ede2df89..77b92a0fe557 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
@@ -511,7 +511,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) | |||
511 | if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { | 511 | if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { |
512 | list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { | 512 | list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { |
513 | ipvlan_ht_addr_del(addr, !dev->dismantle); | 513 | ipvlan_ht_addr_del(addr, !dev->dismantle); |
514 | list_del_rcu(&addr->anode); | 514 | list_del(&addr->anode); |
515 | } | 515 | } |
516 | } | 516 | } |
517 | list_del_rcu(&ipvlan->pnode); | 517 | list_del_rcu(&ipvlan->pnode); |
@@ -613,7 +613,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) | |||
613 | { | 613 | { |
614 | struct ipvl_addr *addr; | 614 | struct ipvl_addr *addr; |
615 | 615 | ||
616 | if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) { | 616 | if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) { |
617 | netif_err(ipvlan, ifup, ipvlan->dev, | 617 | netif_err(ipvlan, ifup, ipvlan->dev, |
618 | "Failed to add IPv6=%pI6c addr for %s intf\n", | 618 | "Failed to add IPv6=%pI6c addr for %s intf\n", |
619 | ip6_addr, ipvlan->dev->name); | 619 | ip6_addr, ipvlan->dev->name); |
@@ -626,9 +626,13 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) | |||
626 | addr->master = ipvlan; | 626 | addr->master = ipvlan; |
627 | memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); | 627 | memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); |
628 | addr->atype = IPVL_IPV6; | 628 | addr->atype = IPVL_IPV6; |
629 | list_add_tail_rcu(&addr->anode, &ipvlan->addrs); | 629 | list_add_tail(&addr->anode, &ipvlan->addrs); |
630 | ipvlan->ipv6cnt++; | 630 | ipvlan->ipv6cnt++; |
631 | ipvlan_ht_addr_add(ipvlan, addr); | 631 | /* If the interface is not up, the address will be added to the hash |
632 | * list by ipvlan_open. | ||
633 | */ | ||
634 | if (netif_running(ipvlan->dev)) | ||
635 | ipvlan_ht_addr_add(ipvlan, addr); | ||
632 | 636 | ||
633 | return 0; | 637 | return 0; |
634 | } | 638 | } |
@@ -637,12 +641,12 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) | |||
637 | { | 641 | { |
638 | struct ipvl_addr *addr; | 642 | struct ipvl_addr *addr; |
639 | 643 | ||
640 | addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true); | 644 | addr = ipvlan_find_addr(ipvlan, ip6_addr, true); |
641 | if (!addr) | 645 | if (!addr) |
642 | return; | 646 | return; |
643 | 647 | ||
644 | ipvlan_ht_addr_del(addr, true); | 648 | ipvlan_ht_addr_del(addr, true); |
645 | list_del_rcu(&addr->anode); | 649 | list_del(&addr->anode); |
646 | ipvlan->ipv6cnt--; | 650 | ipvlan->ipv6cnt--; |
647 | WARN_ON(ipvlan->ipv6cnt < 0); | 651 | WARN_ON(ipvlan->ipv6cnt < 0); |
648 | kfree_rcu(addr, rcu); | 652 | kfree_rcu(addr, rcu); |
@@ -681,7 +685,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) | |||
681 | { | 685 | { |
682 | struct ipvl_addr *addr; | 686 | struct ipvl_addr *addr; |
683 | 687 | ||
684 | if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) { | 688 | if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) { |
685 | netif_err(ipvlan, ifup, ipvlan->dev, | 689 | netif_err(ipvlan, ifup, ipvlan->dev, |
686 | "Failed to add IPv4=%pI4 on %s intf.\n", | 690 | "Failed to add IPv4=%pI4 on %s intf.\n", |
687 | ip4_addr, ipvlan->dev->name); | 691 | ip4_addr, ipvlan->dev->name); |
@@ -694,9 +698,13 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) | |||
694 | addr->master = ipvlan; | 698 | addr->master = ipvlan; |
695 | memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); | 699 | memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); |
696 | addr->atype = IPVL_IPV4; | 700 | addr->atype = IPVL_IPV4; |
697 | list_add_tail_rcu(&addr->anode, &ipvlan->addrs); | 701 | list_add_tail(&addr->anode, &ipvlan->addrs); |
698 | ipvlan->ipv4cnt++; | 702 | ipvlan->ipv4cnt++; |
699 | ipvlan_ht_addr_add(ipvlan, addr); | 703 | /* If the interface is not up, the address will be added to the hash |
704 | * list by ipvlan_open. | ||
705 | */ | ||
706 | if (netif_running(ipvlan->dev)) | ||
707 | ipvlan_ht_addr_add(ipvlan, addr); | ||
700 | ipvlan_set_broadcast_mac_filter(ipvlan, true); | 708 | ipvlan_set_broadcast_mac_filter(ipvlan, true); |
701 | 709 | ||
702 | return 0; | 710 | return 0; |
@@ -706,12 +714,12 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) | |||
706 | { | 714 | { |
707 | struct ipvl_addr *addr; | 715 | struct ipvl_addr *addr; |
708 | 716 | ||
709 | addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false); | 717 | addr = ipvlan_find_addr(ipvlan, ip4_addr, false); |
710 | if (!addr) | 718 | if (!addr) |
711 | return; | 719 | return; |
712 | 720 | ||
713 | ipvlan_ht_addr_del(addr, true); | 721 | ipvlan_ht_addr_del(addr, true); |
714 | list_del_rcu(&addr->anode); | 722 | list_del(&addr->anode); |
715 | ipvlan->ipv4cnt--; | 723 | ipvlan->ipv4cnt--; |
716 | WARN_ON(ipvlan->ipv4cnt < 0); | 724 | WARN_ON(ipvlan->ipv4cnt < 0); |
717 | if (!ipvlan->ipv4cnt) | 725 | if (!ipvlan->ipv4cnt) |
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 724a9b50df7a..75d6f26729a3 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c | |||
@@ -189,7 +189,7 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
189 | skb_put(skb, sizeof(padbytes)); | 189 | skb_put(skb, sizeof(padbytes)); |
190 | } | 190 | } |
191 | 191 | ||
192 | usbnet_set_skb_tx_stats(skb, 1); | 192 | usbnet_set_skb_tx_stats(skb, 1, 0); |
193 | return skb; | 193 | return skb; |
194 | } | 194 | } |
195 | 195 | ||
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 9311a08565be..4545e78840b0 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -522,6 +522,7 @@ static const struct driver_info wwan_info = { | |||
522 | #define DELL_VENDOR_ID 0x413C | 522 | #define DELL_VENDOR_ID 0x413C |
523 | #define REALTEK_VENDOR_ID 0x0bda | 523 | #define REALTEK_VENDOR_ID 0x0bda |
524 | #define SAMSUNG_VENDOR_ID 0x04e8 | 524 | #define SAMSUNG_VENDOR_ID 0x04e8 |
525 | #define LENOVO_VENDOR_ID 0x17ef | ||
525 | 526 | ||
526 | static const struct usb_device_id products[] = { | 527 | static const struct usb_device_id products[] = { |
527 | /* BLACKLIST !! | 528 | /* BLACKLIST !! |
@@ -702,6 +703,13 @@ static const struct usb_device_id products[] = { | |||
702 | .driver_info = 0, | 703 | .driver_info = 0, |
703 | }, | 704 | }, |
704 | 705 | ||
706 | /* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ | ||
707 | { | ||
708 | USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM, | ||
709 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
710 | .driver_info = 0, | ||
711 | }, | ||
712 | |||
705 | /* WHITELIST!!! | 713 | /* WHITELIST!!! |
706 | * | 714 | * |
707 | * CDC Ether uses two interfaces, not necessarily consecutive. | 715 | * CDC Ether uses two interfaces, not necessarily consecutive. |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 70cbea551139..c3e4da9e79ca 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -1177,13 +1177,12 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) | |||
1177 | ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; | 1177 | ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; |
1178 | ctx->tx_ntbs++; | 1178 | ctx->tx_ntbs++; |
1179 | 1179 | ||
1180 | /* usbnet has already counted all the framing overhead. | 1180 | /* usbnet will count all the framing overhead by default. |
1181 | * Adjust the stats so that the tx_bytes counter show real | 1181 | * Adjust the stats so that the tx_bytes counter show real |
1182 | * payload data instead. | 1182 | * payload data instead. |
1183 | */ | 1183 | */ |
1184 | dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload; | 1184 | usbnet_set_skb_tx_stats(skb_out, n, |
1185 | 1185 | ctx->tx_curr_frame_payload - skb_out->len); | |
1186 | usbnet_set_skb_tx_stats(skb_out, n); | ||
1187 | 1186 | ||
1188 | return skb_out; | 1187 | return skb_out; |
1189 | 1188 | ||
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 5065538dd03b..ac4d03b328b1 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -493,6 +493,7 @@ enum rtl8152_flags { | |||
493 | /* Define these values to match your device */ | 493 | /* Define these values to match your device */ |
494 | #define VENDOR_ID_REALTEK 0x0bda | 494 | #define VENDOR_ID_REALTEK 0x0bda |
495 | #define VENDOR_ID_SAMSUNG 0x04e8 | 495 | #define VENDOR_ID_SAMSUNG 0x04e8 |
496 | #define VENDOR_ID_LENOVO 0x17ef | ||
496 | 497 | ||
497 | #define MCU_TYPE_PLA 0x0100 | 498 | #define MCU_TYPE_PLA 0x0100 |
498 | #define MCU_TYPE_USB 0x0000 | 499 | #define MCU_TYPE_USB 0x0000 |
@@ -4114,6 +4115,7 @@ static struct usb_device_id rtl8152_table[] = { | |||
4114 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, | 4115 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, |
4115 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, | 4116 | {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, |
4116 | {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, | 4117 | {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, |
4118 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, | ||
4117 | {} | 4119 | {} |
4118 | }; | 4120 | }; |
4119 | 4121 | ||
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index 7650cdc8fe6b..953de13267df 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c | |||
@@ -144,7 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | |||
144 | skb_put(skb, sizeof(padbytes)); | 144 | skb_put(skb, sizeof(padbytes)); |
145 | } | 145 | } |
146 | 146 | ||
147 | usbnet_set_skb_tx_stats(skb, 1); | 147 | usbnet_set_skb_tx_stats(skb, 1, 0); |
148 | return skb; | 148 | return skb; |
149 | } | 149 | } |
150 | 150 | ||
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 0f3ff285f6a1..777757ae1973 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -1346,9 +1346,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, | |||
1346 | } else | 1346 | } else |
1347 | urb->transfer_flags |= URB_ZERO_PACKET; | 1347 | urb->transfer_flags |= URB_ZERO_PACKET; |
1348 | } | 1348 | } |
1349 | entry->length = urb->transfer_buffer_length = length; | 1349 | urb->transfer_buffer_length = length; |
1350 | if (!(info->flags & FLAG_MULTI_PACKET)) | 1350 | |
1351 | usbnet_set_skb_tx_stats(skb, 1); | 1351 | if (info->flags & FLAG_MULTI_PACKET) { |
1352 | /* Driver has set number of packets and a length delta. | ||
1353 | * Calculate the complete length and ensure that it's | ||
1354 | * positive. | ||
1355 | */ | ||
1356 | entry->length += length; | ||
1357 | if (WARN_ON_ONCE(entry->length <= 0)) | ||
1358 | entry->length = length; | ||
1359 | } else { | ||
1360 | usbnet_set_skb_tx_stats(skb, 1, length); | ||
1361 | } | ||
1352 | 1362 | ||
1353 | spin_lock_irqsave(&dev->txq.lock, flags); | 1363 | spin_lock_irqsave(&dev->txq.lock, flags); |
1354 | retval = usb_autopm_get_interface_async(dev->intf); | 1364 | retval = usb_autopm_get_interface_async(dev->intf); |
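With this change a FLAG_MULTI_PACKET driver reports both the number of aggregated packets and a byte delta relative to the framed URB length, so usbnet can account payload bytes instead of framing overhead (cdc_ncm passes tx_curr_frame_payload - skb_out->len, a negative delta). A loose, hedged model of that bookkeeping; the struct and helper below are illustrative, not the real usbnet fields:

#include <stdio.h>

struct tx_stats {
        unsigned long tx_packets;
        long          tx_bytes;
};

/* framed_len is what actually went over the wire; bytes_delta lets the
 * minidriver shift the byte count back to payload-only accounting.
 */
static void account_tx(struct tx_stats *s, unsigned long framed_len,
                       unsigned long packets, long bytes_delta)
{
        s->tx_packets += packets;
        s->tx_bytes   += (long)framed_len + bytes_delta;
}

int main(void)
{
        struct tx_stats stats = { 0, 0 };

        /* e.g. an NTB carrying 3 datagrams of 1514 bytes, padded to 8192 */
        unsigned long payload = 3 * 1514, framed = 8192;

        account_tx(&stats, framed, 3, (long)payload - (long)framed);

        printf("tx_packets=%lu tx_bytes=%ld (payload only)\n",
               stats.tx_packets, stats.tx_bytes);
        return 0;
}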
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index cb366adc820b..f50a6bc5d06e 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c | |||
@@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif) | |||
219 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); | 219 | struct ath_common *common = ath9k_hw_common(sc->sc_ah); |
220 | struct ath_vif *avp = (void *)vif->drv_priv; | 220 | struct ath_vif *avp = (void *)vif->drv_priv; |
221 | struct ath_buf *bf = avp->av_bcbuf; | 221 | struct ath_buf *bf = avp->av_bcbuf; |
222 | struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon; | ||
222 | 223 | ||
223 | ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n", | 224 | ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n", |
224 | avp->av_bslot); | 225 | avp->av_bslot); |
225 | 226 | ||
226 | tasklet_disable(&sc->bcon_tasklet); | 227 | tasklet_disable(&sc->bcon_tasklet); |
227 | 228 | ||
229 | cur_conf->enable_beacon &= ~BIT(avp->av_bslot); | ||
230 | |||
228 | if (bf && bf->bf_mpdu) { | 231 | if (bf && bf->bf_mpdu) { |
229 | struct sk_buff *skb = bf->bf_mpdu; | 232 | struct sk_buff *skb = bf->bf_mpdu; |
230 | dma_unmap_single(sc->dev, bf->bf_buf_addr, | 233 | dma_unmap_single(sc->dev, bf->bf_buf_addr, |
@@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc, | |||
521 | } | 524 | } |
522 | 525 | ||
523 | if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { | 526 | if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { |
524 | if ((vif->type != NL80211_IFTYPE_AP) || | 527 | if (vif->type != NL80211_IFTYPE_AP) { |
525 | (sc->nbcnvifs > 1)) { | ||
526 | ath_dbg(common, CONFIG, | 528 | ath_dbg(common, CONFIG, |
527 | "An AP interface is already present !\n"); | 529 | "An AP interface is already present !\n"); |
528 | return false; | 530 | return false; |
@@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif, | |||
616 | * enabling/disabling SWBA. | 618 | * enabling/disabling SWBA. |
617 | */ | 619 | */ |
618 | if (changed & BSS_CHANGED_BEACON_ENABLED) { | 620 | if (changed & BSS_CHANGED_BEACON_ENABLED) { |
619 | if (!bss_conf->enable_beacon && | 621 | bool enabled = cur_conf->enable_beacon; |
620 | (sc->nbcnvifs <= 1)) { | 622 | |
621 | cur_conf->enable_beacon = false; | 623 | if (!bss_conf->enable_beacon) { |
622 | } else if (bss_conf->enable_beacon) { | 624 | cur_conf->enable_beacon &= ~BIT(avp->av_bslot); |
623 | cur_conf->enable_beacon = true; | 625 | } else { |
624 | ath9k_cache_beacon_config(sc, ctx, bss_conf); | 626 | cur_conf->enable_beacon |= BIT(avp->av_bslot); |
627 | if (!enabled) | ||
628 | ath9k_cache_beacon_config(sc, ctx, bss_conf); | ||
625 | } | 629 | } |
626 | } | 630 | } |
627 | 631 | ||
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index 2b79a568e803..d23737342f4f 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h | |||
@@ -54,7 +54,7 @@ struct ath_beacon_config { | |||
54 | u16 dtim_period; | 54 | u16 dtim_period; |
55 | u16 bmiss_timeout; | 55 | u16 bmiss_timeout; |
56 | u8 dtim_count; | 56 | u8 dtim_count; |
57 | bool enable_beacon; | 57 | u8 enable_beacon; |
58 | bool ibss_creator; | 58 | bool ibss_creator; |
59 | u32 nexttbtt; | 59 | u32 nexttbtt; |
60 | u32 intval; | 60 | u32 intval; |
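The ath9k hunks above stop treating beacon enablement as a single flag and instead keep one bit per beacon slot in enable_beacon, so removing or disabling one AP interface no longer turns beacons off for the remaining ones. A rough userspace sketch of that per-slot bitmask bookkeeping follows; the slot count and struct names are illustrative, not ath9k's.

/* Illustrative sketch of per-slot beacon tracking with a bitmask,
 * in the spirit of the ath9k hunks above; not the driver's layout. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

struct beacon_config {
	uint8_t enable_beacon;   /* one bit per slot, replaces the old bool */
};

static void slot_enable(struct beacon_config *c, int slot)
{
	c->enable_beacon |= BIT(slot);
}

static void slot_disable(struct beacon_config *c, int slot)
{
	c->enable_beacon &= ~BIT(slot);
}

static bool any_beacon_enabled(const struct beacon_config *c)
{
	return c->enable_beacon != 0;
}

int main(void)
{
	struct beacon_config c = { 0 };

	slot_enable(&c, 0);
	slot_enable(&c, 2);
	slot_disable(&c, 0);     /* one vif removed ... */
	printf("beacons still needed: %s\n",
	       any_beacon_enabled(&c) ? "yes" : "no");  /* ... slot 2 keeps them on */
	return 0;
}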
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 1d9ad5bfe0c8..5cdbdb038371 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -457,7 +457,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah) | |||
457 | ah->power_mode = ATH9K_PM_UNDEFINED; | 457 | ah->power_mode = ATH9K_PM_UNDEFINED; |
458 | ah->htc_reset_init = true; | 458 | ah->htc_reset_init = true; |
459 | 459 | ||
460 | ah->tpc_enabled = true; | 460 | ah->tpc_enabled = false; |
461 | 461 | ||
462 | ah->ani_function = ATH9K_ANI_ALL; | 462 | ah->ani_function = ATH9K_ANI_ALL; |
463 | if (!AR_SREV_9300_20_OR_LATER(ah)) | 463 | if (!AR_SREV_9300_20_OR_LATER(ah)) |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c index defb7a44e0bc..7748a1ccf14f 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c | |||
@@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) | |||
126 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan"); | 126 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan"); |
127 | if (drvr->bus_if->wowl_supported) | 127 | if (drvr->bus_if->wowl_supported) |
128 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); | 128 | brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); |
129 | brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0); | 129 | if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID) |
130 | brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0); | ||
130 | 131 | ||
131 | /* set chip related quirks */ | 132 | /* set chip related quirks */ |
132 | switch (drvr->bus_if->chip) { | 133 | switch (drvr->bus_if->chip) { |
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h index a6f22c32a279..3811878ab9cd 100644 --- a/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/iwlwifi/dvm/dev.h | |||
@@ -708,7 +708,6 @@ struct iwl_priv { | |||
708 | unsigned long reload_jiffies; | 708 | unsigned long reload_jiffies; |
709 | int reload_count; | 709 | int reload_count; |
710 | bool ucode_loaded; | 710 | bool ucode_loaded; |
711 | bool init_ucode_run; /* Don't run init uCode again */ | ||
712 | 711 | ||
713 | u8 plcp_delta_threshold; | 712 | u8 plcp_delta_threshold; |
714 | 713 | ||
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 4dbef7e58c2e..5244e43bfafb 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c | |||
@@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv) | |||
418 | if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) | 418 | if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len) |
419 | return 0; | 419 | return 0; |
420 | 420 | ||
421 | if (priv->init_ucode_run) | ||
422 | return 0; | ||
423 | |||
424 | iwl_init_notification_wait(&priv->notif_wait, &calib_wait, | 421 | iwl_init_notification_wait(&priv->notif_wait, &calib_wait, |
425 | calib_complete, ARRAY_SIZE(calib_complete), | 422 | calib_complete, ARRAY_SIZE(calib_complete), |
426 | iwlagn_wait_calib, priv); | 423 | iwlagn_wait_calib, priv); |
@@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv) | |||
440 | */ | 437 | */ |
441 | ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, | 438 | ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, |
442 | UCODE_CALIB_TIMEOUT); | 439 | UCODE_CALIB_TIMEOUT); |
443 | if (!ret) | ||
444 | priv->init_ucode_run = true; | ||
445 | 440 | ||
446 | goto out; | 441 | goto out; |
447 | 442 | ||
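The dvm/ucode.c hunks above delete the init_ucode_run latch so the init uCode, and the calibration it performs, runs again on every (re)start instead of only once per driver lifetime. A small sketch of why a ran-once latch is wrong when the device can be restarted; dev_priv and run_init_firmware are hypothetical names, not the iwlwifi functions.

/* Illustrative only: a "ran once" latch skips work that must be redone
 * after a firmware restart. The fix above simply removes the latch. */
#include <stdio.h>

struct dev_priv {
	int calib_results;       /* stands in for calibration state */
};

static int run_init_firmware(struct dev_priv *priv)
{
	/* Previously: if (priv->init_ucode_run) return 0;  -- removed,
	 * because calibration must be redone after a restart. */
	priv->calib_results = 42;
	printf("calibration complete\n");
	return 0;
}

int main(void)
{
	struct dev_priv priv = { 0 };

	run_init_firmware(&priv);   /* first load */
	priv.calib_results = 0;     /* a firmware restart wipes the results */
	run_init_firmware(&priv);   /* must run again, and now it does */
	return 0;
}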
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 66ca000f0da1..aefdd9b7c105 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c | |||
@@ -1319,6 +1319,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) | |||
1319 | op->name, err); | 1319 | op->name, err); |
1320 | #endif | 1320 | #endif |
1321 | } | 1321 | } |
1322 | kfree(pieces); | ||
1322 | return; | 1323 | return; |
1323 | 1324 | ||
1324 | try_again: | 1325 | try_again: |
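The one-line iwl-drv.c change above plugs a leak by freeing a scratch allocation on a return path that previously dropped it. A generic sketch of the free-on-every-exit pattern follows; parse_firmware and the pieces struct are illustrative stand-ins, not the iwlwifi firmware parser.

/* Illustrative only: release a scratch allocation on every exit path,
 * in the spirit of the kfree(pieces) added above. */
#include <stdlib.h>
#include <stdio.h>

struct pieces {
	int n_sections;
};

/* Hypothetical parser: returns 0 on success, -1 on failure.
 * Whatever happens, the scratch 'pieces' buffer is freed before return. */
static int parse_firmware(size_t len)
{
	struct pieces *pieces = calloc(1, sizeof(*pieces));
	int ret = -1;

	if (!pieces)
		return -1;

	if (len == 0)
		goto out;                /* error path */

	pieces->n_sections = (int)(len / 16) + 1;
	printf("parsed %d section(s)\n", pieces->n_sections);
	ret = 0;                         /* success path falls through as well */

out:
	free(pieces);                    /* the added line, in spirit */
	return ret;
}

int main(void)
{
	return parse_firmware(64) ? 1 : 0;
}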
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index dd457df9601e..9140b0b701c7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c | |||
@@ -1336,6 +1336,9 @@ static void rs_mac80211_tx_status(void *mvm_r, | |||
1336 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 1336 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
1337 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1337 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1338 | 1338 | ||
1339 | if (!iwl_mvm_sta_from_mac80211(sta)->vif) | ||
1340 | return; | ||
1341 | |||
1339 | if (!ieee80211_is_data(hdr->frame_control) || | 1342 | if (!ieee80211_is_data(hdr->frame_control) || |
1340 | info->flags & IEEE80211_TX_CTL_NO_ACK) | 1343 | info->flags & IEEE80211_TX_CTL_NO_ACK) |
1341 | return; | 1344 | return; |
@@ -2569,6 +2572,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, | |||
2569 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 2572 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
2570 | struct iwl_lq_sta *lq_sta = mvm_sta; | 2573 | struct iwl_lq_sta *lq_sta = mvm_sta; |
2571 | 2574 | ||
2575 | if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) { | ||
2576 | 		/* if vif isn't initialized, mvm doesn't know about | ||
2577 | 		 * this station, so don't do anything with it | ||
2578 | */ | ||
2579 | sta = NULL; | ||
2580 | mvm_sta = NULL; | ||
2581 | } | ||
2582 | |||
2572 | /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ | 2583 | /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */ |
2573 | 2584 | ||
2574 | /* Treat uninitialized rate scaling data same as non-existing. */ | 2585 | /* Treat uninitialized rate scaling data same as non-existing. */ |
@@ -2886,6 +2897,9 @@ static void rs_rate_update(void *mvm_r, | |||
2886 | (struct iwl_op_mode *)mvm_r; | 2897 | (struct iwl_op_mode *)mvm_r; |
2887 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); | 2898 | struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); |
2888 | 2899 | ||
2900 | if (!iwl_mvm_sta_from_mac80211(sta)->vif) | ||
2901 | return; | ||
2902 | |||
2889 | /* Stop any ongoing aggregations as rs starts off assuming no agg */ | 2903 | /* Stop any ongoing aggregations as rs starts off assuming no agg */ |
2890 | for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) | 2904 | for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) |
2891 | ieee80211_stop_tx_ba_session(sta, tid); | 2905 | ieee80211_stop_tx_ba_session(sta, tid); |
@@ -3659,9 +3673,15 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf, | |||
3659 | 3673 | ||
3660 | MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); | 3674 | MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32); |
3661 | 3675 | ||
3662 | static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) | 3676 | static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir) |
3663 | { | 3677 | { |
3664 | struct iwl_lq_sta *lq_sta = mvm_sta; | 3678 | struct iwl_lq_sta *lq_sta = priv_sta; |
3679 | struct iwl_mvm_sta *mvmsta; | ||
3680 | |||
3681 | mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta); | ||
3682 | |||
3683 | if (!mvmsta->vif) | ||
3684 | return; | ||
3665 | 3685 | ||
3666 | debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, | 3686 | debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, |
3667 | lq_sta, &rs_sta_dbgfs_scale_table_ops); | 3687 | lq_sta, &rs_sta_dbgfs_scale_table_ops); |
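The rs.c hunks above all guard the rate-scaling callbacks against a station whose vif has not been initialised yet, either returning early or treating the station as absent, and rs_add_debugfs recovers the containing station from the embedded lq_sta with container_of before checking. A minimal sketch of that guard plus the container_of lookup; the struct names below are stand-ins, not iwlwifi's layout.

/* Illustrative only: recover the containing station from an embedded
 * rate-scaling context and bail out when its vif is not set up yet. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct lq_sta {                  /* embedded rate-scaling state */
	int last_rate;
};

struct mvm_sta {                 /* hypothetical station, not iwlwifi's */
	void *vif;               /* NULL until the interface is initialised */
	struct lq_sta lq_sta;
};

static void rs_add_debugfs_like(struct lq_sta *lq)
{
	struct mvm_sta *sta = container_of(lq, struct mvm_sta, lq_sta);

	if (!sta->vif)           /* the guard the hunks above add */
		return;

	printf("would create debugfs entries for rate %d\n", lq->last_rate);
}

int main(void)
{
	struct mvm_sta sta = { .vif = NULL, .lq_sta = { .last_rate = 7 } };

	rs_add_debugfs_like(&sta.lq_sta);   /* silently skipped: vif is NULL */
	sta.vif = &sta;                     /* pretend the vif is now set */
	rs_add_debugfs_like(&sta.lq_sta);   /* prints */
	return 0;
}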
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 7906b97c81b9..ba34dda1ae36 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
@@ -953,8 +953,10 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | |||
953 | mvmsta = iwl_mvm_sta_from_mac80211(sta); | 953 | mvmsta = iwl_mvm_sta_from_mac80211(sta); |
954 | tid_data = &mvmsta->tid_data[tid]; | 954 | tid_data = &mvmsta->tid_data[tid]; |
955 | 955 | ||
956 | if (WARN_ONCE(tid_data->txq_id != scd_flow, "Q %d, tid %d, flow %d", | 956 | if (tid_data->txq_id != scd_flow) { |
957 | tid_data->txq_id, tid, scd_flow)) { | 957 | IWL_ERR(mvm, |
958 | "invalid BA notification: Q %d, tid %d, flow %d\n", | ||
959 | tid_data->txq_id, tid, scd_flow); | ||
958 | rcu_read_unlock(); | 960 | rcu_read_unlock(); |
959 | return 0; | 961 | return 0; |
960 | } | 962 | } |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 2794cd2d3a64..b18569734922 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
@@ -368,10 +368,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
368 | /* 3165 Series */ | 368 | /* 3165 Series */ |
369 | {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, | 369 | {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, |
370 | {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, | 370 | {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, |
371 | {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, | ||
372 | {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)}, | ||
373 | {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, | 371 | {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, |
374 | {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, | 372 | {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, |
373 | {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, | ||
374 | {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, | ||
375 | {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, | ||
376 | {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, | ||
375 | 377 | ||
376 | /* 7265 Series */ | 378 | /* 7265 Series */ |
377 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, | 379 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, |
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index a62170ea0481..8c45cf44ce24 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c | |||
@@ -1124,12 +1124,22 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) | |||
1124 | /*This is for new trx flow*/ | 1124 | /*This is for new trx flow*/ |
1125 | struct rtl_tx_buffer_desc *pbuffer_desc = NULL; | 1125 | struct rtl_tx_buffer_desc *pbuffer_desc = NULL; |
1126 | u8 temp_one = 1; | 1126 | u8 temp_one = 1; |
1127 | u8 *entry; | ||
1127 | 1128 | ||
1128 | memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); | 1129 | memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); |
1129 | ring = &rtlpci->tx_ring[BEACON_QUEUE]; | 1130 | ring = &rtlpci->tx_ring[BEACON_QUEUE]; |
1130 | pskb = __skb_dequeue(&ring->queue); | 1131 | pskb = __skb_dequeue(&ring->queue); |
1131 | if (pskb) | 1132 | if (rtlpriv->use_new_trx_flow) |
1133 | entry = (u8 *)(&ring->buffer_desc[ring->idx]); | ||
1134 | else | ||
1135 | entry = (u8 *)(&ring->desc[ring->idx]); | ||
1136 | if (pskb) { | ||
1137 | pci_unmap_single(rtlpci->pdev, | ||
1138 | rtlpriv->cfg->ops->get_desc( | ||
1139 | (u8 *)entry, true, HW_DESC_TXBUFF_ADDR), | ||
1140 | pskb->len, PCI_DMA_TODEVICE); | ||
1132 | kfree_skb(pskb); | 1141 | kfree_skb(pskb); |
1142 | } | ||
1133 | 1143 | ||
1134 | /*NB: the beacon data buffer must be 32-bit aligned. */ | 1144 | /*NB: the beacon data buffer must be 32-bit aligned. */ |
1135 | pskb = ieee80211_beacon_get(hw, mac->vif); | 1145 | pskb = ieee80211_beacon_get(hw, mac->vif); |