Diffstat (limited to 'drivers/net/ethernet')
35 files changed, 2168 insertions, 178 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e30e2a2f354c..7de824184979 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -747,21 +747,6 @@ struct bnx2x_fastpath {
 
 #define ETH_RX_ERROR_FALGS	ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
 
-#define BNX2X_IP_CSUM_ERR(cqe) \
-	(!((cqe)->fast_path_cqe.status_flags & \
-	   ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
-	 ((cqe)->fast_path_cqe.type_error_flags & \
-	  ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
-
-#define BNX2X_L4_CSUM_ERR(cqe) \
-	(!((cqe)->fast_path_cqe.status_flags & \
-	   ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
-	 ((cqe)->fast_path_cqe.type_error_flags & \
-	  ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
-
-#define BNX2X_RX_CSUM_OK(cqe) \
-	(!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
-
 #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
 	(((le16_to_cpu(flags) & \
 	   PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ad0743bf4bde..8098eea9704d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -190,7 +190,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 
 	if ((netif_tx_queue_stopped(txq)) &&
 	    (bp->state == BNX2X_STATE_OPEN) &&
-	    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+	    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
 		netif_tx_wake_queue(txq);
 
 	__netif_tx_unlock(txq);
@@ -617,6 +617,25 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
 	return 0;
 }
 
+static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+				struct bnx2x_fastpath *fp)
+{
+	/* Do nothing if no IP/L4 csum validation was done */
+
+	if (cqe->fast_path_cqe.status_flags &
+	    (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+	     ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+		return;
+
+	/* If both IP/L4 validation were done, check if an error was found. */
+
+	if (cqe->fast_path_cqe.type_error_flags &
+	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+		fp->eth_q_stats.hw_csum_err++;
+	else
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
@@ -806,13 +825,9 @@ reuse_rx:
 
 		skb_checksum_none_assert(skb);
 
-		if (bp->dev->features & NETIF_F_RXCSUM) {
+		if (bp->dev->features & NETIF_F_RXCSUM)
+			bnx2x_csum_validate(skb, cqe, fp);
 
-			if (likely(BNX2X_RX_CSUM_OK(cqe)))
-				skb->ip_summed = CHECKSUM_UNNECESSARY;
-			else
-				fp->eth_q_stats.hw_csum_err++;
-		}
 
 		skb_record_rx_queue(skb, fp->rx_queue);
 
@@ -2501,8 +2516,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
 /* we split the first BD into headers and data BDs
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
- * So far this has only been observed to happen
- * in Other Operating Systems(TM)
  */
 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
 				   struct bnx2x_fp_txdata *txdata,
@@ -3156,7 +3169,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	txdata->tx_bd_prod += nbd;
 
-	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
 		netif_tx_stop_queue(txq);
 
 		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3165,7 +3178,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	smp_mb();
 
 	fp->eth_q_stats.driver_xoff++;
-	if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+	if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
 		netif_tx_wake_queue(txq);
 	}
 	txdata->tx_pkt++;
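The two bnx2x files above fold the old BNX2X_IP_CSUM_ERR/BNX2X_L4_CSUM_ERR/BNX2X_RX_CSUM_OK macro chain into one bnx2x_csum_validate() helper with three outcomes: leave the skb alone when hardware skipped validation, count a hardware checksum error when a bad-checksum bit is set, otherwise mark the skb CHECKSUM_UNNECESSARY. Below is a minimal, compilable userspace sketch of that decision ladder; the flag values, struct names and types are invented mocks for illustration, not the driver's real definitions.

/* Mock of the three-way RX checksum decision in bnx2x_csum_validate().
 * All constants and types here are invented for the sketch. */
#include <stdint.h>
#include <stdio.h>

#define IP_XSUM_NO_VALIDATION	0x1
#define L4_XSUM_NO_VALIDATION	0x2
#define IP_BAD_XSUM		0x4
#define L4_BAD_XSUM		0x8

enum csum_state { CHECKSUM_NONE, CHECKSUM_UNNECESSARY };

struct mock_skb   { enum csum_state ip_summed; };
struct mock_stats { unsigned long hw_csum_err; };

static void csum_validate(struct mock_skb *skb, uint16_t status_flags,
			  uint16_t type_error_flags, struct mock_stats *stats)
{
	/* HW skipped validation: leave CHECKSUM_NONE so the stack
	 * verifies the checksum in software. */
	if (status_flags & (IP_XSUM_NO_VALIDATION | L4_XSUM_NO_VALIDATION))
		return;

	/* HW validated both headers: either count the error or tell the
	 * stack the checksum is already verified. */
	if (type_error_flags & (IP_BAD_XSUM | L4_BAD_XSUM))
		stats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

int main(void)
{
	struct mock_skb skb = { CHECKSUM_NONE };
	struct mock_stats stats = { 0 };

	csum_validate(&skb, 0, 0, &stats);		/* validated, clean */
	printf("clean frame: ip_summed=%d\n", skb.ip_summed);

	skb.ip_summed = CHECKSUM_NONE;
	csum_validate(&skb, 0, L4_BAD_XSUM, &stats);	/* validated, bad L4 */
	printf("bad frame: hw_csum_err=%lu\n", stats.hw_csum_err);
	return 0;
}

A function with early returns reads more clearly than the old negated-macro combination, and it also fixes the semantics: the macros treated "validation skipped" as "checksum OK", while the helper leaves such frames for software checksumming.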
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index a3fb7215cd89..6e7d5c0843b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -40,6 +40,7 @@
 #define I2C_BSC0			0
 #define I2C_BSC1			1
 #define I2C_WA_RETRY_CNT		3
+#define I2C_WA_PWR_ITER			(I2C_WA_RETRY_CNT - 1)
 #define MCPR_IMC_COMMAND_READ_OP	1
 #define MCPR_IMC_COMMAND_WRITE_OP	2
 
@@ -7659,6 +7660,28 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 	return -EINVAL;
 }
 
+static void bnx2x_warpcore_power_module(struct link_params *params,
+					struct bnx2x_phy *phy,
+					u8 power)
+{
+	u32 pin_cfg;
+	struct bnx2x *bp = params->bp;
+
+	pin_cfg = (REG_RD(bp, params->shmem_base +
+			  offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+			PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+			PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+	if (pin_cfg == PIN_CFG_NA)
+		return;
+	DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+	   power, pin_cfg);
+	/* Low ==> corresponding SFP+ module is powered
+	 * high ==> the SFP+ module is powered down
+	 */
+	bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
 static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 						 struct link_params *params,
 						 u16 addr, u8 byte_cnt,
@@ -7678,6 +7701,12 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 	/* 4 byte aligned address */
 	addr32 = addr & (~0x3);
 	do {
+		if (cnt == I2C_WA_PWR_ITER) {
+			bnx2x_warpcore_power_module(params, phy, 0);
+			/* Note that 100us are not enough here */
+			usleep_range(1000,1000);
+			bnx2x_warpcore_power_module(params, phy, 1);
+		}
 		rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
 				    data_array);
 	} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -8200,29 +8229,6 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
 		bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
 }
 
-static void bnx2x_warpcore_power_module(struct link_params *params,
-					struct bnx2x_phy *phy,
-					u8 power)
-{
-	u32 pin_cfg;
-	struct bnx2x *bp = params->bp;
-
-	pin_cfg = (REG_RD(bp, params->shmem_base +
-			  offsetof(struct shmem_region,
-			dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
-			PORT_HW_CFG_E3_PWR_DIS_MASK) >>
-			PORT_HW_CFG_E3_PWR_DIS_SHIFT;
-
-	if (pin_cfg == PIN_CFG_NA)
-		return;
-	DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
-	   power, pin_cfg);
-	/* Low ==> corresponding SFP+ module is powered
-	 * high ==> the SFP+ module is powered down
-	 */
-	bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
-}
-
 static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
 				    struct link_params *params)
 {
@@ -9748,7 +9754,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
 	msleep(1);
 
-	if (!(CHIP_IS_E1(bp)))
+	if (!(CHIP_IS_E1x(bp)))
 		port = BP_PATH(bp);
 	else
 		port = params->port;
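The EEPROM-read hunk above wraps a flaky I2C transfer in a bounded retry loop and, just before the last attempt (I2C_WA_PWR_ITER), power-cycles the SFP+ module as a recovery step. The following standalone sketch shows the same retry-with-recovery shape; the mock bus, the failure counter and the delay are invented stand-ins, not the driver's hardware access.

/* Userspace mock of a bounded retry loop that performs a recovery
 * action (here: a power cycle) right before the final attempt. */
#include <stdio.h>
#include <unistd.h>

#define RETRY_CNT 3
#define PWR_ITER  (RETRY_CNT - 1)	/* recover before the last try */

static int attempts_until_success = 3;	/* pretend the bus is wedged */

static void power_module(int on)
{
	printf("module power %s\n", on ? "on" : "off");
}

static int bus_read(void)
{
	/* fails until the simulated power cycle "unwedges" it */
	return --attempts_until_success > 0 ? -1 : 0;
}

int main(void)
{
	int rc, cnt = 0;

	do {
		if (cnt == PWR_ITER) {
			power_module(0);
			usleep(1000);	/* the driver notes 100us is too short */
			power_module(1);
		}
		rc = bus_read();
	} while (rc != 0 && ++cnt < RETRY_CNT);

	printf("rc=%d after %d retries\n", rc, cnt);
	return rc;
}

Deriving PWR_ITER from RETRY_CNT, as the patch does, keeps the "recover on the last try" invariant intact if the retry budget is ever tuned.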
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index edeeb516807a..e47ff8be1d7b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
 		}
 	}
 
-	if (tg3_flag(tp, 5755_PLUS))
+	if (tg3_flag(tp, 5755_PLUS) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 		tg3_flag_set(tp, SHORT_DMA_BUG);
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8d06ea381741..921c2082af4c 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -122,15 +122,15 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 			goto done;
 
 		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
-			dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
-				 "permitted to execute this cmd (opcode %d)\n",
-				 opcode);
+			dev_warn(&adapter->pdev->dev,
+				 "opcode %d-%d is not permitted\n",
+				 opcode, subsystem);
 		} else {
 			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
 					CQE_STATUS_EXTD_MASK;
-			dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
-				"status %d, extd-status %d\n",
-				opcode, compl_status, extd_status);
+			dev_err(&adapter->pdev->dev,
+				"opcode %d-%d failed:status %d-%d\n",
+				opcode, subsystem, compl_status, extd_status);
 		}
 	}
done:
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 9625bf420c16..b3f3fc3d1323 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1566,7 +1566,7 @@ struct be_hw_stats_v1 {
 	u32 rsvd0[BE_TXP_SW_SZ];
 	struct be_erx_stats_v1 erx;
 	struct be_pmem_stats pmem;
-	u32 rsvd1[3];
+	u32 rsvd1[18];
 };
 
 struct be_cmd_req_get_stats_v1 {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08efd308d78a..501dfa9c88ec 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -736,6 +736,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
 	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
 	if (copied) {
+		int gso_segs = skb_shinfo(skb)->gso_segs;
+
 		/* record the sent skb in the sent_skb table */
 		BUG_ON(txo->sent_skb_list[start]);
 		txo->sent_skb_list[start] = skb;
@@ -753,8 +755,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 
 		be_txq_notify(adapter, txq->id, wrb_cnt);
 
-		be_tx_stats_update(txo, wrb_cnt, copied,
-				skb_shinfo(skb)->gso_segs, stopped);
+		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
 	} else {
 		txq->head = start;
 		dev_kfree_skb_any(skb);
@@ -3236,7 +3237,7 @@ static void be_netdev_init(struct net_device *netdev)
 
 	netdev->flags |= IFF_MULTICAST;
 
-	netif_set_gso_max_size(netdev, 65535);
+	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
 
 	netdev->netdev_ops = &be_netdev_ops;
 
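The first two be_main.c hunks fix a use-after-free: once be_txq_notify() rings the doorbell, the TX completion path may free the skb, so skb_shinfo(skb)->gso_segs is snapshotted into a local before the notify and the stats update uses the copy. A compilable userspace sketch of that ordering follows; the types and the "hardware completes synchronously" model are invented for illustration.

/* Mock of snapshotting a field before handing the object to a path
 * that may free it. */
#include <stdio.h>
#include <stdlib.h>

struct mock_skb { int gso_segs; };

static struct mock_skb *pending;	/* what "hardware" will complete */

static void txq_notify(void)
{
	/* the completion can run here and free the skb */
	free(pending);
	pending = NULL;
}

int main(void)
{
	struct mock_skb *skb = malloc(sizeof(*skb));

	skb->gso_segs = 4;
	pending = skb;

	int gso_segs = skb->gso_segs;	/* snapshot before notify */
	txq_notify();
	/* skb may already be gone; use the snapshot, never skb->gso_segs */
	printf("tx stats: gso_segs=%d\n", gso_segs);
	return 0;
}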
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 79b07ec6726f..0cafe4fe9406 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -122,8 +122,10 @@ config IGB_DCA
 
 config IGB_PTP
 	bool "PTP Hardware Clock (PHC)"
-	default y
-	depends on IGB && PTP_1588_CLOCK
+	default n
+	depends on IGB && EXPERIMENTAL
+	select PPS
+	select PTP_1588_CLOCK
 	---help---
 	  Say Y here if you want to use PTP Hardware Clock (PHC) in the
 	  driver. Only the basic clock operations have been implemented.
@@ -223,7 +225,9 @@ config IXGBE_DCB
 config IXGBE_PTP
 	bool "PTP Clock Support"
 	default n
-	depends on IXGBE && PTP_1588_CLOCK
+	depends on IXGBE && EXPERIMENTAL
+	select PPS
+	select PTP_1588_CLOCK
 	---help---
 	  Say Y here if you want support for 1588 Timestamping with a
 	  PHC device, using the PTP 1588 Clock support. This is
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d863075df7a4..905e2147d918 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev,
 	 * When SoL/IDER sessions are active, autoneg/speed/duplex
 	 * cannot be changed
 	 */
-	if (hw->phy.ops.check_reset_block(hw)) {
+	if (hw->phy.ops.check_reset_block &&
+	    hw->phy.ops.check_reset_block(hw)) {
 		e_err("Cannot change link characteristics when SoL/IDER is active.\n");
 		return -EINVAL;
 	}
@@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
 	 * PHY loopback cannot be performed if SoL/IDER
 	 * sessions are active
 	 */
-	if (hw->phy.ops.check_reset_block(hw)) {
+	if (hw->phy.ops.check_reset_block &&
+	    hw->phy.ops.check_reset_block(hw)) {
 		e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
 		*data = 0;
 		goto out;
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 026e8b3ab52e..a13439928488 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
 	 * In the case of the phy reset being blocked, we already have a link.
 	 * We do not need to set it up again.
 	 */
-	if (hw->phy.ops.check_reset_block(hw))
+	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
 		return 0;
 
 	/*
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a4b0435b00dc..31d37a2b5ba8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6237,7 +6237,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 		adapter->hw.phy.ms_type = e1000_ms_hw_default;
 	}
 
-	if (hw->phy.ops.check_reset_block(hw))
+	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
 		e_info("PHY reset is blocked due to SOL/IDER session.\n");
 
 	/* Set initial default active device features */
@@ -6404,7 +6404,7 @@ err_register:
 	if (!(adapter->flags & FLAG_HAS_AMT))
 		e1000e_release_hw_control(adapter);
 err_eeprom:
-	if (!hw->phy.ops.check_reset_block(hw))
+	if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
 		e1000_phy_hw_reset(&adapter->hw);
 err_hw_init:
 	kfree(adapter->tx_ring);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0334d013bc3c..b860d4f7ea2a 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
 	s32 ret_val;
 	u32 ctrl;
 
-	ret_val = phy->ops.check_reset_block(hw);
-	if (ret_val)
-		return 0;
+	if (phy->ops.check_reset_block) {
+		ret_val = phy->ops.check_reset_block(hw);
+		if (ret_val)
+			return 0;
+	}
 
 	ret_val = phy->ops.acquire(hw);
 	if (ret_val)
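All four e1000e hunks apply the same fix: phy.ops.check_reset_block may legitimately be a NULL pointer on some parts, so every call site now tests the pointer before calling through it. A small standalone sketch of that optional-hook pattern is below; the ops table and types are mocks invented for illustration, not the driver's real structures.

/* Mock of guarding an optional ops-table callback: NULL means
 * "feature absent", not a crash. */
#include <stdbool.h>
#include <stdio.h>

struct hw;
struct phy_ops {
	int (*check_reset_block)(struct hw *hw);	/* may be NULL */
};
struct hw { struct phy_ops phy_ops; };

static bool reset_is_blocked(struct hw *hw)
{
	/* short-circuit: only call the hook if it exists */
	return hw->phy_ops.check_reset_block &&
	       hw->phy_ops.check_reset_block(hw);
}

static int always_blocked(struct hw *hw) { (void)hw; return 1; }

int main(void)
{
	struct hw no_hook = { { NULL } };
	struct hw hooked  = { { always_blocked } };

	printf("no hook:   blocked=%d\n", reset_is_blocked(&no_hook));
	printf("with hook: blocked=%d\n", reset_is_blocked(&hooked));
	return 0;
}

Note the err_eeprom site inverts the sense: a missing hook there means "reset is not blocked", so the guard is &&-combined with the negated call rather than replacing it.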
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index e65083958421..5e84eaac48c1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -206,8 +206,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
 		break;
 	case e1000_i350:
-	case e1000_i210:
-	case e1000_i211:
 		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
 		break;
 	default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 3ef3c5284e52..7af291e236bf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -196,7 +196,7 @@ enum ixgbe_ring_state_t {
 	__IXGBE_HANG_CHECK_ARMED,
 	__IXGBE_RX_RSC_ENABLED,
 	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
-	__IXGBE_RX_FCOE_BUFSZ,
+	__IXGBE_RX_FCOE,
 };
 
 #define check_for_tx_hang(ring) \
@@ -290,7 +290,7 @@ struct ixgbe_ring_feature {
 #if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 {
-	return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+	return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
}
 #else
 #define ixgbe_rx_pg_order(_ring) 0
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index af1a5314b494..c377706e81a8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -634,7 +634,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 			f = &adapter->ring_feature[RING_F_FCOE];
 			if ((rxr_idx >= f->mask) &&
 			    (rxr_idx < f->mask + f->indices))
-				set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
+				set_bit(__IXGBE_RX_FCOE, &ring->state);
 		}
 
 #endif /* IXGBE_FCOE */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bf20457ea23a..18ca3bcadf0c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1058,17 +1058,17 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
 #ifdef IXGBE_FCOE
 /**
  * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
  * @rx_desc: advanced rx descriptor
  *
  * Returns : true if it is FCoE pkt
  */
-static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
 				    union ixgbe_adv_rx_desc *rx_desc)
 {
 	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
 
-	return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+	return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
 	       ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
 		(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
 			     IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
@@ -1148,7 +1148,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 
 	/* alloc new page for storage */
 	if (likely(!page)) {
-		page = alloc_pages(GFP_ATOMIC | __GFP_COLD,
+		page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
 				   ixgbe_rx_pg_order(rx_ring));
 		if (unlikely(!page)) {
 			rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -1390,6 +1390,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 				     union ixgbe_adv_rx_desc *rx_desc,
 				     struct sk_buff *skb)
 {
+	struct net_device *dev = rx_ring->netdev;
+
 	ixgbe_update_rsc_stats(rx_ring, skb);
 
 	ixgbe_rx_hash(rx_ring, rx_desc, skb);
@@ -1401,14 +1403,15 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 		ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
 #endif
 
-	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
+	if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
 		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
 		__vlan_hwaccel_put_tag(skb, vid);
 	}
 
 	skb_record_rx_queue(skb, rx_ring->queue_index);
 
-	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+	skb->protocol = eth_type_trans(skb, dev);
 }
 
 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1546,6 +1549,12 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
 		skb->truesize -= ixgbe_rx_bufsz(rx_ring);
 	}
 
+#ifdef IXGBE_FCOE
+	/* do not attempt to pad FCoE Frames as this will disrupt DDP */
+	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
+		return false;
+
+#endif
 	/* if skb_pad returns an error the skb was freed */
 	if (unlikely(skb->len < 60)) {
 		int pad_len = 60 - skb->len;
@@ -1772,7 +1781,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
 #ifdef IXGBE_FCOE
 		/* if ddp, not passing to ULD unless for FCP_RSP or error */
-		if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+		if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
 			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
 			if (!ddp_bytes) {
 				dev_kfree_skb_any(skb);
@@ -3607,10 +3616,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		netif_set_gso_max_size(adapter->netdev, 32768);
 
-
-	/* Enable VLAN tag insert/strip */
-	adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
-
 	hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
 
 #ifdef IXGBE_FCOE
@@ -6701,11 +6706,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-#ifdef CONFIG_DCB
-	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
-		features &= ~NETIF_F_HW_VLAN_RX;
-#endif
-
 	/* return error if RXHASH is being enabled when RSS is not supported */
 	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
 		features &= ~NETIF_F_RXHASH;
@@ -6718,7 +6718,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
 	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
 		features &= ~NETIF_F_LRO;
 
-
 	return features;
 }
 
@@ -6766,6 +6765,11 @@ static int ixgbe_set_features(struct net_device *netdev,
 		need_reset = true;
 	}
 
+	if (features & NETIF_F_HW_VLAN_RX)
+		ixgbe_vlan_strip_enable(adapter);
+	else
+		ixgbe_vlan_strip_disable(adapter);
+
 	if (changed & NETIF_F_RXALL)
 		need_reset = true;
 
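Several of the ixgbe hunks above (together with the ixgbe.h and ixgbe_lib.c changes) replace an adapter-wide IXGBE_FLAG_FCOE_ENABLED test with a per-ring __IXGBE_RX_FCOE state bit, so FCoE-specific handling fires only on the rings actually assigned to FCoE. The sketch below mimics that with plain C stand-ins for the kernel's set_bit()/test_bit(); the bit names and ring layout are invented for the example.

/* Mock of per-ring state bits replacing a device-global flag. */
#include <stdbool.h>
#include <stdio.h>

enum ring_state_bits { RX_RSC_ENABLED, RX_FCOE };	/* invented subset */

struct ring { unsigned long state; };

static void ring_set_bit(int bit, struct ring *r)
{
	r->state |= 1UL << bit;
}

static bool ring_test_bit(int bit, const struct ring *r)
{
	return r->state & (1UL << bit);
}

int main(void)
{
	struct ring rings[4] = { {0}, {0}, {0}, {0} };

	/* only rings 2..3 belong to the FCoE range in this mock */
	for (int i = 2; i < 4; i++)
		ring_set_bit(RX_FCOE, &rings[i]);

	for (int i = 0; i < 4; i++)
		printf("ring %d fcoe=%d\n", i,
		       ring_test_bit(RX_FCOE, &rings[i]));
	return 0;
}

Keying off the ring also lets helpers like ixgbe_rx_is_fcoe() take only the ring pointer, which is exactly the signature change the hunks make.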
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ddc6a4d19302..dcebd128becf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -708,6 +708,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 incval = 0;
+	u32 timinca = 0;
 	u32 shift = 0;
 	u32 cycle_speed;
 	unsigned long flags;
@@ -730,8 +731,16 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 		break;
 	}
 
-	/* Bail if the cycle speed didn't change */
-	if (adapter->cycle_speed == cycle_speed)
+	/*
+	 * grab the current TIMINCA value from the register so that it can be
+	 * double checked. If the register value has been cleared, it must be
+	 * reset to the correct value for generating a cyclecounter. If
+	 * TIMINCA is zero, the SYSTIME registers do not increment at all.
+	 */
+	timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
+
+	/* Bail if the cycle speed didn't change and TIMINCA is non-zero */
+	if (adapter->cycle_speed == cycle_speed && timinca)
 		return;
 
 	/* disable the SDP clock out */
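The ixgbe_ptp hunk makes an early-bail path double-check hardware state: "speed unchanged" is only a safe reason to skip reprogramming if the TIMINCA increment register is still non-zero, since a reset can clear it and freeze SYSTIME. A tiny sketch of that cached-state-plus-register recheck, with the register mocked as a plain variable:

/* Mock of re-validating hardware state before trusting a cached value. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int hw_timinca;		/* stands in for the TIMINCA register */
static unsigned int cached_speed;

static bool needs_reprogram(unsigned int speed)
{
	/* bail only if nothing changed AND the register survived */
	if (cached_speed == speed && hw_timinca != 0)
		return false;
	return true;
}

int main(void)
{
	cached_speed = 10000;

	hw_timinca = 0;			/* e.g. cleared by a device reset */
	printf("reprogram=%d\n", needs_reprogram(10000));	/* 1 */

	hw_timinca = 0x800000;		/* invented non-zero increment */
	printf("reprogram=%d\n", needs_reprogram(10000));	/* 0 */
	return 0;
}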
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 04d901d0ff63..f0f06b2bc28b 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -436,7 +436,9 @@ struct mv643xx_eth_private {
 	/*
 	 * Hardware-specific parameters.
 	 */
+#if defined(CONFIG_HAVE_CLK)
 	struct clk *clk;
+#endif
 	unsigned int t_clk;
 };
 
@@ -2895,17 +2897,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	mp->dev = dev;
 
 	/*
-	 * Get the clk rate, if there is one, otherwise use the default.
+	 * Start with a default rate, and if there is a clock, allow
+	 * it to override the default.
 	 */
+	mp->t_clk = 133000000;
+#if defined(CONFIG_HAVE_CLK)
 	mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
 	if (!IS_ERR(mp->clk)) {
 		clk_prepare_enable(mp->clk);
 		mp->t_clk = clk_get_rate(mp->clk);
-	} else {
-		mp->t_clk = 133000000;
-		printk(KERN_WARNING "Unable to get clock");
 	}
-
+#endif
 	set_params(mp, pd);
 	netif_set_real_num_tx_queues(dev, mp->txq_count);
 	netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2995,10 +2997,13 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
 	phy_detach(mp->phy);
 	cancel_work_sync(&mp->tx_timeout_task);
 
+#if defined(CONFIG_HAVE_CLK)
 	if (!IS_ERR(mp->clk)) {
 		clk_disable_unprepare(mp->clk);
 		clk_put(mp->clk);
 	}
+#endif
+
 	free_netdev(mp->dev);
 
 	platform_set_drvdata(pdev, NULL);
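The mv643xx probe hunk reorders clock handling into "set a default first, then let a clock, if present, override it", which also lets the whole clk block compile out on !CONFIG_HAVE_CLK kernels. A compilable userspace sketch of that default-then-override ordering; the clk API here is a mock standing in for <linux/clk.h>, with NULL playing the role of IS_ERR():

/* Mock of "default first, optional provider overrides". */
#include <stdio.h>

struct clk { unsigned long rate; };

static struct clk *clk_get_mock(int available)
{
	static struct clk c = { 250000000 };	/* invented platform rate */
	return available ? &c : NULL;
}

static unsigned long probe_t_clk(int have_clk)
{
	unsigned long t_clk = 133000000;	/* safe default first */
	struct clk *clk = clk_get_mock(have_clk);

	if (clk)				/* override only if present */
		t_clk = clk->rate;
	return t_clk;
}

int main(void)
{
	printf("no clk:   t_clk=%lu\n", probe_t_clk(0));
	printf("with clk: t_clk=%lu\n", probe_t_clk(1));
	return 0;
}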
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cace36f2ab92..28a54451a3e5 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
 	struct sky2_port *sky2 = netdev_priv(dev);
 	netdev_features_t changed = dev->features ^ features;
 
-	if (changed & NETIF_F_RXCSUM) {
-		bool on = features & NETIF_F_RXCSUM;
-		sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
-			     on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+	if ((changed & NETIF_F_RXCSUM) &&
+	    !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
+		sky2_write32(sky2->hw,
+			     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+			     (features & NETIF_F_RXCSUM)
+			     ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
 	}
 
 	if (changed & NETIF_F_RXHASH)
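The sky2 hunk keeps the standard ndo_set_features idiom: XOR the current and requested feature masks to get exactly the bits that changed, then act only on those (here additionally skipping the BMU write on hardware with the new list-element format). A minimal sketch of that changed-bits pattern; the feature constants are invented for the mock.

/* Mock of the netdev feature-toggle idiom: act only on changed bits. */
#include <stdint.h>
#include <stdio.h>

#define F_RXCSUM (1u << 0)
#define F_RXHASH (1u << 1)

static void set_features(uint32_t *dev_features, uint32_t wanted)
{
	uint32_t changed = *dev_features ^ wanted;	/* bits that flip */

	if (changed & F_RXCSUM)
		printf("rx checksum now %s\n",
		       (wanted & F_RXCSUM) ? "on" : "off");
	if (changed & F_RXHASH)
		printf("rx hashing now %s\n",
		       (wanted & F_RXHASH) ? "on" : "off");

	*dev_features = wanted;
}

int main(void)
{
	uint32_t features = F_RXCSUM;		/* csum on, hash off */

	set_features(&features, F_RXHASH);	/* flips both bits */
	set_features(&features, F_RXHASH);	/* no change: prints nothing */
	return 0;
}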
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 926d8aac941c..073b85b45fc5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -929,15 +929,20 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 		if (priv->rx_cq[i].buf)
 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
 	}
+
+	if (priv->base_tx_qpn) {
+		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
+		priv->base_tx_qpn = 0;
+	}
 }
 
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
-	int base_tx_qpn, err;
+	int err;
 
-	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
 	if (err) {
 		en_err(priv, "failed reserving range for TX rings\n");
 		return err;
@@ -949,7 +954,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 				      prof->tx_ring_size, i, TX))
 			goto err;
 
-		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
+		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
 					   prof->tx_ring_size, TXBB_SIZE))
 			goto err;
 	}
@@ -969,7 +974,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
-	mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
 	return -ENOMEM;
 }
 
@@ -1204,9 +1208,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
 	/* Configure port */
+	mlx4_en_calc_rx_buf(dev);
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
-				    MLX4_EN_MIN_MTU,
-				    0, 0, 0, 0);
+				    priv->rx_skb_size + ETH_FCS_LEN,
+				    prof->tx_pause, prof->tx_ppp,
+				    prof->rx_pause, prof->rx_ppp);
 	if (err) {
 		en_err(priv, "Failed setting port general configurations "
 		       "for port %d, with error %d\n", priv->port, err);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index ee6f4fe00837..a0313de122de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1975,6 +1975,8 @@ slave_start:
 		if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
 		    !mlx4_is_mfunc(dev)) {
 			dev->flags &= ~MLX4_FLAG_MSI_X;
+			dev->caps.num_comp_vectors = 1;
+			dev->caps.comp_pool	   = 0;
 			pci_disable_msix(pdev);
 			err = mlx4_setup_hca(dev);
 		}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 6ae350921b1a..225c20d47900 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -495,6 +495,7 @@ struct mlx4_en_priv {
 	int vids[128];
 	bool wol;
 	struct device *ddev;
+	int base_tx_qpn;
 
 #ifdef CONFIG_MLX4_EN_DCB
 	struct ieee_ets ets;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8d2666fcffd7..083d6715335c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -946,16 +946,16 @@ static void __lpc_handle_xmit(struct net_device *ndev)
 			/* Update stats */
 			ndev->stats.tx_packets++;
 			ndev->stats.tx_bytes += skb->len;
-
-			/* Free buffer */
-			dev_kfree_skb_irq(skb);
 		}
+		dev_kfree_skb_irq(skb);
 
 		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
 	}
 
-	if (netif_queue_stopped(ndev))
-		netif_wake_queue(ndev);
+	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
+		if (netif_queue_stopped(ndev))
+			netif_wake_queue(ndev);
+	}
 }
 
 static int __lpc_handle_recv(struct net_device *ndev, int budget)
@@ -1320,6 +1320,7 @@ static const struct net_device_ops lpc_netdev_ops = {
 	.ndo_set_rx_mode	= lpc_eth_set_multicast_list,
 	.ndo_do_ioctl		= lpc_eth_ioctl,
 	.ndo_set_mac_address	= lpc_set_mac_address,
+	.ndo_change_mtu		= eth_change_mtu,
 };
 
 static int lpc_eth_drv_probe(struct platform_device *pdev)
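The lpc_eth TX-completion hunk adds wake hysteresis: instead of waking the queue after every completion pass, the driver waits until at least half the TX descriptors are free, which avoids stop/wake thrash under load. A small runnable sketch of that threshold; the descriptor count and queue API are invented mocks.

/* Mock of waking a stopped TX queue only once enough descriptors free. */
#include <stdbool.h>
#include <stdio.h>

#define TX_DESC 64

static bool queue_stopped = true;

static void tx_complete(int *used)
{
	if (*used > 0)
		(*used)--;			/* one descriptor reclaimed */

	/* hysteresis: wake only at <= half occupancy, and only if stopped */
	if (*used <= TX_DESC / 2 && queue_stopped) {
		queue_stopped = false;
		printf("queue woken at %d used\n", *used);
	}
}

int main(void)
{
	int used = TX_DESC;			/* ring starts full */

	while (used > 0)
		tx_complete(&used);
	return 0;
}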
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 46e77a2c5121..ad98f4d7919d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -479,7 +479,7 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 
 	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
 		pfn = pci_info[i].id;
-		if (pfn > QLCNIC_MAX_PCI_FUNC) {
+		if (pfn >= QLCNIC_MAX_PCI_FUNC) {
 			ret = QL_STATUS_INVALID_PARAM;
 			goto err_eswitch;
 		}
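The qlcnic hunk is a classic off-by-one bounds fix: an index into an array of QLCNIC_MAX_PCI_FUNC entries must be rejected when it equals the size, not only when it exceeds it. A one-screen illustration with an invented size constant:

/* Mock of the >= vs > bounds check. */
#include <stdio.h>

#define MAX_PCI_FUNC 8			/* array has slots 0..7 */

static int validate_pfn(int pfn)
{
	if (pfn >= MAX_PCI_FUNC)	/* '>' would let pfn == 8 through */
		return -1;
	return 0;
}

int main(void)
{
	printf("pfn 7: %d\n", validate_pfn(7));	/* last valid slot */
	printf("pfn 8: %d\n", validate_pfn(8));	/* one past the end */
	return 0;
}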
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 9757ce3543a0..d7a04e091101 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3894,6 +3894,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
 	case RTL_GIGA_MAC_VER_22:
 	case RTL_GIGA_MAC_VER_23:
 	case RTL_GIGA_MAC_VER_24:
+	case RTL_GIGA_MAC_VER_34:
 		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
 		break;
 	default:
@@ -5889,11 +5890,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
 	if (status & LinkChg)
 		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
 
-	napi_disable(&tp->napi);
-	rtl_irq_disable(tp);
-
-	napi_enable(&tp->napi);
-	napi_schedule(&tp->napi);
+	rtl_irq_enable_all(tp);
 }
 
 static void rtl_task(struct work_struct *work)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 667169b82526..79bf09b41971 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1011,7 +1011,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 }
 
 /* Packet receive function */
-static int sh_eth_rx(struct net_device *ndev)
+static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	struct sh_eth_rxdesc *rxdesc;
@@ -1102,9 +1102,11 @@ static int sh_eth_rx(struct net_device *ndev)
 	/* Restart Rx engine if stopped. */
 	/* If we don't need to check status, don't. -KDU */
 	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
-		/* fix the values for the next receiving */
-		mdp->cur_rx = mdp->dirty_rx = (sh_eth_read(ndev, RDFAR) -
-					       sh_eth_read(ndev, RDLAR)) >> 4;
+		/* fix the values for the next receiving if RDE is set */
+		if (intr_status & EESR_RDE)
+			mdp->cur_rx = mdp->dirty_rx =
+				(sh_eth_read(ndev, RDFAR) -
+				 sh_eth_read(ndev, RDLAR)) >> 4;
 		sh_eth_write(ndev, EDRRR_R, EDRRR);
 	}
 
@@ -1273,7 +1275,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 			EESR_RTSF | /* short frame recv */
 			EESR_PRE | /* PHY-LSI recv error */
 			EESR_CERF)){ /* recv frame CRC error */
-		sh_eth_rx(ndev);
+		sh_eth_rx(ndev, intr_status);
 	}
 
 	/* Tx Check */
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 036428348faa..9f448279e12a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -13,7 +13,7 @@ config STMMAC_ETH
 if STMMAC_ETH
 
 config STMMAC_PLATFORM
-	tristate "STMMAC platform bus support"
+	bool "STMMAC Platform bus support"
 	depends on STMMAC_ETH
 	default y
 	---help---
@@ -26,7 +26,7 @@ config STMMAC_PLATFORM
 	  If unsure, say N.
 
 config STMMAC_PCI
-	tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+	bool "STMMAC PCI bus support (EXPERIMENTAL)"
 	depends on STMMAC_ETH && PCI && EXPERIMENTAL
 	---help---
 	  This is to select the Synopsys DWMAC available on PCI devices,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 6b5d060ee9de..dc20c56efc9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -26,6 +26,7 @@
 #include <linux/clk.h>
 #include <linux/stmmac.h>
 #include <linux/phy.h>
+#include <linux/pci.h>
 #include "common.h"
 #ifdef CONFIG_STMMAC_TIMER
 #include "stmmac_timer.h"
@@ -95,7 +96,6 @@ extern int stmmac_mdio_register(struct net_device *ndev);
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
-
 int stmmac_freeze(struct net_device *ndev);
 int stmmac_restore(struct net_device *ndev);
 int stmmac_resume(struct net_device *ndev);
@@ -109,7 +109,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 static inline int stmmac_clk_enable(struct stmmac_priv *priv)
 {
 	if (!IS_ERR(priv->stmmac_clk))
-		return clk_enable(priv->stmmac_clk);
+		return clk_prepare_enable(priv->stmmac_clk);
 
 	return 0;
 }
@@ -119,7 +119,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv)
 	if (IS_ERR(priv->stmmac_clk))
 		return;
 
-	clk_disable(priv->stmmac_clk);
+	clk_disable_unprepare(priv->stmmac_clk);
 }
 static inline int stmmac_clk_get(struct stmmac_priv *priv)
 {
@@ -143,3 +143,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv)
 	return 0;
 }
 #endif /* CONFIG_HAVE_CLK */
+
+
+#ifdef CONFIG_STMMAC_PLATFORM
+extern struct platform_driver stmmac_pltfr_driver;
+static inline int stmmac_register_platform(void)
+{
+	int err;
+
+	err = platform_driver_register(&stmmac_pltfr_driver);
+	if (err)
+		pr_err("stmmac: failed to register the platform driver\n");
+
+	return err;
+}
+static inline void stmmac_unregister_platform(void)
+{
+	platform_driver_register(&stmmac_pltfr_driver);
+}
+#else
+static inline int stmmac_register_platform(void)
+{
+	pr_debug("stmmac: do not register the platf driver\n");
+
+	return -EINVAL;
+}
+static inline void stmmac_unregister_platform(void)
+{
+}
+#endif /* CONFIG_STMMAC_PLATFORM */
+
+#ifdef CONFIG_STMMAC_PCI
+extern struct pci_driver stmmac_pci_driver;
+static inline int stmmac_register_pci(void)
+{
+	int err;
+
+	err = pci_register_driver(&stmmac_pci_driver);
+	if (err)
+		pr_err("stmmac: failed to register the PCI driver\n");
+
+	return err;
+}
+static inline void stmmac_unregister_pci(void)
+{
+	pci_unregister_driver(&stmmac_pci_driver);
+}
+#else
+static inline int stmmac_register_pci(void)
+{
+	pr_debug("stmmac: do not register the PCI driver\n");
+
+	return -EINVAL;
+}
+static inline void stmmac_unregister_pci(void)
+{
+}
+#endif /* CONFIG_STMMAC_PCI */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 70966330f44e..51b3b68528ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -833,8 +833,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
 
 /**
  * stmmac_selec_desc_mode
- * @dev : device pointer
- * Description: select the Enhanced/Alternate or Normal descriptors */
+ * @priv : private structure
+ * Description: select the Enhanced/Alternate or Normal descriptors
+ */
 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
 {
 	if (priv->plat->enh_desc) {
@@ -1861,6 +1862,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 /**
  * stmmac_dvr_probe
  * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
  * Description: this is the main probe function used to
  * call the alloc_etherdev, allocate the priv structure.
  */
@@ -2090,6 +2093,34 @@ int stmmac_restore(struct net_device *ndev)
 }
 #endif /* CONFIG_PM */
 
+/* Driver can be configured w/ and w/ both PCI and Platf drivers
+ * depending on the configuration selected.
+ */
+static int __init stmmac_init(void)
+{
+	int err_plt = 0;
+	int err_pci = 0;
+
+	err_plt = stmmac_register_platform();
+	err_pci = stmmac_register_pci();
+
+	if ((err_pci) && (err_plt)) {
+		pr_err("stmmac: driver registration failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+	stmmac_unregister_platform();
+	stmmac_unregister_pci();
+}
+
+module_init(stmmac_init);
+module_exit(stmmac_exit);
+
 #ifndef MODULE
 static int __init stmmac_cmdline_opt(char *str)
 {
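Together with the stmmac.h and Kconfig changes, this hunk merges two separate module entry points into one: stmmac_init() tries both bus glues and fails only when neither registers, with compiled-out glues reduced to inline stubs returning -EINVAL. A compilable userspace sketch of that "succeed if at least one backend registers" logic; the register functions are mocks standing in for the platform/PCI stubs.

/* Mock of a combined init that tolerates one missing backend. */
#include <stdio.h>

static int register_platform(void) { return 0;   }	/* pretend it works */
static int register_pci(void)      { return -22; }	/* pretend no PCI glue */

static int combined_init(void)
{
	int err_plt = register_platform();
	int err_pci = register_pci();

	/* fail only when BOTH backends refused to register */
	if (err_pci && err_plt) {
		fprintf(stderr, "driver registration failed\n");
		return -22;
	}
	return 0;	/* at least one bus glue is live */
}

int main(void)
{
	printf("init: %d\n", combined_init());
	return 0;
}

The corresponding exit path unconditionally calls both unregister stubs, which is safe because the compiled-out variants are empty inlines.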
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 58fab5303e9c..cf826e6b6aa1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
 
 MODULE_DEVICE_TABLE(pci, stmmac_id_table);
 
-static struct pci_driver stmmac_driver = {
+struct pci_driver stmmac_pci_driver = {
 	.name = STMMAC_RESOURCE_NAME,
 	.id_table = stmmac_id_table,
 	.probe = stmmac_pci_probe,
@@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = {
 #endif
 };
 
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
-	int ret;
-
-	ret = pci_register_driver(&stmmac_driver);
-	if (ret < 0)
-		pr_err("%s: ERROR: driver registration failed\n", __func__);
-
-	return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
-	pci_unregister_driver(&stmmac_driver);
-}
-
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
 MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 3dd8f0803808..680d2b8dfe27 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -255,7 +255,7 @@ static const struct of_device_id stmmac_dt_ids[] = { | |||
255 | }; | 255 | }; |
256 | MODULE_DEVICE_TABLE(of, stmmac_dt_ids); | 256 | MODULE_DEVICE_TABLE(of, stmmac_dt_ids); |
257 | 257 | ||
258 | static struct platform_driver stmmac_driver = { | 258 | struct platform_driver stmmac_pltfr_driver = { |
259 | .probe = stmmac_pltfr_probe, | 259 | .probe = stmmac_pltfr_probe, |
260 | .remove = stmmac_pltfr_remove, | 260 | .remove = stmmac_pltfr_remove, |
261 | .driver = { | 261 | .driver = { |
@@ -266,8 +266,6 @@ static struct platform_driver stmmac_driver = { | |||
266 | }, | 266 | }, |
267 | }; | 267 | }; |
268 | 268 | ||
269 | module_platform_driver(stmmac_driver); | ||
270 | |||
271 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); | 269 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); |
272 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); | 270 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); |
273 | MODULE_LICENSE("GPL"); | 271 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 703c8cce2a2c..8c726b7004d3 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c | |||
@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx) | |||
3598 | static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) | 3598 | static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) |
3599 | { | 3599 | { |
3600 | struct netdev_queue *txq; | 3600 | struct netdev_queue *txq; |
3601 | unsigned int tx_bytes; | ||
3602 | u16 pkt_cnt, tmp; | 3601 | u16 pkt_cnt, tmp; |
3603 | int cons, index; | 3602 | int cons, index; |
3604 | u64 cs; | 3603 | u64 cs; |
@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) | |||
3621 | netif_printk(np, tx_done, KERN_DEBUG, np->dev, | 3620 | netif_printk(np, tx_done, KERN_DEBUG, np->dev, |
3622 | "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); | 3621 | "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); |
3623 | 3622 | ||
3624 | tx_bytes = 0; | 3623 | while (pkt_cnt--) |
3625 | tmp = pkt_cnt; | ||
3626 | while (tmp--) { | ||
3627 | tx_bytes += rp->tx_buffs[cons].skb->len; | ||
3628 | cons = release_tx_packet(np, rp, cons); | 3624 | cons = release_tx_packet(np, rp, cons); |
3629 | } | ||
3630 | 3625 | ||
3631 | rp->cons = cons; | 3626 | rp->cons = cons; |
3632 | smp_mb(); | 3627 | smp_mb(); |
3633 | 3628 | ||
3634 | netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes); | ||
3635 | |||
3636 | out: | 3629 | out: |
3637 | if (unlikely(netif_tx_queue_stopped(txq) && | 3630 | if (unlikely(netif_tx_queue_stopped(txq) && |
3638 | (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { | 3631 | (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { |
@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np) | |||
4333 | struct tx_ring_info *rp = &np->tx_rings[i]; | 4326 | struct tx_ring_info *rp = &np->tx_rings[i]; |
4334 | 4327 | ||
4335 | niu_free_tx_ring_info(np, rp); | 4328 | niu_free_tx_ring_info(np, rp); |
4336 | netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i)); | ||
4337 | } | 4329 | } |
4338 | kfree(np->tx_rings); | 4330 | kfree(np->tx_rings); |
4339 | np->tx_rings = NULL; | 4331 | np->tx_rings = NULL; |
@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, | |||
6739 | prod = NEXT_TX(rp, prod); | 6731 | prod = NEXT_TX(rp, prod); |
6740 | } | 6732 | } |
6741 | 6733 | ||
6742 | netdev_tx_sent_queue(txq, skb->len); | ||
6743 | |||
6744 | if (prod < rp->prod) | 6734 | if (prod < rp->prod) |
6745 | rp->wrap_bit ^= TX_RING_KICK_WRAP; | 6735 | rp->wrap_bit ^= TX_RING_KICK_WRAP; |
6746 | rp->prod = prod; | 6736 | rp->prod = prod; |
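The niu hunks above back out the driver's byte-queue-limits (BQL) hooks. These hooks only work as a matched set, which is why all three call sites are removed together: bytes reported on the transmit path must later be reported on the completion path, and the counters must be reset when a ring is torn down, or the stack will stall the queue or trip the BQL accounting. A generic sketch of the canonical pairing (not niu-specific; the example_* names are illustrative):

    #include <linux/netdevice.h>

    /* Transmit path: account bytes handed to the hardware. */
    static void example_tx_enqueue(struct netdev_queue *txq,
                                   struct sk_buff *skb)
    {
            netdev_tx_sent_queue(txq, skb->len);
    }

    /* Completion path: account packets/bytes the hardware finished. */
    static void example_tx_clean(struct netdev_queue *txq,
                                 unsigned int pkts, unsigned int bytes)
    {
            netdev_tx_completed_queue(txq, pkts, bytes);
    }

    /* Ring teardown: zero the accounting before reuse. */
    static void example_ring_teardown(struct netdev_queue *txq)
    {
            netdev_tx_reset_queue(txq);
    }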
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig index 2d9218f86bca..098b1c42b393 100644 --- a/drivers/net/ethernet/tile/Kconfig +++ b/drivers/net/ethernet/tile/Kconfig | |||
@@ -7,6 +7,8 @@ config TILE_NET | |||
7 | depends on TILE | 7 | depends on TILE |
8 | default y | 8 | default y |
9 | select CRC32 | 9 | select CRC32 |
10 | select TILE_GXIO_MPIPE if TILEGX | ||
11 | select HIGH_RES_TIMERS if TILEGX | ||
10 | ---help--- | 12 | ---help--- |
11 | This is a standard Linux network device driver for the | 13 | This is a standard Linux network device driver for the |
12 | on-chip Tilera Gigabit Ethernet and XAUI interfaces. | 14 | on-chip Tilera Gigabit Ethernet and XAUI interfaces. |
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile index f634f142cab4..0ef9eefd3211 100644 --- a/drivers/net/ethernet/tile/Makefile +++ b/drivers/net/ethernet/tile/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_TILE_NET) += tile_net.o | 5 | obj-$(CONFIG_TILE_NET) += tile_net.o |
6 | ifdef CONFIG_TILEGX | 6 | ifdef CONFIG_TILEGX |
7 | tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o | 7 | tile_net-y := tilegx.o |
8 | else | 8 | else |
9 | tile_net-objs := tilepro.o | 9 | tile_net-y := tilepro.o |
10 | endif | 10 | endif |
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c new file mode 100644 index 000000000000..83b4b388ad49 --- /dev/null +++ b/drivers/net/ethernet/tile/tilegx.c | |||
@@ -0,0 +1,1898 @@ | |||
1 | /* | ||
2 | * Copyright 2012 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/moduleparam.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> /* printk() */ | ||
20 | #include <linux/slab.h> /* kmalloc() */ | ||
21 | #include <linux/errno.h> /* error codes */ | ||
22 | #include <linux/types.h> /* size_t */ | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/in.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/netdevice.h> /* struct device, and other headers */ | ||
27 | #include <linux/etherdevice.h> /* eth_type_trans */ | ||
28 | #include <linux/skbuff.h> | ||
29 | #include <linux/ioctl.h> | ||
30 | #include <linux/cdev.h> | ||
31 | #include <linux/hugetlb.h> | ||
32 | #include <linux/in6.h> | ||
33 | #include <linux/timer.h> | ||
34 | #include <linux/hrtimer.h> | ||
35 | #include <linux/ktime.h> | ||
36 | #include <linux/io.h> | ||
37 | #include <linux/ctype.h> | ||
38 | #include <linux/ip.h> | ||
39 | #include <linux/tcp.h> | ||
40 | |||
41 | #include <asm/checksum.h> | ||
42 | #include <asm/homecache.h> | ||
43 | #include <gxio/mpipe.h> | ||
44 | #include <arch/sim.h> | ||
45 | |||
46 | /* Default transmit lockup timeout period, in jiffies. */ | ||
47 | #define TILE_NET_TIMEOUT (5 * HZ) | ||
48 | |||
49 | /* The maximum number of distinct channels (idesc.channel is 5 bits). */ | ||
50 | #define TILE_NET_CHANNELS 32 | ||
51 | |||
52 | /* Maximum number of idescs to handle per "poll". */ | ||
53 | #define TILE_NET_BATCH 128 | ||
54 | |||
55 | /* Maximum number of packets to handle per "poll". */ | ||
56 | #define TILE_NET_WEIGHT 64 | ||
57 | |||
58 | /* Number of entries in each iqueue. */ | ||
59 | #define IQUEUE_ENTRIES 512 | ||
60 | |||
61 | /* Number of entries in each equeue. */ | ||
62 | #define EQUEUE_ENTRIES 2048 | ||
63 | |||
64 | /* Total header bytes per equeue slot. Must be big enough for 2 bytes | ||
65 | * of NET_IP_ALIGN alignment, plus 14 bytes (ETH_HLEN) of L2 header, plus up to | ||
66 | * 60 bytes of actual TCP header. We round up to align to cache lines. | ||
67 | */ | ||
68 | #define HEADER_BYTES 128 | ||
69 | |||
70 | /* Maximum completions per cpu per device (must be a power of two). | ||
71 | * ISSUE: What is the right number here? If this is too small, then | ||
72 | * egress might block waiting for free space in a completions array. | ||
73 | * ISSUE: At the least, allocate these only for initialized echannels. | ||
74 | */ | ||
75 | #define TILE_NET_MAX_COMPS 64 | ||
76 | |||
77 | #define MAX_FRAGS (MAX_SKB_FRAGS + 1) | ||
78 | |||
79 | /* Size of completions data to allocate. | ||
80 | * ISSUE: Probably more than needed since we don't use all the channels. | ||
81 | */ | ||
82 | #define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps)) | ||
83 | |||
84 | /* Size of NotifRing data to allocate. */ | ||
85 | #define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t)) | ||
86 | |||
87 | /* Timeout to wake the per-device TX timer after we stop the queue. | ||
88 | * We don't want the timeout too short (adds overhead, and might end | ||
89 | * up causing stop/wake/stop/wake cycles) or too long (affects performance). | ||
90 | * For the 10 Gb NIC, 30 usec is the wire time of roughly 25 1500-byte packets. | ||
91 | */ | ||
92 | #define TX_TIMER_DELAY_USEC 30 | ||
93 | |||
94 | /* Timeout to wake the per-cpu egress timer to free completions. */ | ||
95 | #define EGRESS_TIMER_DELAY_USEC 1000 | ||
96 | |||
97 | MODULE_AUTHOR("Tilera Corporation"); | ||
98 | MODULE_LICENSE("GPL"); | ||
99 | |||
100 | /* A "packet fragment" (a chunk of memory). */ | ||
101 | struct frag { | ||
102 | void *buf; | ||
103 | size_t length; | ||
104 | }; | ||
105 | |||
106 | /* A single completion. */ | ||
107 | struct tile_net_comp { | ||
108 | /* The "complete_count" when the completion will be complete. */ | ||
109 | s64 when; | ||
110 | /* The buffer to be freed when the completion is complete. */ | ||
111 | struct sk_buff *skb; | ||
112 | }; | ||
113 | |||
114 | /* The completions for a given cpu and echannel. */ | ||
115 | struct tile_net_comps { | ||
116 | /* The completions. */ | ||
117 | struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS]; | ||
118 | /* The number of completions used. */ | ||
119 | unsigned long comp_next; | ||
120 | /* The number of completions freed. */ | ||
121 | unsigned long comp_last; | ||
122 | }; | ||
123 | |||
124 | /* The transmit wake timer for a given cpu and echannel. */ | ||
125 | struct tile_net_tx_wake { | ||
126 | struct hrtimer timer; | ||
127 | struct net_device *dev; | ||
128 | }; | ||
129 | |||
130 | /* Info for a specific cpu. */ | ||
131 | struct tile_net_info { | ||
132 | /* The NAPI struct. */ | ||
133 | struct napi_struct napi; | ||
134 | /* Packet queue. */ | ||
135 | gxio_mpipe_iqueue_t iqueue; | ||
136 | /* Our cpu. */ | ||
137 | int my_cpu; | ||
138 | /* True if iqueue is valid. */ | ||
139 | bool has_iqueue; | ||
140 | /* NAPI flags. */ | ||
141 | bool napi_added; | ||
142 | bool napi_enabled; | ||
143 | /* Number of small sk_buffs which must still be provided. */ | ||
144 | unsigned int num_needed_small_buffers; | ||
145 | /* Number of large sk_buffs which must still be provided. */ | ||
146 | unsigned int num_needed_large_buffers; | ||
147 | /* A timer for handling egress completions. */ | ||
148 | struct hrtimer egress_timer; | ||
149 | /* True if "egress_timer" is scheduled. */ | ||
150 | bool egress_timer_scheduled; | ||
151 | /* Comps for each egress channel. */ | ||
152 | struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS]; | ||
153 | /* Transmit wake timer for each egress channel. */ | ||
154 | struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS]; | ||
155 | }; | ||
156 | |||
157 | /* Info for egress on a particular egress channel. */ | ||
158 | struct tile_net_egress { | ||
159 | /* The "equeue". */ | ||
160 | gxio_mpipe_equeue_t *equeue; | ||
161 | /* The headers for TSO. */ | ||
162 | unsigned char *headers; | ||
163 | }; | ||
164 | |||
165 | /* Info for a specific device. */ | ||
166 | struct tile_net_priv { | ||
167 | /* Our network device. */ | ||
168 | struct net_device *dev; | ||
169 | /* The primary link. */ | ||
170 | gxio_mpipe_link_t link; | ||
171 | /* The primary channel, if open, else -1. */ | ||
172 | int channel; | ||
173 | /* The "loopify" egress link, if needed. */ | ||
174 | gxio_mpipe_link_t loopify_link; | ||
175 | /* The "loopify" egress channel, if open, else -1. */ | ||
176 | int loopify_channel; | ||
177 | /* The egress channel (channel or loopify_channel). */ | ||
178 | int echannel; | ||
179 | /* Total stats. */ | ||
180 | struct net_device_stats stats; | ||
181 | }; | ||
182 | |||
183 | /* Egress info, indexed by "priv->echannel" (lazily created as needed). */ | ||
184 | static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS]; | ||
185 | |||
186 | /* Devices currently associated with each channel. | ||
187 | * NOTE: The array entry can become NULL after ifconfig down, but | ||
188 | * we do not free the underlying net_device structures, so it is | ||
189 | * safe to use a pointer after reading it from this array. | ||
190 | */ | ||
191 | static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS]; | ||
192 | |||
193 | /* A mutex for "tile_net_devs_for_channel". */ | ||
194 | static DEFINE_MUTEX(tile_net_devs_for_channel_mutex); | ||
195 | |||
196 | /* The per-cpu info. */ | ||
197 | static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info); | ||
198 | |||
199 | /* The "context" for all devices. */ | ||
200 | static gxio_mpipe_context_t context; | ||
201 | |||
202 | /* Buffer sizes and mpipe enum codes for buffer stacks. | ||
203 | * See arch/tile/include/gxio/mpipe.h for the set of possible values. | ||
204 | */ | ||
205 | #define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128 | ||
206 | #define BUFFER_SIZE_SMALL 128 | ||
207 | #define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664 | ||
208 | #define BUFFER_SIZE_LARGE 1664 | ||
209 | |||
210 | /* The small/large "buffer stacks". */ | ||
211 | static int small_buffer_stack = -1; | ||
212 | static int large_buffer_stack = -1; | ||
213 | |||
214 | /* Amount of memory allocated for each buffer stack. */ | ||
215 | static size_t buffer_stack_size; | ||
216 | |||
217 | /* The actual memory allocated for the buffer stacks. */ | ||
218 | static void *small_buffer_stack_va; | ||
219 | static void *large_buffer_stack_va; | ||
220 | |||
221 | /* The buckets. */ | ||
222 | static int first_bucket = -1; | ||
223 | static int num_buckets = 1; | ||
224 | |||
225 | /* The ingress irq. */ | ||
226 | static int ingress_irq = -1; | ||
227 | |||
228 | /* Text value of tile_net.cpus if passed as a module parameter. */ | ||
229 | static char *network_cpus_string; | ||
230 | |||
231 | /* The actual cpus in "network_cpus". */ | ||
232 | static struct cpumask network_cpus_map; | ||
233 | |||
234 | /* If "loopify=LINK" was specified, this is "LINK". */ | ||
235 | static char *loopify_link_name; | ||
236 | |||
237 | /* If "tile_net.custom" was specified, this is non-NULL. */ | ||
238 | static char *custom_str; | ||
239 | |||
240 | /* The "tile_net.cpus" argument specifies the cpus that are dedicated | ||
241 | * to handle ingress packets. | ||
242 | * | ||
243 | * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where | ||
244 | * m-n and x-y are inclusive ranges of cpu numbers (for example, | ||
245 | * "tile_net.cpus=1-4,7" dedicates cpus 1-4 and 7 to ingress). | ||
246 | */ | ||
247 | static bool network_cpus_init(void) | ||
248 | { | ||
249 | char buf[1024]; | ||
250 | int rc; | ||
251 | |||
252 | if (network_cpus_string == NULL) | ||
253 | return false; | ||
254 | |||
255 | rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map); | ||
256 | if (rc != 0) { | ||
257 | pr_warn("tile_net.cpus=%s: malformed cpu list\n", | ||
258 | network_cpus_string); | ||
259 | return false; | ||
260 | } | ||
261 | |||
262 | /* Limit the mask to cpus that actually exist. */ | ||
263 | cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask); | ||
264 | |||
265 | if (cpumask_empty(&network_cpus_map)) { | ||
266 | pr_warn("Ignoring empty tile_net.cpus='%s'.\n", | ||
267 | network_cpus_string); | ||
268 | return false; | ||
269 | } | ||
270 | |||
271 | cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); | ||
272 | pr_info("Linux network CPUs: %s\n", buf); | ||
273 | return true; | ||
274 | } | ||
275 | |||
276 | module_param_named(cpus, network_cpus_string, charp, 0444); | ||
277 | MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts"); | ||
278 | |||
279 | /* The "tile_net.loopify=LINK" argument causes the named device to | ||
280 | * actually use "loop0" for ingress, and "loop1" for egress. This | ||
281 | * allows an app to sit between the actual link and linux, passing | ||
282 | * (some) packets along to linux, and forwarding (some) packets sent | ||
283 | * out by linux. | ||
284 | */ | ||
285 | module_param_named(loopify, loopify_link_name, charp, 0444); | ||
286 | MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress"); | ||
287 | |||
288 | /* The "tile_net.custom" argument causes us to ignore the "conventional" | ||
289 | * classifier metadata, in particular, the "l2_offset". | ||
290 | */ | ||
291 | module_param_named(custom, custom_str, charp, 0444); | ||
292 | MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier"); | ||
293 | |||
294 | /* Atomically update a statistics field. | ||
295 | * Note that on TILE-Gx, this operation is fire-and-forget on the | ||
296 | * issuing core (single-cycle dispatch) and takes only a few cycles | ||
297 | * longer than a regular store when the request reaches the home cache. | ||
298 | * No expensive bus management overhead is required. | ||
299 | */ | ||
300 | static void tile_net_stats_add(unsigned long value, unsigned long *field) | ||
301 | { | ||
302 | BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long)); | ||
303 | atomic_long_add(value, (atomic_long_t *)field); | ||
304 | } | ||
305 | |||
306 | /* Allocate and push a buffer. */ | ||
307 | static bool tile_net_provide_buffer(bool small) | ||
308 | { | ||
309 | int stack = small ? small_buffer_stack : large_buffer_stack; | ||
310 | const unsigned long buffer_alignment = 128; | ||
311 | struct sk_buff *skb; | ||
312 | int len; | ||
313 | |||
314 | len = sizeof(struct sk_buff **) + buffer_alignment; | ||
315 | len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE); | ||
316 | skb = dev_alloc_skb(len); | ||
317 | if (skb == NULL) | ||
318 | return false; | ||
319 | |||
320 | /* Make room for a back-pointer to 'skb' and guarantee alignment. */ | ||
321 | skb_reserve(skb, sizeof(struct sk_buff **)); | ||
322 | skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1)); | ||
323 | |||
324 | /* Save a back-pointer to 'skb'. */ | ||
325 | *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb; | ||
326 | |||
327 | /* Make sure "skb" and the back-pointer have been flushed. */ | ||
328 | wmb(); | ||
329 | |||
330 | gxio_mpipe_push_buffer(&context, stack, | ||
331 | (void *)va_to_tile_io_addr(skb->data)); | ||
332 | |||
333 | return true; | ||
334 | } | ||
335 | |||
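The negated-pointer mask in tile_net_provide_buffer() is a compact round-up idiom: for a pointer p and power-of-two align, -(long)p & (align - 1) yields the padding needed to reach the next align boundary (zero if p is already aligned). A standalone check with illustrative values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long p = 0x1048, align = 128;
            unsigned long pad = -(long)p & (align - 1);

            /* Prints pad=0x38 aligned=0x1080, a 128-byte boundary. */
            printf("pad=%#lx aligned=%#lx\n", pad, p + pad);
            return 0;
    }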
336 | /* Convert a raw mpipe buffer to its matching skb pointer. */ | ||
337 | static struct sk_buff *mpipe_buf_to_skb(void *va) | ||
338 | { | ||
339 | /* Acquire the associated "skb". */ | ||
340 | struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); | ||
341 | struct sk_buff *skb = *skb_ptr; | ||
342 | |||
343 | /* Paranoia. */ | ||
344 | if (skb->data != va) { | ||
345 | /* Panic here since there's a reasonable chance | ||
346 | * that corrupt buffers means generic memory | ||
347 | * corruption, with unpredictable system effects. | ||
348 | */ | ||
349 | panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p", | ||
350 | va, skb, skb->data); | ||
351 | } | ||
352 | |||
353 | return skb; | ||
354 | } | ||
355 | |||
356 | static void tile_net_pop_all_buffers(int stack) | ||
357 | { | ||
358 | for (;;) { | ||
359 | tile_io_addr_t addr = | ||
360 | (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack); | ||
361 | if (addr == 0) | ||
362 | break; | ||
363 | dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr))); | ||
364 | } | ||
365 | } | ||
366 | |||
367 | /* Provide linux buffers to mPIPE. */ | ||
368 | static void tile_net_provide_needed_buffers(void) | ||
369 | { | ||
370 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
371 | |||
372 | while (info->num_needed_small_buffers != 0) { | ||
373 | if (!tile_net_provide_buffer(true)) | ||
374 | goto oops; | ||
375 | info->num_needed_small_buffers--; | ||
376 | } | ||
377 | |||
378 | while (info->num_needed_large_buffers != 0) { | ||
379 | if (!tile_net_provide_buffer(false)) | ||
380 | goto oops; | ||
381 | info->num_needed_large_buffers--; | ||
382 | } | ||
383 | |||
384 | return; | ||
385 | |||
386 | oops: | ||
387 | /* Add a description to the page allocation failure dump. */ | ||
388 | pr_notice("Tile %d still needs some buffers\n", info->my_cpu); | ||
389 | } | ||
390 | |||
391 | static inline bool filter_packet(struct net_device *dev, void *buf) | ||
392 | { | ||
393 | /* Filter packets received before we're up. */ | ||
394 | if (dev == NULL || !(dev->flags & IFF_UP)) | ||
395 | return true; | ||
396 | |||
397 | /* Filter out packets that aren't for us. */ | ||
398 | if (!(dev->flags & IFF_PROMISC) && | ||
399 | !is_multicast_ether_addr(buf) && | ||
400 | compare_ether_addr(dev->dev_addr, buf) != 0) | ||
401 | return true; | ||
402 | |||
403 | return false; | ||
404 | } | ||
405 | |||
406 | static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb, | ||
407 | gxio_mpipe_idesc_t *idesc, unsigned long len) | ||
408 | { | ||
409 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
410 | struct tile_net_priv *priv = netdev_priv(dev); | ||
411 | |||
412 | /* Encode the actual packet length. */ | ||
413 | skb_put(skb, len); | ||
414 | |||
415 | skb->protocol = eth_type_trans(skb, dev); | ||
416 | |||
417 | /* Acknowledge "good" hardware checksums. */ | ||
418 | if (idesc->cs && idesc->csum_seed_val == 0xFFFF) | ||
419 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
420 | |||
421 | netif_receive_skb(skb); | ||
422 | |||
423 | /* Update stats. */ | ||
424 | tile_net_stats_add(1, &priv->stats.rx_packets); | ||
425 | tile_net_stats_add(len, &priv->stats.rx_bytes); | ||
426 | |||
427 | /* Need a new buffer. */ | ||
428 | if (idesc->size == BUFFER_SIZE_SMALL_ENUM) | ||
429 | info->num_needed_small_buffers++; | ||
430 | else | ||
431 | info->num_needed_large_buffers++; | ||
432 | } | ||
433 | |||
434 | /* Handle a packet. Return true if "processed", false if "filtered". */ | ||
435 | static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc) | ||
436 | { | ||
437 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
438 | struct net_device *dev = tile_net_devs_for_channel[idesc->channel]; | ||
439 | uint8_t l2_offset; | ||
440 | void *va; | ||
441 | void *buf; | ||
442 | unsigned long len; | ||
443 | bool filter; | ||
444 | |||
445 | /* Drop packets for which no buffer was available. | ||
446 | * NOTE: This happens under heavy load. | ||
447 | */ | ||
448 | if (idesc->be) { | ||
449 | struct tile_net_priv *priv = netdev_priv(dev); | ||
450 | tile_net_stats_add(1, &priv->stats.rx_dropped); | ||
451 | gxio_mpipe_iqueue_consume(&info->iqueue, idesc); | ||
452 | if (net_ratelimit()) | ||
453 | pr_info("Dropping packet (insufficient buffers).\n"); | ||
454 | return false; | ||
455 | } | ||
456 | |||
457 | /* Get the "l2_offset", if allowed. */ | ||
458 | l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc); | ||
459 | |||
460 | /* Get the raw buffer VA (includes "headroom"). */ | ||
461 | va = tile_io_addr_to_va((unsigned long)(long)idesc->va); | ||
462 | |||
463 | /* Get the actual packet start/length. */ | ||
464 | buf = va + l2_offset; | ||
465 | len = idesc->l2_size - l2_offset; | ||
466 | |||
467 | /* Point "va" at the raw buffer. */ | ||
468 | va -= NET_IP_ALIGN; | ||
469 | |||
470 | filter = filter_packet(dev, buf); | ||
471 | if (filter) { | ||
472 | gxio_mpipe_iqueue_drop(&info->iqueue, idesc); | ||
473 | } else { | ||
474 | struct sk_buff *skb = mpipe_buf_to_skb(va); | ||
475 | |||
476 | /* Skip headroom, and any custom header. */ | ||
477 | skb_reserve(skb, NET_IP_ALIGN + l2_offset); | ||
478 | |||
479 | tile_net_receive_skb(dev, skb, idesc, len); | ||
480 | } | ||
481 | |||
482 | gxio_mpipe_iqueue_consume(&info->iqueue, idesc); | ||
483 | return !filter; | ||
484 | } | ||
485 | |||
486 | /* Handle some packets for the current CPU. | ||
487 | * | ||
488 | * This function handles up to TILE_NET_BATCH idescs per call. | ||
489 | * | ||
490 | * ISSUE: Since we do not provide new buffers until this function is | ||
491 | * complete, we must initially provide enough buffers for each network | ||
492 | * cpu to fill its iqueue and also its batched idescs. | ||
493 | * | ||
494 | * ISSUE: The "rotting packet" race condition occurs if a packet | ||
495 | * arrives after the queue appears to be empty, and before the | ||
496 | * hypervisor interrupt is re-enabled. | ||
497 | */ | ||
498 | static int tile_net_poll(struct napi_struct *napi, int budget) | ||
499 | { | ||
500 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
501 | unsigned int work = 0; | ||
502 | gxio_mpipe_idesc_t *idesc; | ||
503 | int i, n; | ||
504 | |||
505 | /* Process packets. */ | ||
506 | while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) { | ||
507 | for (i = 0; i < n; i++) { | ||
508 | if (i == TILE_NET_BATCH) | ||
509 | goto done; | ||
510 | if (tile_net_handle_packet(idesc + i)) { | ||
511 | if (++work >= budget) | ||
512 | goto done; | ||
513 | } | ||
514 | } | ||
515 | } | ||
516 | |||
517 | /* There are no packets left. */ | ||
518 | napi_complete(&info->napi); | ||
519 | |||
520 | /* Re-enable hypervisor interrupts. */ | ||
521 | gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring); | ||
522 | |||
523 | /* HACK: Avoid the "rotting packet" problem. */ | ||
524 | if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0) | ||
525 | napi_schedule(&info->napi); | ||
526 | |||
527 | /* ISSUE: Handle completions? */ | ||
528 | |||
529 | done: | ||
530 | tile_net_provide_needed_buffers(); | ||
531 | |||
532 | return work; | ||
533 | } | ||
534 | |||
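The "rotting packet" workaround in tile_net_poll() is one instance of a general NAPI race: a packet arriving after the final emptiness check but before interrupts are re-enabled would otherwise sit unprocessed until the next interrupt. A distilled form of the pattern (the device_* helpers are placeholders, not real APIs):

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int work = device_process_rx(budget);       /* placeholder */

            if (work < budget) {
                    napi_complete(napi);
                    device_enable_rx_interrupt();       /* placeholder */
                    /* A packet may have landed in the window above. */
                    if (device_rx_pending())            /* placeholder */
                            napi_schedule(napi);
            }
            return work;
    }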
535 | /* Handle an ingress interrupt on the current cpu. */ | ||
536 | static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused) | ||
537 | { | ||
538 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
539 | napi_schedule(&info->napi); | ||
540 | return IRQ_HANDLED; | ||
541 | } | ||
542 | |||
543 | /* Free some completions. This must be called with interrupts blocked. */ | ||
544 | static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue, | ||
545 | struct tile_net_comps *comps, | ||
546 | int limit, bool force_update) | ||
547 | { | ||
548 | int n = 0; | ||
549 | while (comps->comp_last < comps->comp_next) { | ||
550 | unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS; | ||
551 | struct tile_net_comp *comp = &comps->comp_queue[cid]; | ||
552 | if (!gxio_mpipe_equeue_is_complete(equeue, comp->when, | ||
553 | force_update || n == 0)) | ||
554 | break; | ||
555 | dev_kfree_skb_irq(comp->skb); | ||
556 | comps->comp_last++; | ||
557 | if (++n == limit) | ||
558 | break; | ||
559 | } | ||
560 | return n; | ||
561 | } | ||
562 | |||
563 | /* Add a completion. This must be called with interrupts blocked. | ||
564 | * tile_net_equeue_try_reserve() will have ensured a free completion entry. | ||
565 | */ | ||
566 | static void add_comp(gxio_mpipe_equeue_t *equeue, | ||
567 | struct tile_net_comps *comps, | ||
568 | uint64_t when, struct sk_buff *skb) | ||
569 | { | ||
570 | int cid = comps->comp_next % TILE_NET_MAX_COMPS; | ||
571 | comps->comp_queue[cid].when = when; | ||
572 | comps->comp_queue[cid].skb = skb; | ||
573 | comps->comp_next++; | ||
574 | } | ||
575 | |||
576 | static void tile_net_schedule_tx_wake_timer(struct net_device *dev) | ||
577 | { | ||
578 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
579 | struct tile_net_priv *priv = netdev_priv(dev); | ||
580 | |||
581 | hrtimer_start(&info->tx_wake[priv->echannel].timer, | ||
582 | ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), | ||
583 | HRTIMER_MODE_REL_PINNED); | ||
584 | } | ||
585 | |||
586 | static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t) | ||
587 | { | ||
588 | struct tile_net_tx_wake *tx_wake = | ||
589 | container_of(t, struct tile_net_tx_wake, timer); | ||
590 | netif_wake_subqueue(tx_wake->dev, smp_processor_id()); | ||
591 | return HRTIMER_NORESTART; | ||
592 | } | ||
593 | |||
594 | /* Make sure the egress timer is scheduled. */ | ||
595 | static void tile_net_schedule_egress_timer(void) | ||
596 | { | ||
597 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
598 | |||
599 | if (!info->egress_timer_scheduled) { | ||
600 | hrtimer_start(&info->egress_timer, | ||
601 | ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL), | ||
602 | HRTIMER_MODE_REL_PINNED); | ||
603 | info->egress_timer_scheduled = true; | ||
604 | } | ||
605 | } | ||
606 | |||
607 | /* The "function" for "info->egress_timer". | ||
608 | * | ||
609 | * This timer will reschedule itself as long as there are any pending | ||
610 | * completions expected for this tile. | ||
611 | */ | ||
612 | static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t) | ||
613 | { | ||
614 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
615 | unsigned long irqflags; | ||
616 | bool pending = false; | ||
617 | int i; | ||
618 | |||
619 | local_irq_save(irqflags); | ||
620 | |||
621 | /* The timer is no longer scheduled. */ | ||
622 | info->egress_timer_scheduled = false; | ||
623 | |||
624 | /* Free all possible comps for this tile. */ | ||
625 | for (i = 0; i < TILE_NET_CHANNELS; i++) { | ||
626 | struct tile_net_egress *egress = &egress_for_echannel[i]; | ||
627 | struct tile_net_comps *comps = info->comps_for_echannel[i]; | ||
628 | if (comps->comp_last >= comps->comp_next) | ||
629 | continue; | ||
630 | tile_net_free_comps(egress->equeue, comps, -1, true); | ||
631 | pending = pending || (comps->comp_last < comps->comp_next); | ||
632 | } | ||
633 | |||
634 | /* Reschedule timer if needed. */ | ||
635 | if (pending) | ||
636 | tile_net_schedule_egress_timer(); | ||
637 | |||
638 | local_irq_restore(irqflags); | ||
639 | |||
640 | return HRTIMER_NORESTART; | ||
641 | } | ||
642 | |||
643 | /* Helper function for "tile_net_update()". | ||
644 | * "dev" (i.e. arg) is the device being brought up or down, | ||
645 | * or NULL if all devices are now down. | ||
646 | */ | ||
647 | static void tile_net_update_cpu(void *arg) | ||
648 | { | ||
649 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
650 | struct net_device *dev = arg; | ||
651 | |||
652 | if (!info->has_iqueue) | ||
653 | return; | ||
654 | |||
655 | if (dev != NULL) { | ||
656 | if (!info->napi_added) { | ||
657 | netif_napi_add(dev, &info->napi, | ||
658 | tile_net_poll, TILE_NET_WEIGHT); | ||
659 | info->napi_added = true; | ||
660 | } | ||
661 | if (!info->napi_enabled) { | ||
662 | napi_enable(&info->napi); | ||
663 | info->napi_enabled = true; | ||
664 | } | ||
665 | enable_percpu_irq(ingress_irq, 0); | ||
666 | } else { | ||
667 | disable_percpu_irq(ingress_irq); | ||
668 | if (info->napi_enabled) { | ||
669 | napi_disable(&info->napi); | ||
670 | info->napi_enabled = false; | ||
671 | } | ||
672 | /* FIXME: Drain the iqueue. */ | ||
673 | } | ||
674 | } | ||
675 | |||
676 | /* Helper function for tile_net_open() and tile_net_stop(). | ||
677 | * Always called under tile_net_devs_for_channel_mutex. | ||
678 | */ | ||
679 | static int tile_net_update(struct net_device *dev) | ||
680 | { | ||
681 | static gxio_mpipe_rules_t rules; /* too big to fit on the stack */ | ||
682 | bool saw_channel = false; | ||
683 | int channel; | ||
684 | int rc; | ||
685 | int cpu; | ||
686 | |||
687 | gxio_mpipe_rules_init(&rules, &context); | ||
688 | |||
689 | for (channel = 0; channel < TILE_NET_CHANNELS; channel++) { | ||
690 | if (tile_net_devs_for_channel[channel] == NULL) | ||
691 | continue; | ||
692 | if (!saw_channel) { | ||
693 | saw_channel = true; | ||
694 | gxio_mpipe_rules_begin(&rules, first_bucket, | ||
695 | num_buckets, NULL); | ||
696 | gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN); | ||
697 | } | ||
698 | gxio_mpipe_rules_add_channel(&rules, channel); | ||
699 | } | ||
700 | |||
701 | /* NOTE: This can fail if there is no classifier. | ||
702 | * ISSUE: Can anything else cause it to fail? | ||
703 | */ | ||
704 | rc = gxio_mpipe_rules_commit(&rules); | ||
705 | if (rc != 0) { | ||
706 | netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc); | ||
707 | return -EIO; | ||
708 | } | ||
709 | |||
710 | /* Update all cpus, sequentially (to protect "netif_napi_add()"). */ | ||
711 | for_each_online_cpu(cpu) | ||
712 | smp_call_function_single(cpu, tile_net_update_cpu, | ||
713 | (saw_channel ? dev : NULL), 1); | ||
714 | |||
715 | /* HACK: Allow packets to flow in the simulator. */ | ||
716 | if (saw_channel) | ||
717 | sim_enable_mpipe_links(0, -1); | ||
718 | |||
719 | return 0; | ||
720 | } | ||
721 | |||
722 | /* Allocate and initialize mpipe buffer stacks, and register them in | ||
723 | * the mPIPE TLBs, for both small and large packet sizes. | ||
724 | * This routine supports tile_net_init_mpipe(), below. | ||
725 | */ | ||
726 | static int init_buffer_stacks(struct net_device *dev, int num_buffers) | ||
727 | { | ||
728 | pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH); | ||
729 | int rc; | ||
730 | |||
731 | /* Compute stack bytes; we round up to 64KB and then use | ||
732 | * alloc_pages() so we get the required 64KB alignment as well. | ||
733 | */ | ||
734 | buffer_stack_size = | ||
735 | ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers), | ||
736 | 64 * 1024); | ||
737 | |||
738 | /* Allocate two buffer stack indices. */ | ||
739 | rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0); | ||
740 | if (rc < 0) { | ||
741 | netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n", | ||
742 | rc); | ||
743 | return rc; | ||
744 | } | ||
745 | small_buffer_stack = rc; | ||
746 | large_buffer_stack = rc + 1; | ||
747 | |||
748 | /* Allocate the small memory stack. */ | ||
749 | small_buffer_stack_va = | ||
750 | alloc_pages_exact(buffer_stack_size, GFP_KERNEL); | ||
751 | if (small_buffer_stack_va == NULL) { | ||
752 | netdev_err(dev, | ||
753 | "Could not alloc %zd bytes for buffer stacks\n", | ||
754 | buffer_stack_size); | ||
755 | return -ENOMEM; | ||
756 | } | ||
757 | rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack, | ||
758 | BUFFER_SIZE_SMALL_ENUM, | ||
759 | small_buffer_stack_va, | ||
760 | buffer_stack_size, 0); | ||
761 | if (rc != 0) { | ||
762 | netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n", rc); | ||
763 | return rc; | ||
764 | } | ||
765 | rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack, | ||
766 | hash_pte, 0); | ||
767 | if (rc != 0) { | ||
768 | netdev_err(dev, | ||
769 | "gxio_mpipe_register_buffer_memory failed: %d\n", | ||
770 | rc); | ||
771 | return rc; | ||
772 | } | ||
773 | |||
774 | /* Allocate the large buffer stack. */ | ||
775 | large_buffer_stack_va = | ||
776 | alloc_pages_exact(buffer_stack_size, GFP_KERNEL); | ||
777 | if (large_buffer_stack_va == NULL) { | ||
778 | netdev_err(dev, | ||
779 | "Could not alloc %zd bytes for buffer stacks\n", | ||
780 | buffer_stack_size); | ||
781 | return -ENOMEM; | ||
782 | } | ||
783 | rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack, | ||
784 | BUFFER_SIZE_LARGE_ENUM, | ||
785 | large_buffer_stack_va, | ||
786 | buffer_stack_size, 0); | ||
787 | if (rc != 0) { | ||
788 | netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n", | ||
789 | rc); | ||
790 | return rc; | ||
791 | } | ||
792 | rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack, | ||
793 | hash_pte, 0); | ||
794 | if (rc != 0) { | ||
795 | netdev_err(dev, | ||
796 | "gxio_mpipe_register_buffer_memory failed: %d\n", | ||
797 | rc); | ||
798 | return rc; | ||
799 | } | ||
800 | |||
801 | return 0; | ||
802 | } | ||
803 | |||
804 | /* Allocate per-cpu resources (memory for completions and idescs). | ||
805 | * This routine supports tile_net_init_mpipe(), below. | ||
806 | */ | ||
807 | static int alloc_percpu_mpipe_resources(struct net_device *dev, | ||
808 | int cpu, int ring) | ||
809 | { | ||
810 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
811 | int order, i, rc; | ||
812 | struct page *page; | ||
813 | void *addr; | ||
814 | |||
815 | /* Allocate the "comps". */ | ||
816 | order = get_order(COMPS_SIZE); | ||
817 | page = homecache_alloc_pages(GFP_KERNEL, order, cpu); | ||
818 | if (page == NULL) { | ||
819 | netdev_err(dev, "Failed to alloc %zd bytes comps memory\n", | ||
820 | COMPS_SIZE); | ||
821 | return -ENOMEM; | ||
822 | } | ||
823 | addr = pfn_to_kaddr(page_to_pfn(page)); | ||
824 | memset(addr, 0, COMPS_SIZE); | ||
825 | for (i = 0; i < TILE_NET_CHANNELS; i++) | ||
826 | info->comps_for_echannel[i] = | ||
827 | addr + i * sizeof(struct tile_net_comps); | ||
828 | |||
829 | /* If this is a network cpu, create an iqueue. */ | ||
830 | if (cpu_isset(cpu, network_cpus_map)) { | ||
831 | order = get_order(NOTIF_RING_SIZE); | ||
832 | page = homecache_alloc_pages(GFP_KERNEL, order, cpu); | ||
833 | if (page == NULL) { | ||
834 | netdev_err(dev, | ||
835 | "Failed to alloc %zd bytes iqueue memory\n", | ||
836 | NOTIF_RING_SIZE); | ||
837 | return -ENOMEM; | ||
838 | } | ||
839 | addr = pfn_to_kaddr(page_to_pfn(page)); | ||
840 | rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++, | ||
841 | addr, NOTIF_RING_SIZE, 0); | ||
842 | if (rc < 0) { | ||
843 | netdev_err(dev, | ||
844 | "gxio_mpipe_iqueue_init failed: %d\n", rc); | ||
845 | return rc; | ||
846 | } | ||
847 | info->has_iqueue = true; | ||
848 | } | ||
849 | |||
850 | return ring; | ||
851 | } | ||
852 | |||
853 | /* Initialize NotifGroup and buckets. | ||
854 | * This routine supports tile_net_init_mpipe(), below. | ||
855 | */ | ||
856 | static int init_notif_group_and_buckets(struct net_device *dev, | ||
857 | int ring, int network_cpus_count) | ||
858 | { | ||
859 | int group, rc; | ||
860 | |||
861 | /* Allocate one NotifGroup. */ | ||
862 | rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0); | ||
863 | if (rc < 0) { | ||
864 | netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n", | ||
865 | rc); | ||
866 | return rc; | ||
867 | } | ||
868 | group = rc; | ||
869 | |||
870 | /* Initialize global num_buckets value. */ | ||
871 | if (network_cpus_count > 4) | ||
872 | num_buckets = 256; | ||
873 | else if (network_cpus_count > 1) | ||
874 | num_buckets = 16; | ||
875 | |||
876 | /* Allocate some buckets, and set global first_bucket value. */ | ||
877 | rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0); | ||
878 | if (rc < 0) { | ||
879 | netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc); | ||
880 | return rc; | ||
881 | } | ||
882 | first_bucket = rc; | ||
883 | |||
884 | /* Init group and buckets. */ | ||
885 | rc = gxio_mpipe_init_notif_group_and_buckets( | ||
886 | &context, group, ring, network_cpus_count, | ||
887 | first_bucket, num_buckets, | ||
888 | GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY); | ||
889 | if (rc != 0) { | ||
890 | netdev_err( | ||
891 | dev, | ||
892 | "gxio_mpipe_init_notif_group_and_buckets failed: %d\n", | ||
893 | rc); | ||
894 | return rc; | ||
895 | } | ||
896 | |||
897 | return 0; | ||
898 | } | ||
899 | |||
900 | /* Create an irq and register it, then activate the irq and request | ||
901 | * interrupts on all cores. Note that "ingress_irq" being initialized | ||
902 | * is how we know not to call tile_net_init_mpipe() again. | ||
903 | * This routine supports tile_net_init_mpipe(), below. | ||
904 | */ | ||
905 | static int tile_net_setup_interrupts(struct net_device *dev) | ||
906 | { | ||
907 | int cpu, rc; | ||
908 | |||
909 | rc = create_irq(); | ||
910 | if (rc < 0) { | ||
911 | netdev_err(dev, "create_irq failed: %d\n", rc); | ||
912 | return rc; | ||
913 | } | ||
914 | ingress_irq = rc; | ||
915 | tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); | ||
916 | rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, | ||
917 | 0, NULL, NULL); | ||
918 | if (rc != 0) { | ||
919 | netdev_err(dev, "request_irq failed: %d\n", rc); | ||
920 | destroy_irq(ingress_irq); | ||
921 | ingress_irq = -1; | ||
922 | return rc; | ||
923 | } | ||
924 | |||
925 | for_each_online_cpu(cpu) { | ||
926 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
927 | if (info->has_iqueue) { | ||
928 | gxio_mpipe_request_notif_ring_interrupt( | ||
929 | &context, cpu_x(cpu), cpu_y(cpu), | ||
930 | 1, ingress_irq, info->iqueue.ring); | ||
931 | } | ||
932 | } | ||
933 | |||
934 | return 0; | ||
935 | } | ||
936 | |||
937 | /* Undo any state set up partially by a failed call to tile_net_init_mpipe. */ | ||
938 | static void tile_net_init_mpipe_fail(void) | ||
939 | { | ||
940 | int cpu; | ||
941 | |||
942 | /* Do cleanups that require the mpipe context first. */ | ||
943 | if (small_buffer_stack >= 0) | ||
944 | tile_net_pop_all_buffers(small_buffer_stack); | ||
945 | if (large_buffer_stack >= 0) | ||
946 | tile_net_pop_all_buffers(large_buffer_stack); | ||
947 | |||
948 | /* Destroy mpipe context so the hardware no longer owns any memory. */ | ||
949 | gxio_mpipe_destroy(&context); | ||
950 | |||
951 | for_each_online_cpu(cpu) { | ||
952 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
953 | free_pages((unsigned long)(info->comps_for_echannel[0]), | ||
954 | get_order(COMPS_SIZE)); | ||
955 | info->comps_for_echannel[0] = NULL; | ||
956 | free_pages((unsigned long)(info->iqueue.idescs), | ||
957 | get_order(NOTIF_RING_SIZE)); | ||
958 | info->iqueue.idescs = NULL; | ||
959 | } | ||
960 | |||
961 | if (small_buffer_stack_va) | ||
962 | free_pages_exact(small_buffer_stack_va, buffer_stack_size); | ||
963 | if (large_buffer_stack_va) | ||
964 | free_pages_exact(large_buffer_stack_va, buffer_stack_size); | ||
965 | |||
966 | small_buffer_stack_va = NULL; | ||
967 | large_buffer_stack_va = NULL; | ||
968 | large_buffer_stack = -1; | ||
969 | small_buffer_stack = -1; | ||
970 | first_bucket = -1; | ||
971 | } | ||
972 | |||
973 | /* The first time any tilegx network device is opened, we initialize | ||
974 | * the global mpipe state. If this step fails, we fail to open the | ||
975 | * device, but if it succeeds, we never need to do it again, and since | ||
976 | * tile_net can't be unloaded, we never undo it. | ||
977 | * | ||
978 | * Note that some resources in this path (buffer stack indices, | ||
979 | * bindings from init_buffer_stack, etc.) are hypervisor resources | ||
980 | * that are freed implicitly by gxio_mpipe_destroy(). | ||
981 | */ | ||
982 | static int tile_net_init_mpipe(struct net_device *dev) | ||
983 | { | ||
984 | int i, num_buffers, rc; | ||
985 | int cpu; | ||
986 | int first_ring, ring; | ||
987 | int network_cpus_count = cpus_weight(network_cpus_map); | ||
988 | |||
989 | if (!hash_default) { | ||
990 | netdev_err(dev, "Networking requires hash_default!\n"); | ||
991 | return -EIO; | ||
992 | } | ||
993 | |||
994 | rc = gxio_mpipe_init(&context, 0); | ||
995 | if (rc != 0) { | ||
996 | netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc); | ||
997 | return -EIO; | ||
998 | } | ||
999 | |||
1000 | /* Set up the buffer stacks. */ | ||
1001 | num_buffers = | ||
1002 | network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH); | ||
1003 | rc = init_buffer_stacks(dev, num_buffers); | ||
1004 | if (rc != 0) | ||
1005 | goto fail; | ||
1006 | |||
1007 | /* Provide initial buffers. */ | ||
1008 | rc = -ENOMEM; | ||
1009 | for (i = 0; i < num_buffers; i++) { | ||
1010 | if (!tile_net_provide_buffer(true)) { | ||
1011 | netdev_err(dev, "Cannot allocate initial sk_buffs!\n"); | ||
1012 | goto fail; | ||
1013 | } | ||
1014 | } | ||
1015 | for (i = 0; i < num_buffers; i++) { | ||
1016 | if (!tile_net_provide_buffer(false)) { | ||
1017 | netdev_err(dev, "Cannot allocate initial sk_buffs!\n"); | ||
1018 | goto fail; | ||
1019 | } | ||
1020 | } | ||
1021 | |||
1022 | /* Allocate one NotifRing for each network cpu. */ | ||
1023 | rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0); | ||
1024 | if (rc < 0) { | ||
1025 | netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n", | ||
1026 | rc); | ||
1027 | goto fail; | ||
1028 | } | ||
1029 | |||
1030 | /* Init NotifRings per-cpu. */ | ||
1031 | first_ring = rc; | ||
1032 | ring = first_ring; | ||
1033 | for_each_online_cpu(cpu) { | ||
1034 | rc = alloc_percpu_mpipe_resources(dev, cpu, ring); | ||
1035 | if (rc < 0) | ||
1036 | goto fail; | ||
1037 | ring = rc; | ||
1038 | } | ||
1039 | |||
1040 | /* Initialize NotifGroup and buckets. */ | ||
1041 | rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count); | ||
1042 | if (rc != 0) | ||
1043 | goto fail; | ||
1044 | |||
1045 | /* Create and enable interrupts. */ | ||
1046 | rc = tile_net_setup_interrupts(dev); | ||
1047 | if (rc != 0) | ||
1048 | goto fail; | ||
1049 | |||
1050 | return 0; | ||
1051 | |||
1052 | fail: | ||
1053 | tile_net_init_mpipe_fail(); | ||
1054 | return rc; | ||
1055 | } | ||
1056 | |||
1057 | /* Create persistent egress info for a given egress channel. | ||
1058 | * Note that this may be shared between, say, "gbe0" and "xgbe0". | ||
1059 | * ISSUE: Defer header allocation until TSO is actually needed? | ||
1060 | */ | ||
1061 | static int tile_net_init_egress(struct net_device *dev, int echannel) | ||
1062 | { | ||
1063 | struct page *headers_page, *edescs_page, *equeue_page; | ||
1064 | gxio_mpipe_edesc_t *edescs; | ||
1065 | gxio_mpipe_equeue_t *equeue; | ||
1066 | unsigned char *headers; | ||
1067 | int headers_order, edescs_order, equeue_order; | ||
1068 | size_t edescs_size; | ||
1069 | int edma; | ||
1070 | int rc = -ENOMEM; | ||
1071 | |||
1072 | /* Only initialize once. */ | ||
1073 | if (egress_for_echannel[echannel].equeue != NULL) | ||
1074 | return 0; | ||
1075 | |||
1076 | /* Allocate memory for the "headers". */ | ||
1077 | headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES); | ||
1078 | headers_page = alloc_pages(GFP_KERNEL, headers_order); | ||
1079 | if (headers_page == NULL) { | ||
1080 | netdev_warn(dev, | ||
1081 | "Could not alloc %zd bytes for TSO headers.\n", | ||
1082 | PAGE_SIZE << headers_order); | ||
1083 | goto fail; | ||
1084 | } | ||
1085 | headers = pfn_to_kaddr(page_to_pfn(headers_page)); | ||
1086 | |||
1087 | /* Allocate memory for the "edescs". */ | ||
1088 | edescs_size = EQUEUE_ENTRIES * sizeof(*edescs); | ||
1089 | edescs_order = get_order(edescs_size); | ||
1090 | edescs_page = alloc_pages(GFP_KERNEL, edescs_order); | ||
1091 | if (edescs_page == NULL) { | ||
1092 | netdev_warn(dev, | ||
1093 | "Could not alloc %zd bytes for eDMA ring.\n", | ||
1094 | edescs_size); | ||
1095 | goto fail_headers; | ||
1096 | } | ||
1097 | edescs = pfn_to_kaddr(page_to_pfn(edescs_page)); | ||
1098 | |||
1099 | /* Allocate memory for the "equeue". */ | ||
1100 | equeue_order = get_order(sizeof(*equeue)); | ||
1101 | equeue_page = alloc_pages(GFP_KERNEL, equeue_order); | ||
1102 | if (equeue_page == NULL) { | ||
1103 | netdev_warn(dev, | ||
1104 | "Could not alloc %zd bytes for equeue info.\n", | ||
1105 | PAGE_SIZE << equeue_order); | ||
1106 | goto fail_edescs; | ||
1107 | } | ||
1108 | equeue = pfn_to_kaddr(page_to_pfn(equeue_page)); | ||
1109 | |||
1110 | /* Allocate an edma ring. In practice this can't fail, which | ||
1111 | * is just as well: the error paths below would leak the ring. | ||
1112 | */ | ||
1113 | rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0); | ||
1114 | if (rc < 0) { | ||
1115 | netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n", | ||
1116 | rc); | ||
1117 | goto fail_equeue; | ||
1118 | } | ||
1119 | edma = rc; | ||
1120 | |||
1121 | /* Initialize the equeue. */ | ||
1122 | rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel, | ||
1123 | edescs, edescs_size, 0); | ||
1124 | if (rc != 0) { | ||
1125 | netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc); | ||
1126 | goto fail_equeue; | ||
1127 | } | ||
1128 | |||
1129 | /* Done. */ | ||
1130 | egress_for_echannel[echannel].equeue = equeue; | ||
1131 | egress_for_echannel[echannel].headers = headers; | ||
1132 | return 0; | ||
1133 | |||
1134 | fail_equeue: | ||
1135 | __free_pages(equeue_page, equeue_order); | ||
1136 | |||
1137 | fail_edescs: | ||
1138 | __free_pages(edescs_page, edescs_order); | ||
1139 | |||
1140 | fail_headers: | ||
1141 | __free_pages(headers_page, headers_order); | ||
1142 | |||
1143 | fail: | ||
1144 | return rc; | ||
1145 | } | ||
1146 | |||
1147 | /* Return channel number for a newly-opened link. */ | ||
1148 | static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link, | ||
1149 | const char *link_name) | ||
1150 | { | ||
1151 | int rc = gxio_mpipe_link_open(link, &context, link_name, 0); | ||
1152 | if (rc < 0) { | ||
1153 | netdev_err(dev, "Failed to open '%s'\n", link_name); | ||
1154 | return rc; | ||
1155 | } | ||
1156 | rc = gxio_mpipe_link_channel(link); | ||
1157 | if (rc < 0 || rc >= TILE_NET_CHANNELS) { | ||
1158 | netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc); | ||
1159 | gxio_mpipe_link_close(link); | ||
1160 | return -EINVAL; | ||
1161 | } | ||
1162 | return rc; | ||
1163 | } | ||
1164 | |||
1165 | /* Help the kernel activate the given network interface. */ | ||
1166 | static int tile_net_open(struct net_device *dev) | ||
1167 | { | ||
1168 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1169 | int cpu, rc; | ||
1170 | |||
1171 | mutex_lock(&tile_net_devs_for_channel_mutex); | ||
1172 | |||
1173 | /* Do one-time initialization the first time any device is opened. */ | ||
1174 | if (ingress_irq < 0) { | ||
1175 | rc = tile_net_init_mpipe(dev); | ||
1176 | if (rc != 0) | ||
1177 | goto fail; | ||
1178 | } | ||
1179 | |||
1180 | /* Determine if this is the "loopify" device. */ | ||
1181 | if (unlikely((loopify_link_name != NULL) && | ||
1182 | !strcmp(dev->name, loopify_link_name))) { | ||
1183 | rc = tile_net_link_open(dev, &priv->link, "loop0"); | ||
1184 | if (rc < 0) | ||
1185 | goto fail; | ||
1186 | priv->channel = rc; | ||
1187 | rc = tile_net_link_open(dev, &priv->loopify_link, "loop1"); | ||
1188 | if (rc < 0) | ||
1189 | goto fail; | ||
1190 | priv->loopify_channel = rc; | ||
1191 | priv->echannel = rc; | ||
1192 | } else { | ||
1193 | rc = tile_net_link_open(dev, &priv->link, dev->name); | ||
1194 | if (rc < 0) | ||
1195 | goto fail; | ||
1196 | priv->channel = rc; | ||
1197 | priv->echannel = rc; | ||
1198 | } | ||
1199 | |||
1200 | /* Initialize egress info (if needed). Once ever, per echannel. */ | ||
1201 | rc = tile_net_init_egress(dev, priv->echannel); | ||
1202 | if (rc != 0) | ||
1203 | goto fail; | ||
1204 | |||
1205 | tile_net_devs_for_channel[priv->channel] = dev; | ||
1206 | |||
1207 | rc = tile_net_update(dev); | ||
1208 | if (rc != 0) | ||
1209 | goto fail; | ||
1210 | |||
1211 | mutex_unlock(&tile_net_devs_for_channel_mutex); | ||
1212 | |||
1213 | /* Initialize the transmit wake timer for this device for each cpu. */ | ||
1214 | for_each_online_cpu(cpu) { | ||
1215 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
1216 | struct tile_net_tx_wake *tx_wake = | ||
1217 | &info->tx_wake[priv->echannel]; | ||
1218 | |||
1219 | hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, | ||
1220 | HRTIMER_MODE_REL); | ||
1221 | tx_wake->timer.function = tile_net_handle_tx_wake_timer; | ||
1222 | tx_wake->dev = dev; | ||
1223 | } | ||
1224 | |||
1225 | for_each_online_cpu(cpu) | ||
1226 | netif_start_subqueue(dev, cpu); | ||
1227 | netif_carrier_on(dev); | ||
1228 | return 0; | ||
1229 | |||
1230 | fail: | ||
1231 | if (priv->loopify_channel >= 0) { | ||
1232 | if (gxio_mpipe_link_close(&priv->loopify_link) != 0) | ||
1233 | netdev_warn(dev, "Failed to close loopify link!\n"); | ||
1234 | priv->loopify_channel = -1; | ||
1235 | } | ||
1236 | if (priv->channel >= 0) { | ||
1237 | if (gxio_mpipe_link_close(&priv->link) != 0) | ||
1238 | netdev_warn(dev, "Failed to close link!\n"); | ||
1239 | tile_net_devs_for_channel[priv->channel] = NULL; | ||
1240 | priv->channel = -1; | ||
1241 | } | ||
1242 | priv->echannel = -1; | ||
1243 | mutex_unlock(&tile_net_devs_for_channel_mutex); | ||
1244 | |||
1245 | /* Don't return raw gxio error codes to generic Linux. */ | ||
1246 | return (rc > -512) ? rc : -EIO; | ||
1247 | } | ||
1248 | |||
1249 | /* Help the kernel deactivate the given network interface. */ | ||
1250 | static int tile_net_stop(struct net_device *dev) | ||
1251 | { | ||
1252 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1253 | int cpu; | ||
1254 | |||
1255 | for_each_online_cpu(cpu) { | ||
1256 | struct tile_net_info *info = &per_cpu(per_cpu_info, cpu); | ||
1257 | struct tile_net_tx_wake *tx_wake = | ||
1258 | &info->tx_wake[priv->echannel]; | ||
1259 | |||
1260 | hrtimer_cancel(&tx_wake->timer); | ||
1261 | netif_stop_subqueue(dev, cpu); | ||
1262 | } | ||
1263 | |||
1264 | mutex_lock(&tile_net_devs_for_channel_mutex); | ||
1265 | tile_net_devs_for_channel[priv->channel] = NULL; | ||
1266 | (void)tile_net_update(dev); | ||
1267 | if (priv->loopify_channel >= 0) { | ||
1268 | if (gxio_mpipe_link_close(&priv->loopify_link) != 0) | ||
1269 | netdev_warn(dev, "Failed to close loopify link!\n"); | ||
1270 | priv->loopify_channel = -1; | ||
1271 | } | ||
1272 | if (priv->channel >= 0) { | ||
1273 | if (gxio_mpipe_link_close(&priv->link) != 0) | ||
1274 | netdev_warn(dev, "Failed to close link!\n"); | ||
1275 | priv->channel = -1; | ||
1276 | } | ||
1277 | priv->echannel = -1; | ||
1278 | mutex_unlock(&tile_net_devs_for_channel_mutex); | ||
1279 | |||
1280 | return 0; | ||
1281 | } | ||
1282 | |||
1283 | /* Determine the VA for a fragment. */ | ||
1284 | static inline void *tile_net_frag_buf(skb_frag_t *f) | ||
1285 | { | ||
1286 | unsigned long pfn = page_to_pfn(skb_frag_page(f)); | ||
1287 | return pfn_to_kaddr(pfn) + f->page_offset; | ||
1288 | } | ||
1289 | |||
1290 | /* Acquire a completion entry and an egress slot, or if we can't, | ||
1291 | * stop the queue and schedule the tx_wake timer. | ||
1292 | */ | ||
1293 | static s64 tile_net_equeue_try_reserve(struct net_device *dev, | ||
1294 | struct tile_net_comps *comps, | ||
1295 | gxio_mpipe_equeue_t *equeue, | ||
1296 | int num_edescs) | ||
1297 | { | ||
1298 | /* Try to acquire a completion entry. */ | ||
1299 | if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 || | ||
1300 | tile_net_free_comps(equeue, comps, 32, false) != 0) { | ||
1301 | |||
1302 | /* Try to acquire an egress slot. */ | ||
1303 | s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); | ||
1304 | if (slot >= 0) | ||
1305 | return slot; | ||
1306 | |||
1307 | /* Freeing some completions gives the equeue time to drain. */ | ||
1308 | tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false); | ||
1309 | |||
1310 | slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs); | ||
1311 | if (slot >= 0) | ||
1312 | return slot; | ||
1313 | } | ||
1314 | |||
1315 | /* Still nothing; give up and stop the queue for a short while. */ | ||
1316 | netif_stop_subqueue(dev, smp_processor_id()); | ||
1317 | tile_net_schedule_tx_wake_timer(dev); | ||
1318 | return -1; | ||
1319 | } | ||
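
tile_net_free_comps() is also defined earlier in the file. For readers of this hunk alone, a hypothetical sketch of what such a helper must do, assuming comp_next/comp_last are free-running indices into a TILE_NET_MAX_COMPS completion ring, struct tile_net_comp carries the reserved slot ("when") and the skb, and gxio_mpipe_equeue_is_complete() reports whether hardware has drained a given egress slot:

	/* Hypothetical sketch; the real helper lives earlier in this file. */
	static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
				       struct tile_net_comps *comps,
				       int limit, bool force_update)
	{
		int n = 0;

		while (comps->comp_last < comps->comp_next) {
			unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
			struct tile_net_comp *comp = &comps->comp_queue[cid];

			/* Stop at the first slot hardware has not finished. */
			if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
							   force_update || n == 0))
				break;
			dev_kfree_skb_any(comp->skb);
			comps->comp_last++;
			if (++n == limit)
				break;
		}
		return n;
	}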
1320 | |||
1321 | /* Determine how many edescs are needed for TSO. | ||
1322 | * | ||
1323 | * Sometimes, if "sendfile()" requires copying, we will be called with | ||
1324 | * "data" containing the header and payload, with "frags" being empty. | ||
1325 | * Sometimes, for example when using NFS over TCP, a single segment can | ||
1326 | * span 3 fragments. This requires special care. | ||
1327 | */ | ||
1328 | static int tso_count_edescs(struct sk_buff *skb) | ||
1329 | { | ||
1330 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1331 | unsigned int data_len = skb->data_len; | ||
1332 | unsigned int p_len = sh->gso_size; | ||
1333 | long f_id = -1; /* id of the current fragment */ | ||
1334 | long f_size = -1; /* size of the current fragment */ | ||
1335 | long f_used = -1; /* bytes used from the current fragment */ | ||
1336 | long n; /* size of the current piece of payload */ | ||
1337 | int num_edescs = 0; | ||
1338 | int segment; | ||
1339 | |||
1340 | for (segment = 0; segment < sh->gso_segs; segment++) { | ||
1341 | |||
1342 | unsigned int p_used = 0; | ||
1343 | |||
1344 | /* One edesc for header and for each piece of the payload. */ | ||
1345 | for (num_edescs++; p_used < p_len; num_edescs++) { | ||
1346 | |||
1347 | /* Advance as needed. */ | ||
1348 | while (f_used >= f_size) { | ||
1349 | f_id++; | ||
1350 | f_size = skb_frag_size(&sh->frags[f_id]); | ||
1351 | f_used = 0; | ||
1352 | } | ||
1353 | |||
1354 | /* Use bytes from the current fragment. */ | ||
1355 | n = p_len - p_used; | ||
1356 | if (n > f_size - f_used) | ||
1357 | n = f_size - f_used; | ||
1358 | f_used += n; | ||
1359 | p_used += n; | ||
1360 | } | ||
1361 | |||
1362 | /* The last segment may be less than gso_size. */ | ||
1363 | data_len -= p_len; | ||
1364 | if (data_len < p_len) | ||
1365 | p_len = data_len; | ||
1366 | } | ||
1367 | |||
1368 | return num_edescs; | ||
1369 | } | ||
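
To make the counting concrete: for a hypothetical skb with gso_size 1000 and 3000 payload bytes spread over two 1500-byte frags (so gso_segs = 3), the loop yields one header edesc per segment plus payload edescs of 1000, 500+500, and 1000 bytes, i.e. 7 edescs total. The same arithmetic as a standalone C sketch with made-up sizes:

	#include <stdio.h>

	/* Hypothetical stand-in for skb state: payload entirely in frags. */
	static int count_edescs(const long *frag_sizes, int gso_segs,
				long gso_size, long data_len)
	{
		long f_id = -1, f_size = -1, f_used = -1, n;
		long p_len = gso_size;
		int num_edescs = 0, segment;

		for (segment = 0; segment < gso_segs; segment++) {
			long p_used = 0;

			/* One edesc for the header, one per payload piece. */
			for (num_edescs++; p_used < p_len; num_edescs++) {
				while (f_used >= f_size) {
					f_id++;
					f_size = frag_sizes[f_id];
					f_used = 0;
				}
				n = p_len - p_used;
				if (n > f_size - f_used)
					n = f_size - f_used;
				f_used += n;
				p_used += n;
			}

			/* The last segment may be less than gso_size. */
			data_len -= p_len;
			if (data_len < p_len)
				p_len = data_len;
		}
		return num_edescs;
	}

	int main(void)
	{
		const long frags[] = { 1500, 1500 };

		/* 3000 payload bytes, gso_size 1000 -> 3 segments, 7 edescs. */
		printf("%d\n", count_edescs(frags, 3, 1000, 3000));
		return 0;
	}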
1370 | |||
1371 | /* Prepare modified copies of the skbuff headers. | ||
1372 | * FIXME: add support for IPv6. | ||
1373 | */ | ||
1374 | static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, | ||
1375 | s64 slot) | ||
1376 | { | ||
1377 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1378 | struct iphdr *ih; | ||
1379 | struct tcphdr *th; | ||
1380 | unsigned int data_len = skb->data_len; | ||
1381 | unsigned char *data = skb->data; | ||
1382 | unsigned int ih_off, th_off, sh_len, p_len; | ||
1383 | unsigned int isum_seed, tsum_seed, id, seq; | ||
1384 | long f_id = -1; /* id of the current fragment */ | ||
1385 | long f_size = -1; /* size of the current fragment */ | ||
1386 | long f_used = -1; /* bytes used from the current fragment */ | ||
1387 | long n; /* size of the current piece of payload */ | ||
1388 | int segment; | ||
1389 | |||
1390 | /* Locate original headers and compute various lengths. */ | ||
1391 | ih = ip_hdr(skb); | ||
1392 | th = tcp_hdr(skb); | ||
1393 | ih_off = skb_network_offset(skb); | ||
1394 | th_off = skb_transport_offset(skb); | ||
1395 | sh_len = th_off + tcp_hdrlen(skb); | ||
1396 | p_len = sh->gso_size; | ||
1397 | |||
1398 | /* Set up seed values for IP and TCP csum and initialize id and seq. */ | ||
1399 | isum_seed = ((0xFFFF - ih->check) + | ||
1400 | (0xFFFF - ih->tot_len) + | ||
1401 | (0xFFFF - ih->id)); | ||
1402 | tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); | ||
1403 | id = ntohs(ih->id); | ||
1404 | seq = ntohl(th->seq); | ||
1405 | |||
1406 | /* Prepare all the headers. */ | ||
1407 | for (segment = 0; segment < sh->gso_segs; segment++) { | ||
1408 | unsigned char *buf; | ||
1409 | unsigned int p_used = 0; | ||
1410 | |||
1411 | /* Copy to the header memory for this segment. */ | ||
1412 | buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + | ||
1413 | NET_IP_ALIGN; | ||
1414 | memcpy(buf, data, sh_len); | ||
1415 | |||
1416 | /* Update copied ip header. */ | ||
1417 | ih = (struct iphdr *)(buf + ih_off); | ||
1418 | ih->tot_len = htons(sh_len + p_len - ih_off); | ||
1419 | ih->id = htons(id); | ||
1420 | ih->check = csum_long(isum_seed + ih->tot_len + | ||
1421 | ih->id) ^ 0xffff; | ||
1422 | |||
1423 | /* Update copied tcp header. */ | ||
1424 | th = (struct tcphdr *)(buf + th_off); | ||
1425 | th->seq = htonl(seq); | ||
1426 | th->check = csum_long(tsum_seed + htons(sh_len + p_len)); | ||
1427 | if (segment != sh->gso_segs - 1) { | ||
1428 | th->fin = 0; | ||
1429 | th->psh = 0; | ||
1430 | } | ||
1431 | |||
1432 | /* Skip past the header. */ | ||
1433 | slot++; | ||
1434 | |||
1435 | /* Skip past the payload. */ | ||
1436 | while (p_used < p_len) { | ||
1437 | |||
1438 | /* Advance as needed. */ | ||
1439 | while (f_used >= f_size) { | ||
1440 | f_id++; | ||
1441 | f_size = skb_frag_size(&sh->frags[f_id]); | ||
1442 | f_used = 0; | ||
1443 | } | ||
1444 | |||
1445 | /* Use bytes from the current fragment. */ | ||
1446 | n = p_len - p_used; | ||
1447 | if (n > f_size - f_used) | ||
1448 | n = f_size - f_used; | ||
1449 | f_used += n; | ||
1450 | p_used += n; | ||
1451 | |||
1452 | slot++; | ||
1453 | } | ||
1454 | |||
1455 | id++; | ||
1456 | seq += p_len; | ||
1457 | |||
1458 | /* The last segment may be less than gso_size. */ | ||
1459 | data_len -= p_len; | ||
1460 | if (data_len < p_len) | ||
1461 | p_len = data_len; | ||
1462 | } | ||
1463 | |||
1464 | /* Flush the headers so they are ready for hardware DMA. */ | ||
1465 | wmb(); | ||
1466 | } | ||
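
The seeds above use the standard ones'-complement identities: subtracting a 16-bit field from a checksum is adding its complement (0xFFFF - x), and csum_long() (an arch helper not shown in this hunk) folds the wide sum back to 16 bits with end-around carry. A userspace sketch of that fold, with hypothetical field values and byte order ignored for brevity:

	#include <stdint.h>
	#include <stdio.h>

	/* Fold a wide ones'-complement sum to 16 bits (end-around carry),
	 * mimicking what an arch csum_long()-style helper does.
	 */
	static uint16_t csum_fold_long(uint64_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	int main(void)
	{
		/* Hypothetical values: update an IP checksum after tot_len
		 * and id change, the way the driver reuses isum_seed.
		 */
		uint16_t old_check = 0xb1e6, old_tot_len = 0x05dc, old_id = 0x1234;
		uint16_t new_tot_len = 0x05a8, new_id = 0x1235;
		uint64_t seed = (0xFFFF - old_check) + (0xFFFF - old_tot_len) +
				(0xFFFF - old_id);
		uint16_t new_check =
			csum_fold_long(seed + new_tot_len + new_id) ^ 0xFFFF;

		printf("new ip check: 0x%04x\n", new_check);
		return 0;
	}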
1467 | |||
1468 | /* Pass all the data to mpipe for egress. */ | ||
1469 | static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, | ||
1470 | struct sk_buff *skb, unsigned char *headers, s64 slot) | ||
1471 | { | ||
1472 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1473 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1474 | unsigned int data_len = skb->data_len; | ||
1475 | unsigned int p_len = sh->gso_size; | ||
1476 | gxio_mpipe_edesc_t edesc_head = { { 0 } }; | ||
1477 | gxio_mpipe_edesc_t edesc_body = { { 0 } }; | ||
1478 | long f_id = -1; /* id of the current fragment */ | ||
1479 | long f_size = -1; /* size of the current fragment */ | ||
1480 | long f_used = -1; /* bytes used from the current fragment */ | ||
1481 | long n; /* size of the current piece of payload */ | ||
1482 | unsigned long tx_packets = 0, tx_bytes = 0; | ||
1483 | unsigned int csum_start, sh_len; | ||
1484 | int segment; | ||
1485 | |||
1486 | /* Prepare to egress the headers: set up header edesc. */ | ||
1487 | csum_start = skb_checksum_start_offset(skb); | ||
1488 | sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
1489 | edesc_head.csum = 1; | ||
1490 | edesc_head.csum_start = csum_start; | ||
1491 | edesc_head.csum_dest = csum_start + skb->csum_offset; | ||
1492 | edesc_head.xfer_size = sh_len; | ||
1493 | |||
1494 | /* This is only used to specify the TLB. */ | ||
1495 | edesc_head.stack_idx = large_buffer_stack; | ||
1496 | edesc_body.stack_idx = large_buffer_stack; | ||
1497 | |||
1498 | /* Egress all the edescs. */ | ||
1499 | for (segment = 0; segment < sh->gso_segs; segment++) { | ||
1500 | void *va; | ||
1501 | unsigned char *buf; | ||
1502 | unsigned int p_used = 0; | ||
1503 | |||
1504 | /* Egress the header. */ | ||
1505 | buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES + | ||
1506 | NET_IP_ALIGN; | ||
1507 | edesc_head.va = va_to_tile_io_addr(buf); | ||
1508 | gxio_mpipe_equeue_put_at(equeue, edesc_head, slot); | ||
1509 | slot++; | ||
1510 | |||
1511 | /* Egress the payload. */ | ||
1512 | while (p_used < p_len) { | ||
1513 | |||
1514 | /* Advance as needed. */ | ||
1515 | while (f_used >= f_size) { | ||
1516 | f_id++; | ||
1517 | f_size = skb_frag_size(&sh->frags[f_id]); | ||
1518 | f_used = 0; | ||
1519 | } | ||
1520 | |||
1521 | va = tile_net_frag_buf(&sh->frags[f_id]) + f_used; | ||
1522 | |||
1523 | /* Use bytes from the current fragment. */ | ||
1524 | n = p_len - p_used; | ||
1525 | if (n > f_size - f_used) | ||
1526 | n = f_size - f_used; | ||
1527 | f_used += n; | ||
1528 | p_used += n; | ||
1529 | |||
1530 | /* Egress a piece of the payload. */ | ||
1531 | edesc_body.va = va_to_tile_io_addr(va); | ||
1532 | edesc_body.xfer_size = n; | ||
1533 | edesc_body.bound = !(p_used < p_len); | ||
1534 | gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); | ||
1535 | slot++; | ||
1536 | } | ||
1537 | |||
1538 | tx_packets++; | ||
1539 | tx_bytes += sh_len + p_len; | ||
1540 | |||
1541 | /* The last segment may be less than gso_size. */ | ||
1542 | data_len -= p_len; | ||
1543 | if (data_len < p_len) | ||
1544 | p_len = data_len; | ||
1545 | } | ||
1546 | |||
1547 | /* Update stats. */ | ||
1548 | tile_net_stats_add(tx_packets, &priv->stats.tx_packets); | ||
1549 | tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes); | ||
1550 | } | ||
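
Each egress slot owns a fixed HEADER_BYTES-sized scratch region inside "headers", indexed by slot % EQUEUE_ENTRIES, so tso_headers_prepare() and tso_egress() compute the same buffer address for a given slot with no extra bookkeeping; a slot's scratch area is presumably only reused once its completion has been seen. A toy illustration of the addressing, with hypothetical geometry values:

	#include <stdio.h>

	/* Hypothetical ring geometry; the real values come from the driver. */
	#define EQUEUE_ENTRIES 2048
	#define HEADER_BYTES 128
	#define NET_IP_ALIGN 2

	static unsigned long header_buf(unsigned long headers_base, long slot)
	{
		/* Same expression as the driver: one scratch area per slot. */
		return headers_base + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
		       NET_IP_ALIGN;
	}

	int main(void)
	{
		/* Slot 3 and slot 3 + EQUEUE_ENTRIES map to the same area. */
		printf("%lx %lx\n", header_buf(0x10000, 3),
		       header_buf(0x10000, 3 + EQUEUE_ENTRIES));
		return 0;
	}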
1551 | |||
1552 | /* Do "TSO" handling for egress. | ||
1553 | * | ||
1554 | * Normally drivers set NETIF_F_TSO only to support hardware TSO; | ||
1555 | * otherwise the stack uses scatter-gather to implement GSO in software. | ||
1556 | * In our testing, enabling GSO support (via NETIF_F_SG) drops network | ||
1557 | * performance to around 7.5 Gbps on the 10G interfaces, although it | ||
1558 | * also drops cpu utilization to under 8%. Implementing "TSO" in the | ||
1559 | * driver brings performance back up to line rate, while dropping cpu | ||
1560 | * usage even further, to less than 4%. In practice, profiling shows | ||
1561 | * that skb_segment() is what causes the GSO overhead; in the driver | ||
1562 | * we benefit from using preallocated memory to duplicate the TCP/IP | ||
1563 | * headers. | ||
1564 | */ | ||
1565 | static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) | ||
1566 | { | ||
1567 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
1568 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1569 | int channel = priv->echannel; | ||
1570 | struct tile_net_egress *egress = &egress_for_echannel[channel]; | ||
1571 | struct tile_net_comps *comps = info->comps_for_echannel[channel]; | ||
1572 | gxio_mpipe_equeue_t *equeue = egress->equeue; | ||
1573 | unsigned long irqflags; | ||
1574 | int num_edescs; | ||
1575 | s64 slot; | ||
1576 | |||
1577 | /* Determine how many mpipe edescs are needed. */ | ||
1578 | num_edescs = tso_count_edescs(skb); | ||
1579 | |||
1580 | local_irq_save(irqflags); | ||
1581 | |||
1582 | /* Try to acquire a completion entry and an egress slot. */ | ||
1583 | slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); | ||
1584 | if (slot < 0) { | ||
1585 | local_irq_restore(irqflags); | ||
1586 | return NETDEV_TX_BUSY; | ||
1587 | } | ||
1588 | |||
1589 | /* Set up copies of header data properly. */ | ||
1590 | tso_headers_prepare(skb, egress->headers, slot); | ||
1591 | |||
1592 | /* Actually pass the data to the network hardware. */ | ||
1593 | tso_egress(dev, equeue, skb, egress->headers, slot); | ||
1594 | |||
1595 | /* Add a completion record. */ | ||
1596 | add_comp(equeue, comps, slot + num_edescs - 1, skb); | ||
1597 | |||
1598 | local_irq_restore(irqflags); | ||
1599 | |||
1600 | /* Make sure the egress timer is scheduled. */ | ||
1601 | tile_net_schedule_egress_timer(); | ||
1602 | |||
1603 | return NETDEV_TX_OK; | ||
1604 | } | ||
1605 | |||
1606 | /* Analyze the body and frags for a transmit request. */ | ||
1607 | static unsigned int tile_net_tx_frags(struct frag *frags, | ||
1608 | struct sk_buff *skb, | ||
1609 | void *b_data, unsigned int b_len) | ||
1610 | { | ||
1611 | unsigned int i, n = 0; | ||
1612 | |||
1613 | struct skb_shared_info *sh = skb_shinfo(skb); | ||
1614 | |||
1615 | if (b_len != 0) { | ||
1616 | frags[n].buf = b_data; | ||
1617 | frags[n++].length = b_len; | ||
1618 | } | ||
1619 | |||
1620 | for (i = 0; i < sh->nr_frags; i++) { | ||
1621 | skb_frag_t *f = &sh->frags[i]; | ||
1622 | frags[n].buf = tile_net_frag_buf(f); | ||
1623 | frags[n++].length = skb_frag_size(f); | ||
1624 | } | ||
1625 | |||
1626 | return n; | ||
1627 | } | ||
1628 | |||
1629 | /* Help the kernel transmit a packet. */ | ||
1630 | static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | ||
1631 | { | ||
1632 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
1633 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1634 | struct tile_net_egress *egress = &egress_for_echannel[priv->echannel]; | ||
1635 | gxio_mpipe_equeue_t *equeue = egress->equeue; | ||
1636 | struct tile_net_comps *comps = | ||
1637 | info->comps_for_echannel[priv->echannel]; | ||
1638 | unsigned int len = skb->len; | ||
1639 | unsigned char *data = skb->data; | ||
1640 | unsigned int num_edescs; | ||
1641 | struct frag frags[MAX_FRAGS]; | ||
1642 | gxio_mpipe_edesc_t edescs[MAX_FRAGS]; | ||
1643 | unsigned long irqflags; | ||
1644 | gxio_mpipe_edesc_t edesc = { { 0 } }; | ||
1645 | unsigned int i; | ||
1646 | s64 slot; | ||
1647 | |||
1648 | if (skb_is_gso(skb)) | ||
1649 | return tile_net_tx_tso(skb, dev); | ||
1650 | |||
1651 | num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); | ||
1652 | |||
1653 | /* This is only used to specify the TLB. */ | ||
1654 | edesc.stack_idx = large_buffer_stack; | ||
1655 | |||
1656 | /* Prepare the edescs. */ | ||
1657 | for (i = 0; i < num_edescs; i++) { | ||
1658 | edesc.xfer_size = frags[i].length; | ||
1659 | edesc.va = va_to_tile_io_addr(frags[i].buf); | ||
1660 | edescs[i] = edesc; | ||
1661 | } | ||
1662 | |||
1663 | /* Mark the final edesc. */ | ||
1664 | edescs[num_edescs - 1].bound = 1; | ||
1665 | |||
1666 | /* Add checksum info to the initial edesc, if needed. */ | ||
1667 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
1668 | unsigned int csum_start = skb_checksum_start_offset(skb); | ||
1669 | edescs[0].csum = 1; | ||
1670 | edescs[0].csum_start = csum_start; | ||
1671 | edescs[0].csum_dest = csum_start + skb->csum_offset; | ||
1672 | } | ||
1673 | |||
1674 | local_irq_save(irqflags); | ||
1675 | |||
1676 | /* Try to acquire a completion entry and an egress slot. */ | ||
1677 | slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); | ||
1678 | if (slot < 0) { | ||
1679 | local_irq_restore(irqflags); | ||
1680 | return NETDEV_TX_BUSY; | ||
1681 | } | ||
1682 | |||
1683 | for (i = 0; i < num_edescs; i++) | ||
1684 | gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++); | ||
1685 | |||
1686 | /* Add a completion record. */ | ||
1687 | add_comp(equeue, comps, slot - 1, skb); | ||
1688 | |||
1689 | /* NOTE: Count at least ETH_ZLEN (60) bytes for short packets (e.g. 42-byte ARPs). */ | ||
1690 | tile_net_stats_add(1, &priv->stats.tx_packets); | ||
1691 | tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN), | ||
1692 | &priv->stats.tx_bytes); | ||
1693 | |||
1694 | local_irq_restore(irqflags); | ||
1695 | |||
1696 | /* Make sure the egress timer is scheduled. */ | ||
1697 | tile_net_schedule_egress_timer(); | ||
1698 | |||
1699 | return NETDEV_TX_OK; | ||
1700 | } | ||
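
For CHECKSUM_PARTIAL, the stack has already filled in the pseudo-header sum and expects hardware to checksum from csum_start onward, storing the result at csum_start + skb->csum_offset. For a plain TCP/IPv4 frame (hypothetical layout, no VLAN tag), the offsets work out as in this sketch:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical TCP/IPv4 layout: 14-byte Ethernet + 20-byte IP. */
		unsigned int eth_hlen = 14, ip_hlen = 20;
		unsigned int csum_start = eth_hlen + ip_hlen; /* transport offset */
		unsigned int csum_offset = 16;                /* tcphdr->check   */

		/* These match what the driver writes into edescs[0]. */
		printf("csum_start=%u csum_dest=%u\n",
		       csum_start, csum_start + csum_offset);
		return 0;
	}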
1701 | |||
1702 | /* Return subqueue id on this core (one per core). */ | ||
1703 | static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) | ||
1704 | { | ||
1705 | return smp_processor_id(); | ||
1706 | } | ||
1707 | |||
1708 | /* Deal with a transmit timeout. */ | ||
1709 | static void tile_net_tx_timeout(struct net_device *dev) | ||
1710 | { | ||
1711 | int cpu; | ||
1712 | |||
1713 | for_each_online_cpu(cpu) | ||
1714 | netif_wake_subqueue(dev, cpu); | ||
1715 | } | ||
1716 | |||
1717 | /* Ioctl commands. */ | ||
1718 | static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
1719 | { | ||
1720 | return -EOPNOTSUPP; | ||
1721 | } | ||
1722 | |||
1723 | /* Get system network statistics for device. */ | ||
1724 | static struct net_device_stats *tile_net_get_stats(struct net_device *dev) | ||
1725 | { | ||
1726 | struct tile_net_priv *priv = netdev_priv(dev); | ||
1727 | return &priv->stats; | ||
1728 | } | ||
1729 | |||
1730 | /* Change the MTU (68 is the minimum IPv4 MTU; jumbo frames are not supported). */ | ||
1731 | static int tile_net_change_mtu(struct net_device *dev, int new_mtu) | ||
1732 | { | ||
1733 | if ((new_mtu < 68) || (new_mtu > 1500)) | ||
1734 | return -EINVAL; | ||
1735 | dev->mtu = new_mtu; | ||
1736 | return 0; | ||
1737 | } | ||
1738 | |||
1739 | /* Change the Ethernet address of the NIC. | ||
1740 | * | ||
1741 | * The hypervisor driver does not support changing MAC address. However, | ||
1742 | * the hardware does not do anything with the MAC address, so the address | ||
1743 | * which gets used on outgoing packets, and which is accepted on incoming | ||
1744 | * packets, is completely up to us. | ||
1745 | * | ||
1746 | * Returns 0 on success, negative on failure. | ||
1747 | */ | ||
1748 | static int tile_net_set_mac_address(struct net_device *dev, void *p) | ||
1749 | { | ||
1750 | struct sockaddr *addr = p; | ||
1751 | |||
1752 | if (!is_valid_ether_addr(addr->sa_data)) | ||
1753 | return -EINVAL; | ||
1754 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
1755 | return 0; | ||
1756 | } | ||
1757 | |||
1758 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1759 | /* Polling 'interrupt' - used by things like netconsole to send skbs | ||
1760 | * without having to re-enable interrupts. It's not called while | ||
1761 | * the interrupt routine is executing. | ||
1762 | */ | ||
1763 | static void tile_net_netpoll(struct net_device *dev) | ||
1764 | { | ||
1765 | disable_percpu_irq(ingress_irq); | ||
1766 | tile_net_handle_ingress_irq(ingress_irq, NULL); | ||
1767 | enable_percpu_irq(ingress_irq, 0); | ||
1768 | } | ||
1769 | #endif | ||
1770 | |||
1771 | static const struct net_device_ops tile_net_ops = { | ||
1772 | .ndo_open = tile_net_open, | ||
1773 | .ndo_stop = tile_net_stop, | ||
1774 | .ndo_start_xmit = tile_net_tx, | ||
1775 | .ndo_select_queue = tile_net_select_queue, | ||
1776 | .ndo_do_ioctl = tile_net_ioctl, | ||
1777 | .ndo_get_stats = tile_net_get_stats, | ||
1778 | .ndo_change_mtu = tile_net_change_mtu, | ||
1779 | .ndo_tx_timeout = tile_net_tx_timeout, | ||
1780 | .ndo_set_mac_address = tile_net_set_mac_address, | ||
1781 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1782 | .ndo_poll_controller = tile_net_netpoll, | ||
1783 | #endif | ||
1784 | }; | ||
1785 | |||
1786 | /* The setup function. | ||
1787 | * | ||
1788 | * This uses ether_setup() to assign various fields in dev, including | ||
1789 | * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. | ||
1790 | */ | ||
1791 | static void tile_net_setup(struct net_device *dev) | ||
1792 | { | ||
1793 | ether_setup(dev); | ||
1794 | dev->netdev_ops = &tile_net_ops; | ||
1795 | dev->watchdog_timeo = TILE_NET_TIMEOUT; | ||
1796 | dev->features |= NETIF_F_LLTX; | ||
1797 | dev->features |= NETIF_F_HW_CSUM; | ||
1798 | dev->features |= NETIF_F_SG; | ||
1799 | dev->features |= NETIF_F_TSO; | ||
1800 | dev->mtu = 1500; | ||
1801 | } | ||
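
The feature set here is self-consistent: NETIF_F_TSO presupposes both NETIF_F_SG and checksum offload, which the lines above provide. If runtime toggling via ethtool were wanted, the same bits could presumably also be mirrored into hw_features (available in kernels of this vintage); a one-line sketch, stated as an assumption rather than part of this patch:

	/* Sketch (assumption): expose the offloads as user-toggleable. */
	dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO;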
1802 | |||
1803 | /* Allocate the device structure, register the device, and obtain the | ||
1804 | * MAC address from the hypervisor. | ||
1805 | */ | ||
1806 | static void tile_net_dev_init(const char *name, const uint8_t *mac) | ||
1807 | { | ||
1808 | int ret; | ||
1809 | int i; | ||
1810 | int nz_addr = 0; | ||
1811 | struct net_device *dev; | ||
1812 | struct tile_net_priv *priv; | ||
1813 | |||
1814 | /* HACK: Ignore "loop" links. */ | ||
1815 | if (strncmp(name, "loop", 4) == 0) | ||
1816 | return; | ||
1817 | |||
1818 | /* Allocate the device structure. Normally, "name" is a | ||
1819 | * template, instantiated by register_netdev(), but not for us. | ||
1820 | */ | ||
1821 | dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup, | ||
1822 | NR_CPUS, 1); | ||
1823 | if (!dev) { | ||
1824 | pr_err("alloc_netdev_mqs(%s) failed\n", name); | ||
1825 | return; | ||
1826 | } | ||
1827 | |||
1828 | /* Initialize "priv". */ | ||
1829 | priv = netdev_priv(dev); | ||
1830 | memset(priv, 0, sizeof(*priv)); | ||
1831 | priv->dev = dev; | ||
1832 | priv->channel = -1; | ||
1833 | priv->loopify_channel = -1; | ||
1834 | priv->echannel = -1; | ||
1835 | |||
1836 | /* Get the MAC address and set it in the device struct; this must | ||
1837 | * be done before the device is opened. If the MAC is all zeroes, | ||
1838 | * we use a random address, since we're probably on the simulator. | ||
1839 | */ | ||
1840 | for (i = 0; i < 6; i++) | ||
1841 | nz_addr |= mac[i]; | ||
1842 | |||
1843 | if (nz_addr) { | ||
1844 | memcpy(dev->dev_addr, mac, 6); | ||
1845 | dev->addr_len = 6; | ||
1846 | } else { | ||
1847 | random_ether_addr(dev->dev_addr); | ||
1848 | } | ||
1849 | |||
1850 | /* Register the network device. */ | ||
1851 | ret = register_netdev(dev); | ||
1852 | if (ret) { | ||
1853 | netdev_err(dev, "register_netdev failed %d\n", ret); | ||
1854 | free_netdev(dev); | ||
1855 | return; | ||
1856 | } | ||
1857 | } | ||
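
As an aside, the zero-MAC test above can be written with standard kernel helpers, and ether_setup() has already set dev->addr_len to ETH_ALEN, so the explicit assignment is redundant. An equivalent, hedged sketch (behavior unchanged):

	/* Sketch: same logic using standard helpers. */
	if (!is_zero_ether_addr(mac))
		memcpy(dev->dev_addr, mac, ETH_ALEN);
	else
		random_ether_addr(dev->dev_addr); /* probably the simulator */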
1858 | |||
1859 | /* Per-cpu module initialization. */ | ||
1860 | static void tile_net_init_module_percpu(void *unused) | ||
1861 | { | ||
1862 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | ||
1863 | int my_cpu = smp_processor_id(); | ||
1864 | |||
1865 | info->has_iqueue = false; | ||
1866 | |||
1867 | info->my_cpu = my_cpu; | ||
1868 | |||
1869 | /* Initialize the egress timer. */ | ||
1870 | hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
1871 | info->egress_timer.function = tile_net_handle_egress_timer; | ||
1872 | } | ||
1873 | |||
1874 | /* Module initialization. */ | ||
1875 | static int __init tile_net_init_module(void) | ||
1876 | { | ||
1877 | int i; | ||
1878 | char name[GXIO_MPIPE_LINK_NAME_LEN]; | ||
1879 | uint8_t mac[6]; | ||
1880 | |||
1881 | pr_info("Tilera Network Driver\n"); | ||
1882 | |||
1883 | mutex_init(&tile_net_devs_for_channel_mutex); | ||
1884 | |||
1885 | /* Initialize each CPU. */ | ||
1886 | on_each_cpu(tile_net_init_module_percpu, NULL, 1); | ||
1887 | |||
1888 | /* Find out what devices we have, and initialize them. */ | ||
1889 | for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++) | ||
1890 | tile_net_dev_init(name, mac); | ||
1891 | |||
1892 | if (!network_cpus_init()) | ||
1893 | network_cpus_map = *cpu_online_mask; | ||
1894 | |||
1895 | return 0; | ||
1896 | } | ||
1897 | |||
1898 | module_init(tile_net_init_module); | ||