path: root/drivers/net/ethernet
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h            |    9
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c               |    4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c        |    2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c            |    2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c             |   59
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c   |    5
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c                  |  164
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h                  |    1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c      |    5
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c                         |   22
10 files changed, 202 insertions, 71 deletions
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 75b08c63d39f..29a09271b64a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -767,16 +767,17 @@
 #define MTL_Q_RQOMR			0x40
 #define MTL_Q_RQMPOCR			0x44
 #define MTL_Q_RQDR			0x4c
+#define MTL_Q_RQFCR			0x50
 #define MTL_Q_IER			0x70
 #define MTL_Q_ISR			0x74
 
 /* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQFCR_RFA_INDEX		1
+#define MTL_Q_RQFCR_RFA_WIDTH		6
+#define MTL_Q_RQFCR_RFD_INDEX		17
+#define MTL_Q_RQFCR_RFD_WIDTH		6
 #define MTL_Q_RQOMR_EHFC_INDEX		7
 #define MTL_Q_RQOMR_EHFC_WIDTH		1
-#define MTL_Q_RQOMR_RFA_INDEX		8
-#define MTL_Q_RQOMR_RFA_WIDTH		3
-#define MTL_Q_RQOMR_RFD_INDEX		13
-#define MTL_Q_RQOMR_RFD_WIDTH		3
 #define MTL_Q_RQOMR_RQS_INDEX		16
 #define MTL_Q_RQOMR_RQS_WIDTH		9
 #define MTL_Q_RQOMR_RSF_INDEX		5
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 53f5f66ec2ee..4c66cd1d1e60 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
 
 	for (i = 0; i < pdata->rx_q_count; i++) {
 		/* Activate flow control when less than 4k left in fifo */
-		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
 
 		/* De-activate flow control when more than 6k left in fifo */
-		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
 	}
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 1d1147c93d59..e468ed3f210f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 	}
 #endif
 	if (!bnx2x_fp_lock_napi(fp))
-		return work_done;
+		return budget;
 
 	for_each_cos_in_tx_queue(fp, cos)
 		if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b29e027c476e..e356afa44e7d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
 	int err;
 
 	if (!enic_poll_lock_napi(&enic->rq[rq]))
-		return work_done;
+		return budget;
 	/* Service RQ
 	 */
 
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a62fc38f045e..1c75829eb166 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define IS_TSO_HEADER(txq, addr) \
 	((addr >= txq->tso_hdrs_dma) && \
 	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
 /*
  * RX/TX descriptors.
  */
@@ -362,6 +366,7 @@ struct tx_queue {
 	dma_addr_t tso_hdrs_dma;
 
 	struct tx_desc *tx_desc_area;
+	char *tx_desc_mapping; /* array to track the type of the dma mapping */
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
 
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
 	if (txq->tx_curr_desc == txq->tx_ring_size)
 		txq->tx_curr_desc = 0;
 	desc = &txq->tx_desc_area[tx_index];
+	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 
 	desc->l4i_chk = 0;
 	desc->byte_cnt = length;
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 		skb_frag_t *this_frag;
 		int tx_index;
 		struct tx_desc *desc;
-		void *addr;
 
 		this_frag = &skb_shinfo(skb)->frags[frag];
-		addr = page_address(this_frag->page.p) + this_frag->page_offset;
 		tx_index = txq->tx_curr_desc++;
 		if (txq->tx_curr_desc == txq->tx_ring_size)
 			txq->tx_curr_desc = 0;
 		desc = &txq->tx_desc_area[tx_index];
+		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
 
 		/*
 		 * The last fragment will generate an interrupt
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 
 		desc->l4i_chk = 0;
 		desc->byte_cnt = skb_frag_size(this_frag);
-		desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
-					       desc->byte_cnt, DMA_TO_DEVICE);
+		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+						 this_frag, 0, desc->byte_cnt,
+						 DMA_TO_DEVICE);
 	}
 }
 
909 915
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
 	if (txq->tx_curr_desc == txq->tx_ring_size)
 		txq->tx_curr_desc = 0;
 	desc = &txq->tx_desc_area[tx_index];
+	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 
 	if (nr_frags) {
 		txq_submit_frag_skb(txq, skb);
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		int tx_index;
 		struct tx_desc *desc;
 		u32 cmd_sts;
+		char desc_dma_map;
 
 		tx_index = txq->tx_used_desc;
 		desc = &txq->tx_desc_area[tx_index];
+		desc_dma_map = txq->tx_desc_mapping[tx_index];
+
 		cmd_sts = desc->cmd_sts;
 
 		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 		reclaimed++;
 		txq->tx_desc_count--;
 
-		if (!IS_TSO_HEADER(txq, desc->buf_ptr))
-			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-					 desc->byte_cnt, DMA_TO_DEVICE);
+		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+
+			if (desc_dma_map == DESC_DMA_MAP_PAGE)
+				dma_unmap_page(mp->dev->dev.parent,
+					       desc->buf_ptr,
+					       desc->byte_cnt,
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(mp->dev->dev.parent,
+						 desc->buf_ptr,
+						 desc->byte_cnt,
+						 DMA_TO_DEVICE);
+		}
 
 		if (cmd_sts & TX_ENABLE_INTERRUPT) {
 			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 	struct tx_queue *txq = mp->txq + index;
 	struct tx_desc *tx_desc;
 	int size;
+	int ret;
 	int i;
 
 	txq->index = index;
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 						nexti * sizeof(struct tx_desc);
 	}
 
+	txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+				       GFP_KERNEL);
+	if (!txq->tx_desc_mapping) {
+		ret = -ENOMEM;
+		goto err_free_desc_area;
+	}
+
 	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
 	txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
 					   txq->tx_ring_size * TSO_HEADER_SIZE,
 					   &txq->tso_hdrs_dma, GFP_KERNEL);
 	if (txq->tso_hdrs == NULL) {
-		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
-				  txq->tx_desc_area, txq->tx_desc_dma);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_free_desc_mapping;
 	}
 	skb_queue_head_init(&txq->tx_skb);
 
 	return 0;
+
+err_free_desc_mapping:
+	kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+	if (index == 0 && size <= mp->tx_desc_sram_size)
+		iounmap(txq->tx_desc_area);
+	else
+		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+				  txq->tx_desc_area, txq->tx_desc_dma);
+	return ret;
 }
 
 static void txq_deinit(struct tx_queue *txq)
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)
 	else
 		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
 				  txq->tx_desc_area, txq->tx_desc_dma);
+	kfree(txq->tx_desc_mapping);
+
 	if (txq->tso_hdrs)
 		dma_free_coherent(mp->dev->dev.parent,
 				  txq->tx_ring_size * TSO_HEADER_SIZE,
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 613037584d08..c531c8ae1be4 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
 
 	work_done = netxen_process_rcv_ring(sds_ring, budget);
 
-	if ((work_done < budget) && tx_complete) {
+	if (!tx_complete)
+		work_done = budget;
+
+	if (work_done < budget) {
 		napi_complete(&sds_ring->napi);
 		if (test_bit(__NX_DEV_UP, &adapter->state))
 			netxen_nic_enable_int(sds_ring);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6576243222af..04283fe0e6a7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
 	[TSU_ADRL31]	= 0x01fc,
 };
 
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
 	return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	mdp->cur_rx = 0;
 	mdp->cur_tx = 0;
@@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
 		skb = netdev_alloc_skb(ndev, skbuff_size);
-		mdp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
 		sh_eth_set_receive_align(skb);
@@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		rxdesc = &mdp->rx_ring[i];
 		/* The size of the buffer is a multiple of 16 bytes. */
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
-		dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
-			       DMA_FROM_DEVICE);
-		rxdesc->addr = virt_to_phys(skb->data);
+		dma_addr = dma_map_single(&ndev->dev, skb->data,
+					  rxdesc->buffer_length,
+					  DMA_FROM_DEVICE);
+		if (dma_mapping_error(&ndev->dev, dma_addr)) {
+			kfree_skb(skb);
+			break;
+		}
+		mdp->rx_skbuff[i] = skb;
+		rxdesc->addr = dma_addr;
 		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
 		/* Rx descriptor address set */
@@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 		     RFLR);
 
 	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
-	if (start)
+	if (start) {
+		mdp->irq_enabled = true;
 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	}
 
 	/* PAUSE Prohibition */
 	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
 	return ret;
 }
 
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int i;
+
+	/* Deactivate all TX descriptors, so DMA should stop at next
+	 * packet boundary if it's currently running
+	 */
+	for (i = 0; i < mdp->num_tx_ring; i++)
+		mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+	/* Disable TX FIFO egress to MAC */
+	sh_eth_rcv_snd_disable(ndev);
+
+	/* Stop RX DMA at next packet boundary */
+	sh_eth_write(ndev, 0, EDRRR);
+
+	/* Aside from TX DMA, we can't tell when the hardware is
+	 * really stopped, so we need to reset to make sure.
+	 * Before doing that, wait for long enough to *probably*
+	 * finish transmitting the last packet and poll stats.
+	 */
+	msleep(2); /* max frame time at 10 Mbps < 1250 us */
+	sh_eth_get_stats(ndev);
+	sh_eth_reset(ndev);
+}
+
 /* free Tx skb function */
 static int sh_eth_txfree(struct net_device *ndev)
 {
@@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	u16 pkt_len = 0;
 	u32 desc_status;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+	dma_addr_t dma_addr;
 
 	boguscnt = min(boguscnt, *quota);
 	limit = boguscnt;
@@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 			mdp->rx_skbuff[entry] = NULL;
 			if (mdp->cd->rpadir)
 				skb_reserve(skb, NET_IP_ALIGN);
-			dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-						ALIGN(mdp->rx_buf_sz, 16),
-						DMA_FROM_DEVICE);
+			dma_unmap_single(&ndev->dev, rxdesc->addr,
+					 ALIGN(mdp->rx_buf_sz, 16),
+					 DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
 			netif_receive_skb(skb);
@@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 
 		if (mdp->rx_skbuff[entry] == NULL) {
 			skb = netdev_alloc_skb(ndev, skbuff_size);
-			mdp->rx_skbuff[entry] = skb;
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			sh_eth_set_receive_align(skb);
-			dma_map_single(&ndev->dev, skb->data,
-				       rxdesc->buffer_length, DMA_FROM_DEVICE);
+			dma_addr = dma_map_single(&ndev->dev, skb->data,
+						  rxdesc->buffer_length,
+						  DMA_FROM_DEVICE);
+			if (dma_mapping_error(&ndev->dev, dma_addr)) {
+				kfree_skb(skb);
+				break;
+			}
+			mdp->rx_skbuff[entry] = skb;
 
 			skb_checksum_none_assert(skb);
-			rxdesc->addr = virt_to_phys(skb->data);
+			rxdesc->addr = dma_addr;
 		}
 		if (entry >= mdp->num_rx_ring - 1)
 			rxdesc->status |=
@@ -1573,7 +1617,6 @@ ignore_link:
 		if (intr_status & EESR_RFRMER) {
 			/* Receive Frame Overflow int */
 			ndev->stats.rx_frame_errors++;
-			netif_err(mdp, rx_err, ndev, "Receive Abort\n");
 		}
 	}
 
@@ -1592,13 +1635,11 @@ ignore_link:
 	if (intr_status & EESR_RDE) {
 		/* Receive Descriptor Empty int */
 		ndev->stats.rx_over_errors++;
-		netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
 	}
 
 	if (intr_status & EESR_RFE) {
 		/* Receive FIFO Overflow int */
 		ndev->stats.rx_fifo_errors++;
-		netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
 	}
 
 	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 	if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
 		ret = IRQ_HANDLED;
 	else
-		goto other_irq;
+		goto out;
+
+	if (unlikely(!mdp->irq_enabled)) {
+		sh_eth_write(ndev, 0, EESIPR);
+		goto out;
+	}
 
 	if (intr_status & EESR_RX_CHECK) {
 		if (napi_schedule_prep(&mdp->napi)) {
@@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 		sh_eth_error(ndev, intr_status);
 	}
 
-other_irq:
+out:
 	spin_unlock(&mdp->lock);
 
 	return ret;
@@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
 	napi_complete(napi);
 
 	/* Reenable Rx interrupts */
-	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	if (mdp->irq_enabled)
+		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 out:
 	return budget - quota;
 }
@@ -1968,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 		return -EINVAL;
 
 	if (netif_running(ndev)) {
+		netif_device_detach(ndev);
 		netif_tx_disable(ndev);
-		/* Disable interrupts by clearing the interrupt mask. */
-		sh_eth_write(ndev, 0x0000, EESIPR);
-		/* Stop the chip's Tx and Rx processes. */
-		sh_eth_write(ndev, 0, EDTRR);
-		sh_eth_write(ndev, 0, EDRRR);
+
+		/* Serialise with the interrupt handler and NAPI, then
+		 * disable interrupts.  We have to clear the
+		 * irq_enabled flag first to ensure that interrupts
+		 * won't be re-enabled.
+		 */
+		mdp->irq_enabled = false;
 		synchronize_irq(ndev->irq);
-	}
+		napi_synchronize(&mdp->napi);
+		sh_eth_write(ndev, 0x0000, EESIPR);
 
-	/* Free all the skbuffs in the Rx queue. */
-	sh_eth_ring_free(ndev);
-	/* Free DMA buffer */
-	sh_eth_free_dma_buffer(mdp);
+		sh_eth_dev_exit(ndev);
+
+		/* Free all the skbuffs in the Rx queue. */
+		sh_eth_ring_free(ndev);
+		/* Free DMA buffer */
+		sh_eth_free_dma_buffer(mdp);
+	}
 
 	/* Set new parameters */
 	mdp->num_rx_ring = ring->rx_pending;
 	mdp->num_tx_ring = ring->tx_pending;
 
-	ret = sh_eth_ring_init(ndev);
-	if (ret < 0) {
-		netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
-		return ret;
-	}
-	ret = sh_eth_dev_init(ndev, false);
-	if (ret < 0) {
-		netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
-		return ret;
-	}
-
 	if (netif_running(ndev)) {
+		ret = sh_eth_ring_init(ndev);
+		if (ret < 0) {
+			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
+				   __func__);
+			return ret;
+		}
+		ret = sh_eth_dev_init(ndev, false);
+		if (ret < 0) {
+			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
+				   __func__);
+			return ret;
+		}
+
+		mdp->irq_enabled = true;
 		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 		/* Setting the Rx mode will start the Rx process. */
 		sh_eth_write(ndev, EDRRR_R, EDRRR);
-		netif_wake_queue(ndev);
+		netif_device_attach(ndev);
 	}
 
 	return 0;
@@ -2117,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 	spin_unlock_irqrestore(&mdp->lock, flags);
 
+	if (skb_padto(skb, ETH_ZLEN))
+		return NETDEV_TX_OK;
+
 	entry = mdp->cur_tx % mdp->num_tx_ring;
 	mdp->tx_skbuff[entry] = skb;
 	txdesc = &mdp->tx_ring[entry];
@@ -2126,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 				 skb->len + 2);
 	txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
 				      DMA_TO_DEVICE);
-	if (skb->len < ETH_ZLEN)
-		txdesc->buffer_length = ETH_ZLEN;
-	else
-		txdesc->buffer_length = skb->len;
+	if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+	txdesc->buffer_length = skb->len;
 
 	if (entry >= mdp->num_tx_ring - 1)
 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
@@ -2181,14 +2242,17 @@ static int sh_eth_close(struct net_device *ndev)
 
 	netif_stop_queue(ndev);
 
-	/* Disable interrupts by clearing the interrupt mask. */
+	/* Serialise with the interrupt handler and NAPI, then disable
+	 * interrupts.  We have to clear the irq_enabled flag first to
+	 * ensure that interrupts won't be re-enabled.
+	 */
+	mdp->irq_enabled = false;
+	synchronize_irq(ndev->irq);
+	napi_disable(&mdp->napi);
 	sh_eth_write(ndev, 0x0000, EESIPR);
 
-	/* Stop the chip's Tx and Rx processes. */
-	sh_eth_write(ndev, 0, EDTRR);
-	sh_eth_write(ndev, 0, EDRRR);
+	sh_eth_dev_exit(ndev);
 
-	sh_eth_get_stats(ndev);
 	/* PHY Disconnect */
 	if (mdp->phydev) {
 		phy_stop(mdp->phydev);
@@ -2198,8 +2262,6 @@ static int sh_eth_close(struct net_device *ndev)
 
 	free_irq(ndev->irq, ndev);
 
-	napi_disable(&mdp->napi);
-
 	/* Free all the skbuffs in the Rx queue. */
 	sh_eth_ring_free(ndev);
 
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 71f5de1171bd..332d3c16d483 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -513,6 +513,7 @@ struct sh_eth_private {
 	u32 rx_buf_sz;			/* Based on MTU+slack. */
 	int edmac_endian;
 	struct napi_struct napi;
+	bool irq_enabled;
 	/* MII transceiver section. */
 	u32 phy_id;			/* PHY ID */
 	struct mii_bus *mii_bus;	/* MDIO bus control */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8c6b7c1651e5..cf62ff4c8c56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
  * @addr: iobase memory address
  * Description: this is the main probe function used to
  * call the alloc_etherdev, allocate the priv structure.
+ * Return:
+ * on success the new private structure is returned, otherwise the error
+ * pointer.
  */
 struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 				     struct plat_stmmacenet_data *plat_dat,
@@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 
 	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
 	if (!ndev)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	SET_NETDEV_DEV(ndev, device);
 
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index e068d48b0f21..a39131f494ec 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1683,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
 	if (vid == priv->data.default_vlan)
 		return 0;
 
+	if (priv->data.dual_emac) {
+		/* In dual EMAC, reserved VLAN id should not be used for
+		 * creating VLAN interfaces as this can break the dual
+		 * EMAC port separation
+		 */
+		int i;
+
+		for (i = 0; i < priv->data.slaves; i++) {
+			if (vid == priv->slaves[i].port_vlan)
+				return -EINVAL;
+		}
+	}
+
 	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
 	return cpsw_add_vlan_ale_entry(priv, vid);
 }
@@ -1696,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 	if (vid == priv->data.default_vlan)
 		return 0;
 
+	if (priv->data.dual_emac) {
+		int i;
+
+		for (i = 0; i < priv->data.slaves; i++) {
+			if (vid == priv->slaves[i].port_vlan)
+				return -EINVAL;
+		}
+	}
+
 	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
 	ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
 	if (ret != 0)