diff options
author: David S. Miller <davem@davemloft.net> 2014-12-10 15:48:20 -0500
committer: David S. Miller <davem@davemloft.net> 2014-12-10 15:48:20 -0500
commit: 22f10923dd39141785273e423b9acf347297e15f (patch)
tree: cc1f19103817433a426b3e329d6326f5e9d8e8f7 /drivers/net/ethernet/renesas
parent: 785c20a08bead1e58ad53f2dc324782da7a0c9ea (diff)
parent: 69204cf7eb9c5a72067ce6922d4699378251d053 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/renesas/sh_eth.c
Overlapping changes in both conflict cases.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/renesas')
-rw-r--r-- | drivers/net/ethernet/renesas/sh_eth.c | 96
-rw-r--r-- | drivers/net/ethernet/renesas/sh_eth.h | 5
2 files changed, 51 insertions(+), 50 deletions(-)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index ad2e285aefd4..c29ba80ae02b 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -918,21 +918,13 @@ static int sh_eth_reset(struct net_device *ndev) | |||
918 | return ret; | 918 | return ret; |
919 | } | 919 | } |
920 | 920 | ||
921 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | ||
922 | static void sh_eth_set_receive_align(struct sk_buff *skb) | 921 | static void sh_eth_set_receive_align(struct sk_buff *skb) |
923 | { | 922 | { |
924 | int reserve; | 923 | uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1); |
925 | 924 | ||
926 | reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1)); | ||
927 | if (reserve) | 925 | if (reserve) |
928 | skb_reserve(skb, reserve); | 926 | skb_reserve(skb, SH_ETH_RX_ALIGN - reserve); |
929 | } | 927 | } |
930 | #else | ||
931 | static void sh_eth_set_receive_align(struct sk_buff *skb) | ||
932 | { | ||
933 | skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN); | ||
934 | } | ||
935 | #endif | ||
936 | 928 | ||
937 | 929 | ||
938 | /* CPU <-> EDMAC endian convert */ | 930 | /* CPU <-> EDMAC endian convert */ |
@@ -1120,6 +1112,7 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1120 | struct sh_eth_txdesc *txdesc = NULL; | 1112 | struct sh_eth_txdesc *txdesc = NULL; |
1121 | int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; | 1113 | int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; |
1122 | int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; | 1114 | int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; |
1115 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; | ||
1123 | 1116 | ||
1124 | mdp->cur_rx = 0; | 1117 | mdp->cur_rx = 0; |
1125 | mdp->cur_tx = 0; | 1118 | mdp->cur_tx = 0; |
@@ -1132,21 +1125,21 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
1132 | for (i = 0; i < mdp->num_rx_ring; i++) { | 1125 | for (i = 0; i < mdp->num_rx_ring; i++) { |
1133 | /* skb */ | 1126 | /* skb */ |
1134 | mdp->rx_skbuff[i] = NULL; | 1127 | mdp->rx_skbuff[i] = NULL; |
1135 | skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); | 1128 | skb = netdev_alloc_skb(ndev, skbuff_size); |
1136 | mdp->rx_skbuff[i] = skb; | 1129 | mdp->rx_skbuff[i] = skb; |
1137 | if (skb == NULL) | 1130 | if (skb == NULL) |
1138 | break; | 1131 | break; |
1139 | dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, | ||
1140 | DMA_FROM_DEVICE); | ||
1141 | sh_eth_set_receive_align(skb); | 1132 | sh_eth_set_receive_align(skb); |
1142 | 1133 | ||
1143 | /* RX descriptor */ | 1134 | /* RX descriptor */ |
1144 | rxdesc = &mdp->rx_ring[i]; | 1135 | rxdesc = &mdp->rx_ring[i]; |
1136 | /* The size of the buffer is a multiple of 16 bytes. */ | ||
1137 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | ||
1138 | dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length, | ||
1139 | DMA_FROM_DEVICE); | ||
1145 | rxdesc->addr = virt_to_phys(skb->data); | 1140 | rxdesc->addr = virt_to_phys(skb->data); |
1146 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); | 1141 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); |
1147 | 1142 | ||
1148 | /* The size of the buffer is 16 byte boundary. */ | ||
1149 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | ||
1150 | /* Rx descriptor address set */ | 1143 | /* Rx descriptor address set */ |
1151 | if (i == 0) { | 1144 | if (i == 0) { |
1152 | sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); | 1145 | sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); |
@@ -1399,6 +1392,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1399 | struct sk_buff *skb; | 1392 | struct sk_buff *skb; |
1400 | u16 pkt_len = 0; | 1393 | u16 pkt_len = 0; |
1401 | u32 desc_status; | 1394 | u32 desc_status; |
1395 | int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1; | ||
1402 | 1396 | ||
1403 | boguscnt = min(boguscnt, *quota); | 1397 | boguscnt = min(boguscnt, *quota); |
1404 | limit = boguscnt; | 1398 | limit = boguscnt; |
@@ -1447,7 +1441,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1447 | if (mdp->cd->rpadir) | 1441 | if (mdp->cd->rpadir) |
1448 | skb_reserve(skb, NET_IP_ALIGN); | 1442 | skb_reserve(skb, NET_IP_ALIGN); |
1449 | dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, | 1443 | dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, |
1450 | mdp->rx_buf_sz, | 1444 | ALIGN(mdp->rx_buf_sz, 16), |
1451 | DMA_FROM_DEVICE); | 1445 | DMA_FROM_DEVICE); |
1452 | skb_put(skb, pkt_len); | 1446 | skb_put(skb, pkt_len); |
1453 | skb->protocol = eth_type_trans(skb, ndev); | 1447 | skb->protocol = eth_type_trans(skb, ndev); |
@@ -1467,13 +1461,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
1467 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | 1461 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); |
1468 | 1462 | ||
1469 | if (mdp->rx_skbuff[entry] == NULL) { | 1463 | if (mdp->rx_skbuff[entry] == NULL) { |
1470 | skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); | 1464 | skb = netdev_alloc_skb(ndev, skbuff_size); |
1471 | mdp->rx_skbuff[entry] = skb; | 1465 | mdp->rx_skbuff[entry] = skb; |
1472 | if (skb == NULL) | 1466 | if (skb == NULL) |
1473 | break; /* Better luck next round. */ | 1467 | break; /* Better luck next round. */ |
1474 | dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz, | ||
1475 | DMA_FROM_DEVICE); | ||
1476 | sh_eth_set_receive_align(skb); | 1468 | sh_eth_set_receive_align(skb); |
1469 | dma_map_single(&ndev->dev, skb->data, | ||
1470 | rxdesc->buffer_length, DMA_FROM_DEVICE); | ||
1477 | 1471 | ||
1478 | skb_checksum_none_assert(skb); | 1472 | skb_checksum_none_assert(skb); |
1479 | rxdesc->addr = virt_to_phys(skb->data); | 1473 | rxdesc->addr = virt_to_phys(skb->data); |
@@ -2043,6 +2037,8 @@ static int sh_eth_open(struct net_device *ndev) | |||
2043 | if (ret) | 2037 | if (ret) |
2044 | goto out_free_irq; | 2038 | goto out_free_irq; |
2045 | 2039 | ||
2040 | mdp->is_opened = 1; | ||
2041 | |||
2046 | return ret; | 2042 | return ret; |
2047 | 2043 | ||
2048 | out_free_irq: | 2044 | out_free_irq: |
@@ -2132,6 +2128,36 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
2132 | return NETDEV_TX_OK; | 2128 | return NETDEV_TX_OK; |
2133 | } | 2129 | } |
2134 | 2130 | ||
2131 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) | ||
2132 | { | ||
2133 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
2134 | |||
2135 | if (sh_eth_is_rz_fast_ether(mdp)) | ||
2136 | return &ndev->stats; | ||
2137 | |||
2138 | if (!mdp->is_opened) | ||
2139 | return &ndev->stats; | ||
2140 | |||
2141 | ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); | ||
2142 | sh_eth_write(ndev, 0, TROCR); /* (write clear) */ | ||
2143 | ndev->stats.collisions += sh_eth_read(ndev, CDCR); | ||
2144 | sh_eth_write(ndev, 0, CDCR); /* (write clear) */ | ||
2145 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); | ||
2146 | sh_eth_write(ndev, 0, LCCR); /* (write clear) */ | ||
2147 | |||
2148 | if (sh_eth_is_gether(mdp)) { | ||
2149 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); | ||
2150 | sh_eth_write(ndev, 0, CERCR); /* (write clear) */ | ||
2151 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); | ||
2152 | sh_eth_write(ndev, 0, CEECR); /* (write clear) */ | ||
2153 | } else { | ||
2154 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); | ||
2155 | sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ | ||
2156 | } | ||
2157 | |||
2158 | return &ndev->stats; | ||
2159 | } | ||
2160 | |||
2135 | /* device close function */ | 2161 | /* device close function */ |
2136 | static int sh_eth_close(struct net_device *ndev) | 2162 | static int sh_eth_close(struct net_device *ndev) |
2137 | { | 2163 | { |
@@ -2146,6 +2172,7 @@ static int sh_eth_close(struct net_device *ndev) | |||
2146 | sh_eth_write(ndev, 0, EDTRR); | 2172 | sh_eth_write(ndev, 0, EDTRR); |
2147 | sh_eth_write(ndev, 0, EDRRR); | 2173 | sh_eth_write(ndev, 0, EDRRR); |
2148 | 2174 | ||
2175 | sh_eth_get_stats(ndev); | ||
2149 | /* PHY Disconnect */ | 2176 | /* PHY Disconnect */ |
2150 | if (mdp->phydev) { | 2177 | if (mdp->phydev) { |
2151 | phy_stop(mdp->phydev); | 2178 | phy_stop(mdp->phydev); |
@@ -2164,36 +2191,9 @@ static int sh_eth_close(struct net_device *ndev) | |||
2164 | 2191 | ||
2165 | pm_runtime_put_sync(&mdp->pdev->dev); | 2192 | pm_runtime_put_sync(&mdp->pdev->dev); |
2166 | 2193 | ||
2167 | return 0; | 2194 | mdp->is_opened = 0; |
2168 | } | ||
2169 | |||
2170 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) | ||
2171 | { | ||
2172 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
2173 | |||
2174 | if (sh_eth_is_rz_fast_ether(mdp)) | ||
2175 | return &ndev->stats; | ||
2176 | 2195 | ||
2177 | pm_runtime_get_sync(&mdp->pdev->dev); | 2196 | return 0; |
2178 | |||
2179 | ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR); | ||
2180 | sh_eth_write(ndev, 0, TROCR); /* (write clear) */ | ||
2181 | ndev->stats.collisions += sh_eth_read(ndev, CDCR); | ||
2182 | sh_eth_write(ndev, 0, CDCR); /* (write clear) */ | ||
2183 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); | ||
2184 | sh_eth_write(ndev, 0, LCCR); /* (write clear) */ | ||
2185 | if (sh_eth_is_gether(mdp)) { | ||
2186 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); | ||
2187 | sh_eth_write(ndev, 0, CERCR); /* (write clear) */ | ||
2188 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); | ||
2189 | sh_eth_write(ndev, 0, CEECR); /* (write clear) */ | ||
2190 | } else { | ||
2191 | ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); | ||
2192 | sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ | ||
2193 | } | ||
2194 | pm_runtime_put_sync(&mdp->pdev->dev); | ||
2195 | |||
2196 | return &ndev->stats; | ||
2197 | } | 2197 | } |
2198 | 2198 | ||
2199 | /* ioctl to device function */ | 2199 | /* ioctl to device function */ |
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index b37c427144ee..22301bf9c21d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -162,9 +162,9 @@ enum { | |||
162 | 162 | ||
163 | /* Driver's parameters */ | 163 | /* Driver's parameters */ |
164 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) | 164 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) |
165 | #define SH4_SKB_RX_ALIGN 32 | 165 | #define SH_ETH_RX_ALIGN 32 |
166 | #else | 166 | #else |
167 | #define SH2_SH3_SKB_RX_ALIGN 2 | 167 | #define SH_ETH_RX_ALIGN 2 |
168 | #endif | 168 | #endif |
169 | 169 | ||
170 | /* Register's bits | 170 | /* Register's bits |
@@ -522,6 +522,7 @@ struct sh_eth_private { | |||
522 | 522 | ||
523 | unsigned no_ether_link:1; | 523 | unsigned no_ether_link:1; |
524 | unsigned ether_link_active_low:1; | 524 | unsigned ether_link_active_low:1; |
525 | unsigned is_opened:1; | ||
525 | }; | 526 | }; |
526 | 527 | ||
527 | static inline void sh_eth_soft_swap(char *src, int len) | 528 | static inline void sh_eth_soft_swap(char *src, int len) |