author		Maxime Bizon <mbizon@freebox.fr>	2013-06-04 17:53:35 -0400
committer	David S. Miller <davem@davemloft.net>	2013-06-10 17:28:28 -0400
commit		6f00a0229627ca189529cad3f9154ac2f9e5c7db (patch)
tree		20b010b5faa5fca26951f3f3575da92a9cf317fc /drivers/net/ethernet/broadcom
parent		0ae99b5fede6f3a8d252d50bb4aba29544295219 (diff)
bcm63xx_enet: add support for Broadcom BCM63xx integrated gigabit switch
Newer Broadcom BCM63xx SoCs (6328, 6362 and 6368) have an integrated switch which needs to be driven slightly differently from the traditional external switches. This patch introduces changes in arch/mips/bcm63xx in order to:

- register a bcm63xx_enetsw driver instead of the bcm63xx_enet driver
- update DMA channels configuration & state RAM base addresses
- add a new platform data configuration knob to define the number of
  ports per switch/device and force link on some ports
- define the required switch registers

On the driver side, the following changes are required:

- the switch ports need to be polled to ensure the link is up and
  running and RX/TX can properly work
- basic switch configuration needs to be performed for the switch to
  forward packets to the CPU
- update the MIB counters, since the integrated switch exposes a
  different counter layout than the plain Ethernet MAC

Signed-off-by: Maxime Bizon <mbizon@freebox.fr>
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
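As an illustration of the new platform data knob, a board file could describe its switch ports roughly as sketched below. This snippet is not part of the patch: the port indices, PHY id and names are hypothetical; only the field names match the bcm63xx_enetsw_platform_data layout consumed by bcm_enetsw_probe() and swphy_poll_timer():

	static struct bcm63xx_enetsw_platform_data enetsw_pd = {
		.num_ports = 5,		/* ports 0..4 are scanned */
		.used_ports = {
			[0] = {
				.used	= 1,
				.phy_id	= 1,	/* internal PHY, polled once per second */
				.name	= "port1",
			},
			[4] = {
				.used			= 1,
				.bypass_link		= 1,	/* no PHY to poll */
				.force_speed		= 1000,
				.force_duplex_full	= 1,
				.name			= "RGMII",
			},
		},
		/* .mac_addr would also be filled in by the board code */
	};

Ports with bypass_link set skip the PHY polling loop and get a forced port override applied at open time; the other used ports are polled over the internal MDIO controller and their port override follows the negotiated link state.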
Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--	drivers/net/ethernet/broadcom/bcm63xx_enet.c	939
-rw-r--r--	drivers/net/ethernet/broadcom/bcm63xx_enet.h	71
2 files changed, 998 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index edaf76dc2487..fbbfc4acd53f 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -59,8 +59,43 @@ static inline void enet_writel(struct bcm_enet_priv *priv,
 }

 /*
- * io helpers to access shared registers
+ * io helpers to access switch registers
  */
+static inline u32 enetsw_readl(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readl(priv->base + off);
+}
+
+static inline void enetsw_writel(struct bcm_enet_priv *priv,
+				 u32 val, u32 off)
+{
+	bcm_writel(val, priv->base + off);
+}
+
+static inline u16 enetsw_readw(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readw(priv->base + off);
+}
+
+static inline void enetsw_writew(struct bcm_enet_priv *priv,
+				 u16 val, u32 off)
+{
+	bcm_writew(val, priv->base + off);
+}
+
+static inline u8 enetsw_readb(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readb(priv->base + off);
+}
+
+static inline void enetsw_writeb(struct bcm_enet_priv *priv,
+				 u8 val, u32 off)
+{
+	bcm_writeb(val, priv->base + off);
+}
+
+
+/* io helpers to access shared registers */
 static int inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
 {
 	return bcm_readl(bcm_enet_shared_base[0] + off);
@@ -218,7 +253,6 @@ static int bcm_enet_refill_rx(struct net_device *dev)
 		if (!skb)
 			break;
 		priv->rx_skb[desc_idx] = skb;
-
 		p = dma_map_single(&priv->pdev->dev, skb->data,
 				   priv->rx_skb_size,
 				   DMA_FROM_DEVICE);
@@ -321,7 +355,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 		}

 		/* recycle packet if it's marked as bad */
-		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
+		if (!priv->enet_is_sw &&
+		    unlikely(len_stat & DMADESC_ERR_MASK)) {
 			dev->stats.rx_errors++;

 			if (len_stat & DMADESC_OVSIZE_MASK)
@@ -552,6 +587,26 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto out_unlock;
 	}

+	/* pad small packets sent on a switch device */
+	if (priv->enet_is_sw && skb->len < 64) {
+		int needed = 64 - skb->len;
+		char *data;
+
+		if (unlikely(skb_tailroom(skb) < needed)) {
+			struct sk_buff *nskb;
+
+			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
+			if (!nskb) {
+				ret = NETDEV_TX_BUSY;
+				goto out_unlock;
+			}
+			dev_kfree_skb(skb);
+			skb = nskb;
+		}
+		data = skb_put(skb, needed);
+		memset(data, 0, needed);
+	}
+
 	/* point to the next available desc */
 	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
 	priv->tx_skb[priv->tx_curr_desc] = skb;
@@ -959,9 +1014,9 @@ static int bcm_enet_open(struct net_device *dev)
 	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

 	/* set dma maximum burst len */
-	enet_dmac_writel(priv, BCMENET_DMA_MAXBURST,
+	enet_dmac_writel(priv, priv->dma_maxburst,
 			 ENETDMAC_MAXBURST_REG(priv->rx_chan));
-	enet_dmac_writel(priv, BCMENET_DMA_MAXBURST,
+	enet_dmac_writel(priv, priv->dma_maxburst,
 			 ENETDMAC_MAXBURST_REG(priv->tx_chan));

 	/* set correct transmit fifo watermark */
@@ -1567,7 +1622,7 @@ static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
 	 * it's appended
 	 */
 	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
-				  BCMENET_DMA_MAXBURST * 4);
+				  priv->dma_maxburst * 4);
 	return 0;
 }

@@ -1674,6 +1729,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	priv = netdev_priv(dev);

+	priv->enet_is_sw = false;
+	priv->dma_maxburst = BCMENET_DMA_MAXBURST;
+
 	ret = compute_hw_mtu(priv, dev->mtu);
 	if (ret)
 		goto out;
@@ -1898,8 +1956,861 @@ struct platform_driver bcm63xx_enet_driver = {
 };

 /*
- * reserve & remap memory space shared between all macs
+ * switch mii access callbacks
  */
+static int bcmenet_sw_mdio_read(struct bcm_enet_priv *priv,
+				int ext, int phy_id, int location)
+{
+	u32 reg;
+	int ret;
+
+	spin_lock_bh(&priv->enetsw_mdio_lock);
+	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
+
+	reg = ENETSW_MDIOC_RD_MASK |
+		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
+		(location << ENETSW_MDIOC_REG_SHIFT);
+
+	if (ext)
+		reg |= ENETSW_MDIOC_EXT_MASK;
+
+	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
+	udelay(50);
+	ret = enetsw_readw(priv, ENETSW_MDIOD_REG);
+	spin_unlock_bh(&priv->enetsw_mdio_lock);
+	return ret;
+}
+
+static void bcmenet_sw_mdio_write(struct bcm_enet_priv *priv,
+				  int ext, int phy_id, int location,
+				  uint16_t data)
+{
+	u32 reg;
+
+	spin_lock_bh(&priv->enetsw_mdio_lock);
+	enetsw_writel(priv, 0, ENETSW_MDIOC_REG);
+
+	reg = ENETSW_MDIOC_WR_MASK |
+		(phy_id << ENETSW_MDIOC_PHYID_SHIFT) |
+		(location << ENETSW_MDIOC_REG_SHIFT);
+
+	if (ext)
+		reg |= ENETSW_MDIOC_EXT_MASK;
+
+	reg |= data;
+
+	enetsw_writel(priv, reg, ENETSW_MDIOC_REG);
+	udelay(50);
+	spin_unlock_bh(&priv->enetsw_mdio_lock);
+}
+
+static inline int bcm_enet_port_is_rgmii(int portid)
+{
+	return portid >= ENETSW_RGMII_PORT0;
+}
+
+/*
+ * enet sw PHY polling
+ */
+static void swphy_poll_timer(unsigned long data)
+{
+	struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data;
+	unsigned int i;
+
+	for (i = 0; i < priv->num_ports; i++) {
+		struct bcm63xx_enetsw_port *port;
+		int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
+		int external_phy = bcm_enet_port_is_rgmii(i);
+		u8 override;
+
+		port = &priv->used_ports[i];
+		if (!port->used)
+			continue;
+
+		if (port->bypass_link)
+			continue;
+
+		/* dummy read to clear */
+		for (j = 0; j < 2; j++)
+			val = bcmenet_sw_mdio_read(priv, external_phy,
+						   port->phy_id, MII_BMSR);
+
+		if (val == 0xffff)
+			continue;
+
+		up = (val & BMSR_LSTATUS) ? 1 : 0;
+		if (!(up ^ priv->sw_port_link[i]))
+			continue;
+
+		priv->sw_port_link[i] = up;
+
+		/* link changed */
+		if (!up) {
+			dev_info(&priv->pdev->dev, "link DOWN on %s\n",
+				 port->name);
+			enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
+				      ENETSW_PORTOV_REG(i));
+			enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
+				      ENETSW_PTCTRL_TXDIS_MASK,
+				      ENETSW_PTCTRL_REG(i));
+			continue;
+		}
+
+		advertise = bcmenet_sw_mdio_read(priv, external_phy,
+						 port->phy_id, MII_ADVERTISE);
+
+		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
+					   MII_LPA);
+
+		lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
+					    MII_STAT1000);
+
+		/* figure out media and duplex from advertise and LPA values */
+		media = mii_nway_result(lpa & advertise);
+		duplex = (media & ADVERTISE_FULL) ? 1 : 0;
+		if (lpa2 & LPA_1000FULL)
+			duplex = 1;
+
+		if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
+			speed = 1000;
+		else {
+			if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
+				speed = 100;
+			else
+				speed = 10;
+		}
+
+		dev_info(&priv->pdev->dev,
+			 "link UP on %s, %dMbps, %s-duplex\n",
+			 port->name, speed, duplex ? "full" : "half");
+
+		override = ENETSW_PORTOV_ENABLE_MASK |
+			ENETSW_PORTOV_LINKUP_MASK;
+
+		if (speed == 1000)
+			override |= ENETSW_IMPOV_1000_MASK;
+		else if (speed == 100)
+			override |= ENETSW_IMPOV_100_MASK;
+		if (duplex)
+			override |= ENETSW_IMPOV_FDX_MASK;
+
+		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
+		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
+	}

+	priv->swphy_poll.expires = jiffies + HZ;
+	add_timer(&priv->swphy_poll);
+}
+
+/*
+ * open callback, allocate dma rings & buffers and start rx operation
+ */
+static int bcm_enetsw_open(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct device *kdev;
+	int i, ret;
+	unsigned int size;
+	void *p;
+	u32 val;
+
+	priv = netdev_priv(dev);
+	kdev = &priv->pdev->dev;
+
+	/* mask all interrupts and request them */
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+
+	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
+			  IRQF_DISABLED, dev->name, dev);
+	if (ret)
+		goto out_freeirq;
+
+	if (priv->irq_tx != -1) {
+		ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
+				  IRQF_DISABLED, dev->name, dev);
+		if (ret)
+			goto out_freeirq_rx;
+	}
+
+	/* allocate rx dma ring */
+	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
+	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+	if (!p) {
+		dev_err(kdev, "cannot allocate rx ring %u\n", size);
+		ret = -ENOMEM;
+		goto out_freeirq_tx;
+	}
+
+	memset(p, 0, size);
+	priv->rx_desc_alloc_size = size;
+	priv->rx_desc_cpu = p;
+
+	/* allocate tx dma ring */
+	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
+	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+	if (!p) {
+		dev_err(kdev, "cannot allocate tx ring\n");
+		ret = -ENOMEM;
+		goto out_free_rx_ring;
+	}
+
+	memset(p, 0, size);
+	priv->tx_desc_alloc_size = size;
+	priv->tx_desc_cpu = p;
+
+	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
+			       GFP_KERNEL);
+	if (!priv->tx_skb) {
+		dev_err(kdev, "cannot allocate rx skb queue\n");
+		ret = -ENOMEM;
+		goto out_free_tx_ring;
+	}
+
+	priv->tx_desc_count = priv->tx_ring_size;
+	priv->tx_dirty_desc = 0;
+	priv->tx_curr_desc = 0;
+	spin_lock_init(&priv->tx_lock);
+
+	/* init & fill rx ring with skbs */
+	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
+			       GFP_KERNEL);
+	if (!priv->rx_skb) {
+		dev_err(kdev, "cannot allocate rx skb queue\n");
+		ret = -ENOMEM;
+		goto out_free_tx_skb;
+	}
+
+	priv->rx_desc_count = 0;
+	priv->rx_dirty_desc = 0;
+	priv->rx_curr_desc = 0;
+
+	/* disable all ports */
+	for (i = 0; i < priv->num_ports; i++) {
+		enetsw_writeb(priv, ENETSW_PORTOV_ENABLE_MASK,
+			      ENETSW_PORTOV_REG(i));
+		enetsw_writeb(priv, ENETSW_PTCTRL_RXDIS_MASK |
+			      ENETSW_PTCTRL_TXDIS_MASK,
+			      ENETSW_PTCTRL_REG(i));
+
+		priv->sw_port_link[i] = 0;
+	}
+
+	/* reset mib */
+	val = enetsw_readb(priv, ENETSW_GMCR_REG);
+	val |= ENETSW_GMCR_RST_MIB_MASK;
+	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
+	mdelay(1);
+	val &= ~ENETSW_GMCR_RST_MIB_MASK;
+	enetsw_writeb(priv, val, ENETSW_GMCR_REG);
+	mdelay(1);
+
+	/* force CPU port state */
+	val = enetsw_readb(priv, ENETSW_IMPOV_REG);
+	val |= ENETSW_IMPOV_FORCE_MASK | ENETSW_IMPOV_LINKUP_MASK;
+	enetsw_writeb(priv, val, ENETSW_IMPOV_REG);
+
+	/* enable switch forward engine */
+	val = enetsw_readb(priv, ENETSW_SWMODE_REG);
+	val |= ENETSW_SWMODE_FWD_EN_MASK;
+	enetsw_writeb(priv, val, ENETSW_SWMODE_REG);
+
+	/* enable jumbo on all ports */
+	enetsw_writel(priv, 0x1ff, ENETSW_JMBCTL_PORT_REG);
+	enetsw_writew(priv, 9728, ENETSW_JMBCTL_MAXSIZE_REG);
+
+	/* initialize flow control buffer allocation */
+	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+			ENETDMA_BUFALLOC_REG(priv->rx_chan));
+
+	if (bcm_enet_refill_rx(dev)) {
+		dev_err(kdev, "cannot allocate rx skb queue\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* write rx & tx ring addresses */
+	enet_dmas_writel(priv, priv->rx_desc_dma,
+			 ENETDMAS_RSTART_REG(priv->rx_chan));
+	enet_dmas_writel(priv, priv->tx_desc_dma,
+			 ENETDMAS_RSTART_REG(priv->tx_chan));
+
+	/* clear remaining state ram for rx & tx channel */
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan));
+	enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan));
+
+	/* set dma maximum burst len */
+	enet_dmac_writel(priv, priv->dma_maxburst,
+			 ENETDMAC_MAXBURST_REG(priv->rx_chan));
+	enet_dmac_writel(priv, priv->dma_maxburst,
+			 ENETDMAC_MAXBURST_REG(priv->tx_chan));
+
+	/* set flow control low/high threshold to 1/3 / 2/3 */
+	val = priv->rx_ring_size / 3;
+	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
+	val = (priv->rx_ring_size * 2) / 3;
+	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+
+	/* all set, enable mac and interrupts, start dma engine and
+	 * kick rx dma channel
+	 */
+	wmb();
+	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+	enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
+			 ENETDMAC_CHANCFG_REG(priv->rx_chan));
+
+	/* watch "packet transferred" interrupt in rx and tx */
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IR_REG(priv->rx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IR_REG(priv->tx_chan));
+
+	/* make sure we enable napi before rx interrupt */
+	napi_enable(&priv->napi);
+
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
+			 ENETDMAC_IRMASK_REG(priv->tx_chan));
+
+	netif_carrier_on(dev);
+	netif_start_queue(dev);
+
+	/* apply override config for bypass_link ports here. */
+	for (i = 0; i < priv->num_ports; i++) {
+		struct bcm63xx_enetsw_port *port;
+		u8 override;
+		port = &priv->used_ports[i];
+		if (!port->used)
+			continue;
+
+		if (!port->bypass_link)
+			continue;
+
+		override = ENETSW_PORTOV_ENABLE_MASK |
+			ENETSW_PORTOV_LINKUP_MASK;
+
+		switch (port->force_speed) {
+		case 1000:
+			override |= ENETSW_IMPOV_1000_MASK;
+			break;
+		case 100:
+			override |= ENETSW_IMPOV_100_MASK;
+			break;
+		case 10:
+			break;
+		default:
+			pr_warn("invalid forced speed on port %s: assume 10\n",
+				port->name);
+			break;
+		}
+
+		if (port->force_duplex_full)
+			override |= ENETSW_IMPOV_FDX_MASK;
+
+
+		enetsw_writeb(priv, override, ENETSW_PORTOV_REG(i));
+		enetsw_writeb(priv, 0, ENETSW_PTCTRL_REG(i));
+	}
+
+	/* start phy polling timer */
+	init_timer(&priv->swphy_poll);
+	priv->swphy_poll.function = swphy_poll_timer;
+	priv->swphy_poll.data = (unsigned long)priv;
+	priv->swphy_poll.expires = jiffies;
+	add_timer(&priv->swphy_poll);
+	return 0;
+
+out:
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		struct bcm_enet_desc *desc;
+
+		if (!priv->rx_skb[i])
+			continue;
+
+		desc = &priv->rx_desc_cpu[i];
+		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
+				 DMA_FROM_DEVICE);
+		kfree_skb(priv->rx_skb[i]);
+	}
+	kfree(priv->rx_skb);
+
+out_free_tx_skb:
+	kfree(priv->tx_skb);
+
+out_free_tx_ring:
+	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+			  priv->tx_desc_cpu, priv->tx_desc_dma);
+
+out_free_rx_ring:
+	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+			  priv->rx_desc_cpu, priv->rx_desc_dma);
+
+out_freeirq_tx:
+	if (priv->irq_tx != -1)
+		free_irq(priv->irq_tx, dev);
+
+out_freeirq_rx:
+	free_irq(priv->irq_rx, dev);
+
+out_freeirq:
+	return ret;
+}
+
+/* stop callback */
+static int bcm_enetsw_stop(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct device *kdev;
+	int i;
+
+	priv = netdev_priv(dev);
+	kdev = &priv->pdev->dev;
+
+	del_timer_sync(&priv->swphy_poll);
+	netif_stop_queue(dev);
+	napi_disable(&priv->napi);
+	del_timer_sync(&priv->rx_timeout);
+
+	/* mask all interrupts */
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan));
+	enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan));
+
+	/* disable dma & mac */
+	bcm_enet_disable_dma(priv, priv->tx_chan);
+	bcm_enet_disable_dma(priv, priv->rx_chan);
+
+	/* force reclaim of all tx buffers */
+	bcm_enet_tx_reclaim(dev, 1);
+
+	/* free the rx skb ring */
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		struct bcm_enet_desc *desc;
+
+		if (!priv->rx_skb[i])
+			continue;
+
+		desc = &priv->rx_desc_cpu[i];
+		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
+				 DMA_FROM_DEVICE);
+		kfree_skb(priv->rx_skb[i]);
+	}
+
+	/* free remaining allocated memory */
+	kfree(priv->rx_skb);
+	kfree(priv->tx_skb);
+	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+			  priv->rx_desc_cpu, priv->rx_desc_dma);
+	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+			  priv->tx_desc_cpu, priv->tx_desc_dma);
+	if (priv->irq_tx != -1)
+		free_irq(priv->irq_tx, dev);
+	free_irq(priv->irq_rx, dev);
+
+	return 0;
+}
+
+/* try to sort out phy external status by walking the used_port field
+ * in the bcm_enet_priv structure. in case the phy address is not
+ * assigned to any physical port on the switch, assume it is external
+ * (and yell at the user).
+ */
+static int bcm_enetsw_phy_is_external(struct bcm_enet_priv *priv, int phy_id)
+{
+	int i;
+
+	for (i = 0; i < priv->num_ports; ++i) {
+		if (!priv->used_ports[i].used)
+			continue;
+		if (priv->used_ports[i].phy_id == phy_id)
+			return bcm_enet_port_is_rgmii(i);
+	}
+
+	printk_once(KERN_WARNING "bcm63xx_enet: could not find a used port with phy_id %i, assuming phy is external\n",
+		    phy_id);
+	return 1;
+}
+
+/* can't use bcmenet_sw_mdio_read directly as we need to sort out
+ * external/internal status of the given phy_id first.
+ */
+static int bcm_enetsw_mii_mdio_read(struct net_device *dev, int phy_id,
+				    int location)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	return bcmenet_sw_mdio_read(priv,
+				    bcm_enetsw_phy_is_external(priv, phy_id),
+				    phy_id, location);
+}
+
+/* can't use bcmenet_sw_mdio_write directly as we need to sort out
+ * external/internal status of the given phy_id first.
+ */
+static void bcm_enetsw_mii_mdio_write(struct net_device *dev, int phy_id,
+				      int location,
+				      int val)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	bcmenet_sw_mdio_write(priv, bcm_enetsw_phy_is_external(priv, phy_id),
+			      phy_id, location, val);
+}
+
+static int bcm_enetsw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct mii_if_info mii;
+
+	mii.dev = dev;
+	mii.mdio_read = bcm_enetsw_mii_mdio_read;
+	mii.mdio_write = bcm_enetsw_mii_mdio_write;
+	mii.phy_id = 0;
+	mii.phy_id_mask = 0x3f;
+	mii.reg_num_mask = 0x1f;
+	return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
+
+}
+
+static const struct net_device_ops bcm_enetsw_ops = {
+	.ndo_open		= bcm_enetsw_open,
+	.ndo_stop		= bcm_enetsw_stop,
+	.ndo_start_xmit		= bcm_enet_start_xmit,
+	.ndo_change_mtu		= bcm_enet_change_mtu,
+	.ndo_do_ioctl		= bcm_enetsw_ioctl,
+};
+
+
+static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
+	{ "rx_packets", DEV_STAT(rx_packets), -1 },
+	{ "tx_packets", DEV_STAT(tx_packets), -1 },
+	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
+	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
+	{ "rx_errors", DEV_STAT(rx_errors), -1 },
+	{ "tx_errors", DEV_STAT(tx_errors), -1 },
+	{ "rx_dropped", DEV_STAT(rx_dropped), -1 },
+	{ "tx_dropped", DEV_STAT(tx_dropped), -1 },
+
+	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETHSW_MIB_RX_GD_OCT },
+	{ "tx_unicast", GEN_STAT(mib.tx_unicast), ETHSW_MIB_RX_BRDCAST },
+	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETHSW_MIB_RX_BRDCAST },
+	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETHSW_MIB_RX_MULT },
+	{ "tx_64_octets", GEN_STAT(mib.tx_64), ETHSW_MIB_RX_64 },
+	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETHSW_MIB_RX_65_127 },
+	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETHSW_MIB_RX_128_255 },
+	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETHSW_MIB_RX_256_511 },
+	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETHSW_MIB_RX_512_1023},
+	{ "tx_1024_1522_oct", GEN_STAT(mib.tx_1024_max),
+	  ETHSW_MIB_RX_1024_1522 },
+	{ "tx_1523_2047_oct", GEN_STAT(mib.tx_1523_2047),
+	  ETHSW_MIB_RX_1523_2047 },
+	{ "tx_2048_4095_oct", GEN_STAT(mib.tx_2048_4095),
+	  ETHSW_MIB_RX_2048_4095 },
+	{ "tx_4096_8191_oct", GEN_STAT(mib.tx_4096_8191),
+	  ETHSW_MIB_RX_4096_8191 },
+	{ "tx_8192_9728_oct", GEN_STAT(mib.tx_8192_9728),
+	  ETHSW_MIB_RX_8192_9728 },
+	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR },
+	{ "tx_oversize_drop", GEN_STAT(mib.tx_ovr), ETHSW_MIB_RX_OVR_DISC },
+	{ "tx_dropped", GEN_STAT(mib.tx_drop), ETHSW_MIB_RX_DROP },
+	{ "tx_undersize", GEN_STAT(mib.tx_underrun), ETHSW_MIB_RX_UND },
+	{ "tx_pause", GEN_STAT(mib.tx_pause), ETHSW_MIB_RX_PAUSE },
+
+	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETHSW_MIB_TX_ALL_OCT },
+	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETHSW_MIB_TX_BRDCAST },
+	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETHSW_MIB_TX_MULT },
+	{ "rx_unicast", GEN_STAT(mib.rx_unicast), ETHSW_MIB_TX_MULT },
+	{ "rx_pause", GEN_STAT(mib.rx_pause), ETHSW_MIB_TX_PAUSE },
+	{ "rx_dropped", GEN_STAT(mib.rx_drop), ETHSW_MIB_TX_DROP_PKTS },
+
+};
+
+#define BCM_ENETSW_STATS_LEN	\
+	(sizeof(bcm_enetsw_gstrings_stats) / sizeof(struct bcm_enet_stats))
+
+static void bcm_enetsw_get_strings(struct net_device *netdev,
+				   u32 stringset, u8 *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       bcm_enetsw_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+		}
+		break;
+	}
+}
+
+static int bcm_enetsw_get_sset_count(struct net_device *netdev,
+				     int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return BCM_ENETSW_STATS_LEN;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
+				   struct ethtool_drvinfo *drvinfo)
+{
+	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
+	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
+	strncpy(drvinfo->fw_version, "N/A", 32);
+	strncpy(drvinfo->bus_info, "bcm63xx", 32);
+	drvinfo->n_stats = BCM_ENETSW_STATS_LEN;
+}
+
+static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
+					 struct ethtool_stats *stats,
+					 u64 *data)
+{
+	struct bcm_enet_priv *priv;
+	int i;
+
+	priv = netdev_priv(netdev);
+
+	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
+		const struct bcm_enet_stats *s;
+		u32 lo, hi;
+		char *p;
+		int reg;
+
+		s = &bcm_enetsw_gstrings_stats[i];
+
+		reg = s->mib_reg;
+		if (reg == -1)
+			continue;
+
+		lo = enetsw_readl(priv, ENETSW_MIB_REG(reg));
+		p = (char *)priv + s->stat_offset;
+
+		if (s->sizeof_stat == sizeof(u64)) {
+			hi = enetsw_readl(priv, ENETSW_MIB_REG(reg + 1));
+			*(u64 *)p = ((u64)hi << 32 | lo);
+		} else {
+			*(u32 *)p = lo;
+		}
+	}
+
+	for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
+		const struct bcm_enet_stats *s;
+		char *p;
+
+		s = &bcm_enetsw_gstrings_stats[i];
+
+		if (s->mib_reg == -1)
+			p = (char *)&netdev->stats + s->stat_offset;
+		else
+			p = (char *)priv + s->stat_offset;
+
+		data[i] = (s->sizeof_stat == sizeof(u64)) ?
+			*(u64 *)p : *(u32 *)p;
+	}
+}
+
+static void bcm_enetsw_get_ringparam(struct net_device *dev,
+				     struct ethtool_ringparam *ering)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	/* rx/tx ring is actually only limited by memory */
+	ering->rx_max_pending = 8192;
+	ering->tx_max_pending = 8192;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+	ering->rx_pending = priv->rx_ring_size;
+	ering->tx_pending = priv->tx_ring_size;
+}
+
+static int bcm_enetsw_set_ringparam(struct net_device *dev,
+				    struct ethtool_ringparam *ering)
+{
+	struct bcm_enet_priv *priv;
+	int was_running;
+
+	priv = netdev_priv(dev);
+
+	was_running = 0;
+	if (netif_running(dev)) {
+		bcm_enetsw_stop(dev);
+		was_running = 1;
+	}
+
+	priv->rx_ring_size = ering->rx_pending;
+	priv->tx_ring_size = ering->tx_pending;
+
+	if (was_running) {
+		int err;
+
+		err = bcm_enetsw_open(dev);
+		if (err)
+			dev_close(dev);
+	}
+	return 0;
+}
+
+static struct ethtool_ops bcm_enetsw_ethtool_ops = {
+	.get_strings		= bcm_enetsw_get_strings,
+	.get_sset_count		= bcm_enetsw_get_sset_count,
+	.get_ethtool_stats	= bcm_enetsw_get_ethtool_stats,
+	.get_drvinfo		= bcm_enetsw_get_drvinfo,
+	.get_ringparam		= bcm_enetsw_get_ringparam,
+	.set_ringparam		= bcm_enetsw_set_ringparam,
+};
+
+/* allocate netdevice, request register memory and register device. */
+static int bcm_enetsw_probe(struct platform_device *pdev)
+{
+	struct bcm_enet_priv *priv;
+	struct net_device *dev;
+	struct bcm63xx_enetsw_platform_data *pd;
+	struct resource *res_mem;
+	int ret, irq_rx, irq_tx;
+
+	/* stop if shared driver failed, assume driver->probe will be
+	 * called in the same order we register devices (correct ?)
+	 */
+	if (!bcm_enet_shared_base[0])
+		return -ENODEV;
+
+	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq_rx = platform_get_irq(pdev, 0);
+	irq_tx = platform_get_irq(pdev, 1);
+	if (!res_mem || irq_rx < 0)
+		return -ENODEV;
+
+	ret = 0;
+	dev = alloc_etherdev(sizeof(*priv));
+	if (!dev)
+		return -ENOMEM;
+	priv = netdev_priv(dev);
+	memset(priv, 0, sizeof(*priv));
+
+	/* initialize default and fetch platform data */
+	priv->enet_is_sw = true;
+	priv->irq_rx = irq_rx;
+	priv->irq_tx = irq_tx;
+	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
+	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
+	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
+
+	pd = pdev->dev.platform_data;
+	if (pd) {
+		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+		memcpy(priv->used_ports, pd->used_ports,
+		       sizeof(pd->used_ports));
+		priv->num_ports = pd->num_ports;
+	}
+
+	ret = compute_hw_mtu(priv, dev->mtu);
+	if (ret)
+		goto out;
+
+	if (!request_mem_region(res_mem->start, resource_size(res_mem),
+				"bcm63xx_enetsw")) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	priv->base = ioremap(res_mem->start, resource_size(res_mem));
+	if (priv->base == NULL) {
+		ret = -ENOMEM;
+		goto out_release_mem;
+	}
+
+	priv->mac_clk = clk_get(&pdev->dev, "enetsw");
+	if (IS_ERR(priv->mac_clk)) {
+		ret = PTR_ERR(priv->mac_clk);
+		goto out_unmap;
+	}
+	clk_enable(priv->mac_clk);
+
+	priv->rx_chan = 0;
+	priv->tx_chan = 1;
+	spin_lock_init(&priv->rx_lock);
+
+	/* init rx timeout (used for oom) */
+	init_timer(&priv->rx_timeout);
+	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
+	priv->rx_timeout.data = (unsigned long)dev;
+
+	/* register netdevice */
+	dev->netdev_ops = &bcm_enetsw_ops;
+	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
+	SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	spin_lock_init(&priv->enetsw_mdio_lock);
+
+	ret = register_netdev(dev);
+	if (ret)
+		goto out_put_clk;
+
+	netif_carrier_off(dev);
+	platform_set_drvdata(pdev, dev);
+	priv->pdev = pdev;
+	priv->net_dev = dev;
+
+	return 0;
+
+out_put_clk:
+	clk_put(priv->mac_clk);
+
+out_unmap:
+	iounmap(priv->base);
+
+out_release_mem:
+	release_mem_region(res_mem->start, resource_size(res_mem));
+out:
+	free_netdev(dev);
+	return ret;
+}
+
+
+/* exit func, stops hardware and unregisters netdevice */
+static int bcm_enetsw_remove(struct platform_device *pdev)
+{
+	struct bcm_enet_priv *priv;
+	struct net_device *dev;
+	struct resource *res;
+
+	/* stop netdevice */
+	dev = platform_get_drvdata(pdev);
+	priv = netdev_priv(dev);
+	unregister_netdev(dev);
+
+	/* release device resources */
+	iounmap(priv->base);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+
+	platform_set_drvdata(pdev, NULL);
+	free_netdev(dev);
+	return 0;
+}
+
+struct platform_driver bcm63xx_enetsw_driver = {
+	.probe	= bcm_enetsw_probe,
+	.remove	= bcm_enetsw_remove,
+	.driver	= {
+		.name	= "bcm63xx_enetsw",
+		.owner	= THIS_MODULE,
+	},
+};
+
+/* reserve & remap memory space shared between all macs */
 static int bcm_enet_shared_probe(struct platform_device *pdev)
 {
 	struct resource *res;
@@ -1925,8 +2836,7 @@ static int bcm_enet_shared_remove(struct platform_device *pdev)
 	return 0;
 }

-/*
- * this "shared" driver is needed because both macs share a single
+/* this "shared" driver is needed because both macs share a single
  * address space
  */
 struct platform_driver bcm63xx_enet_shared_driver = {
@@ -1938,9 +2848,7 @@ struct platform_driver bcm63xx_enet_shared_driver = {
 	},
 };

-/*
- * entry point
- */
+/* entry point */
 static int __init bcm_enet_init(void)
 {
 	int ret;
@@ -1953,12 +2861,19 @@ static int __init bcm_enet_init(void)
 	if (ret)
 		platform_driver_unregister(&bcm63xx_enet_shared_driver);

+	ret = platform_driver_register(&bcm63xx_enetsw_driver);
+	if (ret) {
+		platform_driver_unregister(&bcm63xx_enet_driver);
+		platform_driver_unregister(&bcm63xx_enet_shared_driver);
+	}
+
 	return ret;
 }

 static void __exit bcm_enet_exit(void)
 {
 	platform_driver_unregister(&bcm63xx_enet_driver);
+	platform_driver_unregister(&bcm63xx_enetsw_driver);
 	platform_driver_unregister(&bcm63xx_enet_shared_driver);
 }

diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 133d5857b9e2..721ffbaef8d2 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -18,6 +18,7 @@
 
 /* maximum burst len for dma (4 bytes unit) */
 #define BCMENET_DMA_MAXBURST	16
+#define BCMENETSW_DMA_MAXBURST	8

 /* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value
  * must be low enough so that a DMA transfer of above burst length can
@@ -84,11 +85,60 @@
 #define ETH_MIB_RX_CNTRL		54


+/*
+ * SW MIB Counters register definitions
+*/
+#define ETHSW_MIB_TX_ALL_OCT		0
+#define ETHSW_MIB_TX_DROP_PKTS		2
+#define ETHSW_MIB_TX_QOS_PKTS		3
+#define ETHSW_MIB_TX_BRDCAST		4
+#define ETHSW_MIB_TX_MULT		5
+#define ETHSW_MIB_TX_UNI		6
+#define ETHSW_MIB_TX_COL		7
+#define ETHSW_MIB_TX_1_COL		8
+#define ETHSW_MIB_TX_M_COL		9
+#define ETHSW_MIB_TX_DEF		10
+#define ETHSW_MIB_TX_LATE		11
+#define ETHSW_MIB_TX_EX_COL		12
+#define ETHSW_MIB_TX_PAUSE		14
+#define ETHSW_MIB_TX_QOS_OCT		15
+
+#define ETHSW_MIB_RX_ALL_OCT		17
+#define ETHSW_MIB_RX_UND		19
+#define ETHSW_MIB_RX_PAUSE		20
+#define ETHSW_MIB_RX_64			21
+#define ETHSW_MIB_RX_65_127		22
+#define ETHSW_MIB_RX_128_255		23
+#define ETHSW_MIB_RX_256_511		24
+#define ETHSW_MIB_RX_512_1023		25
+#define ETHSW_MIB_RX_1024_1522		26
+#define ETHSW_MIB_RX_OVR		27
+#define ETHSW_MIB_RX_JAB		28
+#define ETHSW_MIB_RX_ALIGN		29
+#define ETHSW_MIB_RX_CRC		30
+#define ETHSW_MIB_RX_GD_OCT		31
+#define ETHSW_MIB_RX_DROP		33
+#define ETHSW_MIB_RX_UNI		34
+#define ETHSW_MIB_RX_MULT		35
+#define ETHSW_MIB_RX_BRDCAST		36
+#define ETHSW_MIB_RX_SA_CHANGE		37
+#define ETHSW_MIB_RX_FRAG		38
+#define ETHSW_MIB_RX_OVR_DISC		39
+#define ETHSW_MIB_RX_SYM		40
+#define ETHSW_MIB_RX_QOS_PKTS		41
+#define ETHSW_MIB_RX_QOS_OCT		42
+#define ETHSW_MIB_RX_1523_2047		44
+#define ETHSW_MIB_RX_2048_4095		45
+#define ETHSW_MIB_RX_4096_8191		46
+#define ETHSW_MIB_RX_8192_9728		47
+
+
 struct bcm_enet_mib_counters {
 	u64 tx_gd_octets;
 	u32 tx_gd_pkts;
 	u32 tx_all_octets;
 	u32 tx_all_pkts;
+	u32 tx_unicast;
 	u32 tx_brdcast;
 	u32 tx_mult;
 	u32 tx_64;
@@ -97,7 +147,12 @@ struct bcm_enet_mib_counters {
 	u32 tx_256_511;
 	u32 tx_512_1023;
 	u32 tx_1024_max;
+	u32 tx_1523_2047;
+	u32 tx_2048_4095;
+	u32 tx_4096_8191;
+	u32 tx_8192_9728;
 	u32 tx_jab;
+	u32 tx_drop;
 	u32 tx_ovr;
 	u32 tx_frag;
 	u32 tx_underrun;
@@ -114,6 +169,7 @@ struct bcm_enet_mib_counters {
 	u32 rx_all_octets;
 	u32 rx_all_pkts;
 	u32 rx_brdcast;
+	u32 rx_unicast;
 	u32 rx_mult;
 	u32 rx_64;
 	u32 rx_65_127;
@@ -197,6 +253,9 @@ struct bcm_enet_priv {
 	/* number of dma desc in tx ring */
 	int tx_ring_size;

+	/* maximum dma burst size */
+	int dma_maxburst;
+
 	/* cpu view of rx dma ring */
 	struct bcm_enet_desc *tx_desc_cpu;

@@ -269,6 +328,18 @@ struct bcm_enet_priv {
 
 	/* maximum hardware transmit/receive size */
 	unsigned int hw_mtu;
+
+	bool enet_is_sw;
+
+	/* port mapping for switch devices */
+	int num_ports;
+	struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT];
+	int sw_port_link[ENETSW_MAX_PORT];
+
+	/* used to poll switch port state */
+	struct timer_list swphy_poll;
+	spinlock_t enetsw_mdio_lock;
 };

+
 #endif /* ! BCM63XX_ENET_H_ */