author     Cyril Chemparathy <cyril@ti.com>            2010-09-15 10:11:29 -0400
committer  Kevin Hilman <khilman@deeprootsystems.com>  2010-09-24 10:40:31 -0400
commit     3ef0fdb2342cf58f617ce2bdcd133978629c2403 (patch)
tree       85ee852762295714055a810c37521bcf62104ef7 /drivers/net/davinci_emac.c
parent     ef8c2dab01b6e30c4b2ca3ea3b8db33430493589 (diff)
net: davinci_emac: switch to new cpdma layer
This patch hooks up the emac driver with the newly separated cpdma driver.
Key differences introduced here:
- The old buffer list scheme is no longer required
- The original code maintained a MAC address per RX channel, even though only
  one RX channel was being used.  With this change, the MAC address is
  maintained device-wide.  If support for multiple RX channels is added in the
  future, this will need to be reworked a bit.
- The new CPDMA code handles short packets better than before.  The earlier
  code adjusted the length up without ensuring that the tail end of the
  padding was cleared - a possible security issue.  This has been fixed to
  use skb_padto(); see the sketch below.
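
As a minimal sketch of the short-packet fix (not the driver's exact code; see the diff
below for that), the example contrasts the two approaches inside an ndo_start_xmit
handler.  ETH_ZLEN stands in for the driver's EMAC_DEF_MIN_ETHPKTSIZE, and
example_xmit is a hypothetical name:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	/*
	 * Old approach (problematic): report a padded length without touching
	 * the buffer, so the pad bytes carry whatever stale memory follows
	 * the frame data:
	 *
	 *	len = max_t(unsigned int, skb->len, ETH_ZLEN);
	 */

	/*
	 * New approach: skb_padto() zero-fills the frame tail out to ETH_ZLEN
	 * (reallocating if necessary).  It frees the skb on failure, so only
	 * account the drop and return NETDEV_TX_OK.
	 */
	if (skb_padto(skb, ETH_ZLEN)) {
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* hand the safely padded skb to the DMA layer here */
	return NETDEV_TX_OK;
}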
Signed-off-by: Cyril Chemparathy <cyril@ti.com>
Tested-by: Michael Williamson <michael.williamson@criticallink.com>
Tested-by: Caglar Akyuz <caglarakyuz@gmail.com>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
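
For orientation before the diff, here is a condensed sketch of the new driver/cpdma
interaction as it would sit inside davinci_emac.c, pieced together from the hunks
below (error paths, locking and interrupt wiring omitted; example_emac_dma_setup is
a hypothetical helper, not part of the patch):

#include <linux/skbuff.h>
#include "davinci_cpdma.h"

static int example_emac_dma_setup(struct emac_priv *priv,
				  struct cpdma_params *params)
{
	int i;

	/* one DMA controller per EMAC instance, described by register offsets */
	priv->dma = cpdma_ctlr_create(params);
	if (!priv->dma)
		return -ENOMEM;

	/* per-direction channels; completion callbacks replace the old
	 * driver-private buffer lists */
	priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
					 emac_tx_handler);
	priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
					 emac_rx_handler);
	if (!priv->txchan || !priv->rxchan)
		return -ENOMEM;

	/* prime the receive ring; emac_rx_handler resubmits each buffer */
	for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
		struct sk_buff *skb = emac_rx_alloc(priv);

		if (!skb)
			break;
		if (cpdma_chan_submit(priv->rxchan, skb, skb->data,
				      skb_tailroom(skb), GFP_KERNEL) < 0) {
			dev_kfree_skb_any(skb);
			break;
		}
	}

	cpdma_ctlr_start(priv->dma);
	return 0;
}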
Diffstat (limited to 'drivers/net/davinci_emac.c')
-rw-r--r--   drivers/net/davinci_emac.c   249
1 file changed, 164 insertions(+), 85 deletions(-)
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index d4298cb23b4d..67dbcfb5e894 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -63,6 +63,8 @@
 #include <asm/irq.h>
 #include <asm/page.h>
 
+#include "davinci_cpdma.h"
+
 static int debug_level;
 module_param(debug_level, int, 0);
 MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
@@ -113,6 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 #define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4)
 #define EMAC_DEF_TX_CH (0) /* Default 0th channel */
 #define EMAC_DEF_RX_CH (0) /* Default 0th channel */
+#define EMAC_DEF_RX_NUM_DESC (128)
 #define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
 #define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
 #define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -460,6 +463,9 @@ struct emac_priv {
 	u32 hw_ram_addr;
 	struct emac_txch *txch[EMAC_DEF_MAX_TX_CH];
 	struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH];
+	struct cpdma_ctlr *dma;
+	struct cpdma_chan *txchan;
+	struct cpdma_chan *rxchan;
 	u32 link; /* 1=link on, 0=link off */
 	u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */
 	u32 duplex; /* Link duplex: 0=Half, 1=Full */
@@ -624,6 +630,8 @@ static void emac_dump_regs(struct emac_priv *priv)
 		 emac_read(EMAC_RXMOFOVERRUNS));
 	dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n",
 		 emac_read(EMAC_RXDMAOVERRUNS));
+
+	cpdma_ctlr_dump(priv->dma);
 }
 
 /**
@@ -1151,6 +1159,70 @@ static irqreturn_t emac_irq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
+{
+	struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size);
+	if (WARN_ON(!skb))
+		return NULL;
+	skb->dev = priv->ndev;
+	skb_reserve(skb, NET_IP_ALIGN);
+	return skb;
+}
+
+static void emac_rx_handler(void *token, int len, int status)
+{
+	struct sk_buff *skb = token;
+	struct net_device *ndev = skb->dev;
+	struct emac_priv *priv = netdev_priv(ndev);
+	struct device *emac_dev = &ndev->dev;
+	int ret;
+
+	/* free and bail if we are shutting down */
+	if (unlikely(!netif_running(ndev))) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	/* recycle on receive error */
+	if (status < 0) {
+		ndev->stats.rx_errors++;
+		goto recycle;
+	}
+
+	/* feed received packet up the stack */
+	skb_put(skb, len);
+	skb->protocol = eth_type_trans(skb, ndev);
+	netif_receive_skb(skb);
+	ndev->stats.rx_bytes += len;
+	ndev->stats.rx_packets++;
+
+	/* alloc a new packet for receive */
+	skb = emac_rx_alloc(priv);
+	if (!skb) {
+		if (netif_msg_rx_err(priv) && net_ratelimit())
+			dev_err(emac_dev, "failed rx buffer alloc\n");
+		return;
+	}
+
+recycle:
+	ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+				skb_tailroom(skb), GFP_KERNEL);
+	if (WARN_ON(ret < 0))
+		dev_kfree_skb_any(skb);
+}
+
+static void emac_tx_handler(void *token, int len, int status)
+{
+	struct sk_buff *skb = token;
+	struct net_device *ndev = skb->dev;
+
+	if (unlikely(netif_queue_stopped(ndev)))
+		netif_start_queue(ndev);
+	ndev->stats.tx_packets++;
+	ndev->stats.tx_bytes += len;
+	dev_kfree_skb_any(skb);
+}
+
 /** EMAC on-chip buffer descriptor memory
  *
  * WARNING: Please note that the on chip memory is used for both TX and RX
@@ -1532,42 +1604,36 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct device *emac_dev = &ndev->dev;
 	int ret_code;
-	struct emac_netbufobj tx_buf; /* buffer obj-only single frame support */
-	struct emac_netpktobj tx_packet; /* packet object */
 	struct emac_priv *priv = netdev_priv(ndev);
 
 	/* If no link, return */
 	if (unlikely(!priv->link)) {
 		if (netif_msg_tx_err(priv) && net_ratelimit())
 			dev_err(emac_dev, "DaVinci EMAC: No link to transmit");
-		return NETDEV_TX_BUSY;
+		goto fail_tx;
 	}
 
-	/* Build the buffer and packet objects - Since only single fragment is
-	 * supported, need not set length and token in both packet & object.
-	 * Doing so for completeness sake & to show that this needs to be done
-	 * in multifragment case
-	 */
-	tx_packet.buf_list = &tx_buf;
-	tx_packet.num_bufs = 1; /* only single fragment supported */
-	tx_packet.pkt_length = skb->len;
-	tx_packet.pkt_token = (void *)skb;
-	tx_buf.length = skb->len;
-	tx_buf.buf_token = (void *)skb;
-	tx_buf.data_ptr = skb->data;
-	ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
+	ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
+	if (unlikely(ret_code < 0)) {
+		if (netif_msg_tx_err(priv) && net_ratelimit())
+			dev_err(emac_dev, "DaVinci EMAC: packet pad failed");
+		goto fail_tx;
+	}
+
+	ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
+				     GFP_KERNEL);
 	if (unlikely(ret_code != 0)) {
-		if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
-			if (netif_msg_tx_err(priv) && net_ratelimit())
-				dev_err(emac_dev, "DaVinci EMAC: xmit() fatal"\
-					" err. Out of TX BD's");
-			netif_stop_queue(priv->ndev);
-		}
-		ndev->stats.tx_dropped++;
-		return NETDEV_TX_BUSY;
+		if (netif_msg_tx_err(priv) && net_ratelimit())
+			dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
+		goto fail_tx;
 	}
 
 	return NETDEV_TX_OK;
+
+fail_tx:
+	ndev->stats.tx_dropped++;
+	netif_stop_queue(ndev);
+	return NETDEV_TX_BUSY;
 }
 
 /**
@@ -1588,13 +1654,12 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
 	if (netif_msg_tx_err(priv))
 		dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
 
+	emac_dump_regs(priv);
+
 	ndev->stats.tx_errors++;
 	emac_int_disable(priv);
-	emac_stop_txch(priv, EMAC_DEF_TX_CH);
-	emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
-	emac_init_txch(priv, EMAC_DEF_TX_CH);
-	emac_write(EMAC_TXHDP(0), 0);
-	emac_write(EMAC_TXINTMASKSET, BIT(EMAC_DEF_TX_CH));
+	cpdma_chan_stop(priv->txchan);
+	cpdma_chan_start(priv->txchan);
 	emac_int_enable(priv);
 }
 
@@ -1915,7 +1980,6 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
 static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
 {
 	struct emac_priv *priv = netdev_priv(ndev);
-	struct emac_rxch *rxch = priv->rxch[EMAC_DEF_RX_CH];
 	struct device *emac_dev = &priv->ndev->dev;
 	struct sockaddr *sa = addr;
 
@@ -1926,11 +1990,10 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
 	memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
 	memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
 
-	/* If the interface is down - rxch is NULL. */
 	/* MAC address is configured only after the interface is enabled. */
 	if (netif_running(ndev)) {
-		memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
-		emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
+		memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
+		emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
 	}
 
 	if (netif_msg_drv(priv))
@@ -2139,7 +2202,7 @@ end_emac_rx_bdproc:
  */
 static int emac_hw_enable(struct emac_priv *priv)
 {
-	u32 ch, val, mbp_enable, mac_control;
+	u32 val, mbp_enable, mac_control;
 
 	/* Soft reset */
 	emac_write(EMAC_SOFTRESET, 1);
@@ -2182,26 +2245,9 @@ static int emac_hw_enable(struct emac_priv *priv)
 	emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
 	priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
 
-	val = emac_read(EMAC_TXCONTROL);
-	val |= EMAC_TX_CONTROL_TX_ENABLE_VAL;
-	emac_write(EMAC_TXCONTROL, val);
-	val = emac_read(EMAC_RXCONTROL);
-	val |= EMAC_RX_CONTROL_RX_ENABLE_VAL;
-	emac_write(EMAC_RXCONTROL, val);
 	emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
 
-	for (ch = 0; ch < EMAC_DEF_MAX_TX_CH; ch++) {
-		emac_write(EMAC_TXHDP(ch), 0);
-		emac_write(EMAC_TXINTMASKSET, BIT(ch));
-	}
-	for (ch = 0; ch < EMAC_DEF_MAX_RX_CH; ch++) {
-		struct emac_rxch *rxch = priv->rxch[ch];
-		emac_setmac(priv, ch, rxch->mac_addr);
-		emac_write(EMAC_RXINTMASKSET, BIT(ch));
-		rxch->queue_active = 1;
-		emac_write(EMAC_RXHDP(ch),
-			   emac_virt_to_phys(rxch->active_queue_head, priv));
-	}
+	emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
 
 	/* Enable MII */
 	val = emac_read(EMAC_MACCONTROL);
@@ -2246,8 +2292,8 @@ static int emac_poll(struct napi_struct *napi, int budget)
 		mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
 
 	if (status & mask) {
-		num_tx_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
+		num_tx_pkts = cpdma_chan_process(priv->txchan,
					      EMAC_DEF_TX_MAX_SERVICE);
 	} /* TX processing */
 
 	mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
@@ -2256,7 +2302,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
 		mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
 
 	if (status & mask) {
-		num_rx_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget);
+		num_rx_pkts = cpdma_chan_process(priv->rxchan, budget);
 	} /* RX processing */
 
 	mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
@@ -2397,9 +2443,9 @@ static int match_first_device(struct device *dev, void *data)
 static int emac_dev_open(struct net_device *ndev)
 {
 	struct device *emac_dev = &ndev->dev;
-	u32 rc, cnt, ch;
+	u32 cnt;
 	struct resource *res;
-	int q, m;
+	int q, m, ret;
 	int i = 0;
 	int k = 0;
 	struct emac_priv *priv = netdev_priv(ndev);
@@ -2411,29 +2457,21 @@ static int emac_dev_open(struct net_device *ndev)
 	/* Configuration items */
 	priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
 
-	/* Clear basic hardware */
-	for (ch = 0; ch < EMAC_MAX_TXRX_CHANNELS; ch++) {
-		emac_write(EMAC_TXHDP(ch), 0);
-		emac_write(EMAC_RXHDP(ch), 0);
-		emac_write(EMAC_RXHDP(ch), 0);
-		emac_write(EMAC_RXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
-		emac_write(EMAC_TXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
-	}
 	priv->mac_hash1 = 0;
 	priv->mac_hash2 = 0;
 	emac_write(EMAC_MACHASH1, 0);
 	emac_write(EMAC_MACHASH2, 0);
 
-	/* multi ch not supported - open 1 TX, 1RX ch by default */
-	rc = emac_init_txch(priv, EMAC_DEF_TX_CH);
-	if (0 != rc) {
-		dev_err(emac_dev, "DaVinci EMAC: emac_init_txch() failed");
-		return rc;
-	}
-	rc = emac_init_rxch(priv, EMAC_DEF_RX_CH, priv->mac_addr);
-	if (0 != rc) {
-		dev_err(emac_dev, "DaVinci EMAC: emac_init_rxch() failed");
-		return rc;
+	for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
+		struct sk_buff *skb = emac_rx_alloc(priv);
+
+		if (!skb)
+			break;
+
+		ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+					skb_tailroom(skb), GFP_KERNEL);
+		if (WARN_ON(ret < 0))
+			break;
 	}
 
 	/* Request IRQ */
@@ -2458,6 +2496,8 @@
 		emac_set_coalesce(ndev, &coal);
 	}
 
+	cpdma_ctlr_start(priv->dma);
+
 	priv->phydev = NULL;
 	/* use the first phy on the bus if pdata did not give us a phy id */
 	if (!priv->phy_id) {
@@ -2545,10 +2585,7 @@ static int emac_dev_stop(struct net_device *ndev)
 
 	netif_carrier_off(ndev);
 	emac_int_disable(priv);
-	emac_stop_txch(priv, EMAC_DEF_TX_CH);
-	emac_stop_rxch(priv, EMAC_DEF_RX_CH);
-	emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
-	emac_cleanup_rxch(priv, EMAC_DEF_RX_CH);
+	cpdma_ctlr_stop(priv->dma);
 	emac_write(EMAC_SOFTRESET, 1);
 
 	if (priv->phydev)
@@ -2653,9 +2690,10 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct net_device *ndev;
 	struct emac_priv *priv;
-	unsigned long size;
+	unsigned long size, hw_ram_addr;
 	struct emac_platform_data *pdata;
 	struct device *emac_dev;
+	struct cpdma_params dma_params;
 
 	/* obtain emac clock from kernel */
 	emac_clk = clk_get(&pdev->dev, NULL);
@@ -2731,11 +2769,40 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 	priv->ctrl_ram_size = pdata->ctrl_ram_size;
 	priv->emac_ctrl_ram = priv->remap_addr + pdata->ctrl_ram_offset;
 
-	if (pdata->hw_ram_addr)
-		priv->hw_ram_addr = pdata->hw_ram_addr;
-	else
-		priv->hw_ram_addr = (u32 __force)res->start +
-				    pdata->ctrl_ram_offset;
+	hw_ram_addr = pdata->hw_ram_addr;
+	if (!hw_ram_addr)
+		hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
+
+	memset(&dma_params, 0, sizeof(dma_params));
+	dma_params.dev = emac_dev;
+	dma_params.dmaregs = priv->emac_base;
+	dma_params.rxthresh = priv->emac_base + 0x120;
+	dma_params.rxfree = priv->emac_base + 0x140;
+	dma_params.txhdp = priv->emac_base + 0x600;
+	dma_params.rxhdp = priv->emac_base + 0x620;
+	dma_params.txcp = priv->emac_base + 0x640;
+	dma_params.rxcp = priv->emac_base + 0x660;
+	dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
+	dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
+	dma_params.desc_mem_phys = hw_ram_addr;
+	dma_params.desc_mem_size = pdata->ctrl_ram_size;
+	dma_params.desc_align = 16;
+
+	priv->dma = cpdma_ctlr_create(&dma_params);
+	if (!priv->dma) {
+		dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
+		rc = -ENOMEM;
+		goto no_dma;
+	}
+
+	priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
+					 emac_tx_handler);
+	priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
+					 emac_rx_handler);
+	if (WARN_ON(!priv->txchan || !priv->rxchan)) {
+		rc = -ENOMEM;
+		goto no_irq_res;
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res) {
@@ -2778,6 +2845,12 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 netdev_reg_err:
 	clk_disable(emac_clk);
 no_irq_res:
+	if (priv->txchan)
+		cpdma_chan_destroy(priv->txchan);
+	if (priv->rxchan)
+		cpdma_chan_destroy(priv->rxchan);
+	cpdma_ctlr_destroy(priv->dma);
+no_dma:
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	release_mem_region(res->start, res->end - res->start + 1);
 	iounmap(priv->remap_addr);
@@ -2806,6 +2879,12 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
 	platform_set_drvdata(pdev, NULL);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
+	if (priv->txchan)
+		cpdma_chan_destroy(priv->txchan);
+	if (priv->rxchan)
+		cpdma_chan_destroy(priv->rxchan);
+	cpdma_ctlr_destroy(priv->dma);
+
 	release_mem_region(res->start, res->end - res->start + 1);
 
 	unregister_netdev(ndev);