author    Giuseppe CAVALLARO <peppe.cavallaro@st.com>    2011-10-17 20:01:24 -0400
committer David S. Miller <davem@davemloft.net>          2011-10-19 19:24:18 -0400
commit    286a837217204b1ef105e3a554d0757e4fdfaac1
tree      466ed6dbe1e6281173adc57cf09a40e86f0c80fd    /drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
parent    38fe7a93fc734357c4811f1c710b1906a87d315c
stmmac: add CHAINED descriptor mode support (V4)
This patch enhances the STMMAC driver to support the CHAINED descriptor mode. STMMAC DMA descriptors can operate both in dual-buffer (RING) and linked-list (CHAINED) mode. In RING mode (the default) each descriptor points to two data buffers, whereas in CHAINED mode it points to a single data buffer and also carries a pointer to the next descriptor in the list, creating the explicit chaining in the descriptor itself; such explicit chaining is not possible in RING mode.

The first version of this work was done by Rayagond. The patch was then reworked to avoid ifdefs inside the C code. A new header file has been added to declare all the functions needed for managing enhanced and normal descriptors; these have to be specialized according to ring/chain usage. Two new C files have also been added to implement the helper routines needed to manage jumbo frames and the chain and ring setup (i.e. desc3).

Signed-off-by: Rayagond Kokatanur <rayagond@vayavyalabs.com>
Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  130
1 file changed, 52 insertions(+), 78 deletions(-)
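For orientation before reading the hunks below: all the des3/jumbo handling removed from stmmac_main.c is now reached through a per-mode callback table hooked at priv->hw->ring and registered as &ring_mode_ops in stmmac_mac_device_setup(). A minimal sketch of how such a table could be declared follows; the callback names and ring_mode_ops come from this patch, while the struct name, exact prototypes, and placement are assumptions for illustration only.

/* Hypothetical declaration of the per-mode helper table used via
 * priv->hw->ring. Callback names and ring_mode_ops are taken from the
 * diff below; the struct name and prototypes here are illustrative. */
#include <linux/skbuff.h>
#include <linux/types.h>

struct dma_desc;        /* descriptor layout defined elsewhere in the driver */
struct stmmac_priv;

struct stmmac_ring_mode_ops {
        unsigned int (*is_jumbo_frm)(int len, int enh_desc);
        unsigned int (*jumbo_frm)(struct stmmac_priv *priv,
                                  struct sk_buff *skb, int csum);
        void (*refill_desc3)(int bfsize, struct dma_desc *p);
        void (*init_desc3)(int des3_as_data_buf, struct dma_desc *p);
        void (*init_dma_chain)(struct dma_desc *des, dma_addr_t phy_addr,
                               unsigned int size);
        void (*clean_desc3)(struct dma_desc *p);
        int (*set_16kib_bfsize)(int mtu);
};

/* Implemented in the new ring/chain helper C files and selected once at
 * MAC setup time: priv->hw->ring = &ring_mode_ops; */
extern const struct stmmac_ring_mode_ops ring_mode_ops;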
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index bf895cb7578..5eccd996cde 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2,7 +2,7 @@
   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
   ST Ethernet IPs are built around a Synopsys IP Core.
 
-  Copyright (C) 2007-2009 STMicroelectronics Ltd
+  Copyright(C) 2007-2011 STMicroelectronics Ltd
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -41,17 +41,16 @@
 #include <linux/if_ether.h>
 #include <linux/crc32.h>
 #include <linux/mii.h>
-#include <linux/phy.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/prefetch.h>
-#include "stmmac.h"
 #ifdef CONFIG_STMMAC_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #endif
+#include "stmmac.h"
 
 #define STMMAC_RESOURCE_NAME "stmmaceth"
 
@@ -388,11 +387,28 @@ static void display_ring(struct dma_desc *p, int size)
388 } 387 }
389} 388}
390 389
390static int stmmac_set_bfsize(int mtu, int bufsize)
391{
392 int ret = bufsize;
393
394 if (mtu >= BUF_SIZE_4KiB)
395 ret = BUF_SIZE_8KiB;
396 else if (mtu >= BUF_SIZE_2KiB)
397 ret = BUF_SIZE_4KiB;
398 else if (mtu >= DMA_BUFFER_SIZE)
399 ret = BUF_SIZE_2KiB;
400 else
401 ret = DMA_BUFFER_SIZE;
402
403 return ret;
404}
405
391/** 406/**
392 * init_dma_desc_rings - init the RX/TX descriptor rings 407 * init_dma_desc_rings - init the RX/TX descriptor rings
393 * @dev: net device structure 408 * @dev: net device structure
394 * Description: this function initializes the DMA RX/TX descriptors 409 * Description: this function initializes the DMA RX/TX descriptors
395 * and allocates the socket buffers. 410 * and allocates the socket buffers. It suppors the chained and ring
411 * modes.
396 */ 412 */
397static void init_dma_desc_rings(struct net_device *dev) 413static void init_dma_desc_rings(struct net_device *dev)
398{ 414{
@@ -401,31 +417,24 @@ static void init_dma_desc_rings(struct net_device *dev)
         struct sk_buff *skb;
         unsigned int txsize = priv->dma_tx_size;
         unsigned int rxsize = priv->dma_rx_size;
-        unsigned int bfsize = priv->dma_buf_sz;
-        int buff2_needed = 0, dis_ic = 0;
+        unsigned int bfsize;
+        int dis_ic = 0;
+        int des3_as_data_buf = 0;
 
-        /* Set the Buffer size according to the MTU;
-         * indeed, in case of jumbo we need to bump-up the buffer sizes.
-         */
-        if (unlikely(dev->mtu >= BUF_SIZE_8KiB))
-                bfsize = BUF_SIZE_16KiB;
-        else if (unlikely(dev->mtu >= BUF_SIZE_4KiB))
-                bfsize = BUF_SIZE_8KiB;
-        else if (unlikely(dev->mtu >= BUF_SIZE_2KiB))
-                bfsize = BUF_SIZE_4KiB;
-        else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE))
-                bfsize = BUF_SIZE_2KiB;
+        /* Set the max buffer size according to the DESC mode
+         * and the MTU. Note that RING mode allows 16KiB bsize. */
+        bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+
+        if (bfsize == BUF_SIZE_16KiB)
+                des3_as_data_buf = 1;
         else
-                bfsize = DMA_BUFFER_SIZE;
+                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
 
 #ifdef CONFIG_STMMAC_TIMER
         /* Disable interrupts on completion for the reception if timer is on */
         if (likely(priv->tm->enable))
                 dis_ic = 1;
 #endif
-        /* If the MTU exceeds 8k so use the second buffer in the chain */
-        if (bfsize >= BUF_SIZE_8KiB)
-                buff2_needed = 1;
 
         DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
             txsize, rxsize, bfsize);
@@ -453,7 +462,7 @@ static void init_dma_desc_rings(struct net_device *dev)
                 return;
         }
 
-        DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, "
+        DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, "
             "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
             dev->name, priv->dma_rx, priv->dma_tx,
             (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
@@ -475,8 +484,9 @@ static void init_dma_desc_rings(struct net_device *dev)
                                                 bfsize, DMA_FROM_DEVICE);
 
                 p->des2 = priv->rx_skbuff_dma[i];
-                if (unlikely(buff2_needed))
-                        p->des3 = p->des2 + BUF_SIZE_8KiB;
+
+                priv->hw->ring->init_desc3(des3_as_data_buf, p);
+
                 DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
                     priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
         }
@@ -490,6 +500,12 @@ static void init_dma_desc_rings(struct net_device *dev)
                 priv->tx_skbuff[i] = NULL;
                 priv->dma_tx[i].des2 = 0;
         }
+
+        /* In case of Chained mode this sets the des3 to the next
+         * element in the chain */
+        priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize);
+        priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize);
+
         priv->dirty_tx = 0;
         priv->cur_tx = 0;
 
@@ -620,8 +636,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
                         dma_unmap_single(priv->device, p->des2,
                                          priv->hw->desc->get_tx_len(p),
                                          DMA_TO_DEVICE);
-                if (unlikely(p->des3))
-                        p->des3 = 0;
+                priv->hw->ring->clean_desc3(p);
 
                 if (likely(skb != NULL)) {
                         /*
@@ -728,7 +743,6 @@ static void stmmac_no_timer_stopped(void)
  */
 static void stmmac_tx_err(struct stmmac_priv *priv)
 {
-
         netif_stop_queue(priv->dev);
 
         priv->hw->dma->stop_tx(priv->ioaddr);
@@ -1028,47 +1042,6 @@ static int stmmac_release(struct net_device *dev)
         return 0;
 }
 
-static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
-                                               struct net_device *dev,
-                                               int csum_insertion)
-{
-        struct stmmac_priv *priv = netdev_priv(dev);
-        unsigned int nopaged_len = skb_headlen(skb);
-        unsigned int txsize = priv->dma_tx_size;
-        unsigned int entry = priv->cur_tx % txsize;
-        struct dma_desc *desc = priv->dma_tx + entry;
-
-        if (nopaged_len > BUF_SIZE_8KiB) {
-
-                int buf2_size = nopaged_len - BUF_SIZE_8KiB;
-
-                desc->des2 = dma_map_single(priv->device, skb->data,
-                                            BUF_SIZE_8KiB, DMA_TO_DEVICE);
-                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
-                priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
-                                                csum_insertion);
-
-                entry = (++priv->cur_tx) % txsize;
-                desc = priv->dma_tx + entry;
-
-                desc->des2 = dma_map_single(priv->device,
-                                            skb->data + BUF_SIZE_8KiB,
-                                            buf2_size, DMA_TO_DEVICE);
-                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
-                priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
-                                                csum_insertion);
-                priv->hw->desc->set_tx_owner(desc);
-                priv->tx_skbuff[entry] = NULL;
-        } else {
-                desc->des2 = dma_map_single(priv->device, skb->data,
-                                            nopaged_len, DMA_TO_DEVICE);
-                desc->des3 = desc->des2 + BUF_SIZE_4KiB;
-                priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
-                                                csum_insertion);
-        }
-        return entry;
-}
-
 /**
  * stmmac_xmit:
  * @skb : the socket buffer
@@ -1083,6 +1056,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         int i, csum_insertion = 0;
         int nfrags = skb_shinfo(skb)->nr_frags;
         struct dma_desc *desc, *first;
+        unsigned int nopaged_len = skb_headlen(skb);
 
         if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
                 if (!netif_queue_stopped(dev)) {
@@ -1103,7 +1077,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                 pr_info("stmmac xmit:\n"
                        "\tskb addr %p - len: %d - nopaged_len: %d\n"
                        "\tn_frags: %d - ip_summed: %d - %s gso\n",
-                       skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed,
+                       skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
                        !skb_is_gso(skb) ? "isn't" : "is");
 #endif
 
@@ -1116,14 +1090,14 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
                 pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n"
                         "\t\tn_frags: %d, ip_summed: %d\n",
-                        skb->len, skb_headlen(skb), nfrags, skb->ip_summed);
+                        skb->len, nopaged_len, nfrags, skb->ip_summed);
 #endif
         priv->tx_skbuff[entry] = skb;
-        if (unlikely(skb->len >= BUF_SIZE_4KiB)) {
-                entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion);
+
+        if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) {
+                entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion);
                 desc = priv->dma_tx + entry;
         } else {
-                unsigned int nopaged_len = skb_headlen(skb);
                 desc->des2 = dma_map_single(priv->device, skb->data,
                                             nopaged_len, DMA_TO_DEVICE);
                 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
@@ -1214,11 +1188,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
                                            DMA_FROM_DEVICE);
 
                         (p + entry)->des2 = priv->rx_skbuff_dma[entry];
-                        if (unlikely(priv->plat->has_gmac)) {
-                                if (bfsize >= BUF_SIZE_8KiB)
-                                        (p + entry)->des3 =
-                                            (p + entry)->des2 + BUF_SIZE_8KiB;
-                        }
+
+                        if (unlikely(priv->plat->has_gmac))
+                                priv->hw->ring->refill_desc3(bfsize, p + entry);
+
                         RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
                 }
                 wmb();
@@ -1795,6 +1768,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
                 device->desc = &ndesc_ops;
 
         priv->hw = device;
+        priv->hw->ring = &ring_mode_ops;
 
         if (device_can_wakeup(priv->device)) {
                 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */