-rw-r--r--  drivers/net/ethernet/ti/cpsw.c            |  9 +++++++++
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c   | 47 ++++++++++++++++++++----
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.h   |  1 +
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c    | 13 +++++------
4 files changed, 57 insertions(+), 13 deletions(-)
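In short: the CPDMA descriptor pool, previously shared freely between directions, is partitioned into an rx half and a tx half. cpdma_desc_alloc() now searches slots [0, num_desc/2) for rx channels and [num_desc/2, num_desc) for tx channels, so neither direction can starve the other of descriptors. A new helper, cpdma_check_free_tx_desc(), reports whether the tx half still has a free slot; cpsw and davinci_emac use it to stop the netif queue when tx descriptors run out and restart it from the tx-completion handler, which lets davinci_emac drop its driver-private atomic cur_tx counter. With a pool of, say, 256 descriptors, rx allocations would be confined to slots 0-127 and tx allocations to slots 128-255.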
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 3772804fb697..b35e6a76664c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -374,6 +374,9 @@ void cpsw_tx_handler(void *token, int len, int status)
 	struct net_device *ndev = skb->dev;
 	struct cpsw_priv *priv = netdev_priv(ndev);
 
+	/* Check whether the queue is stopped due to stalled tx dma, if the
+	 * queue is stopped then start the queue as we have free desc for tx
+	 */
 	if (unlikely(netif_queue_stopped(ndev)))
 		netif_start_queue(ndev);
 	cpts_tx_timestamp(&priv->cpts, skb);
@@ -736,6 +739,12 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 		goto fail;
 	}
 
+	/* If there is no more tx desc left free then we need to
+	 * tell the kernel to stop sending us tx frames.
+	 */
+	if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
+		netif_stop_queue(ndev);
+
 	return NETDEV_TX_OK;
 fail:
 	priv->stats.tx_dropped++;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 49956730cd8d..f8629186afbe 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -105,13 +105,13 @@ struct cpdma_ctlr {
 };
 
 struct cpdma_chan {
+	struct cpdma_desc __iomem	*head, *tail;
+	void __iomem			*hdp, *cp, *rxfree;
 	enum cpdma_state		state;
 	struct cpdma_ctlr		*ctlr;
 	int				chan_num;
 	spinlock_t			lock;
-	struct cpdma_desc __iomem	*head, *tail;
 	int				count;
-	void __iomem			*hdp, *cp, *rxfree;
 	u32				mask;
 	cpdma_handler_fn		handler;
 	enum dma_data_direction		dir;
@@ -217,17 +217,27 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
 }
 
 static struct cpdma_desc __iomem *
-cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
+cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
 {
 	unsigned long flags;
 	int index;
+	int desc_start;
+	int desc_end;
 	struct cpdma_desc __iomem *desc = NULL;
 
 	spin_lock_irqsave(&pool->lock, flags);
 
-	index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
-					   num_desc, 0);
-	if (index < pool->num_desc) {
+	if (is_rx) {
+		desc_start = 0;
+		desc_end = pool->num_desc/2;
+	} else {
+		desc_start = pool->num_desc/2;
+		desc_end = pool->num_desc;
+	}
+
+	index = bitmap_find_next_zero_area(pool->bitmap,
+					desc_end, desc_start, num_desc, 0);
+	if (index < desc_end) {
 		bitmap_set(pool->bitmap, index, num_desc);
 		desc = pool->iomap + pool->desc_size * index;
 		pool->used_desc++;
@@ -668,7 +678,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
 		goto unlock_ret;
 	}
 
-	desc = cpdma_desc_alloc(ctlr->pool, 1);
+	desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
 	if (!desc) {
 		chan->stats.desc_alloc_fail++;
 		ret = -ENOMEM;
@@ -704,6 +714,29 @@ unlock_ret:
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_submit);
 
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
+{
+	unsigned long flags;
+	int index;
+	bool ret;
+	struct cpdma_ctlr *ctlr = chan->ctlr;
+	struct cpdma_desc_pool *pool = ctlr->pool;
+
+	spin_lock_irqsave(&pool->lock, flags);
+
+	index = bitmap_find_next_zero_area(pool->bitmap,
+				pool->num_desc, pool->num_desc/2, 1, 0);
+
+	if (index < pool->num_desc)
+		ret = true;
+	else
+		ret = false;
+
+	spin_unlock_irqrestore(&pool->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
+
 static void __cpdma_chan_free(struct cpdma_chan *chan,
 			      struct cpdma_desc __iomem *desc,
 			      int outlen, int status)
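To make the allocator's new behaviour concrete, here is a minimal, userspace-only C sketch of the split pool. Everything in it is invented for illustration (NUM_DESC, find_free(), desc_alloc(), check_free_tx_desc() are not kernel APIs); find_free() only mimics what bitmap_find_next_zero_area() does for a single descriptor, and the model is single-threaded, so the pool->lock taken in the real code has no counterpart here. It demonstrates the property the code above relies on: exhausting the tx half leaves rx allocations untouched.

#include <stdbool.h>
#include <stdio.h>

#define NUM_DESC 8	/* toy pool: slots 0-3 are rx, slots 4-7 are tx */

static bool bitmap[NUM_DESC];	/* true = descriptor in use */

/* Scan [start, end) for a free slot; returns end when none is left.
 * Stands in for bitmap_find_next_zero_area() with num_desc == 1. */
static int find_free(int start, int end)
{
	for (int i = start; i < end; i++)
		if (!bitmap[i])
			return i;
	return end;
}

/* Models cpdma_desc_alloc(): rx and tx draw from disjoint halves. */
static int desc_alloc(bool is_rx)
{
	int start = is_rx ? 0 : NUM_DESC / 2;
	int end = is_rx ? NUM_DESC / 2 : NUM_DESC;
	int index = find_free(start, end);

	if (index >= end)
		return -1;	/* this half of the pool is exhausted */
	bitmap[index] = true;
	return index;
}

/* Models cpdma_check_free_tx_desc(): any free slot in the tx half? */
static bool check_free_tx_desc(void)
{
	return find_free(NUM_DESC / 2, NUM_DESC) < NUM_DESC;
}

int main(void)
{
	while (desc_alloc(false) >= 0)	/* exhaust the tx half */
		;
	printf("tx desc free: %s\n", check_free_tx_desc() ? "yes" : "no");
	printf("rx alloc still works: slot %d\n", desc_alloc(true));
	return 0;
}

Run, this prints "tx desc free: no" followed by "rx alloc still works: slot 0" — a driver would stop the netif queue at the first line and keep receiving regardless.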
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index afa19a0c0d81..8d2aeb2096ac 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -88,6 +88,7 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota);
 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
 void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
 int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
 
 enum cpdma_control {
 	CPDMA_CMD_IDLE,			/* write-only */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 8478d98c1092..1c97c8171d3e 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -120,7 +120,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 #define EMAC_DEF_TX_CH		(0) /* Default 0th channel */
 #define EMAC_DEF_RX_CH		(0) /* Default 0th channel */
 #define EMAC_DEF_RX_NUM_DESC	(128)
-#define EMAC_DEF_TX_NUM_DESC	(128)
 #define EMAC_DEF_MAX_TX_CH	(1) /* Max TX channels configured */
 #define EMAC_DEF_MAX_RX_CH	(1) /* Max RX channels configured */
 #define EMAC_POLL_WEIGHT	(64) /* Default NAPI poll weight */
@@ -342,7 +341,6 @@ struct emac_priv {
 	u32 mac_hash2;
 	u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
 	u32 rx_addr_type;
-	atomic_t cur_tx;
 	const char *phy_id;
 #ifdef CONFIG_OF
 	struct device_node *phy_node;
@@ -1050,10 +1048,10 @@ static void emac_tx_handler(void *token, int len, int status)
 {
 	struct sk_buff *skb = token;
 	struct net_device *ndev = skb->dev;
-	struct emac_priv *priv = netdev_priv(ndev);
-
-	atomic_dec(&priv->cur_tx);
 
+	/* Check whether the queue is stopped due to stalled tx dma, if the
+	 * queue is stopped then start the queue as we have free desc for tx
+	 */
 	if (unlikely(netif_queue_stopped(ndev)))
 		netif_start_queue(ndev);
 	ndev->stats.tx_packets++;
@@ -1101,7 +1099,10 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
 		goto fail_tx;
 	}
 
-	if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
+	/* If there is no more tx desc left free then we need to
+	 * tell the kernel to stop sending us tx frames.
+	 */
+	if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
 		netif_stop_queue(ndev);
 
 	return NETDEV_TX_OK;
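A note on the davinci_emac side of the change: the removed atomic cur_tx counter tracked in-flight packets against a fixed EMAC_DEF_TX_NUM_DESC ceiling, which says nothing about the real state of the shared descriptor pool. Querying the pool directly via cpdma_check_free_tx_desc() makes the queue stop/start decision reflect actual descriptor availability, at the cost of taking the pool lock once per transmitted packet.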