aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/bfin_mac.c
diff options
context:
space:
mode:
authorSonic Zhang <sonic.zhang@analog.com>2010-06-11 05:44:31 -0400
committerDavid S. Miller <davem@davemloft.net>2010-06-15 18:04:10 -0400
commit4fcc3d3409b0ab37c1f790e04a1f7c984b436167 (patch)
treec05b3e27f61b3644cd3514d4bbf7443d7588803a /drivers/net/bfin_mac.c
parentaa1039e73cc2cf834e99c09d2033d5d2675357b9 (diff)
netdev:bfin_mac: reclaim and free tx skb as soon as possible after transfer
SKBs hold onto resources that can't be held indefinitely, such as TCP socket references and netfilter conntrack state. So if a packet is left in the TX ring for a long time, there might be a TCP socket that cannot be closed and freed up. The current Blackfin EMAC driver always reclaims and frees used tx skbs in future transfers. The problem is that a future transfer may not come soon. This patch starts a timer after a transfer to reclaim and free skbs. There is almost no performance drop with this patch. The TX interrupt is not enabled because of a strange behavior of the Blackfin EMAC: if EMAC TX transfer control is turned on, endless TX interrupts are triggered no matter whether TX DMA is enabled or not. Since DMA walks down the ring automatically, TX transfer control can't be turned off in the middle. The only way is to disable the TX interrupt completely. Signed-off-by: Sonic Zhang <sonic.zhang@analog.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/bfin_mac.c')
-rw-r--r--drivers/net/bfin_mac.c123
1 file changed, 80 insertions, 43 deletions
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 368f33313fb6..012613fde3f4 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -922,61 +922,73 @@ static void bfin_mac_hwtstamp_init(struct net_device *netdev)
922# define bfin_tx_hwtstamp(dev, skb) 922# define bfin_tx_hwtstamp(dev, skb)
923#endif 923#endif
924 924
925static void adjust_tx_list(void) 925static inline void _tx_reclaim_skb(void)
926{
927 do {
928 tx_list_head->desc_a.config &= ~DMAEN;
929 tx_list_head->status.status_word = 0;
930 if (tx_list_head->skb) {
931 dev_kfree_skb(tx_list_head->skb);
932 tx_list_head->skb = NULL;
933 }
934 tx_list_head = tx_list_head->next;
935
936 } while (tx_list_head->status.status_word != 0);
937}
938
939static void tx_reclaim_skb(struct bfin_mac_local *lp)
926{ 940{
927 int timeout_cnt = MAX_TIMEOUT_CNT; 941 int timeout_cnt = MAX_TIMEOUT_CNT;
928 942
929 if (tx_list_head->status.status_word != 0 && 943 if (tx_list_head->status.status_word != 0)
930 current_tx_ptr != tx_list_head) { 944 _tx_reclaim_skb();
931 goto adjust_head; /* released something, just return; */
932 }
933 945
934 /* 946 if (current_tx_ptr->next == tx_list_head) {
935 * if nothing released, check wait condition
936 * current's next can not be the head,
937 * otherwise the dma will not stop as we want
938 */
939 if (current_tx_ptr->next->next == tx_list_head) {
940 while (tx_list_head->status.status_word == 0) { 947 while (tx_list_head->status.status_word == 0) {
948 /* slow down polling to avoid too many queue stop. */
941 udelay(10); 949 udelay(10);
942 if (tx_list_head->status.status_word != 0 || 950 /* reclaim skb if DMA is not running. */
943 !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) { 951 if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
944 goto adjust_head; 952 break;
945 } 953 if (timeout_cnt-- < 0)
946 if (timeout_cnt-- < 0) {
947 printk(KERN_ERR DRV_NAME
948 ": wait for adjust tx list head timeout\n");
949 break; 954 break;
950 }
951 }
952 if (tx_list_head->status.status_word != 0) {
953 goto adjust_head;
954 } 955 }
956
957 if (timeout_cnt >= 0)
958 _tx_reclaim_skb();
959 else
960 netif_stop_queue(lp->ndev);
955 } 961 }
956 962
957 return; 963 if (current_tx_ptr->next != tx_list_head &&
964 netif_queue_stopped(lp->ndev))
965 netif_wake_queue(lp->ndev);
966
967 if (tx_list_head != current_tx_ptr) {
968 /* shorten the timer interval if tx queue is stopped */
969 if (netif_queue_stopped(lp->ndev))
970 lp->tx_reclaim_timer.expires =
971 jiffies + (TX_RECLAIM_JIFFIES >> 4);
972 else
973 lp->tx_reclaim_timer.expires =
974 jiffies + TX_RECLAIM_JIFFIES;
975
976 mod_timer(&lp->tx_reclaim_timer,
977 lp->tx_reclaim_timer.expires);
978 }
958 979
959adjust_head:
960 do {
961 tx_list_head->desc_a.config &= ~DMAEN;
962 tx_list_head->status.status_word = 0;
963 if (tx_list_head->skb) {
964 dev_kfree_skb(tx_list_head->skb);
965 tx_list_head->skb = NULL;
966 } else {
967 printk(KERN_ERR DRV_NAME
968 ": no sk_buff in a transmitted frame!\n");
969 }
970 tx_list_head = tx_list_head->next;
971 } while (tx_list_head->status.status_word != 0 &&
972 current_tx_ptr != tx_list_head);
973 return; 980 return;
981}
974 982
983static void tx_reclaim_skb_timeout(unsigned long lp)
984{
985 tx_reclaim_skb((struct bfin_mac_local *)lp);
975} 986}
976 987
977static int bfin_mac_hard_start_xmit(struct sk_buff *skb, 988static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
978 struct net_device *dev) 989 struct net_device *dev)
979{ 990{
991 struct bfin_mac_local *lp = netdev_priv(dev);
980 u16 *data; 992 u16 *data;
981 u32 data_align = (unsigned long)(skb->data) & 0x3; 993 u32 data_align = (unsigned long)(skb->data) & 0x3;
982 union skb_shared_tx *shtx = skb_tx(skb); 994 union skb_shared_tx *shtx = skb_tx(skb);
@@ -1009,8 +1021,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
1009 skb->len); 1021 skb->len);
1010 current_tx_ptr->desc_a.start_addr = 1022 current_tx_ptr->desc_a.start_addr =
1011 (u32)current_tx_ptr->packet; 1023 (u32)current_tx_ptr->packet;
1012 if (current_tx_ptr->status.status_word != 0)
1013 current_tx_ptr->status.status_word = 0;
1014 blackfin_dcache_flush_range( 1024 blackfin_dcache_flush_range(
1015 (u32)current_tx_ptr->packet, 1025 (u32)current_tx_ptr->packet,
1016 (u32)(current_tx_ptr->packet + skb->len + 2)); 1026 (u32)(current_tx_ptr->packet + skb->len + 2));
@@ -1022,6 +1032,9 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
1022 */ 1032 */
1023 SSYNC(); 1033 SSYNC();
1024 1034
1035 /* always clear status buffer before start tx dma */
1036 current_tx_ptr->status.status_word = 0;
1037
1025 /* enable this packet's dma */ 1038 /* enable this packet's dma */
1026 current_tx_ptr->desc_a.config |= DMAEN; 1039 current_tx_ptr->desc_a.config |= DMAEN;
1027 1040
@@ -1037,13 +1050,14 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
1037 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE); 1050 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
1038 1051
1039out: 1052out:
1040 adjust_tx_list();
1041
1042 bfin_tx_hwtstamp(dev, skb); 1053 bfin_tx_hwtstamp(dev, skb);
1043 1054
1044 current_tx_ptr = current_tx_ptr->next; 1055 current_tx_ptr = current_tx_ptr->next;
1045 dev->stats.tx_packets++; 1056 dev->stats.tx_packets++;
1046 dev->stats.tx_bytes += (skb->len); 1057 dev->stats.tx_bytes += (skb->len);
1058
1059 tx_reclaim_skb(lp);
1060
1047 return NETDEV_TX_OK; 1061 return NETDEV_TX_OK;
1048} 1062}
1049 1063
@@ -1167,8 +1181,11 @@ real_rx:
1167#ifdef CONFIG_NET_POLL_CONTROLLER 1181#ifdef CONFIG_NET_POLL_CONTROLLER
1168static void bfin_mac_poll(struct net_device *dev) 1182static void bfin_mac_poll(struct net_device *dev)
1169{ 1183{
1184 struct bfin_mac_local *lp = netdev_priv(dev);
1185
1170 disable_irq(IRQ_MAC_RX); 1186 disable_irq(IRQ_MAC_RX);
1171 bfin_mac_interrupt(IRQ_MAC_RX, dev); 1187 bfin_mac_interrupt(IRQ_MAC_RX, dev);
1188 tx_reclaim_skb(lp);
1172 enable_irq(IRQ_MAC_RX); 1189 enable_irq(IRQ_MAC_RX);
1173} 1190}
1174#endif /* CONFIG_NET_POLL_CONTROLLER */ 1191#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -1232,12 +1249,27 @@ static int bfin_mac_enable(void)
1232/* Our watchdog timed out. Called by the networking layer */ 1249/* Our watchdog timed out. Called by the networking layer */
1233static void bfin_mac_timeout(struct net_device *dev) 1250static void bfin_mac_timeout(struct net_device *dev)
1234{ 1251{
1252 struct bfin_mac_local *lp = netdev_priv(dev);
1253
1235 pr_debug("%s: %s\n", dev->name, __func__); 1254 pr_debug("%s: %s\n", dev->name, __func__);
1236 1255
1237 bfin_mac_disable(); 1256 bfin_mac_disable();
1238 1257
1239 /* reset tx queue */ 1258 del_timer(&lp->tx_reclaim_timer);
1240 tx_list_tail = tx_list_head->next; 1259
1260 /* reset tx queue and free skb */
1261 while (tx_list_head != current_tx_ptr) {
1262 tx_list_head->desc_a.config &= ~DMAEN;
1263 tx_list_head->status.status_word = 0;
1264 if (tx_list_head->skb) {
1265 dev_kfree_skb(tx_list_head->skb);
1266 tx_list_head->skb = NULL;
1267 }
1268 tx_list_head = tx_list_head->next;
1269 }
1270
1271 if (netif_queue_stopped(lp->ndev))
1272 netif_wake_queue(lp->ndev);
1241 1273
1242 bfin_mac_enable(); 1274 bfin_mac_enable();
1243 1275
@@ -1430,6 +1462,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1430 SET_NETDEV_DEV(ndev, &pdev->dev); 1462 SET_NETDEV_DEV(ndev, &pdev->dev);
1431 platform_set_drvdata(pdev, ndev); 1463 platform_set_drvdata(pdev, ndev);
1432 lp = netdev_priv(ndev); 1464 lp = netdev_priv(ndev);
1465 lp->ndev = ndev;
1433 1466
1434 /* Grab the MAC address in the MAC */ 1467 /* Grab the MAC address in the MAC */
1435 *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); 1468 *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
@@ -1485,6 +1518,10 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1485 ndev->netdev_ops = &bfin_mac_netdev_ops; 1518 ndev->netdev_ops = &bfin_mac_netdev_ops;
1486 ndev->ethtool_ops = &bfin_mac_ethtool_ops; 1519 ndev->ethtool_ops = &bfin_mac_ethtool_ops;
1487 1520
1521 init_timer(&lp->tx_reclaim_timer);
1522 lp->tx_reclaim_timer.data = (unsigned long)lp;
1523 lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
1524
1488 spin_lock_init(&lp->lock); 1525 spin_lock_init(&lp->lock);
1489 1526
1490 /* now, enable interrupts */ 1527 /* now, enable interrupts */