-rw-r--r--  drivers/net/b44.c  17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 0ee3e27969c6..b334cc310bc1 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -948,6 +948,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct b44 *bp = netdev_priv(dev);
 	struct sk_buff *bounce_skb;
+	int rc = NETDEV_TX_OK;
 	dma_addr_t mapping;
 	u32 len, entry, ctrl;
 
@@ -957,10 +958,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* This is a hard error, log it. */
 	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
 		netif_stop_queue(dev);
-		spin_unlock_irq(&bp->lock);
 		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
 		       dev->name);
-		return 1;
+		goto err_out;
 	}
 
 	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
@@ -971,7 +971,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
 					     GFP_ATOMIC|GFP_DMA);
 		if (!bounce_skb)
-			return NETDEV_TX_BUSY;
+			goto err_out;
 
 		mapping = pci_map_single(bp->pdev, bounce_skb->data,
 					 len, PCI_DMA_TODEVICE);
@@ -979,7 +979,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				pci_unmap_single(bp->pdev, mapping,
 						 len, PCI_DMA_TODEVICE);
 			dev_kfree_skb_any(bounce_skb);
-			return NETDEV_TX_BUSY;
+			goto err_out;
 		}
 
 		memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
@@ -1019,11 +1019,16 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (TX_BUFFS_AVAIL(bp) < 1)
 		netif_stop_queue(dev);
 
+	dev->trans_start = jiffies;
+
+out_unlock:
 	spin_unlock_irq(&bp->lock);
 
-	dev->trans_start = jiffies;
-
-	return 0;
+	return rc;
 
+err_out:
+	rc = NETDEV_TX_BUSY;
+	goto out_unlock;
 }
 
 static int b44_change_mtu(struct net_device *dev, int new_mtu)
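
For illustration, a minimal, self-contained sketch of the error-handling shape this patch gives b44_start_xmit: one unlock-and-return site (out_unlock) and one failure label (err_out) that only sets the return code. The pthread mutex, the TX_OK/TX_BUSY macros and the xmit() helper below are user-space stand-ins for the driver's spinlock and the NETDEV_TX_* values, not the real b44 code.

#include <pthread.h>
#include <stdio.h>

#define TX_OK   0	/* stand-in for NETDEV_TX_OK   */
#define TX_BUSY 1	/* stand-in for NETDEV_TX_BUSY */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Shape of the patched transmit path: every failure jumps to err_out,
 * which sets the return code and reuses the single unlock site, so the
 * lock can no longer be leaked on an error return. */
static int xmit(int ring_slots_free, int mapping_ok)
{
	int rc = TX_OK;

	pthread_mutex_lock(&lock);

	if (ring_slots_free < 1) {
		fprintf(stderr, "BUG! Tx ring full when queue awake!\n");
		goto err_out;		/* was: unlock by hand, then return 1 */
	}

	if (!mapping_ok)
		goto err_out;		/* was: return NETDEV_TX_BUSY with the lock still held */

	/* ... queue the packet and kick the hardware here ... */

out_unlock:
	pthread_mutex_unlock(&lock);
	return rc;

err_out:
	rc = TX_BUSY;
	goto out_unlock;
}

int main(void)
{
	printf("ok path:   %d\n", xmit(8, 1));	/* prints 0 (TX_OK)   */
	printf("busy path: %d\n", xmit(0, 1));	/* prints 1 (TX_BUSY) */
	return 0;
}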