| author | David S. Miller <davem@davemloft.net> | 2012-05-16 22:17:37 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2012-05-16 22:17:37 -0400 |
| commit | 028940342a906db8da014a7603a0deddc2c323dd | |
| tree | 688dbc38a3e218f2493d311b1d70a67668837347 /drivers/net/ethernet/oki-semi | |
| parent | be3eed2e96340d3c7a4d1ea1d63e7bd6095d1e34 | |
| parent | 0e93b4b304ae052ba1bc73f6d34a68556fe93429 | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'drivers/net/ethernet/oki-semi')
 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h      |  2 --
 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 25 +++++++++++--------------
 2 files changed, 11 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 9f3dbc4feadc..b07311eaa693 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -584,7 +584,6 @@ struct pch_gbe_hw_stats {
 /**
  * struct pch_gbe_adapter - board specific private data structure
  * @stats_lock:    Spinlock structure for status
- * @tx_queue_lock: Spinlock structure for transmit
  * @ethtool_lock:  Spinlock structure for ethtool
  * @irq_sem:       Semaphore for interrupt
  * @netdev:        Pointer of network device structure
@@ -609,7 +608,6 @@ struct pch_gbe_hw_stats {

 struct pch_gbe_adapter {
        spinlock_t stats_lock;
-       spinlock_t tx_queue_lock;
        spinlock_t ethtool_lock;
        atomic_t irq_sem;
        struct net_device *netdev;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 9dc7e5023671..3787c64ee71c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -645,14 +645,11 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
  */
 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
 {
-       int size;
-
-       size = (int)sizeof(struct pch_gbe_tx_ring);
-       adapter->tx_ring = kzalloc(size, GFP_KERNEL);
+       adapter->tx_ring = kzalloc(sizeof(*adapter->tx_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;
-       size = (int)sizeof(struct pch_gbe_rx_ring);
-       adapter->rx_ring = kzalloc(size, GFP_KERNEL);
+
+       adapter->rx_ring = kzalloc(sizeof(*adapter->rx_ring), GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
@@ -1169,7 +1166,6 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
        struct sk_buff *tmp_skb;
        unsigned int frame_ctrl;
        unsigned int ring_num;
-       unsigned long flags;

        /*-- Set frame control --*/
        frame_ctrl = 0;
@@ -1216,14 +1212,14 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
                        }
                }
        }
-       spin_lock_irqsave(&tx_ring->tx_lock, flags);
+
        ring_num = tx_ring->next_to_use;
        if (unlikely((ring_num + 1) == tx_ring->count))
                tx_ring->next_to_use = 0;
        else
                tx_ring->next_to_use = ring_num + 1;

-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+
        buffer_info = &tx_ring->buffer_info[ring_num];
        tmp_skb = buffer_info->skb;

@@ -1525,7 +1521,7 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
                                                   &rx_ring->rx_buff_pool_logic,
                                                   GFP_KERNEL);
        if (!rx_ring->rx_buff_pool) {
-               pr_err("Unable to allocate memory for the receive poll buffer\n");
+               pr_err("Unable to allocate memory for the receive pool buffer\n");
                return -ENOMEM;
        }
        memset(rx_ring->rx_buff_pool, 0, size);
@@ -1644,15 +1640,17 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
        pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
                 cleaned_count);
        /* Recover from running out of Tx resources in xmit_frame */
+       spin_lock(&tx_ring->tx_lock);
        if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
                netif_wake_queue(adapter->netdev);
                adapter->stats.tx_restart_count++;
                pr_debug("Tx wake queue\n");
        }
-       spin_lock(&adapter->tx_queue_lock);
+
        tx_ring->next_to_clean = i;
-       spin_unlock(&adapter->tx_queue_lock);
+
        pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+       spin_unlock(&tx_ring->tx_lock);
        return cleaned;
 }

@@ -2043,7 +2041,6 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
                return -ENOMEM;
        }
        spin_lock_init(&adapter->hw.miim_lock);
-       spin_lock_init(&adapter->tx_queue_lock);
        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->ethtool_lock);
        atomic_set(&adapter->irq_sem, 0);
@@ -2148,10 +2145,10 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                         tx_ring->next_to_use, tx_ring->next_to_clean);
                return NETDEV_TX_BUSY;
        }
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);

        /* CRC,ITAG no support */
        pch_gbe_tx_queue(adapter, tx_ring, skb);
+       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
        return NETDEV_TX_OK;
 }

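Taken together, the pch_gbe hunks above drop the adapter-wide tx_queue_lock and serialize both ring indices under the single per-ring tx_ring->tx_lock: pch_gbe_xmit_frame now holds the lock across pch_gbe_tx_queue (which advances next_to_use), and pch_gbe_clean_tx takes the same lock around the queue wake and the next_to_clean update. The standalone sketch below only illustrates that single-lock ring discipline in plain userspace C; the names, ring size, and pthread mutex are hypothetical stand-ins for the kernel structures and spinlocks, not driver code.

```c
/*
 * Illustration only: one per-ring lock guards both index updates,
 * mirroring the locking scheme the patch above moves to.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct tx_ring {
	pthread_mutex_t tx_lock;   /* single lock for both index updates */
	unsigned int next_to_use;   /* advanced by the transmit path */
	unsigned int next_to_clean; /* advanced by the cleanup path */
	int slot[RING_SIZE];
};

/* Transmit path: analogous to pch_gbe_xmit_frame()/pch_gbe_tx_queue(),
 * where next_to_use is now advanced while tx_lock is held. */
static bool ring_xmit(struct tx_ring *r, int pkt)
{
	bool queued = false;

	pthread_mutex_lock(&r->tx_lock);
	if ((r->next_to_use + 1) % RING_SIZE != r->next_to_clean) {
		r->slot[r->next_to_use] = pkt;
		r->next_to_use = (r->next_to_use + 1) % RING_SIZE;
		queued = true;
	}
	pthread_mutex_unlock(&r->tx_lock);
	return queued; /* false plays the role of NETDEV_TX_BUSY */
}

/* Cleanup path: analogous to pch_gbe_clean_tx(), which now updates
 * next_to_clean under tx_lock instead of the removed tx_queue_lock. */
static void ring_clean(struct tx_ring *r)
{
	pthread_mutex_lock(&r->tx_lock);
	while (r->next_to_clean != r->next_to_use)
		r->next_to_clean = (r->next_to_clean + 1) % RING_SIZE;
	pthread_mutex_unlock(&r->tx_lock);
}

int main(void)
{
	struct tx_ring r = { .tx_lock = PTHREAD_MUTEX_INITIALIZER };

	for (int i = 0; i < 10; i++)
		printf("xmit %d -> %s\n", i, ring_xmit(&r, i) ? "ok" : "busy");
	ring_clean(&r);
	printf("cleaned, next_to_clean=%u next_to_use=%u\n",
	       r.next_to_clean, r.next_to_use);
	return 0;
}
```

Built with something like `cc -pthread ring_sketch.c`, the sketch queues packets until the ring reports busy, then drains it, all through the one lock.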