Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--  drivers/net/e1000/e1000_main.c  45
1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 9267f16b1b32..3a03a74c0609 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -748,9 +748,9 @@ e1000_reset(struct e1000_adapter *adapter)
 			       VLAN_TAG_SIZE;
 		min_tx_space = min_rx_space;
 		min_tx_space *= 2;
-		E1000_ROUNDUP(min_tx_space, 1024);
+		min_tx_space = ALIGN(min_tx_space, 1024);
 		min_tx_space >>= 10;
-		E1000_ROUNDUP(min_rx_space, 1024);
+		min_rx_space = ALIGN(min_rx_space, 1024);
 		min_rx_space >>= 10;
 
 		/* If current Tx allocation is less than the min Tx FIFO size,
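Throughout this patch the driver-private E1000_ROUNDUP() macro is replaced by the generic ALIGN() helper from linux/kernel.h, which rounds a value up to the next multiple of a power-of-two alignment. A minimal user-space sketch of the same arithmetic (illustrative only, not part of the patch; align_up() is a made-up stand-in for ALIGN()):

/* Illustrative sketch of what ALIGN() computes; align_up() is a stand-in
 * for the kernel macro and is not part of the patch. */
#include <assert.h>
#include <stdint.h>

static uint32_t align_up(uint32_t x, uint32_t a)
{
	/* Round x up to the next multiple of a (a must be a power of two). */
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	uint32_t min_tx_space = 3000;

	min_tx_space = align_up(min_tx_space, 1024);	/* 3000 -> 3072 */
	min_tx_space >>= 10;				/* 3072 -> 3 (in KiB units) */

	assert(min_tx_space == 3);
	return 0;
}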
@@ -1354,31 +1354,27 @@ e1000_sw_init(struct e1000_adapter *adapter)
 static int __devinit
 e1000_alloc_queues(struct e1000_adapter *adapter)
 {
-	int size;
-
-	size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
-	adapter->tx_ring = kmalloc(size, GFP_KERNEL);
+	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+	                           sizeof(struct e1000_tx_ring), GFP_KERNEL);
 	if (!adapter->tx_ring)
 		return -ENOMEM;
-	memset(adapter->tx_ring, 0, size);
 
-	size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
-	adapter->rx_ring = kmalloc(size, GFP_KERNEL);
+	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+	                           sizeof(struct e1000_rx_ring), GFP_KERNEL);
 	if (!adapter->rx_ring) {
 		kfree(adapter->tx_ring);
 		return -ENOMEM;
 	}
-	memset(adapter->rx_ring, 0, size);
 
 #ifdef CONFIG_E1000_NAPI
-	size = sizeof(struct net_device) * adapter->num_rx_queues;
-	adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
+	adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
+	                                  sizeof(struct net_device),
+	                                  GFP_KERNEL);
 	if (!adapter->polling_netdev) {
 		kfree(adapter->tx_ring);
 		kfree(adapter->rx_ring);
 		return -ENOMEM;
 	}
-	memset(adapter->polling_netdev, 0, size);
 #endif
 
 	return E1000_SUCCESS;
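In this hunk kcalloc(n, size, GFP_KERNEL) replaces the kmalloc()-plus-memset() pairs: it returns zeroed memory for an array of n elements and fails if n * size would overflow, so the explicit memset() calls can go away. A rough user-space analogue of the before/after pattern (illustrative only; struct demo_ring and num_queues are made-up names for this sketch):

#include <stdlib.h>
#include <string.h>

struct demo_ring {
	int head;
	int tail;
};

int main(void)
{
	size_t num_queues = 4;

	/* Old pattern: allocate, then zero by hand. */
	struct demo_ring *a = malloc(num_queues * sizeof(*a));
	if (!a)
		return 1;
	memset(a, 0, num_queues * sizeof(*a));

	/* New pattern: one zero-initialized, overflow-checked array
	 * allocation, analogous to kcalloc() in the kernel. */
	struct demo_ring *b = calloc(num_queues, sizeof(*b));
	if (!b) {
		free(a);
		return 1;
	}

	free(b);
	free(a);
	return 0;
}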
@@ -1560,7 +1556,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
 	/* round up to nearest 4K */
 
 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
-	E1000_ROUNDUP(txdr->size, 4096);
+	txdr->size = ALIGN(txdr->size, 4096);
 
 	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
 	if (!txdr->desc) {
@@ -1774,18 +1770,18 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
 	}
 	memset(rxdr->buffer_info, 0, size);
 
-	size = sizeof(struct e1000_ps_page) * rxdr->count;
-	rxdr->ps_page = kmalloc(size, GFP_KERNEL);
+	rxdr->ps_page = kcalloc(rxdr->count, sizeof(struct e1000_ps_page),
+	                        GFP_KERNEL);
 	if (!rxdr->ps_page) {
 		vfree(rxdr->buffer_info);
 		DPRINTK(PROBE, ERR,
 		"Unable to allocate memory for the receive descriptor ring\n");
 		return -ENOMEM;
 	}
-	memset(rxdr->ps_page, 0, size);
 
-	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
-	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
+	rxdr->ps_page_dma = kcalloc(rxdr->count,
+	                            sizeof(struct e1000_ps_page_dma),
+	                            GFP_KERNEL);
 	if (!rxdr->ps_page_dma) {
 		vfree(rxdr->buffer_info);
 		kfree(rxdr->ps_page);
@@ -1793,7 +1789,6 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
 		"Unable to allocate memory for the receive descriptor ring\n");
 		return -ENOMEM;
 	}
-	memset(rxdr->ps_page_dma, 0, size);
 
 	if (adapter->hw.mac_type <= e1000_82547_rev_2)
 		desc_len = sizeof(struct e1000_rx_desc);
@@ -1803,7 +1798,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
 	/* Round up to nearest 4K */
 
 	rxdr->size = rxdr->count * desc_len;
-	E1000_ROUNDUP(rxdr->size, 4096);
+	rxdr->size = ALIGN(rxdr->size, 4096);
 
 	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
 
@@ -2667,7 +2662,7 @@ e1000_watchdog(unsigned long data)
 
 			netif_carrier_on(netdev);
 			netif_wake_queue(netdev);
-			mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
+			mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
 			adapter->smartspeed = 0;
 		} else {
 			/* make sure the receive unit is started */
@@ -2684,7 +2679,7 @@ e1000_watchdog(unsigned long data)
 			DPRINTK(LINK, INFO, "NIC Link is Down\n");
 			netif_carrier_off(netdev);
 			netif_stop_queue(netdev);
-			mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
+			mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
 
 			/* 80003ES2LAN workaround--
 			 * For packet buffer work-around on link down event;
@@ -2736,7 +2731,7 @@ e1000_watchdog(unsigned long data)
 	e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
 
 	/* Reset the timer */
-	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
 }
 
 enum latency_range {
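The watchdog and PHY-info timers do not need tick-accurate expiry, so wrapping the 2-second timeout in round_jiffies() lets the kernel align it to a whole-second boundary and batch it with other coarse timers, reducing wakeups. A simplified user-space sketch of the rounding idea (illustrative only; HZ, the rounding rule, and round_to_second() are assumptions for the example, not the kernel implementation):

#include <stdio.h>

#define HZ 250	/* assumed tick rate for this sketch */

/* Round a future tick count to the nearest whole-second boundary.  The real
 * round_jiffies() also skews the result per CPU and never rounds into the
 * past; that detail is omitted here. */
static unsigned long round_to_second(unsigned long j)
{
	unsigned long rem = j % HZ;

	return rem < HZ / 2 ? j - rem : j + (HZ - rem);
}

int main(void)
{
	unsigned long jiffies = 100037;			/* pretend current tick count */
	unsigned long expiry = jiffies + 2 * HZ;	/* roughly 2 s from now */

	printf("raw expiry:     %lu\n", expiry);
	printf("rounded expiry: %lu\n", round_to_second(expiry));
	return 0;
}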
@@ -3175,7 +3170,7 @@ e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
 	uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
 	uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
 
-	E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
+	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
 
 	if (adapter->link_duplex != HALF_DUPLEX)
 		goto no_fifo_stall_required;