 drivers/net/e1000/e1000.h         |  67 ++-
 drivers/net/e1000/e1000_ethtool.c |  75 ++--
 drivers/net/e1000/e1000_main.c    | 655 ++++++++++++++++++++++++-------------
 drivers/net/e1000/e1000_param.c   |  10 +-
 4 files changed, 564 insertions(+), 243 deletions(-)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 092757bc721f..9b7274b111f3 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -72,6 +72,10 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#ifdef CONFIG_E1000_MQ
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#endif
 
 #define BAR_0		0
 #define BAR_1		1
@@ -168,7 +172,30 @@ struct e1000_buffer {
 struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; };
 struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; };
 
-struct e1000_desc_ring {
+struct e1000_tx_ring {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+
+	struct e1000_buffer previous_buffer_info;
+	spinlock_t tx_lock;
+	uint16_t tdh;
+	uint16_t tdt;
+	uint64_t pkt;
+};
+
+struct e1000_rx_ring {
 	/* pointer to the descriptor ring memory */
 	void *desc;
 	/* physical address of the descriptor ring */
@@ -186,6 +213,10 @@ struct e1000_desc_ring {
 	/* arrays of page information for packet split */
 	struct e1000_ps_page *ps_page;
 	struct e1000_ps_page_dma *ps_page_dma;
+
+	uint16_t rdh;
+	uint16_t rdt;
+	uint64_t pkt;
 };
 
 #define E1000_DESC_UNUSED(R) \
@@ -227,9 +258,10 @@ struct e1000_adapter {
 	unsigned long led_status;
 
 	/* TX */
-	struct e1000_desc_ring tx_ring;
-	struct e1000_buffer previous_buffer_info;
-	spinlock_t tx_lock;
+	struct e1000_tx_ring *tx_ring;      /* One per active queue */
+#ifdef CONFIG_E1000_MQ
+	struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
+#endif
 	uint32_t txd_cmd;
 	uint32_t tx_int_delay;
 	uint32_t tx_abs_int_delay;
@@ -246,13 +278,26 @@ struct e1000_adapter {
 
 	/* RX */
 #ifdef CONFIG_E1000_NAPI
-	boolean_t (*clean_rx) (struct e1000_adapter *adapter, int *work_done,
-			       int work_to_do);
+	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       int *work_done, int work_to_do);
 #else
-	boolean_t (*clean_rx) (struct e1000_adapter *adapter);
+	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring);
 #endif
-	void (*alloc_rx_buf) (struct e1000_adapter *adapter);
-	struct e1000_desc_ring rx_ring;
+	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
+			      struct e1000_rx_ring *rx_ring);
+	struct e1000_rx_ring *rx_ring;      /* One per active queue */
+#ifdef CONFIG_E1000_NAPI
+	struct net_device *polling_netdev;  /* One per active queue */
+#endif
+#ifdef CONFIG_E1000_MQ
+	struct net_device **cpu_netdev;     /* per-cpu */
+	struct call_async_data_struct rx_sched_call_data;
+	int cpu_for_queue[4];
+#endif
+	int num_queues;
+
 	uint64_t hw_csum_err;
 	uint64_t hw_csum_good;
 	uint32_t rx_int_delay;
@@ -278,8 +323,8 @@ struct e1000_adapter {
 	struct e1000_phy_stats phy_stats;
 
 	uint32_t test_icr;
-	struct e1000_desc_ring test_tx_ring;
-	struct e1000_desc_ring test_rx_ring;
+	struct e1000_tx_ring test_tx_ring;
+	struct e1000_rx_ring test_rx_ring;
 
 
 	int msg_enable;
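The heart of the patch is visible in this header: the single e1000_desc_ring embedded in the adapter is split into distinct e1000_tx_ring and e1000_rx_ring types, and the adapter now holds kmalloc'ed arrays of num_queues rings. Per-queue state that used to live on the adapter (the Tx lock, the previous_buffer_info workaround state, the head/tail register offsets) moves into the ring itself. A minimal sketch of what that buys, assuming a valid queue index (illustrative, not part of the patch):

	struct e1000_tx_ring *ring = &adapter->tx_ring[queue];

	spin_lock(&ring->tx_lock);	/* per-queue lock, not per-adapter */
	/* ... post descriptors on this ring ... */
	writel(i, adapter->hw.hw_addr + ring->tdt);	/* this ring's tail */
	spin_unlock(&ring->tx_lock);

Two queues can now transmit under different locks and touch different tail registers without serializing against each other.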
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 5f9a36bb77f5..6e7e34f59a34 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -39,10 +39,10 @@ extern int e1000_up(struct e1000_adapter *adapter);
 extern void e1000_down(struct e1000_adapter *adapter);
 extern void e1000_reset(struct e1000_adapter *adapter);
 extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
-extern int e1000_setup_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_tx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
 extern void e1000_update_stats(struct e1000_adapter *adapter);
 
 struct e1000_stats {
@@ -576,8 +576,8 @@ e1000_get_ringparam(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	e1000_mac_type mac_type = adapter->hw.mac_type;
-	struct e1000_desc_ring *txdr = &adapter->tx_ring;
-	struct e1000_desc_ring *rxdr = &adapter->rx_ring;
+	struct e1000_tx_ring *txdr = adapter->tx_ring;
+	struct e1000_rx_ring *rxdr = adapter->rx_ring;
 
 	ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
 		E1000_MAX_82544_RXD;
@@ -597,20 +597,40 @@ e1000_set_ringparam(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	e1000_mac_type mac_type = adapter->hw.mac_type;
-	struct e1000_desc_ring *txdr = &adapter->tx_ring;
-	struct e1000_desc_ring *rxdr = &adapter->rx_ring;
-	struct e1000_desc_ring tx_old, tx_new, rx_old, rx_new;
-	int err;
+	struct e1000_tx_ring *txdr, *tx_old, *tx_new;
+	struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
+	int i, err, tx_ring_size, rx_ring_size;
+
+	tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
+	rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+
+	if (netif_running(adapter->netdev))
+		e1000_down(adapter);
 
 	tx_old = adapter->tx_ring;
 	rx_old = adapter->rx_ring;
 
+	adapter->tx_ring = kmalloc(tx_ring_size, GFP_KERNEL);
+	if (!adapter->tx_ring) {
+		err = -ENOMEM;
+		goto err_setup_rx;
+	}
+	memset(adapter->tx_ring, 0, tx_ring_size);
+
+	adapter->rx_ring = kmalloc(rx_ring_size, GFP_KERNEL);
+	if (!adapter->rx_ring) {
+		kfree(adapter->tx_ring);
+		err = -ENOMEM;
+		goto err_setup_rx;
+	}
+	memset(adapter->rx_ring, 0, rx_ring_size);
+
+	txdr = adapter->tx_ring;
+	rxdr = adapter->rx_ring;
+
 	if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
-	if(netif_running(adapter->netdev))
-		e1000_down(adapter);
-
 	rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
 	rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
 		E1000_MAX_RXD : E1000_MAX_82544_RXD));
@@ -621,11 +641,16 @@ e1000_set_ringparam(struct net_device *netdev,
 		E1000_MAX_TXD : E1000_MAX_82544_TXD));
 	E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
+	for (i = 0; i < adapter->num_queues; i++) {
+		txdr[i].count = txdr->count;
+		rxdr[i].count = rxdr->count;
+	}
+
 	if(netif_running(adapter->netdev)) {
 		/* Try to get new resources before deleting old */
-		if((err = e1000_setup_rx_resources(adapter)))
+		if ((err = e1000_setup_all_rx_resources(adapter)))
 			goto err_setup_rx;
-		if((err = e1000_setup_tx_resources(adapter)))
+		if ((err = e1000_setup_all_tx_resources(adapter)))
 			goto err_setup_tx;
 
 		/* save the new, restore the old in order to free it,
@@ -635,8 +660,10 @@ e1000_set_ringparam(struct net_device *netdev,
 		tx_new = adapter->tx_ring;
 		adapter->rx_ring = rx_old;
 		adapter->tx_ring = tx_old;
-		e1000_free_rx_resources(adapter);
-		e1000_free_tx_resources(adapter);
+		e1000_free_all_rx_resources(adapter);
+		e1000_free_all_tx_resources(adapter);
+		kfree(tx_old);
+		kfree(rx_old);
 		adapter->rx_ring = rx_new;
 		adapter->tx_ring = tx_new;
 		if((err = e1000_up(adapter)))
@@ -645,7 +672,7 @@ e1000_set_ringparam(struct net_device *netdev,
 
 	return 0;
 err_setup_tx:
-	e1000_free_rx_resources(adapter);
+	e1000_free_all_rx_resources(adapter);
 err_setup_rx:
 	adapter->rx_ring = rx_old;
 	adapter->tx_ring = tx_old;
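Note the shape e1000_set_ringparam takes after this change: the old ring arrays are saved, fresh zeroed arrays are allocated and sized, and only once the new resources are fully set up are the old ones freed and kfree'd; on failure the old pointers are restored so the interface keeps running. A self-contained userspace sketch of that allocate-before-free pattern (illustrative only; struct ring and resize_rings are stand-ins, not driver code):

	#include <stdlib.h>

	struct ring { unsigned int count; };

	static int resize_rings(struct ring **live, int nrings, unsigned int count)
	{
		struct ring *old = *live;
		struct ring *new = calloc(nrings, sizeof(*new));
		int i;

		if (!new)
			return -1;	/* old rings untouched, still usable */
		for (i = 0; i < nrings; i++)
			new[i].count = count;	/* all queues share one size */
		*live = new;		/* commit only after success */
		free(old);
		return 0;
	}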
@@ -903,8 +930,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 static void
 e1000_free_desc_rings(struct e1000_adapter *adapter)
 {
-	struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
-	struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
+	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
 	int i;
 
@@ -946,8 +973,8 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
 static int
 e1000_setup_desc_rings(struct e1000_adapter *adapter)
 {
-	struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
-	struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
+	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
 	uint32_t rctl;
 	int size, i, ret_val;
@@ -1347,8 +1374,8 @@ e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 static int
 e1000_run_loopback_test(struct e1000_adapter *adapter)
 {
-	struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
-	struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
+	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
 	int i, j, k, l, lc, good_cnt, ret_val=0;
 	unsigned long time;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 407abb2c0548..5145b7345c22 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -102,10 +102,18 @@ int e1000_up(struct e1000_adapter *adapter);
 void e1000_down(struct e1000_adapter *adapter);
 void e1000_reset(struct e1000_adapter *adapter);
 int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
-int e1000_setup_tx_resources(struct e1000_adapter *adapter);
-int e1000_setup_rx_resources(struct e1000_adapter *adapter);
-void e1000_free_tx_resources(struct e1000_adapter *adapter);
-void e1000_free_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+			     struct e1000_tx_ring *txdr);
+int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+			     struct e1000_rx_ring *rxdr);
+void e1000_free_tx_resources(struct e1000_adapter *adapter,
+			     struct e1000_tx_ring *tx_ring);
+void e1000_free_rx_resources(struct e1000_adapter *adapter,
+			     struct e1000_rx_ring *rx_ring);
 void e1000_update_stats(struct e1000_adapter *adapter);
 
 /* Local Function Prototypes */
@@ -114,14 +122,22 @@ static int e1000_init_module(void);
 static void e1000_exit_module(void);
 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void __devexit e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+#ifdef CONFIG_E1000_MQ
+static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
+#endif
 static int e1000_sw_init(struct e1000_adapter *adapter);
 static int e1000_open(struct net_device *netdev);
 static int e1000_close(struct net_device *netdev);
 static void e1000_configure_tx(struct e1000_adapter *adapter);
 static void e1000_configure_rx(struct e1000_adapter *adapter);
 static void e1000_setup_rctl(struct e1000_adapter *adapter);
-static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
-static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+				struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+				struct e1000_rx_ring *rx_ring);
 static void e1000_set_multi(struct net_device *netdev);
 static void e1000_update_phy_info(unsigned long data);
 static void e1000_watchdog(unsigned long data);
@@ -132,19 +148,26 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
-static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
+static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring);
 #ifdef CONFIG_E1000_NAPI
-static int e1000_clean(struct net_device *netdev, int *budget);
+static int e1000_clean(struct net_device *poll_dev, int *budget);
 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring,
 				    int *work_done, int work_to_do);
 static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				       struct e1000_rx_ring *rx_ring,
 				       int *work_done, int work_to_do);
 #else
-static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
-static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter);
+static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring);
+static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				       struct e1000_rx_ring *rx_ring);
 #endif
-static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
-static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter);
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+				   struct e1000_rx_ring *rx_ring);
+static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+				      struct e1000_rx_ring *rx_ring);
 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 			   int cmd);
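The NAPI poll prototype now takes poll_dev rather than the adapter's one real netdev: with one polling pseudo-device per queue (polling_netdev in e1000.h above), the poll routine recovers the adapter from poll_dev->priv and services the ring that device stands for. A sketch of the idea under the 2.6 NAPI conventions of this era (illustrative; queue 0 shown for brevity, not the patch's exact body):

	static int e1000_clean(struct net_device *poll_dev, int *budget)
	{
		struct e1000_adapter *adapter = poll_dev->priv;	/* set in e1000_sw_init */
		int work_to_do = min(*budget, poll_dev->quota);
		int work_done = 0;

		adapter->clean_rx(adapter, &adapter->rx_ring[0],
				  &work_done, work_to_do);

		*budget -= work_done;
		poll_dev->quota -= work_done;
		if (work_done < work_to_do) {	/* ring drained; stop polling */
			netif_rx_complete(poll_dev);
			return 0;
		}
		return 1;			/* more work, stay on the poll list */
	}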
@@ -289,7 +312,7 @@ int
 e1000_up(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	int err;
+	int i, err;
 
 	/* hardware has been reset, we need to reload some things */
 
@@ -308,7 +331,8 @@ e1000_up(struct e1000_adapter *adapter)
 	e1000_configure_tx(adapter);
 	e1000_setup_rctl(adapter);
 	e1000_configure_rx(adapter);
-	adapter->alloc_rx_buf(adapter);
+	for (i = 0; i < adapter->num_queues; i++)
+		adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
 
 #ifdef CONFIG_PCI_MSI
 	if(adapter->hw.mac_type > e1000_82547_rev_2) {
@@ -363,8 +387,8 @@ e1000_down(struct e1000_adapter *adapter)
 	netif_stop_queue(netdev);
 
 	e1000_reset(adapter);
-	e1000_clean_tx_ring(adapter);
-	e1000_clean_rx_ring(adapter);
+	e1000_clean_all_tx_rings(adapter);
+	e1000_clean_all_rx_rings(adapter);
 
 	/* If WoL is not enabled
 	 * and management mode is not IAMT
@@ -747,6 +771,9 @@ e1000_remove(struct pci_dev *pdev)
 	uint32_t manc, swsm;
+#ifdef CONFIG_E1000_NAPI
+	int i;
+#endif
 
 	flush_scheduled_work();
 
 	if(adapter->hw.mac_type >= e1000_82540 &&
 	   adapter->hw.media_type == e1000_media_type_copper) {
@@ -775,6 +802,10 @@ e1000_remove(struct pci_dev *pdev)
 	}
 
 	unregister_netdev(netdev);
+#ifdef CONFIG_E1000_NAPI
+	for (i = 0; i < adapter->num_queues; i++)
+		__dev_put(&adapter->polling_netdev[i]);
+#endif
 
 	if(!e1000_check_phy_reset_block(&adapter->hw))
 		e1000_phy_hw_reset(&adapter->hw);
@@ -802,6 +833,9 @@ e1000_sw_init(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
+#ifdef CONFIG_E1000_NAPI
+	int i;
+#endif
 
 	/* PCI config space info */
 
@@ -859,14 +893,71 @@ e1000_sw_init(struct e1000_adapter *adapter)
 		hw->master_slave = E1000_MASTER_SLAVE;
 	}
 
+	adapter->num_queues = 1;
+
+	if (e1000_alloc_queues(adapter)) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_E1000_NAPI
+	for (i = 0; i < adapter->num_queues; i++) {
+		adapter->polling_netdev[i].priv = adapter;
+		adapter->polling_netdev[i].poll = &e1000_clean;
+		adapter->polling_netdev[i].weight = 64;
+		dev_hold(&adapter->polling_netdev[i]);
+		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
+	}
+#endif
 	atomic_set(&adapter->irq_sem, 1);
 	spin_lock_init(&adapter->stats_lock);
-	spin_lock_init(&adapter->tx_lock);
 
 	return 0;
 }
 
 /**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time. The polling_netdev array is
+ * intended for Multiqueue, but should work fine with a single queue.
+ **/
+
+static int __devinit
+e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+	int size;
+
+	size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
+	adapter->tx_ring = kmalloc(size, GFP_KERNEL);
+	if (!adapter->tx_ring)
+		return -ENOMEM;
+	memset(adapter->tx_ring, 0, size);
+
+	size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+	adapter->rx_ring = kmalloc(size, GFP_KERNEL);
+	if (!adapter->rx_ring) {
+		kfree(adapter->tx_ring);
+		return -ENOMEM;
+	}
+	memset(adapter->rx_ring, 0, size);
+
+#ifdef CONFIG_E1000_NAPI
+	size = sizeof(struct net_device) * adapter->num_queues;
+	adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
+	if (!adapter->polling_netdev) {
+		kfree(adapter->tx_ring);
+		kfree(adapter->rx_ring);
+		return -ENOMEM;
+	}
+	memset(adapter->polling_netdev, 0, size);
+#endif
+
+	return E1000_SUCCESS;
+}
+
+/**
  * e1000_open - Called when a network interface is made active
  * @netdev: network interface device structure
  *
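Each kmalloc/memset pair in e1000_alloc_queues is simply a zeroed array allocation; on kernels where it is available, the same thing can be written with kcalloc (a sketch, not part of this patch):

	adapter->tx_ring = kcalloc(adapter->num_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

kcalloc also checks the count * size multiplication for overflow, which the open-coded version does not.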
@@ -887,12 +978,12 @@ e1000_open(struct net_device *netdev)
 
 	/* allocate transmit descriptors */
 
-	if((err = e1000_setup_tx_resources(adapter)))
+	if ((err = e1000_setup_all_tx_resources(adapter)))
 		goto err_setup_tx;
 
 	/* allocate receive descriptors */
 
-	if((err = e1000_setup_rx_resources(adapter)))
+	if ((err = e1000_setup_all_rx_resources(adapter)))
 		goto err_setup_rx;
 
 	if((err = e1000_up(adapter)))
@@ -906,9 +997,9 @@ e1000_open(struct net_device *netdev)
 	return E1000_SUCCESS;
 
 err_up:
-	e1000_free_rx_resources(adapter);
+	e1000_free_all_rx_resources(adapter);
 err_setup_rx:
-	e1000_free_tx_resources(adapter);
+	e1000_free_all_tx_resources(adapter);
 err_setup_tx:
 	e1000_reset(adapter);
 
@@ -934,8 +1025,8 @@ e1000_close(struct net_device *netdev)
 
 	e1000_down(adapter);
 
-	e1000_free_tx_resources(adapter);
-	e1000_free_rx_resources(adapter);
+	e1000_free_all_tx_resources(adapter);
+	e1000_free_all_rx_resources(adapter);
 
 	if((adapter->hw.mng_cookie.status &
 	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
@@ -970,14 +1061,15 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
 /**
  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
  * @adapter: board private structure
+ * @txdr:    tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
 
 int
-e1000_setup_tx_resources(struct e1000_adapter *adapter)
+e1000_setup_tx_resources(struct e1000_adapter *adapter,
+			 struct e1000_tx_ring *txdr)
 {
-	struct e1000_desc_ring *txdr = &adapter->tx_ring;
 	struct pci_dev *pdev = adapter->pdev;
 	int size;
 
@@ -1042,6 +1134,35 @@ setup_tx_desc_die:
 }
 
 /**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ * 				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_queues; i++) {
+		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Tx Queue %u failed\n", i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
  * @adapter: board private structure
  *
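The wrapper stops at the first failing queue and leaves earlier queues populated, so the caller owns the partial cleanup. A sketch of a caller honoring that contract (illustrative, not from the patch; it assumes a never-set-up ring still has a NULL desc):

	err = e1000_setup_all_tx_resources(adapter);
	if (err) {
		/* free only the rings that were actually set up */
		for (i = 0; i < adapter->num_queues; i++)
			if (adapter->tx_ring[i].desc)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
		return err;
	}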
@@ -1051,23 +1172,28 @@ setup_tx_desc_die:
 static void
 e1000_configure_tx(struct e1000_adapter *adapter)
 {
-	uint64_t tdba = adapter->tx_ring.dma;
-	uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
-	uint32_t tctl, tipg;
-
-	E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
-	E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
-
-	E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);
+	uint64_t tdba;
+	struct e1000_hw *hw = &adapter->hw;
+	uint32_t tdlen, tctl, tipg, tarc;
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 
-	E1000_WRITE_REG(&adapter->hw, TDH, 0);
-	E1000_WRITE_REG(&adapter->hw, TDT, 0);
+	tdba = adapter->tx_ring[0].dma;
+	tdlen = adapter->tx_ring[0].count *
+		sizeof(struct e1000_tx_desc);
+	E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
+	E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
+	E1000_WRITE_REG(hw, TDLEN, tdlen);
+	E1000_WRITE_REG(hw, TDH, 0);
+	E1000_WRITE_REG(hw, TDT, 0);
+	adapter->tx_ring[0].tdh = E1000_TDH;
+	adapter->tx_ring[0].tdt = E1000_TDT;
 
 	/* Set the default values for the Tx Inter Packet Gap timer */
 
-	switch (adapter->hw.mac_type) {
+	switch (hw->mac_type) {
 	case e1000_82542_rev2_0:
 	case e1000_82542_rev2_1:
 		tipg = DEFAULT_82542_TIPG_IPGT;
@@ -1075,67 +1201,68 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
 		break;
 	default:
-		if(adapter->hw.media_type == e1000_media_type_fiber ||
-		   adapter->hw.media_type == e1000_media_type_internal_serdes)
+		if (hw->media_type == e1000_media_type_fiber ||
+		    hw->media_type == e1000_media_type_internal_serdes)
 			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
 		else
 			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
 		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
 		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
 	}
-	E1000_WRITE_REG(&adapter->hw, TIPG, tipg);
+	E1000_WRITE_REG(hw, TIPG, tipg);
 
 	/* Set the Tx Interrupt Delay register */
 
-	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
-	if(adapter->hw.mac_type >= e1000_82540)
-		E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);
+	E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
+	if (hw->mac_type >= e1000_82540)
+		E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
 
 	/* Program the Transmit Control Register */
 
-	tctl = E1000_READ_REG(&adapter->hw, TCTL);
+	tctl = E1000_READ_REG(hw, TCTL);
 
 	tctl &= ~E1000_TCTL_CT;
 	tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
 
-	E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
+	E1000_WRITE_REG(hw, TCTL, tctl);
 
-	e1000_config_collision_dist(&adapter->hw);
+	e1000_config_collision_dist(hw);
 
 	/* Setup Transmit Descriptor Settings for eop descriptor */
 	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
 		E1000_TXD_CMD_IFCS;
 
-	if(adapter->hw.mac_type < e1000_82543)
+	if (hw->mac_type < e1000_82543)
 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
 	else
 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
 
 	/* Cache if we're 82544 running in PCI-X because we'll
 	 * need this to apply a workaround later in the send path. */
-	if(adapter->hw.mac_type == e1000_82544 &&
-	   adapter->hw.bus_type == e1000_bus_type_pcix)
+	if (hw->mac_type == e1000_82544 &&
+	    hw->bus_type == e1000_bus_type_pcix)
 		adapter->pcix_82544 = 1;
 }
 
 /**
  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
  * @adapter: board private structure
+ * @rxdr:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
 
 int
-e1000_setup_rx_resources(struct e1000_adapter *adapter)
+e1000_setup_rx_resources(struct e1000_adapter *adapter,
+			 struct e1000_rx_ring *rxdr)
 {
-	struct e1000_desc_ring *rxdr = &adapter->rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
 	int size, desc_len;
 
 	size = sizeof(struct e1000_buffer) * rxdr->count;
 	rxdr->buffer_info = vmalloc(size);
-	if(!rxdr->buffer_info) {
+	if (!rxdr->buffer_info) {
 		DPRINTK(PROBE, ERR,
 		"Unable to allocate memory for the receive descriptor ring\n");
 		return -ENOMEM;
@@ -1175,13 +1302,13 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
 
 	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
 
-	if(!rxdr->desc) {
+	if (!rxdr->desc) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
 setup_rx_desc_die:
 		vfree(rxdr->buffer_info);
 		kfree(rxdr->ps_page);
 		kfree(rxdr->ps_page_dma);
-		DPRINTK(PROBE, ERR,
-		"Unable to allocate memory for the receive descriptor ring\n");
 		return -ENOMEM;
 	}
 
@@ -1193,9 +1320,12 @@ setup_rx_desc_die:
 			"at %p\n", rxdr->size, rxdr->desc);
 		/* Try again, without freeing the previous */
 		rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
-		if(!rxdr->desc) {
 		/* Failed allocation, critical failure */
+		if (!rxdr->desc) {
 			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate memory "
+				"for the receive descriptor ring\n");
 			goto setup_rx_desc_die;
 		}
 
@@ -1207,10 +1337,7 @@ setup_rx_desc_die:
 			DPRINTK(PROBE, ERR,
 				"Unable to allocate aligned memory "
 				"for the receive descriptor ring\n");
-			vfree(rxdr->buffer_info);
-			kfree(rxdr->ps_page);
-			kfree(rxdr->ps_page_dma);
-			return -ENOMEM;
+			goto setup_rx_desc_die;
 		} else {
 			/* Free old allocation, new allocation was successful */
 			pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
@@ -1225,6 +1352,35 @@ setup_rx_desc_die:
 }
 
 /**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ * 				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_queues; i++) {
+		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Rx Queue %u failed\n", i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
  * e1000_setup_rctl - configure the receive control registers
  * @adapter: Board private structure
  **/
@@ -1326,47 +1482,55 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 static void
 e1000_configure_rx(struct e1000_adapter *adapter)
 {
-	uint64_t rdba = adapter->rx_ring.dma;
-	uint32_t rdlen, rctl, rxcsum;
+	uint64_t rdba;
+	struct e1000_hw *hw = &adapter->hw;
+	uint32_t rdlen, rctl, rxcsum, ctrl_ext;
+#ifdef CONFIG_E1000_MQ
+	uint32_t reta, mrqc;
+	int i;
+#endif
 
 	if(adapter->rx_ps) {
-		rdlen = adapter->rx_ring.count *
+		rdlen = adapter->rx_ring[0].count *
 			sizeof(union e1000_rx_desc_packet_split);
 		adapter->clean_rx = e1000_clean_rx_irq_ps;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
 	} else {
-		rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
+		rdlen = adapter->rx_ring[0].count *
+			sizeof(struct e1000_rx_desc);
 		adapter->clean_rx = e1000_clean_rx_irq;
 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
 	}
 
 	/* disable receives while setting up the descriptors */
-	rctl = E1000_READ_REG(&adapter->hw, RCTL);
-	E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
+	rctl = E1000_READ_REG(hw, RCTL);
+	E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
 
 	/* set the Receive Delay Timer Register */
-	E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);
+	E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
 
-	if(adapter->hw.mac_type >= e1000_82540) {
-		E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
+	if (hw->mac_type >= e1000_82540) {
+		E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
 		if(adapter->itr > 1)
-			E1000_WRITE_REG(&adapter->hw, ITR,
+			E1000_WRITE_REG(hw, ITR,
 				1000000000 / (adapter->itr * 256));
 	}
 
-	/* Setup the Base and Length of the Rx Descriptor Ring */
-	E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
-	E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
-
-	E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);
-
-	/* Setup the HW Rx Head and Tail Descriptor Pointers */
-	E1000_WRITE_REG(&adapter->hw, RDH, 0);
-	E1000_WRITE_REG(&adapter->hw, RDT, 0);
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	switch (adapter->num_queues) {
+	case 1:
+	default:
+		rdba = adapter->rx_ring[0].dma;
+		E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
+		E1000_WRITE_REG(hw, RDLEN, rdlen);
+		E1000_WRITE_REG(hw, RDH, 0);
+		E1000_WRITE_REG(hw, RDT, 0);
+		adapter->rx_ring[0].rdh = E1000_RDH;
+		adapter->rx_ring[0].rdt = E1000_RDT;
+		break;
+	}
 
 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
-	if(adapter->hw.mac_type >= e1000_82543) {
-		rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
+	if (hw->mac_type >= e1000_82543) {
+		rxcsum = E1000_READ_REG(hw, RXCSUM);
 		if(adapter->rx_csum == TRUE) {
 			rxcsum |= E1000_RXCSUM_TUOFL;
 
@@ -1380,37 +1544,54 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 			rxcsum &= ~E1000_RXCSUM_TUOFL;
 			/* don't need to clear IPPCSE as it defaults to 0 */
 		}
-		E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
+		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
 	}
 
-	if (adapter->hw.mac_type == e1000_82573)
-		E1000_WRITE_REG(&adapter->hw, ERT, 0x0100);
+	if (hw->mac_type == e1000_82573)
+		E1000_WRITE_REG(hw, ERT, 0x0100);
 
 	/* Enable Receives */
-	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+	E1000_WRITE_REG(hw, RCTL, rctl);
 }
 
 /**
- * e1000_free_tx_resources - Free Tx Resources
+ * e1000_free_tx_resources - Free Tx Resources per Queue
  * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
  *
  * Free all transmit software resources
  **/
 
 void
-e1000_free_tx_resources(struct e1000_adapter *adapter)
+e1000_free_tx_resources(struct e1000_adapter *adapter,
+			struct e1000_tx_ring *tx_ring)
 {
 	struct pci_dev *pdev = adapter->pdev;
 
-	e1000_clean_tx_ring(adapter);
+	e1000_clean_tx_ring(adapter, tx_ring);
 
-	vfree(adapter->tx_ring.buffer_info);
-	adapter->tx_ring.buffer_info = NULL;
+	vfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
 
-	pci_free_consistent(pdev, adapter->tx_ring.size,
-			    adapter->tx_ring.desc, adapter->tx_ring.dma);
+	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
 
-	adapter->tx_ring.desc = NULL;
+	tx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void
+e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_queues; i++)
+		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
 }
 
 static inline void
@@ -1433,21 +1614,22 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
 /**
  * e1000_clean_tx_ring - Free Tx Buffers
  * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
  **/
 
 static void
-e1000_clean_tx_ring(struct e1000_adapter *adapter)
+e1000_clean_tx_ring(struct e1000_adapter *adapter,
+		    struct e1000_tx_ring *tx_ring)
 {
-	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
 	struct e1000_buffer *buffer_info;
 	unsigned long size;
 	unsigned int i;
 
 	/* Free all the Tx ring sk_buffs */
 
-	if (likely(adapter->previous_buffer_info.skb != NULL)) {
+	if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
 		e1000_unmap_and_free_tx_resource(adapter,
-				&adapter->previous_buffer_info);
+				&tx_ring->previous_buffer_info);
 	}
 
 	for(i = 0; i < tx_ring->count; i++) {
@@ -1465,24 +1647,39 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter)
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
 
-	E1000_WRITE_REG(&adapter->hw, TDH, 0);
-	E1000_WRITE_REG(&adapter->hw, TDT, 0);
+	writel(0, adapter->hw.hw_addr + tx_ring->tdh);
+	writel(0, adapter->hw.hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_queues; i++)
+		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
 }
 
 /**
  * e1000_free_rx_resources - Free Rx Resources
  * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
  *
  * Free all receive software resources
  **/
 
 void
-e1000_free_rx_resources(struct e1000_adapter *adapter)
+e1000_free_rx_resources(struct e1000_adapter *adapter,
+			struct e1000_rx_ring *rx_ring)
 {
-	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
 
-	e1000_clean_rx_ring(adapter);
+	e1000_clean_rx_ring(adapter, rx_ring);
 
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
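Ring resets now go through writel() on hw.hw_addr + ring->tdh rather than the fixed-offset E1000_WRITE_REG(..., TDH, ...): the macro can only name one queue's head/tail pair, while caching each ring's register offset (assigned in e1000_configure_tx/rx above) lets identical code drive whichever queue it is handed. A sketch of the resulting helper shape (illustrative; e1000_ring_write_head is a hypothetical name, not in the patch):

	static inline void e1000_ring_write_head(struct e1000_adapter *adapter,
						 struct e1000_tx_ring *ring,
						 uint32_t val)
	{
		/* ring->tdh holds this queue's TDH offset into BAR 0 */
		writel(val, adapter->hw.hw_addr + ring->tdh);
	}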
@@ -1497,14 +1694,31 @@ e1000_free_rx_resources(struct e1000_adapter *adapter)
 }
 
 /**
- * e1000_clean_rx_ring - Free Rx Buffers
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void
+e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_queues; i++)
+		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
  * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
  **/
 
 static void
-e1000_clean_rx_ring(struct e1000_adapter *adapter)
+e1000_clean_rx_ring(struct e1000_adapter *adapter,
+		    struct e1000_rx_ring *rx_ring)
 {
-	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
 	struct e1000_buffer *buffer_info;
 	struct e1000_ps_page *ps_page;
 	struct e1000_ps_page_dma *ps_page_dma;
@@ -1553,8 +1767,22 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
-	E1000_WRITE_REG(&adapter->hw, RDH, 0);
-	E1000_WRITE_REG(&adapter->hw, RDT, 0);
+	writel(0, adapter->hw.hw_addr + rx_ring->rdh);
+	writel(0, adapter->hw.hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_queues; i++)
+		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
 }
 
 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
@@ -1575,7 +1803,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
 	mdelay(5);
 
 	if(netif_running(netdev))
-		e1000_clean_rx_ring(adapter);
+		e1000_clean_all_rx_rings(adapter);
 }
 
 static void
@@ -1595,7 +1823,7 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
 
 	if(netif_running(netdev)) {
 		e1000_configure_rx(adapter);
-		e1000_alloc_rx_buffers(adapter);
+		e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
 	}
 }
 
@@ -1664,12 +1892,10 @@ e1000_set_multi(struct net_device *netdev)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct dev_mc_list *mc_ptr;
-	unsigned long flags;
 	uint32_t rctl;
 	uint32_t hash_value;
 	int i, rar_entries = E1000_RAR_ENTRIES;
 
-	spin_lock_irqsave(&adapter->tx_lock, flags);
 	/* reserve RAR[14] for LAA over-write work-around */
 	if (adapter->hw.mac_type == e1000_82571)
 		rar_entries--;
@@ -1725,8 +1951,6 @@ e1000_set_multi(struct net_device *netdev)
 
 	if(hw->mac_type == e1000_82542_rev2_0)
 		e1000_leave_82542_rst(adapter);
-
-	spin_unlock_irqrestore(&adapter->tx_lock, flags);
 }
 
 /* Need to wait a few seconds after link up to get diagnostic information from
@@ -1798,7 +2022,7 @@ static void
 e1000_watchdog_task(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	struct e1000_desc_ring *txdr = &adapter->tx_ring;
+	struct e1000_tx_ring *txdr = &adapter->tx_ring[0];
 	uint32_t link;
 
 	e1000_check_for_link(&adapter->hw);
@@ -1857,8 +2081,8 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 
 	e1000_update_adaptive(&adapter->hw);
 
-	if(!netif_carrier_ok(netdev)) {
-		if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
+	if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) {
+		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
 			/* We've lost link, so the controller stops DMA,
 			 * but we've got queued Tx work that's never going
 			 * to get done, so reset controller to flush Tx.
@@ -1903,7 +2127,8 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 #define E1000_TX_FLAGS_VLAN_SHIFT	16
 
 static inline int
-e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
+e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+	  struct sk_buff *skb)
 {
 #ifdef NETIF_F_TSO
 	struct e1000_context_desc *context_desc;
@@ -1954,8 +2179,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
 
-		i = adapter->tx_ring.next_to_use;
-		context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
+		i = tx_ring->next_to_use;
+		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
 
 		context_desc->lower_setup.ip_fields.ipcss = ipcss;
 		context_desc->lower_setup.ip_fields.ipcso = ipcso;
@@ -1967,8 +2192,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
-		if(++i == adapter->tx_ring.count) i = 0;
-		adapter->tx_ring.next_to_use = i;
+		if (++i == tx_ring->count) i = 0;
+		tx_ring->next_to_use = i;
 
 		return 1;
 	}
@@ -1978,7 +2203,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
 }
 
 static inline boolean_t
-e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
+e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+	      struct sk_buff *skb)
 {
 	struct e1000_context_desc *context_desc;
 	unsigned int i;
@@ -1987,8 +2213,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
 	if(likely(skb->ip_summed == CHECKSUM_HW)) {
 		css = skb->h.raw - skb->data;
 
-		i = adapter->tx_ring.next_to_use;
-		context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
+		i = tx_ring->next_to_use;
+		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
 
 		context_desc->upper_setup.tcp_fields.tucss = css;
 		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
@@ -1996,8 +2222,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
 		context_desc->tcp_seg_setup.data = 0;
 		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
 
-		if(unlikely(++i == adapter->tx_ring.count)) i = 0;
-		adapter->tx_ring.next_to_use = i;
+		if (unlikely(++i == tx_ring->count)) i = 0;
+		tx_ring->next_to_use = i;
 
 		return TRUE;
 	}
@@ -2009,11 +2235,10 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
 
 static inline int
-e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
-	     unsigned int first, unsigned int max_per_txd,
+e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+	     struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
 	     unsigned int nr_frags, unsigned int mss)
 {
-	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
 	struct e1000_buffer *buffer_info;
 	unsigned int len = skb->len;
 	unsigned int offset = 0, size, count = 0, i;
@@ -2109,9 +2334,9 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
 }
 
 static inline void
-e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
+e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+	       int tx_flags, int count)
 {
-	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
 	struct e1000_tx_desc *tx_desc = NULL;
 	struct e1000_buffer *buffer_info;
 	uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -2157,7 +2382,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
2157 wmb(); 2382 wmb();
2158 2383
2159 tx_ring->next_to_use = i; 2384 tx_ring->next_to_use = i;
2160 E1000_WRITE_REG(&adapter->hw, TDT, i); 2385 writel(i, adapter->hw.hw_addr + tx_ring->tdt);
2161} 2386}
2162 2387
2163/** 2388/**
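
The writel() against tx_ring->tdt replaces the fixed E1000_WRITE_REG(..., TDT, ...) because each queue now has its own tail register. The setup code is not shown in these hunks; presumably it records the per-ring register offsets roughly as below (E1000_TDH/TDT and E1000_TDH1/TDT1 are existing e1000_hw.h register offsets; the switch itself is a sketch):

/* Sketch: remember each queue's head/tail register offset so the
 * hot path can poke it with readl()/writel() directly. */
switch (adapter->num_queues) {
case 2:
        adapter->tx_ring[1].tdh = E1000_TDH1;
        adapter->tx_ring[1].tdt = E1000_TDT1;
        /* fall through to set up queue 0 as well */
case 1:
default:
        adapter->tx_ring[0].tdh = E1000_TDH;
        adapter->tx_ring[0].tdt = E1000_TDT;
        break;
}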
@@ -2250,6 +2475,7 @@ static int
2250e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2475e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2251{ 2476{
2252 struct e1000_adapter *adapter = netdev_priv(netdev); 2477 struct e1000_adapter *adapter = netdev_priv(netdev);
2478 struct e1000_tx_ring *tx_ring;
2253 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 2479 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
2254 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 2480 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
2255 unsigned int tx_flags = 0; 2481 unsigned int tx_flags = 0;
@@ -2262,7 +2488,8 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2262 unsigned int f; 2488 unsigned int f;
2263 len -= skb->data_len; 2489 len -= skb->data_len;
2264 2490
2265 if(unlikely(skb->len <= 0)) { 2491 tx_ring = adapter->tx_ring;
2492 if (unlikely(skb->len <= 0)) {
2266 dev_kfree_skb_any(skb); 2493 dev_kfree_skb_any(skb);
2267 return NETDEV_TX_OK; 2494 return NETDEV_TX_OK;
2268 } 2495 }
@@ -2306,12 +2533,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2306 if(adapter->pcix_82544) 2533 if(adapter->pcix_82544)
2307 count += nr_frags; 2534 count += nr_frags;
2308 2535
2309 local_irq_save(flags);
2310 if (!spin_trylock(&adapter->tx_lock)) {
2311 /* Collision - tell upper layer to requeue */
2312 local_irq_restore(flags);
2313 return NETDEV_TX_LOCKED;
2314 }
2315#ifdef NETIF_F_TSO 2536#ifdef NETIF_F_TSO
2316 /* TSO Workaround for 82571/2 Controllers -- if skb->data 2537 /* TSO Workaround for 82571/2 Controllers -- if skb->data
2317 * points to just header, pull a few bytes of payload from 2538 * points to just header, pull a few bytes of payload from
@@ -2336,12 +2557,18 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2336 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) 2557 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2337 e1000_transfer_dhcp_info(adapter, skb); 2558 e1000_transfer_dhcp_info(adapter, skb);
2338 2559
2560 local_irq_save(flags);
2561 if (!spin_trylock(&tx_ring->tx_lock)) {
2562 /* Collision - tell upper layer to requeue */
2563 local_irq_restore(flags);
2564 return NETDEV_TX_LOCKED;
2565 }
2339 2566
2340 /* need: count + 2 desc gap to keep tail from touching 2567 /* need: count + 2 desc gap to keep tail from touching
2341 * head, otherwise try next time */ 2568 * head, otherwise try next time */
2342 if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) { 2569 if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) {
2343 netif_stop_queue(netdev); 2570 netif_stop_queue(netdev);
2344 spin_unlock_irqrestore(&adapter->tx_lock, flags); 2571 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2345 return NETDEV_TX_BUSY; 2572 return NETDEV_TX_BUSY;
2346 } 2573 }
2347 2574
@@ -2349,7 +2576,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2349 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 2576 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
2350 netif_stop_queue(netdev); 2577 netif_stop_queue(netdev);
2351 mod_timer(&adapter->tx_fifo_stall_timer, jiffies); 2578 mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
2352 spin_unlock_irqrestore(&adapter->tx_lock, flags); 2579 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2353 return NETDEV_TX_BUSY; 2580 return NETDEV_TX_BUSY;
2354 } 2581 }
2355 } 2582 }
@@ -2359,37 +2586,37 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2359 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 2586 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
2360 } 2587 }
2361 2588
2362 first = adapter->tx_ring.next_to_use; 2589 first = tx_ring->next_to_use;
2363 2590
2364 tso = e1000_tso(adapter, skb); 2591 tso = e1000_tso(adapter, tx_ring, skb);
2365 if (tso < 0) { 2592 if (tso < 0) {
2366 dev_kfree_skb_any(skb); 2593 dev_kfree_skb_any(skb);
2367 spin_unlock_irqrestore(&adapter->tx_lock, flags); 2594 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2368 return NETDEV_TX_OK; 2595 return NETDEV_TX_OK;
2369 } 2596 }
2370 2597
2371 if (likely(tso)) 2598 if (likely(tso))
2372 tx_flags |= E1000_TX_FLAGS_TSO; 2599 tx_flags |= E1000_TX_FLAGS_TSO;
2373 else if(likely(e1000_tx_csum(adapter, skb))) 2600 else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
2374 tx_flags |= E1000_TX_FLAGS_CSUM; 2601 tx_flags |= E1000_TX_FLAGS_CSUM;
2375 2602
2376 /* Old method was to assume IPv4 packet by default if TSO was enabled. 2603 /* Old method was to assume IPv4 packet by default if TSO was enabled.
2377 * 82571 hardware supports TSO capabilities for IPv6 as well... 2604 * 82571 hardware supports TSO capabilities for IPv6 as well...
2378 * no longer assume, we must. */ 2605 * no longer assume, we must. */
2379 if(likely(skb->protocol == ntohs(ETH_P_IP))) 2606 if (likely(skb->protocol == ntohs(ETH_P_IP)))
2380 tx_flags |= E1000_TX_FLAGS_IPV4; 2607 tx_flags |= E1000_TX_FLAGS_IPV4;
2381 2608
2382 e1000_tx_queue(adapter, 2609 e1000_tx_queue(adapter, tx_ring, tx_flags,
2383 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), 2610 e1000_tx_map(adapter, tx_ring, skb, first,
2384 tx_flags); 2611 max_per_txd, nr_frags, mss));
2385 2612
2386 netdev->trans_start = jiffies; 2613 netdev->trans_start = jiffies;
2387 2614
2388 /* Make sure there is space in the ring for the next send. */ 2615 /* Make sure there is space in the ring for the next send. */
2389 if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2)) 2616 if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 2))
2390 netif_stop_queue(netdev); 2617 netif_stop_queue(netdev);
2391 2618
2392 spin_unlock_irqrestore(&adapter->tx_lock, flags); 2619 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2393 return NETDEV_TX_OK; 2620 return NETDEV_TX_OK;
2394} 2621}
2395 2622
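
Note that the transmit path above still selects queue 0 unconditionally (tx_ring = adapter->tx_ring). With CONFIG_E1000_MQ and the per-cpu cpu_tx_ring pointer, the natural selection would look like the sketch below; this is an assumption about the MQ wiring, not code taken from these hunks:

#ifdef CONFIG_E1000_MQ
        /* Sketch: use the ring bound to the CPU submitting the skb. */
        tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
#else
        tx_ring = adapter->tx_ring;
#endif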
@@ -2666,9 +2893,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
2666 struct e1000_adapter *adapter = netdev_priv(netdev); 2893 struct e1000_adapter *adapter = netdev_priv(netdev);
2667 struct e1000_hw *hw = &adapter->hw; 2894 struct e1000_hw *hw = &adapter->hw;
2668 uint32_t icr = E1000_READ_REG(hw, ICR); 2895 uint32_t icr = E1000_READ_REG(hw, ICR);
2669#ifndef CONFIG_E1000_NAPI 2896 int i;
2670 unsigned int i;
2671#endif
2672 2897
2673 if(unlikely(!icr)) 2898 if(unlikely(!icr))
2674 return IRQ_NONE; /* Not our interrupt */ 2899 return IRQ_NONE; /* Not our interrupt */
@@ -2679,17 +2904,15 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
2679 } 2904 }
2680 2905
2681#ifdef CONFIG_E1000_NAPI 2906#ifdef CONFIG_E1000_NAPI
2682 if(likely(netif_rx_schedule_prep(netdev))) { 2907 atomic_inc(&adapter->irq_sem);
2683 2908 E1000_WRITE_REG(hw, IMC, ~0);
2684 /* Disable interrupts and register for poll. The flush 2909 E1000_WRITE_FLUSH(hw);
2685 of the posted write is intentionally left out. 2910
2686 */ 2911 if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
2687 2912 __netif_rx_schedule(&adapter->polling_netdev[0]);
2688 atomic_inc(&adapter->irq_sem); 2913 else
2689 E1000_WRITE_REG(hw, IMC, ~0); 2914 e1000_irq_enable(adapter);
2690 __netif_rx_schedule(netdev);
2691 }
2692#else 2915#else
2693 /* Writing IMC and IMS is needed for 82547. 2916 /* Writing IMC and IMS is needed for 82547.
2694 Due to Hub Link bus being occupied, an interrupt 2917 Due to Hub Link bus being occupied, an interrupt
2695 de-assertion message is not able to be sent. 2918 de-assertion message is not able to be sent.
@@ -2706,12 +2929,13 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
2706 } 2929 }
2707 2930
2708 for(i = 0; i < E1000_MAX_INTR; i++) 2931 for(i = 0; i < E1000_MAX_INTR; i++)
2709 if(unlikely(!adapter->clean_rx(adapter) & 2932 if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
2710 !e1000_clean_tx_irq(adapter))) 2933 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
2711 break; 2934 break;
2712 2935
2713 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) 2936 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
2714 e1000_irq_enable(adapter); 2937 e1000_irq_enable(adapter);
2938
2715#endif 2939#endif
2716 2940
2717 return IRQ_HANDLED; 2941 return IRQ_HANDLED;
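
For reference, the reworked NAPI branch follows the usual mask-then-schedule discipline. An annotated restatement (the comments are editorial; the statements mirror the hunk above):

atomic_inc(&adapter->irq_sem);   /* balanced by a later e1000_irq_enable() */
E1000_WRITE_REG(hw, IMC, ~0);    /* mask every interrupt cause */
E1000_WRITE_FLUSH(hw);           /* force the posted write out to the NIC */

if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
        __netif_rx_schedule(&adapter->polling_netdev[0]);
else
        e1000_irq_enable(adapter);   /* lost the race: a poll is already queued */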
@@ -2724,22 +2948,37 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
2724 **/ 2948 **/
2725 2949
2726static int 2950static int
2727e1000_clean(struct net_device *netdev, int *budget) 2951e1000_clean(struct net_device *poll_dev, int *budget)
2728{ 2952{
2729 struct e1000_adapter *adapter = netdev_priv(netdev); 2953 struct e1000_adapter *adapter;
2730 int work_to_do = min(*budget, netdev->quota); 2954 int work_to_do = min(*budget, poll_dev->quota);
2731 int tx_cleaned; 2955 int tx_cleaned, i = 0, work_done = 0;
2732 int work_done = 0; 2956
2957 /* Must NOT use netdev_priv macro here. */
2958 adapter = poll_dev->priv;
2959
2960 /* Keep link state information with original netdev */
2961 if (!netif_carrier_ok(adapter->netdev))
2962 goto quit_polling;
2733 2963
2734 tx_cleaned = e1000_clean_tx_irq(adapter); 2964 while (poll_dev != &adapter->polling_netdev[i]) {
2735 adapter->clean_rx(adapter, &work_done, work_to_do); 2965 i++;
2966 if (unlikely(i == adapter->num_queues))
2967 BUG();
2968 }
2969
2970 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
2971 adapter->clean_rx(adapter, &adapter->rx_ring[i],
2972 &work_done, work_to_do);
2736 2973
2737 *budget -= work_done; 2974 *budget -= work_done;
2738 netdev->quota -= work_done; 2975 poll_dev->quota -= work_done;
2739 2976
2740 if ((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
2741 /* If no Tx and not enough Rx work done, exit the polling mode */ 2977 /* If no Tx and not enough Rx work done, exit the polling mode */
2742 netif_rx_complete(netdev); 2978 if((!tx_cleaned && (work_done == 0)) ||
2979 !netif_running(adapter->netdev)) {
2980quit_polling:
2981 netif_rx_complete(poll_dev);
2743 e1000_irq_enable(adapter); 2982 e1000_irq_enable(adapter);
2744 return 0; 2983 return 0;
2745 } 2984 }
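
e1000_clean() has to read poll_dev->priv directly because the per-queue polling_netdev[] entries are bare struct net_device shells rather than devices allocated through alloc_etherdev(), so netdev_priv()'s offset arithmetic would point into garbage. The probe-time wiring is presumably along these lines (a sketch using this patch's field names; the weight value is an assumption):

/* Sketch: one dummy polling device per queue, each sharing the
 * adapter as ->priv and e1000_clean() as its poll routine. */
for (i = 0; i < adapter->num_queues; i++) {
        adapter->polling_netdev[i].priv   = adapter;
        adapter->polling_netdev[i].poll   = &e1000_clean;
        adapter->polling_netdev[i].weight = 64;
        set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
}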
@@ -2754,9 +2993,9 @@ e1000_clean(struct net_device *netdev, int *budget)
2754 **/ 2993 **/
2755 2994
2756static boolean_t 2995static boolean_t
2757e1000_clean_tx_irq(struct e1000_adapter *adapter) 2996e1000_clean_tx_irq(struct e1000_adapter *adapter,
2997 struct e1000_tx_ring *tx_ring)
2758{ 2998{
2759 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
2760 struct net_device *netdev = adapter->netdev; 2999 struct net_device *netdev = adapter->netdev;
2761 struct e1000_tx_desc *tx_desc, *eop_desc; 3000 struct e1000_tx_desc *tx_desc, *eop_desc;
2762 struct e1000_buffer *buffer_info; 3001 struct e1000_buffer *buffer_info;
@@ -2767,12 +3006,12 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2767 eop = tx_ring->buffer_info[i].next_to_watch; 3006 eop = tx_ring->buffer_info[i].next_to_watch;
2768 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3007 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2769 3008
2770 while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 3009 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
2771 /* Premature writeback of Tx descriptors clear (free buffers 3010 /* Premature writeback of Tx descriptors clear (free buffers
2772 * and unmap pci_mapping) previous_buffer_info */ 3011 * and unmap pci_mapping) previous_buffer_info */
2773 if (likely(adapter->previous_buffer_info.skb != NULL)) { 3012 if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
2774 e1000_unmap_and_free_tx_resource(adapter, 3013 e1000_unmap_and_free_tx_resource(adapter,
2775 &adapter->previous_buffer_info); 3014 &tx_ring->previous_buffer_info);
2776 } 3015 }
2777 3016
2778 for(cleaned = FALSE; !cleaned; ) { 3017 for(cleaned = FALSE; !cleaned; ) {
@@ -2788,7 +3027,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2788#ifdef NETIF_F_TSO 3027#ifdef NETIF_F_TSO
2789 } else { 3028 } else {
2790 if (cleaned) { 3029 if (cleaned) {
2791 memcpy(&adapter->previous_buffer_info, 3030 memcpy(&tx_ring->previous_buffer_info,
2792 buffer_info, 3031 buffer_info,
2793 sizeof(struct e1000_buffer)); 3032 sizeof(struct e1000_buffer));
2794 memset(buffer_info, 0, 3033 memset(buffer_info, 0,
@@ -2806,6 +3045,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2806 3045
2807 if(unlikely(++i == tx_ring->count)) i = 0; 3046 if(unlikely(++i == tx_ring->count)) i = 0;
2808 } 3047 }
3048
3049 tx_ring->pkt++;
2809 3050
2810 eop = tx_ring->buffer_info[i].next_to_watch; 3051 eop = tx_ring->buffer_info[i].next_to_watch;
2811 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3052 eop_desc = E1000_TX_DESC(*tx_ring, eop);
@@ -2813,15 +3054,15 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2813 3054
2814 tx_ring->next_to_clean = i; 3055 tx_ring->next_to_clean = i;
2815 3056
2816 spin_lock(&adapter->tx_lock); 3057 spin_lock(&tx_ring->tx_lock);
2817 3058
2818 if(unlikely(cleaned && netif_queue_stopped(netdev) && 3059 if(unlikely(cleaned && netif_queue_stopped(netdev) &&
2819 netif_carrier_ok(netdev))) 3060 netif_carrier_ok(netdev)))
2820 netif_wake_queue(netdev); 3061 netif_wake_queue(netdev);
2821 3062
2822 spin_unlock(&adapter->tx_lock); 3063 spin_unlock(&tx_ring->tx_lock);
2823 if(adapter->detect_tx_hung) {
2824 3064
3065 if (adapter->detect_tx_hung) {
2825 /* Detect a transmit hang in hardware, this serializes the 3066 /* Detect a transmit hang in hardware, this serializes the
2826 * check with the clearing of time_stamp and movement of i */ 3067 * check with the clearing of time_stamp and movement of i */
2827 adapter->detect_tx_hung = FALSE; 3068 adapter->detect_tx_hung = FALSE;
@@ -2845,8 +3086,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2845 " next_to_watch <%x>\n" 3086 " next_to_watch <%x>\n"
2846 " jiffies <%lx>\n" 3087 " jiffies <%lx>\n"
2847 " next_to_watch.status <%x>\n", 3088 " next_to_watch.status <%x>\n",
2848 E1000_READ_REG(&adapter->hw, TDH), 3089 readl(adapter->hw.hw_addr + tx_ring->tdh),
2849 E1000_READ_REG(&adapter->hw, TDT), 3090 readl(adapter->hw.hw_addr + tx_ring->tdt),
2850 tx_ring->next_to_use, 3091 tx_ring->next_to_use,
2851 i, 3092 i,
2852 (unsigned long long)tx_ring->buffer_info[i].dma, 3093 (unsigned long long)tx_ring->buffer_info[i].dma,
@@ -2858,12 +3099,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2858 } 3099 }
2859 } 3100 }
2860#ifdef NETIF_F_TSO 3101#ifdef NETIF_F_TSO
2861 3102 if (unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
2862 if( unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3103 time_after(jiffies, tx_ring->previous_buffer_info.time_stamp + HZ)))
2863 time_after(jiffies, adapter->previous_buffer_info.time_stamp + HZ)))
2864 e1000_unmap_and_free_tx_resource( 3104 e1000_unmap_and_free_tx_resource(
2865 adapter, &adapter->previous_buffer_info); 3105 adapter, &tx_ring->previous_buffer_info);
2866
2867#endif 3106#endif
2868 return cleaned; 3107 return cleaned;
2869} 3108}
@@ -2926,13 +3165,14 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
2926 3165
2927static boolean_t 3166static boolean_t
2928#ifdef CONFIG_E1000_NAPI 3167#ifdef CONFIG_E1000_NAPI
2929e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done, 3168e1000_clean_rx_irq(struct e1000_adapter *adapter,
2930 int work_to_do) 3169 struct e1000_rx_ring *rx_ring,
3170 int *work_done, int work_to_do)
2931#else 3171#else
2932e1000_clean_rx_irq(struct e1000_adapter *adapter) 3172e1000_clean_rx_irq(struct e1000_adapter *adapter,
3173 struct e1000_rx_ring *rx_ring)
2933#endif 3174#endif
2934{ 3175{
2935 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2936 struct net_device *netdev = adapter->netdev; 3176 struct net_device *netdev = adapter->netdev;
2937 struct pci_dev *pdev = adapter->pdev; 3177 struct pci_dev *pdev = adapter->pdev;
2938 struct e1000_rx_desc *rx_desc; 3178 struct e1000_rx_desc *rx_desc;
@@ -3018,6 +3258,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
3018 } 3258 }
3019#endif /* CONFIG_E1000_NAPI */ 3259#endif /* CONFIG_E1000_NAPI */
3020 netdev->last_rx = jiffies; 3260 netdev->last_rx = jiffies;
3261 rx_ring->pkt++;
3021 3262
3022next_desc: 3263next_desc:
3023 rx_desc->status = 0; 3264 rx_desc->status = 0;
@@ -3027,7 +3268,7 @@ next_desc:
3027 rx_desc = E1000_RX_DESC(*rx_ring, i); 3268 rx_desc = E1000_RX_DESC(*rx_ring, i);
3028 } 3269 }
3029 rx_ring->next_to_clean = i; 3270 rx_ring->next_to_clean = i;
3030 adapter->alloc_rx_buf(adapter); 3271 adapter->alloc_rx_buf(adapter, rx_ring);
3031 3272
3032 return cleaned; 3273 return cleaned;
3033} 3274}
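
Both RX cleaners now pass the ring being serviced back to the refill hook, so the function-pointer field in struct e1000_adapter must change shape to match these call sites (the type below is implied by them, shown for the non-NAPI case):

/* Implied callback type for adapter->alloc_rx_buf after this patch: */
void (*alloc_rx_buf)(struct e1000_adapter *adapter,
                     struct e1000_rx_ring *rx_ring);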
@@ -3039,13 +3280,14 @@ next_desc:
3039 3280
3040static boolean_t 3281static boolean_t
3041#ifdef CONFIG_E1000_NAPI 3282#ifdef CONFIG_E1000_NAPI
3042e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done, 3283e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3043 int work_to_do) 3284 struct e1000_rx_ring *rx_ring,
3285 int *work_done, int work_to_do)
3044#else 3286#else
3045e1000_clean_rx_irq_ps(struct e1000_adapter *adapter) 3287e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3288 struct e1000_rx_ring *rx_ring)
3046#endif 3289#endif
3047{ 3290{
3048 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
3049 union e1000_rx_desc_packet_split *rx_desc; 3291 union e1000_rx_desc_packet_split *rx_desc;
3050 struct net_device *netdev = adapter->netdev; 3292 struct net_device *netdev = adapter->netdev;
3051 struct pci_dev *pdev = adapter->pdev; 3293 struct pci_dev *pdev = adapter->pdev;
@@ -3145,6 +3387,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
3145 } 3387 }
3146#endif /* CONFIG_E1000_NAPI */ 3388#endif /* CONFIG_E1000_NAPI */
3147 netdev->last_rx = jiffies; 3389 netdev->last_rx = jiffies;
3390 rx_ring->pkt++;
3148 3391
3149next_desc: 3392next_desc:
3150 rx_desc->wb.middle.status_error &= ~0xFF; 3393 rx_desc->wb.middle.status_error &= ~0xFF;
@@ -3155,7 +3398,7 @@ next_desc:
3155 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3398 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3156 } 3399 }
3157 rx_ring->next_to_clean = i; 3400 rx_ring->next_to_clean = i;
3158 adapter->alloc_rx_buf(adapter); 3401 adapter->alloc_rx_buf(adapter, rx_ring);
3159 3402
3160 return cleaned; 3403 return cleaned;
3161} 3404}
@@ -3166,9 +3409,9 @@ next_desc:
3166 **/ 3409 **/
3167 3410
3168static void 3411static void
3169e1000_alloc_rx_buffers(struct e1000_adapter *adapter) 3412e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3413 struct e1000_rx_ring *rx_ring)
3170{ 3414{
3171 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
3172 struct net_device *netdev = adapter->netdev; 3415 struct net_device *netdev = adapter->netdev;
3173 struct pci_dev *pdev = adapter->pdev; 3416 struct pci_dev *pdev = adapter->pdev;
3174 struct e1000_rx_desc *rx_desc; 3417 struct e1000_rx_desc *rx_desc;
@@ -3252,7 +3495,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
3252 * applicable for weak-ordered memory model archs, 3495 * applicable for weak-ordered memory model archs,
3253 * such as IA-64). */ 3496 * such as IA-64). */
3254 wmb(); 3497 wmb();
3255 E1000_WRITE_REG(&adapter->hw, RDT, i); 3498 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
3256 } 3499 }
3257 3500
3258 if(unlikely(++i == rx_ring->count)) i = 0; 3501 if(unlikely(++i == rx_ring->count)) i = 0;
@@ -3268,9 +3511,9 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
3268 **/ 3511 **/
3269 3512
3270static void 3513static void
3271e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter) 3514e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3515 struct e1000_rx_ring *rx_ring)
3272{ 3516{
3273 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
3274 struct net_device *netdev = adapter->netdev; 3517 struct net_device *netdev = adapter->netdev;
3275 struct pci_dev *pdev = adapter->pdev; 3518 struct pci_dev *pdev = adapter->pdev;
3276 union e1000_rx_desc_packet_split *rx_desc; 3519 union e1000_rx_desc_packet_split *rx_desc;
@@ -3338,7 +3581,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
3338 * descriptors are 32 bytes...so we increment tail 3581 * descriptors are 32 bytes...so we increment tail
3339 * twice as much. 3582 * twice as much.
3340 */ 3583 */
3341 E1000_WRITE_REG(&adapter->hw, RDT, i<<1); 3584 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
3342 } 3585 }
3343 3586
3344 if(unlikely(++i == rx_ring->count)) i = 0; 3587 if(unlikely(++i == rx_ring->count)) i = 0;
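
The i<<1 keeps the doubling the old E1000_WRITE_REG(..., RDT, i<<1) performed: packet-split descriptors are 32 bytes, twice the legacy 16, so the tail value advances two units per software index. A worked example with illustrative numbers:

/* With 256 packet-split descriptors:
 *   software index i = 128
 *   tail written     = 128 << 1 = 256   (32-byte descriptors count
 *                                        double in 16-byte units)
 */
writel(i << 1, adapter->hw.hw_addr + rx_ring->rdt);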
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 676247f9f1cc..38695d5b4637 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -306,7 +306,8 @@ e1000_check_options(struct e1000_adapter *adapter)
306 .def = E1000_DEFAULT_TXD, 306 .def = E1000_DEFAULT_TXD,
307 .arg = { .r = { .min = E1000_MIN_TXD }} 307 .arg = { .r = { .min = E1000_MIN_TXD }}
308 }; 308 };
309 struct e1000_desc_ring *tx_ring = &adapter->tx_ring; 309 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
310 int i;
310 e1000_mac_type mac_type = adapter->hw.mac_type; 311 e1000_mac_type mac_type = adapter->hw.mac_type;
311 opt.arg.r.max = mac_type < e1000_82544 ? 312 opt.arg.r.max = mac_type < e1000_82544 ?
312 E1000_MAX_TXD : E1000_MAX_82544_TXD; 313 E1000_MAX_TXD : E1000_MAX_82544_TXD;
@@ -319,6 +320,8 @@ e1000_check_options(struct e1000_adapter *adapter)
319 } else { 320 } else {
320 tx_ring->count = opt.def; 321 tx_ring->count = opt.def;
321 } 322 }
323 for (i = 0; i < adapter->num_queues; i++)
324 tx_ring[i].count = tx_ring->count;
322 } 325 }
323 { /* Receive Descriptor Count */ 326 { /* Receive Descriptor Count */
324 struct e1000_option opt = { 327 struct e1000_option opt = {
@@ -329,7 +332,8 @@ e1000_check_options(struct e1000_adapter *adapter)
329 .def = E1000_DEFAULT_RXD, 332 .def = E1000_DEFAULT_RXD,
330 .arg = { .r = { .min = E1000_MIN_RXD }} 333 .arg = { .r = { .min = E1000_MIN_RXD }}
331 }; 334 };
332 struct e1000_desc_ring *rx_ring = &adapter->rx_ring; 335 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
336 int i;
333 e1000_mac_type mac_type = adapter->hw.mac_type; 337 e1000_mac_type mac_type = adapter->hw.mac_type;
334 opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD : 338 opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
335 E1000_MAX_82544_RXD; 339 E1000_MAX_82544_RXD;
@@ -342,6 +346,8 @@ e1000_check_options(struct e1000_adapter *adapter)
342 } else { 346 } else {
343 rx_ring->count = opt.def; 347 rx_ring->count = opt.def;
344 } 348 }
349 for (i = 0; i < adapter->num_queues; i++)
350 rx_ring[i].count = rx_ring->count;
345 } 351 }
346 { /* Checksum Offload Enable/Disable */ 352 { /* Checksum Offload Enable/Disable */
347 struct e1000_option opt = { 353 struct e1000_option opt = {