-rw-r--r-- drivers/net/e1000/e1000.h         |  26
-rw-r--r-- drivers/net/e1000/e1000_ethtool.c |  43
-rw-r--r-- drivers/net/e1000/e1000_main.c    | 227
3 files changed, 4 insertions(+), 292 deletions(-)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 99baf0e099fc..658f36ad8b4f 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -83,10 +83,6 @@
 struct e1000_adapter;
 
 #include "e1000_hw.h"
-#ifdef CONFIG_E1000_MQ
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#endif
 
 #ifdef DBG
 #define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
@@ -169,12 +165,6 @@ struct e1000_buffer {
 	uint16_t next_to_watch;
 };
 
-#ifdef CONFIG_E1000_MQ
-struct e1000_queue_stats {
-	uint64_t packets;
-	uint64_t bytes;
-};
-#endif
 
 struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
 struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
@@ -198,12 +188,7 @@ struct e1000_tx_ring {
 	spinlock_t tx_lock;
 	uint16_t tdh;
 	uint16_t tdt;
-
 	boolean_t last_tx_tso;
-
-#ifdef CONFIG_E1000_MQ
-	struct e1000_queue_stats tx_stats;
-#endif
 };
 
 struct e1000_rx_ring {
@@ -230,9 +215,6 @@ struct e1000_rx_ring {
 
 	uint16_t rdh;
 	uint16_t rdt;
-#ifdef CONFIG_E1000_MQ
-	struct e1000_queue_stats rx_stats;
-#endif
 };
 
 #define E1000_DESC_UNUSED(R) \
@@ -278,9 +260,6 @@ struct e1000_adapter {
 
 	/* TX */
 	struct e1000_tx_ring *tx_ring;      /* One per active queue */
-#ifdef CONFIG_E1000_MQ
-	struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
-#endif
 	unsigned long tx_queue_len;
 	uint32_t txd_cmd;
 	uint32_t tx_int_delay;
@@ -314,11 +293,6 @@ struct e1000_adapter {
 #ifdef CONFIG_E1000_NAPI
 	struct net_device *polling_netdev;  /* One per active queue */
 #endif
-#ifdef CONFIG_E1000_MQ
-	struct net_device **cpu_netdev;     /* per-cpu */
-	struct call_async_data_struct rx_sched_call_data;
-	cpumask_t cpumask;
-#endif
 	int num_tx_queues;
 	int num_rx_queues;
 
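As a side note, the E1000_DESC_UNUSED() macro kept in this header computes the number of free slots between a ring's consumer (next_to_clean) and producer (next_to_use) indices. A minimal user-space sketch of the same arithmetic, with hypothetical names standing in for the driver's ring struct:

#include <stdio.h>

/* Model of a descriptor ring: count slots plus producer/consumer
 * indices, as in the e1000 tx/rx rings. */
struct ring {
	unsigned int count;
	unsigned int next_to_use;    /* producer */
	unsigned int next_to_clean;  /* consumer */
};

/* Same arithmetic as the driver's E1000_DESC_UNUSED(): one slot is
 * always kept empty so head == tail means "empty", never "full". */
static unsigned int desc_unused(const struct ring *r)
{
	if (r->next_to_clean > r->next_to_use)
		return r->next_to_clean - r->next_to_use - 1;
	return r->count + r->next_to_clean - r->next_to_use - 1;
}

int main(void)
{
	struct ring r = { .count = 256, .next_to_use = 10, .next_to_clean = 5 };
	printf("unused = %u\n", desc_unused(&r)); /* prints 250 */
	return 0;
}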
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 5cedc81786e3..c7b47911f003 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -97,14 +97,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
 	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
 };
 
-#ifdef CONFIG_E1000_MQ
-#define E1000_QUEUE_STATS_LEN \
-	(((struct e1000_adapter *)netdev->priv)->num_tx_queues + \
-	 ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \
-	* (sizeof(struct e1000_queue_stats) / sizeof(uint64_t))
-#else
 #define E1000_QUEUE_STATS_LEN 0
-#endif
 #define E1000_GLOBAL_STATS_LEN \
 	sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
 #define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
@@ -1799,11 +1792,6 @@ e1000_get_ethtool_stats(struct net_device *netdev,
 		struct ethtool_stats *stats, uint64_t *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_E1000_MQ
-	uint64_t *queue_stat;
-	int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t);
-	int j, k;
-#endif
 	int i;
 
 	e1000_update_stats(adapter);
@@ -1812,29 +1800,12 @@ e1000_get_ethtool_stats(struct net_device *netdev,
 		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
 			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
 	}
-#ifdef CONFIG_E1000_MQ
-	for (j = 0; j < adapter->num_tx_queues; j++) {
-		queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats;
-		for (k = 0; k < stat_count; k++)
-			data[i + k] = queue_stat[k];
-		i += k;
-	}
-	for (j = 0; j < adapter->num_rx_queues; j++) {
-		queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats;
-		for (k = 0; k < stat_count; k++)
-			data[i + k] = queue_stat[k];
-		i += k;
-	}
-#endif
 /*	BUG_ON(i != E1000_STATS_LEN); */
 }
 
 static void
 e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 {
-#ifdef CONFIG_E1000_MQ
-	struct e1000_adapter *adapter = netdev_priv(netdev);
-#endif
 	uint8_t *p = data;
 	int i;
 
@@ -1849,20 +1820,6 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
-#ifdef CONFIG_E1000_MQ
-		for (i = 0; i < adapter->num_tx_queues; i++) {
-			sprintf(p, "tx_queue_%u_packets", i);
-			p += ETH_GSTRING_LEN;
-			sprintf(p, "tx_queue_%u_bytes", i);
-			p += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < adapter->num_rx_queues; i++) {
-			sprintf(p, "rx_queue_%u_packets", i);
-			p += ETH_GSTRING_LEN;
-			sprintf(p, "rx_queue_%u_bytes", i);
-			p += ETH_GSTRING_LEN;
-		}
-#endif
 /*		BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
 	}
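With the per-queue branches gone, E1000_QUEUE_STATS_LEN is a constant 0, and the two commented-out BUG_ON lines guard the same invariant: e1000_get_strings() and e1000_get_ethtool_stats() must both walk exactly E1000_STATS_LEN entries. A reduced user-space sketch of the sizeof-based length pattern (table contents hypothetical, not the driver's full list):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the driver's pattern: one table of { name, size } stat
 * descriptors defines both the string list and the data array length. */
struct stat_desc {
	const char *name;
	int sizeof_stat;
};

static const struct stat_desc gstrings_stats[] = {
	{ "rx_packets", sizeof(uint64_t) },
	{ "tx_packets", sizeof(uint64_t) },
	{ "alloc_rx_buff_failed", sizeof(uint32_t) },
};

#define GLOBAL_STATS_LEN (sizeof(gstrings_stats) / sizeof(gstrings_stats[0]))
#define QUEUE_STATS_LEN  0	/* as in the patched driver */
#define STATS_LEN (GLOBAL_STATS_LEN + QUEUE_STATS_LEN)

int main(void)
{
	/* get_strings and get_ethtool_stats must both emit STATS_LEN
	 * entries; this pairing is what the BUG_ON comments assert. */
	printf("ethtool reports %zu stats\n", (size_t)STATS_LEN);
	return 0;
}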
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5b7d0f425af2..af87eb04d832 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -103,7 +103,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "6.3.9-k4"DRIVERNAPI
+#define DRV_VERSION "7.0.33-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 
@@ -191,9 +191,6 @@ static void e1000_exit_module(void);
 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void __devexit e1000_remove(struct pci_dev *pdev);
 static int e1000_alloc_queues(struct e1000_adapter *adapter);
-#ifdef CONFIG_E1000_MQ
-static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
-#endif
 static int e1000_sw_init(struct e1000_adapter *adapter);
 static int e1000_open(struct net_device *netdev);
 static int e1000_close(struct net_device *netdev);
@@ -265,10 +262,6 @@ static int e1000_resume(struct pci_dev *pdev);
 static void e1000_netpoll (struct net_device *netdev);
 #endif
 
-#ifdef CONFIG_E1000_MQ
-/* for multiple Rx queues */
-void e1000_rx_schedule(void *data);
-#endif
 
 /* Exported from other modules */
 
@@ -502,10 +495,6 @@ e1000_up(struct e1000_adapter *adapter)
 		return err;
 	}
 
-#ifdef CONFIG_E1000_MQ
-	e1000_setup_queue_mapping(adapter);
-#endif
-
 	adapter->tx_queue_len = netdev->tx_queue_len;
 
 	mod_timer(&adapter->watchdog_timer, jiffies);
@@ -526,9 +515,7 @@ e1000_down(struct e1000_adapter *adapter)
 	e1000_check_mng_mode(&adapter->hw);
 
 	e1000_irq_disable(adapter);
-#ifdef CONFIG_E1000_MQ
-	while (atomic_read(&adapter->rx_sched_call_data.count) != 0);
-#endif
+
 	free_irq(adapter->pdev->irq, netdev);
 #ifdef CONFIG_PCI_MSI
 	if (adapter->hw.mac_type > e1000_82547_rev_2 &&
@@ -972,10 +959,6 @@ e1000_remove(struct pci_dev *pdev)
 	iounmap(adapter->hw.hw_addr);
 	pci_release_regions(pdev);
 
-#ifdef CONFIG_E1000_MQ
-	free_percpu(adapter->cpu_netdev);
-	free_percpu(adapter->cpu_tx_ring);
-#endif
 	free_netdev(netdev);
 
 	pci_disable_device(pdev);
@@ -1056,40 +1039,8 @@ e1000_sw_init(struct e1000_adapter *adapter)
 		hw->master_slave = E1000_MASTER_SLAVE;
 	}
 
-#ifdef CONFIG_E1000_MQ
-	/* Number of supported queues */
-	switch (hw->mac_type) {
-	case e1000_82571:
-	case e1000_82572:
-		/* These controllers support 2 tx queues, but with a single
-		 * qdisc implementation, multiple tx queues aren't quite as
-		 * interesting.  If we can find a logical way of mapping
-		 * flows to a queue, then perhaps we can up the num_tx_queue
-		 * count back to its default.  Until then, we run the risk of
-		 * terrible performance due to SACK overload. */
-		adapter->num_tx_queues = 1;
-		adapter->num_rx_queues = 2;
-		break;
-	default:
-		adapter->num_tx_queues = 1;
-		adapter->num_rx_queues = 1;
-		break;
-	}
-	adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
-	adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
-	DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
-		adapter->num_rx_queues,
-		((adapter->num_rx_queues == 1)
-		 ? ((num_online_cpus() > 1)
-			? "(due to unsupported feature in current adapter)"
-			: "(due to unsupported system configuration)")
-		 : ""));
-	DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
-		adapter->num_tx_queues);
-#else
 	adapter->num_tx_queues = 1;
 	adapter->num_rx_queues = 1;
-#endif
 
 	if (e1000_alloc_queues(adapter)) {
 		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
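The deleted block above chose two Rx queues on 82571/82572 parts and then clamped both counts to the number of online CPUs. A user-space sketch of that selection logic, with hypothetical stand-ins for the mac type and CPU count:

#include <stdio.h>

enum mac_type { e1000_82540, e1000_82571, e1000_82572 };

static int min_int(int a, int b) { return a < b ? a : b; }

/* Mirrors the removed e1000_sw_init() logic: 82571/82572 could use a
 * second Rx queue; everything else stays single-queue, and both
 * counts are capped at the number of online CPUs. */
static void pick_queue_counts(enum mac_type mac, int online_cpus,
			      int *num_tx, int *num_rx)
{
	switch (mac) {
	case e1000_82571:
	case e1000_82572:
		*num_tx = 1;	/* single qdisc made multiple Tx queues unattractive */
		*num_rx = 2;
		break;
	default:
		*num_tx = 1;
		*num_rx = 1;
		break;
	}
	*num_rx = min_int(*num_rx, online_cpus);
	*num_tx = min_int(*num_tx, online_cpus);
}

int main(void)
{
	int tx, rx;

	pick_queue_counts(e1000_82571, 4, &tx, &rx);
	printf("82571, 4 cpus: tx=%d rx=%d\n", tx, rx);	/* tx=1 rx=2 */
	pick_queue_counts(e1000_82571, 1, &tx, &rx);
	printf("82571, 1 cpu:  tx=%d rx=%d\n", tx, rx);	/* tx=1 rx=1 */
	return 0;
}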
@@ -1152,51 +1103,9 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
 	memset(adapter->polling_netdev, 0, size);
 #endif
 
-#ifdef CONFIG_E1000_MQ
-	adapter->rx_sched_call_data.func = e1000_rx_schedule;
-	adapter->rx_sched_call_data.info = adapter->netdev;
-
-	adapter->cpu_netdev = alloc_percpu(struct net_device *);
-	adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
-#endif
-
 	return E1000_SUCCESS;
 }
 
-#ifdef CONFIG_E1000_MQ
-static void __devinit
-e1000_setup_queue_mapping(struct e1000_adapter *adapter)
-{
-	int i, cpu;
-
-	adapter->rx_sched_call_data.func = e1000_rx_schedule;
-	adapter->rx_sched_call_data.info = adapter->netdev;
-	cpus_clear(adapter->rx_sched_call_data.cpumask);
-
-	adapter->cpu_netdev = alloc_percpu(struct net_device *);
-	adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
-
-	lock_cpu_hotplug();
-	i = 0;
-	for_each_online_cpu(cpu) {
-		*per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
-		/* This is incomplete because we'd like to assign separate
-		 * physical cpus to these netdev polling structures and
-		 * avoid saturating a subset of cpus.
-		 */
-		if (i < adapter->num_rx_queues) {
-			*per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
-			adapter->rx_ring[i].cpu = cpu;
-			cpu_set(cpu, adapter->cpumask);
-		} else
-			*per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
-
-		i++;
-	}
-	unlock_cpu_hotplug();
-}
-#endif
-
 /**
  * e1000_open - Called when a network interface is made active
  * @netdev: network interface device structure
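e1000_setup_queue_mapping(), deleted above, assigned each online CPU a Tx ring round-robin and gave the first num_rx_queues CPUs a polling netdev. A user-space model of that assignment, with plain arrays standing in for the alloc_percpu()/per_cpu_ptr() machinery:

#include <stdio.h>

#define MAX_CPUS 8

/* Arrays indexed by CPU stand in for the per-cpu allocations. */
static int cpu_tx_ring[MAX_CPUS];	/* Tx ring index per CPU */
static int cpu_poll_netdev[MAX_CPUS];	/* Rx polling netdev index, -1 = none */

static void setup_queue_mapping(int online_cpus, int num_tx, int num_rx)
{
	int i;

	for (i = 0; i < online_cpus; i++) {
		cpu_tx_ring[i] = i % num_tx;	/* round-robin over Tx rings */
		/* Only the first num_rx CPUs get an Rx polling device,
		 * matching the removed for_each_online_cpu() loop. */
		cpu_poll_netdev[i] = (i < num_rx) ? i : -1;
	}
}

int main(void)
{
	int cpu;

	setup_queue_mapping(4, 1, 2);
	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu%d: tx_ring=%d poll_netdev=%d\n",
		       cpu, cpu_tx_ring[cpu], cpu_poll_netdev[cpu]);
	return 0;
}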
@@ -1435,18 +1344,6 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 
 	switch (adapter->num_tx_queues) {
-	case 2:
-		tdba = adapter->tx_ring[1].dma;
-		tdlen = adapter->tx_ring[1].count *
-			sizeof(struct e1000_tx_desc);
-		E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
-		E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32));
-		E1000_WRITE_REG(hw, TDLEN1, tdlen);
-		E1000_WRITE_REG(hw, TDH1, 0);
-		E1000_WRITE_REG(hw, TDT1, 0);
-		adapter->tx_ring[1].tdh = E1000_TDH1;
-		adapter->tx_ring[1].tdt = E1000_TDT1;
-		/* Fall Through */
 	case 1:
 	default:
 		tdba = adapter->tx_ring[0].dma;
@@ -1790,10 +1687,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 	uint64_t rdba;
 	struct e1000_hw *hw = &adapter->hw;
 	uint32_t rdlen, rctl, rxcsum, ctrl_ext;
-#ifdef CONFIG_E1000_MQ
-	uint32_t reta, mrqc;
-	int i;
-#endif
 
 	if (adapter->rx_ps_pages) {
 		rdlen = adapter->rx_ring[0].count *
@@ -1837,18 +1730,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
 	switch (adapter->num_rx_queues) {
-#ifdef CONFIG_E1000_MQ
-	case 2:
-		rdba = adapter->rx_ring[1].dma;
-		E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
-		E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
-		E1000_WRITE_REG(hw, RDLEN1, rdlen);
-		E1000_WRITE_REG(hw, RDH1, 0);
-		E1000_WRITE_REG(hw, RDT1, 0);
-		adapter->rx_ring[1].rdh = E1000_RDH1;
-		adapter->rx_ring[1].rdt = E1000_RDT1;
-		/* Fall Through */
-#endif
 	case 1:
 	default:
 		rdba = adapter->rx_ring[0].dma;
@@ -1862,46 +1743,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 		break;
 	}
 
-#ifdef CONFIG_E1000_MQ
-	if (adapter->num_rx_queues > 1) {
-		uint32_t random[10];
-
-		get_random_bytes(&random[0], 40);
-
-		if (hw->mac_type <= e1000_82572) {
-			E1000_WRITE_REG(hw, RSSIR, 0);
-			E1000_WRITE_REG(hw, RSSIM, 0);
-		}
-
-		switch (adapter->num_rx_queues) {
-		case 2:
-		default:
-			reta = 0x00800080;
-			mrqc = E1000_MRQC_ENABLE_RSS_2Q;
-			break;
-		}
-
-		/* Fill out redirection table */
-		for (i = 0; i < 32; i++)
-			E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
-		/* Fill out hash function seeds */
-		for (i = 0; i < 10; i++)
-			E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);
-
-		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
-			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
-		E1000_WRITE_REG(hw, MRQC, mrqc);
-	}
-
-	/* Multiqueue and packet checksumming are mutually exclusive. */
-	if (hw->mac_type >= e1000_82571) {
-		rxcsum = E1000_READ_REG(hw, RXCSUM);
-		rxcsum |= E1000_RXCSUM_PCSD;
-		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
-	}
-
-#else
-
 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
 	if (hw->mac_type >= e1000_82543) {
 		rxcsum = E1000_READ_REG(hw, RXCSUM);
@@ -1920,7 +1761,6 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 		}
 		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
 	}
-#endif /* CONFIG_E1000_MQ */
 
 	if (hw->mac_type == e1000_82573)
 		E1000_WRITE_REG(hw, ERT, 0x0100);
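The removed RSS block programmed a two-queue setup: the redirection table (RETA) written 32 times with 0x00800080 (four one-byte entries per 32-bit register, presumably spreading flows between queues 0 and 1), the hash key (RSSRK) seeded with 40 random bytes, and MRQC enabling IPv4/IPv4-TCP hashing. A user-space model of just the register fill, with arrays standing in for the hardware banks:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Arrays stand in for the RETA and RSSRK register banks. */
static uint32_t reta[32];	/* redirection table: 128 one-byte entries */
static uint32_t rssrk[10];	/* 40-byte RSS hash key */

int main(void)
{
	const uint32_t reta_val = 0x00800080;	/* value the removed code used */
	int i;

	/* Stand-in for get_random_bytes(&random[0], 40). */
	for (i = 0; i < 10; i++)
		rssrk[i] = ((uint32_t)rand() << 16) ^ (uint32_t)rand();

	/* Fill out redirection table, as the removed loop did. */
	for (i = 0; i < 32; i++)
		reta[i] = reta_val;

	printf("RETA[0]=%08x RSSRK[0]=%08x\n", reta[0], rssrk[0]);
	return 0;
}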
@@ -2465,9 +2305,6 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
 
 	e1000_update_adaptive(&adapter->hw);
 
-#ifdef CONFIG_E1000_MQ
-	txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
-#endif
 	if (!netif_carrier_ok(netdev)) {
 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
 			/* We've lost link, so the controller stops DMA,
@@ -2881,11 +2718,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int f;
 	len -= skb->data_len;
 
-#ifdef CONFIG_E1000_MQ
-	tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
-#else
 	tx_ring = adapter->tx_ring;
-#endif
 
 	if (unlikely(skb->len <= 0)) {
 		dev_kfree_skb_any(skb);
@@ -3288,29 +3121,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
-#ifdef CONFIG_E1000_MQ
-void
-e1000_rx_schedule(void *data)
-{
-	struct net_device *poll_dev, *netdev = data;
-	struct e1000_adapter *adapter = netdev->priv;
-	int this_cpu = get_cpu();
-
-	poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
-	if (poll_dev == NULL) {
-		put_cpu();
-		return;
-	}
-
-	if (likely(netif_rx_schedule_prep(poll_dev)))
-		__netif_rx_schedule(poll_dev);
-	else
-		e1000_irq_enable(adapter);
-
-	put_cpu();
-}
-#endif
-
 /**
  * e1000_intr - Interrupt Handler
  * @irq: interrupt number
@@ -3355,26 +3165,11 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 		E1000_WRITE_REG(hw, IMC, ~0);
 		E1000_WRITE_FLUSH(hw);
 	}
-#ifdef CONFIG_E1000_MQ
-	if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
-		/* We must setup the cpumask once count == 0 since
-		 * each cpu bit is cleared when the work is done. */
-		adapter->rx_sched_call_data.cpumask = adapter->cpumask;
-		atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
-		atomic_set(&adapter->rx_sched_call_data.count,
-			   adapter->num_rx_queues);
-		smp_call_async_mask(&adapter->rx_sched_call_data);
-	} else {
-		printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
-	}
-#else /* if !CONFIG_E1000_MQ */
 	if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
 		__netif_rx_schedule(&adapter->polling_netdev[0]);
 	else
 		e1000_irq_enable(adapter);
-#endif /* CONFIG_E1000_MQ */
-
-#else /* if !CONFIG_E1000_NAPI */
+#else
 	/* Writing IMC and IMS is needed for 82547.
 	 * Due to Hub Link bus being occupied, an interrupt
 	 * de-assertion message is not able to be sent.
@@ -3398,7 +3193,7 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
 		e1000_irq_enable(adapter);
 
-#endif /* CONFIG_E1000_NAPI */
+#endif
 
 	return IRQ_HANDLED;
 }
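The surviving single-queue path above shows the usual NAPI arming idiom: netif_rx_schedule_prep() atomically claims the poll, and a caller whose claim fails just re-enables interrupts instead of double-scheduling. A user-space model of that test-and-set gate (all names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag poll_claimed = ATOMIC_FLAG_INIT;

/* Stand-in for netif_rx_schedule_prep(): returns nonzero only for
 * the caller that transitions the flag from clear to set. */
static int rx_schedule_prep(void)
{
	return !atomic_flag_test_and_set(&poll_claimed);
}

static void irq_handler(void)
{
	if (rx_schedule_prep())
		printf("schedule poll\n");	/* __netif_rx_schedule() */
	else
		printf("re-enable irq\n");	/* e1000_irq_enable() */
}

int main(void)
{
	irq_handler();	/* first interrupt claims the poll */
	irq_handler();	/* poll still pending: just re-enable */
	return 0;
}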
@@ -3486,18 +3281,12 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 			buffer_info = &tx_ring->buffer_info[i];
 			cleaned = (i == eop);
 
-#ifdef CONFIG_E1000_MQ
-			tx_ring->tx_stats.bytes += buffer_info->length;
-#endif
 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
 			memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
 
 			if (unlikely(++i == tx_ring->count)) i = 0;
 		}
 
-#ifdef CONFIG_E1000_MQ
-		tx_ring->tx_stats.packets++;
-#endif
 
 		eop = tx_ring->buffer_info[i].next_to_watch;
 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
@@ -3733,10 +3522,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		}
 #endif /* CONFIG_E1000_NAPI */
 		netdev->last_rx = jiffies;
-#ifdef CONFIG_E1000_MQ
-		rx_ring->rx_stats.packets++;
-		rx_ring->rx_stats.bytes += length;
-#endif
 
 next_desc:
 		rx_desc->status = 0;
@@ -3878,10 +3663,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		}
 #endif /* CONFIG_E1000_NAPI */
 		netdev->last_rx = jiffies;
-#ifdef CONFIG_E1000_MQ
-		rx_ring->rx_stats.packets++;
-		rx_ring->rx_stats.bytes += length;
-#endif
 
 next_desc:
 		rx_desc->wb.middle.status_error &= ~0xFF;
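For reference, the rx_stats/tx_stats counters deleted throughout this patch followed one pattern: bump packets and bytes per completed descriptor in the clean/receive loops, then flatten each per-ring struct into the ethtool data array as consecutive uint64_t values. A condensed user-space sketch of that accumulation (the struct layout is copied from the removed code; the surrounding functions are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Same layout as the removed struct e1000_queue_stats. */
struct e1000_queue_stats {
	uint64_t packets;
	uint64_t bytes;
};

static struct e1000_queue_stats rx_stats[2];	/* one per Rx ring */

/* What the removed lines in e1000_clean_rx_irq() did per frame. */
static void account_rx(int queue, uint32_t length)
{
	rx_stats[queue].packets++;
	rx_stats[queue].bytes += length;
}

int main(void)
{
	uint64_t *qs;
	int q, k;

	account_rx(0, 1514);
	account_rx(1, 60);

	/* Flattening step from e1000_get_ethtool_stats(): treat each
	 * struct as an array of uint64_t. */
	for (q = 0; q < 2; q++) {
		qs = (uint64_t *)&rx_stats[q];
		for (k = 0; k < 2; k++)
			printf("rx_queue_%d stat[%d] = %llu\n", q, k,
			       (unsigned long long)qs[k]);
	}
	return 0;
}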