author		Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>	2005-10-04 07:03:23 -0400
committer	Jeff Garzik <jgarzik@pobox.com>	2005-10-04 07:03:23 -0400
commit		24025e4ecf88743e1b3d46451b0e3f9de4bbcba5 (patch)
tree		4ab263116b31128c40dcad3e22b26a46fae87ab4 /drivers/net/e1000/e1000_main.c
parent		581d708eb47cccb5f41bc0817e50c9b004011ba8 (diff)
e1000: implementation of the multi-queue feature
Signed-off-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
Signed-off-by: Ganesh Venkatesan <ganesh.venkatesan@intel.com>
Signed-off-by: John Ronciak <john.ronciak@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--	drivers/net/e1000/e1000_main.c	191
1 file changed, 188 insertions(+), 3 deletions(-)
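
With CONFIG_E1000_MQ enabled, this patch brings up a second Tx/Rx queue pair on 82571/82572 parts, spreads receive traffic across the Rx queues via RSS, assigns every online CPU its own Tx ring, and binds each Rx queue's NAPI polling to a specific CPU through a cross-CPU call issued from the interrupt handler. Without the option the driver retains its single-queue behavior.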
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 5145b7345c22..ce1044a80bd2 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -195,6 +195,11 @@ static int e1000_resume(struct pci_dev *pdev);
 static void e1000_netpoll (struct net_device *netdev);
 #endif
 
+#ifdef CONFIG_E1000_MQ
+/* for multiple Rx queues */
+void e1000_rx_schedule(void *data);
+#endif
+
 /* Exported from other modules */
 
 extern void e1000_check_options(struct e1000_adapter *adapter);
@@ -368,6 +373,9 @@ e1000_down(struct e1000_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 
 	e1000_irq_disable(adapter);
+#ifdef CONFIG_E1000_MQ
+	while (atomic_read(&adapter->rx_sched_call_data.count) != 0);
+#endif
 	free_irq(adapter->pdev->irq, netdev);
 #ifdef CONFIG_PCI_MSI
 	if(adapter->hw.mac_type > e1000_82547_rev_2 &&
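
The empty while loop added to e1000_down() spins until the count of outstanding e1000_rx_schedule() cross-CPU calls drains to zero, so no scheduled callback can still be running when free_irq() tears the interrupt down.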
@@ -810,9 +818,19 @@ e1000_remove(struct pci_dev *pdev)
 	if(!e1000_check_phy_reset_block(&adapter->hw))
 		e1000_phy_hw_reset(&adapter->hw);
 
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+#ifdef CONFIG_E1000_NAPI
+	kfree(adapter->polling_netdev);
+#endif
+
 	iounmap(adapter->hw.hw_addr);
 	pci_release_regions(pdev);
 
+#ifdef CONFIG_E1000_MQ
+	free_percpu(adapter->cpu_netdev);
+	free_percpu(adapter->cpu_tx_ring);
+#endif
 	free_netdev(netdev);
 
 	pci_disable_device(pdev);
@@ -893,7 +911,21 @@ e1000_sw_init(struct e1000_adapter *adapter)
 		hw->master_slave = E1000_MASTER_SLAVE;
 	}
 
+#ifdef CONFIG_E1000_MQ
+	/* Number of supported queues */
+	switch (hw->mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+		adapter->num_queues = 2;
+		break;
+	default:
+		adapter->num_queues = 1;
+		break;
+	}
+	adapter->num_queues = min(adapter->num_queues, num_online_cpus());
+#else
 	adapter->num_queues = 1;
+#endif
 
 	if (e1000_alloc_queues(adapter)) {
 		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
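
Note the clamp at the end of the MQ branch: adapter->num_queues is limited to num_online_cpus(), so a dual-queue 82571 in a uniprocessor system still runs single-queue; there would be no second CPU to poll the extra ring.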
@@ -909,6 +941,11 @@ e1000_sw_init(struct e1000_adapter *adapter)
 		set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
 	}
 #endif
+
+#ifdef CONFIG_E1000_MQ
+	e1000_setup_queue_mapping(adapter);
+#endif
+
 	atomic_set(&adapter->irq_sem, 1);
 	spin_lock_init(&adapter->stats_lock);
 
@@ -957,6 +994,39 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
 	return E1000_SUCCESS;
 }
 
+#ifdef CONFIG_E1000_MQ
+static void __devinit
+e1000_setup_queue_mapping(struct e1000_adapter *adapter)
+{
+	int i, cpu;
+
+	adapter->rx_sched_call_data.func = e1000_rx_schedule;
+	adapter->rx_sched_call_data.info = adapter->netdev;
+	cpus_clear(adapter->rx_sched_call_data.cpumask);
+
+	adapter->cpu_netdev = alloc_percpu(struct net_device *);
+	adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
+
+	lock_cpu_hotplug();
+	i = 0;
+	for_each_online_cpu(cpu) {
+		*per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues];
+		/* This is incomplete because we'd like to assign separate
+		 * physical cpus to these netdev polling structures and
+		 * avoid saturating a subset of cpus.
+		 */
+		if (i < adapter->num_queues) {
+			*per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
+			adapter->cpu_for_queue[i] = cpu;
+		} else
+			*per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
+
+		i++;
+	}
+	unlock_cpu_hotplug();
+}
+#endif
+
 /**
  * e1000_open - Called when a network interface is made active
  * @netdev: network interface device structure
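
e1000_setup_queue_mapping() walks the online CPUs, giving every CPU a Tx ring (round-robin, modulo num_queues) and giving only the first num_queues CPUs a polling netdev. A minimal userspace C sketch of that assignment; NUM_CPUS and NUM_QUEUES are hypothetical values standing in for the kernel's for_each_online_cpu() walk and adapter->num_queues:

#include <stdio.h>

#define NUM_CPUS   4	/* hypothetical; stands in for for_each_online_cpu() */
#define NUM_QUEUES 2	/* 82571/82572 support two queue pairs */

int main(void)
{
	for (int cpu = 0; cpu < NUM_CPUS; cpu++) {
		int tx = cpu % NUM_QUEUES;            /* every CPU gets a Tx ring */
		int rx = cpu < NUM_QUEUES ? cpu : -1; /* only the first N CPUs poll Rx */
		printf("cpu %d: tx_ring[%d], rx poll netdev %d\n", cpu, tx, rx);
	}
	return 0;
}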
@@ -1178,8 +1248,21 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 
-	E1000_WRITE_REG(&adapter->hw, TDH, 0);
-	E1000_WRITE_REG(&adapter->hw, TDT, 0);
+	switch (adapter->num_queues) {
+	case 2:
+		tdba = adapter->tx_ring[1].dma;
+		tdlen = adapter->tx_ring[1].count *
+			sizeof(struct e1000_tx_desc);
+		E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32));
+		E1000_WRITE_REG(hw, TDLEN1, tdlen);
+		E1000_WRITE_REG(hw, TDH1, 0);
+		E1000_WRITE_REG(hw, TDT1, 0);
+		adapter->tx_ring[1].tdh = E1000_TDH1;
+		adapter->tx_ring[1].tdt = E1000_TDT1;
+		/* Fall Through */
+	case 1:
+	default:
 	tdba = adapter->tx_ring[0].dma;
 	tdlen = adapter->tx_ring[0].count *
 		sizeof(struct e1000_tx_desc);
@@ -1190,6 +1273,8 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 	E1000_WRITE_REG(hw, TDT, 0);
 	adapter->tx_ring[0].tdh = E1000_TDH;
 	adapter->tx_ring[0].tdt = E1000_TDT;
+	break;
+	}
 
 	/* Set the default values for the Tx Inter Packet Gap timer */
 
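
The case 2 arm programs the second ring's base, length, head, and tail (TDBAL1/TDBAH1/TDLEN1/TDH1/TDT1) and then falls through to program ring 0. Each ring's 64-bit DMA base goes into a low/high register pair; a small sketch of that split, using a hypothetical base address:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t tdba = 0x00000001234f8000ULL; /* hypothetical ring base */
	uint32_t tdbal = (uint32_t)(tdba & 0x00000000ffffffffULL);
	uint32_t tdbah = (uint32_t)(tdba >> 32);

	printf("TDBAL1 = 0x%08" PRIx32 ", TDBAH1 = 0x%08" PRIx32 "\n", tdbal, tdbah);
	return 0;
}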
@@ -1222,7 +1307,7 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 	tctl = E1000_READ_REG(hw, TCTL);
 
 	tctl &= ~E1000_TCTL_CT;
-	tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
+	tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
 
 	E1000_WRITE_REG(hw, TCTL, tctl);
@@ -1518,6 +1603,21 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
+	switch (adapter->num_queues) {
+#ifdef CONFIG_E1000_MQ
+	case 2:
+		rdba = adapter->rx_ring[1].dma;
+		E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
+		E1000_WRITE_REG(hw, RDLEN1, rdlen);
+		E1000_WRITE_REG(hw, RDH1, 0);
+		E1000_WRITE_REG(hw, RDT1, 0);
+		adapter->rx_ring[1].rdh = E1000_RDH1;
+		adapter->rx_ring[1].rdt = E1000_RDT1;
+		/* Fall Through */
+#endif
+	case 1:
+	default:
 	rdba = adapter->rx_ring[0].dma;
 	E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
 	E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
@@ -1527,6 +1627,47 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 	adapter->rx_ring[0].rdh = E1000_RDH;
 	adapter->rx_ring[0].rdt = E1000_RDT;
 	break;
+	}
+
+#ifdef CONFIG_E1000_MQ
+	if (adapter->num_queues > 1) {
+		uint32_t random[10];
+
+		get_random_bytes(&random[0], 40);
+
+		if (hw->mac_type <= e1000_82572) {
+			E1000_WRITE_REG(hw, RSSIR, 0);
+			E1000_WRITE_REG(hw, RSSIM, 0);
+		}
+
+		switch (adapter->num_queues) {
+		case 2:
+		default:
+			reta = 0x00800080;
+			mrqc = E1000_MRQC_ENABLE_RSS_2Q;
+			break;
+		}
+
+		/* Fill out redirection table */
+		for (i = 0; i < 32; i++)
+			E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
+		/* Fill out hash function seeds */
+		for (i = 0; i < 10; i++)
+			E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);
+
+		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
+		E1000_WRITE_REG(hw, MRQC, mrqc);
+	}
+
+	/* Multiqueue and packet checksumming are mutually exclusive. */
+	if (hw->mac_type >= e1000_82571) {
+		rxcsum = E1000_READ_REG(hw, RXCSUM);
+		rxcsum |= E1000_RXCSUM_PCSD;
+		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
+	}
+
+#else
 
 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
 	if (hw->mac_type >= e1000_82543) {
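
The RSS setup seeds the hash key with 40 random bytes and writes the constant 0x00800080 into all 32 dwords of the redirection table, yielding 128 byte-wide entries that alternate 0x80/0x00. Assuming the high bit of an entry selects queue 1 (an interpretation not stated in the patch itself), hashed flows alternate between the two Rx queues. A userspace sketch expanding the dwords the way the hardware indexes them:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t reta_regs[32];
	uint8_t  entries[128];

	for (int i = 0; i < 32; i++)
		reta_regs[i] = 0x00800080; /* the constant written in the patch */

	/* One byte per table entry, little-endian dword layout. */
	for (int i = 0; i < 128; i++)
		entries[i] = (uint8_t)(reta_regs[i / 4] >> (8 * (i % 4)));

	for (int i = 0; i < 8; i++) /* prints 0x80 0x00 0x80 0x00 ... */
		printf("entry %2d -> 0x%02x\n", i, entries[i]);
	return 0;
}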
@@ -1546,6 +1687,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 		}
 		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
 	}
+#endif /* CONFIG_E1000_MQ */
 
 	if (hw->mac_type == e1000_82573)
 		E1000_WRITE_REG(hw, ERT, 0x0100);
@@ -2488,7 +2630,12 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int f;
 	len -= skb->data_len;
 
+#ifdef CONFIG_E1000_MQ
+	tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
+#else
 	tx_ring = adapter->tx_ring;
+#endif
+
 	if (unlikely(skb->len <= 0)) {
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
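
The transmit path now picks the ring that e1000_setup_queue_mapping() assigned to the current CPU, so two CPUs can transmit concurrently without contending for one ring. A userspace model of the lookup; sched_getcpu() stands in for smp_processor_id(), and the array stands in for the per-CPU pointers:

#define _GNU_SOURCE   /* for sched_getcpu() */
#include <sched.h>
#include <stdio.h>

#define NUM_CPUS   4  /* hypothetical box */
#define NUM_QUEUES 2

struct tx_ring { int id; };

static struct tx_ring rings[NUM_QUEUES] = { { 0 }, { 1 } };
static struct tx_ring *cpu_tx_ring[NUM_CPUS]; /* models the per-CPU pointers */

int main(void)
{
	for (int cpu = 0; cpu < NUM_CPUS; cpu++)
		cpu_tx_ring[cpu] = &rings[cpu % NUM_QUEUES];

	int raw = sched_getcpu();               /* stand-in for smp_processor_id() */
	int cpu = raw < 0 ? 0 : raw % NUM_CPUS; /* clamped for the sketch */
	printf("xmit on cpu %d uses tx_ring[%d]\n", cpu, cpu_tx_ring[cpu]->id);
	return 0;
}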
@@ -2879,6 +3026,29 @@ e1000_update_stats(struct e1000_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
+#ifdef CONFIG_E1000_MQ
+void
+e1000_rx_schedule(void *data)
+{
+	struct net_device *poll_dev, *netdev = data;
+	struct e1000_adapter *adapter = netdev->priv;
+	int this_cpu = get_cpu();
+
+	poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
+	if (poll_dev == NULL) {
+		put_cpu();
+		return;
+	}
+
+	if (likely(netif_rx_schedule_prep(poll_dev)))
+		__netif_rx_schedule(poll_dev);
+	else
+		e1000_irq_enable(adapter);
+
+	put_cpu();
+}
+#endif
+
 /**
  * e1000_intr - Interrupt Handler
  * @irq: interrupt number
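
e1000_rx_schedule() is the callback fired on each mapped CPU by the cross-CPU call issued in e1000_intr(): it looks up the calling CPU's polling netdev and schedules NAPI there, so each Rx queue is polled on the CPU it was bound to in e1000_setup_queue_mapping(). A CPU with no queue mapped finds a NULL pointer and simply returns; if the poll was already scheduled, e1000_irq_enable() drops the reference instead.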
@@ -2907,12 +3077,27 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
 		atomic_inc(&adapter->irq_sem);
 		E1000_WRITE_REG(hw, IMC, ~0);
 		E1000_WRITE_FLUSH(hw);
+#ifdef CONFIG_E1000_MQ
+		if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
+			cpu_set(adapter->cpu_for_queue[0],
+				adapter->rx_sched_call_data.cpumask);
+			for (i = 1; i < adapter->num_queues; i++) {
+				cpu_set(adapter->cpu_for_queue[i],
+					adapter->rx_sched_call_data.cpumask);
+				atomic_inc(&adapter->irq_sem);
+			}
+			atomic_set(&adapter->rx_sched_call_data.count, i);
+			smp_call_async_mask(&adapter->rx_sched_call_data);
+		} else {
+			printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
 	}
 #else
 	if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
 		__netif_rx_schedule(&adapter->polling_netdev[0]);
 	else
 		e1000_irq_enable(adapter);
+#endif
+#else
 	/* Writing IMC and IMS is needed for 82547.
 	   Due to Hub Link bus being occupied, an interrupt
 	   de-assertion message is not able to be sent.
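
The handler masks the device once and then takes one extra irq_sem reference per additional queue before firing the cross-CPU calls, so interrupts are unmasked only after every queue's poll path has dropped its reference via e1000_irq_enable(). A minimal C model of that counting, with atomic_dec_and_test() approximated by C11 atomics and a hypothetical NUM_QUEUES:

#include <stdatomic.h>
#include <stdio.h>

#define NUM_QUEUES 2

static atomic_int irq_sem; /* 0 == interrupts enabled, as after e1000_up() */

static void irq_enable(void) /* models atomic_dec_and_test() + IMS write */
{
	if (atomic_fetch_sub(&irq_sem, 1) == 1)
		printf("IMS written: interrupts re-enabled\n");
}

int main(void)
{
	atomic_fetch_add(&irq_sem, 1);         /* ISR masks the device once */
	for (int i = 1; i < NUM_QUEUES; i++)
		atomic_fetch_add(&irq_sem, 1); /* one reference per extra queue */

	irq_enable(); /* queue 0 poll completes: 2 -> 1, still masked */
	irq_enable(); /* queue 1 poll completes: 1 -> 0, unmask       */
	return 0;
}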