author	Alexander Duyck <alexander.h.duyck@intel.com>	2008-12-26 04:34:11 -0500
committer	David S. Miller <davem@davemloft.net>	2008-12-26 04:34:11 -0500
commit	26bc19ecf6c077c926827c25631340fd2e8fb16e (patch)
tree	fab4cb0d2c633b5f336046dcd3416d26ef3e08e0 /drivers/net
parent	0e014cb16234c3797aa518d46fe7e1fe91ebcca9 (diff)
igb: re-order queues to support cleaner use of ivar on 82576
The 82576 adapter orders its queues in pairs when virtualization is in use.
The previous queue ordering conflicted with the ordering used when SR-IOV
is enabled. This new ordering allows a PF to allocate 2 queues without
using any VF resources.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
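Concretely, the Q_IDX_82576() mapping introduced below sends software queues
0 and 1 to the hardware queue pair {0, 8}, queues 2 and 3 to {1, 9}, and so
on, so a two-queue PF occupies exactly one pair and leaves the rest free.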
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/igb/igb.h	3
-rw-r--r--	drivers/net/igb/igb_main.c	115
2 files changed, 79 insertions(+), 39 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index c90632524fda..5a27825cc48a 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -159,7 +159,8 @@ struct igb_ring {
 	u16 itr_register;
 	u16 cpu;
 
-	int queue_index;
+	u16 queue_index;
+	u16 reg_idx;
 	unsigned int total_bytes;
 	unsigned int total_packets;
 
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 4962cdfc507c..9331e5212461 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -231,6 +231,40 @@ static void __exit igb_exit_module(void)
 
 module_exit(igb_exit_module);
 
+#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
+/**
+ * igb_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ **/
+static void igb_cache_ring_register(struct igb_adapter *adapter)
+{
+	int i;
+
+	switch (adapter->hw.mac.type) {
+	case e1000_82576:
+		/* The queues are allocated for virtualization such that VF 0
+		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
+		 * In order to avoid collision we start at the first free queue
+		 * and continue consuming queues in the same sequence
+		 */
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].reg_idx = Q_IDX_82576(i);
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].reg_idx = Q_IDX_82576(i);
+		break;
+	case e1000_82575:
+	default:
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i].reg_idx = i;
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i].reg_idx = i;
+		break;
+	}
+}
+
 /**
  * igb_alloc_queues - Allocate memory for all rings
  * @adapter: board private structure to initialize
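The pairing produced by Q_IDX_82576() is easy to tabulate. A standalone
sketch (illustration only, not driver code; the macro is copied verbatim
from the patch above):

	#include <stdio.h>

	#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))

	int main(void)
	{
		int i;

		/* Even software queues fill registers 0,1,2,...; odd ones
		 * fill 8,9,10,...  Queues 2n and 2n+1 therefore share the
		 * hardware pair {n, n+8}, matching the VF layout above. */
		for (i = 0; i < 16; i++)
			printf("queue %2d -> reg_idx %2d\n", i, Q_IDX_82576(i));
		return 0;
	}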
@@ -272,6 +306,8 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		/* set a default napi handler for each rx_ring */
 		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
 	}
+
+	igb_cache_ring_register(adapter);
 	return 0;
 }
 
@@ -312,36 +348,36 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
 		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
 		break;
 	case e1000_82576:
-		/* The 82576 uses a table-based method for assigning vectors.
+		/* 82576 uses a table-based method for assigning vectors.
 		   Each queue has a single entry in the table to which we write
 		   a vector number along with a "valid" bit.  Sadly, the layout
 		   of the table is somewhat counterintuitive. */
 		if (rx_queue > IGB_N0_QUEUE) {
-			index = (rx_queue & 0x7);
+			index = (rx_queue >> 1);
 			ivar = array_rd32(E1000_IVAR0, index);
-			if (rx_queue < 8) {
-				/* vector goes into low byte of register */
-				ivar = ivar & 0xFFFFFF00;
-				ivar |= msix_vector | E1000_IVAR_VALID;
-			} else {
+			if (rx_queue & 0x1) {
 				/* vector goes into third byte of register */
 				ivar = ivar & 0xFF00FFFF;
 				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
+			} else {
+				/* vector goes into low byte of register */
+				ivar = ivar & 0xFFFFFF00;
+				ivar |= msix_vector | E1000_IVAR_VALID;
 			}
 			adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
 		}
 		if (tx_queue > IGB_N0_QUEUE) {
-			index = (tx_queue & 0x7);
+			index = (tx_queue >> 1);
 			ivar = array_rd32(E1000_IVAR0, index);
-			if (tx_queue < 8) {
-				/* vector goes into second byte of register */
-				ivar = ivar & 0xFFFF00FF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
-			} else {
+			if (tx_queue & 0x1) {
 				/* vector goes into high byte of register */
 				ivar = ivar & 0x00FFFFFF;
 				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
+			} else {
+				/* vector goes into second byte of register */
+				ivar = ivar & 0xFFFF00FF;
+				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
 			}
 			adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
 			array_wr32(E1000_IVAR0, index, ivar);
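After this change each IVAR0 entry (indexed by queue >> 1) carries all four
vectors of one queue pair: RX of the even queue in byte 0, TX of the even
queue in byte 1, RX of the odd queue in byte 2, TX of the odd queue in byte
3. A minimal sketch of the RX half, assuming E1000_IVAR_VALID is 0x80 as in
the driver's defines:

	#include <stdio.h>

	#define E1000_IVAR_VALID 0x80	/* assumed from e1000_defines.h */

	/* Mirror of the patch's rx-side packing for one 32-bit entry. */
	static unsigned int ivar_set_rx(unsigned int ivar, int rx_queue,
					int msix_vector)
	{
		if (rx_queue & 0x1) {
			ivar &= 0xFF00FFFF;	/* odd queue: third byte */
			ivar |= (unsigned int)(msix_vector | E1000_IVAR_VALID) << 16;
		} else {
			ivar &= 0xFFFFFF00;	/* even queue: low byte */
			ivar |= msix_vector | E1000_IVAR_VALID;
		}
		return ivar;
	}

	int main(void)
	{
		/* vector 3 for rx queue 5 lands in byte 2 of entry 5 >> 1 = 2,
		 * printing IVAR[2] = 0x00830000 */
		printf("IVAR[%d] = 0x%08x\n", 5 >> 1, ivar_set_rx(0, 5, 3));
		return 0;
	}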
@@ -1638,33 +1674,33 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 tctl;
 	u32 txdctl, txctrl;
-	int i;
+	int i, j;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = &(adapter->tx_ring[i]);
-
-		wr32(E1000_TDLEN(i),
+		j = ring->reg_idx;
+		wr32(E1000_TDLEN(j),
 		     ring->count * sizeof(struct e1000_tx_desc));
 		tdba = ring->dma;
-		wr32(E1000_TDBAL(i),
+		wr32(E1000_TDBAL(j),
 		     tdba & 0x00000000ffffffffULL);
-		wr32(E1000_TDBAH(i), tdba >> 32);
+		wr32(E1000_TDBAH(j), tdba >> 32);
 
-		ring->head = E1000_TDH(i);
-		ring->tail = E1000_TDT(i);
+		ring->head = E1000_TDH(j);
+		ring->tail = E1000_TDT(j);
 		writel(0, hw->hw_addr + ring->tail);
 		writel(0, hw->hw_addr + ring->head);
-		txdctl = rd32(E1000_TXDCTL(i));
+		txdctl = rd32(E1000_TXDCTL(j));
 		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
-		wr32(E1000_TXDCTL(i), txdctl);
+		wr32(E1000_TXDCTL(j), txdctl);
 
 		/* Turn off Relaxed Ordering on head write-backs.  The
 		 * writebacks MUST be delivered in order or it will
 		 * completely screw up our bookeeping.
 		 */
-		txctrl = rd32(E1000_DCA_TXCTRL(i));
+		txctrl = rd32(E1000_DCA_TXCTRL(j));
 		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
-		wr32(E1000_DCA_TXCTRL(i), txctrl);
+		wr32(E1000_DCA_TXCTRL(j), txctrl);
 	}
 
 
@@ -1781,7 +1817,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl;
 	u32 srrctl = 0;
-	int i;
+	int i, j;
 
 	rctl = rd32(E1000_RCTL);
 
@@ -1839,8 +1875,10 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
 		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 	}
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		wr32(E1000_SRRCTL(i), srrctl);
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		j = adapter->rx_ring[i].reg_idx;
+		wr32(E1000_SRRCTL(j), srrctl);
+	}
 
 	wr32(E1000_RCTL, rctl);
 }
@@ -1857,7 +1895,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl, rxcsum;
 	u32 rxdctl;
-	int i;
+	int i, j;
 
 	/* disable receives while setting up the descriptors */
 	rctl = rd32(E1000_RCTL);
@@ -1872,25 +1910,26 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	 * the Base and Length of the Rx Descriptor Ring */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
+		j = ring->reg_idx;
 		rdba = ring->dma;
-		wr32(E1000_RDBAL(i),
+		wr32(E1000_RDBAL(j),
 		     rdba & 0x00000000ffffffffULL);
-		wr32(E1000_RDBAH(i), rdba >> 32);
-		wr32(E1000_RDLEN(i),
+		wr32(E1000_RDBAH(j), rdba >> 32);
+		wr32(E1000_RDLEN(j),
 		     ring->count * sizeof(union e1000_adv_rx_desc));
 
-		ring->head = E1000_RDH(i);
-		ring->tail = E1000_RDT(i);
+		ring->head = E1000_RDH(j);
+		ring->tail = E1000_RDT(j);
 		writel(0, hw->hw_addr + ring->tail);
 		writel(0, hw->hw_addr + ring->head);
 
-		rxdctl = rd32(E1000_RXDCTL(i));
+		rxdctl = rd32(E1000_RXDCTL(j));
 		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
 		rxdctl &= 0xFFF00000;
 		rxdctl |= IGB_RX_PTHRESH;
 		rxdctl |= IGB_RX_HTHRESH << 8;
 		rxdctl |= IGB_RX_WTHRESH << 16;
-		wr32(E1000_RXDCTL(i), rxdctl);
+		wr32(E1000_RXDCTL(j), rxdctl);
 #ifdef CONFIG_IGB_LRO
 		/* Intitial LRO Settings */
 		ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
@@ -1920,7 +1959,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 		shift = 6;
 		for (j = 0; j < (32 * 4); j++) {
 			reta.bytes[j & 3] =
-				(j % adapter->num_rx_queues) << shift;
+				adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
 			if ((j & 3) == 3)
 				writel(reta.dword,
 				       hw->hw_addr + E1000_RETA(0) + (j & ~3));
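With this hunk the RSS redirection table stores hardware register indices
rather than raw ring indices, so hashed flows are steered to the remapped
queues. A sketch of the new indexing (illustration only; the per-byte shift
is omitted, and the reg_idx values assume four RX queues on 82576):

	#include <stdio.h>

	int main(void)
	{
		/* values cached by igb_cache_ring_register() for 4 queues */
		int reg_idx[4] = { 0, 8, 1, 9 };
		int j;

		for (j = 0; j < 8; j++)	/* first 8 of the 128 table slots */
			printf("reta[%d] -> hw queue %d\n", j, reg_idx[j % 4]);
		return 0;
	}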
@@ -3365,7 +3404,7 @@ static void igb_update_rx_dca(struct igb_ring *rx_ring)
 	struct igb_adapter *adapter = rx_ring->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	int cpu = get_cpu();
-	int q = rx_ring - adapter->rx_ring;
+	int q = rx_ring->reg_idx;
 
 	if (rx_ring->cpu != cpu) {
 		dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
@@ -3392,7 +3431,7 @@ static void igb_update_tx_dca(struct igb_ring *tx_ring)
 	struct igb_adapter *adapter = tx_ring->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	int cpu = get_cpu();
-	int q = tx_ring - adapter->tx_ring;
+	int q = tx_ring->reg_idx;
 
 	if (tx_ring->cpu != cpu) {
 		dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
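Finally, the DCA paths stop deriving the register index from the ring's
position in the adapter's array (pointer arithmetic) and use the cached
reg_idx, since the two now differ on 82576. A toy illustration of the
difference, assuming the same four-queue mapping:

	#include <stdio.h>

	struct ring { int reg_idx; };

	int main(void)
	{
		struct ring rx_ring[4] = { {0}, {8}, {1}, {9} };
		struct ring *r = &rx_ring[1];

		/* old code: q = rx_ring - adapter->rx_ring (array offset)
		 * new code: q = rx_ring->reg_idx (hardware queue number) */
		printf("array offset %ld, reg_idx %d\n",
		       (long)(r - rx_ring), r->reg_idx);
		return 0;
	}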