diff options
author | Jesse Brandeburg <jesse.brandeburg@intel.com> | 2008-08-26 07:27:16 -0400 |
---|---|---|
committer | Jeff Garzik <jgarzik@redhat.com> | 2008-09-03 10:03:33 -0400 |
commit | 7c6e0a436d971641d37cebcb12e8cc0c4419b5d4 (patch) | |
tree | c7a32ac6c02c0431477f7df4cf9ac5fbcde54a6d /drivers/net/ixgbe/ixgbe_main.c | |
parent | e01c31a5f7eb4f8a147cf6205f0f2ef11146068d (diff) |
ixgbe: Lock RSS seed, move rx_buf_len to the rx_ring
This locks the RSS seed down so that loading/unloading the driver will
present predictable hashing from RSS. Also move rx_buf_len out of the
adapter struct and into the Rx ring struct.
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r-- | drivers/net/ixgbe/ixgbe_main.c | 102 |
1 files changed, 57 insertions, 45 deletions
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 95d00416093c..b5780991c17b 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -474,15 +474,15 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, | |||
474 | * @adapter: address of board private structure | 474 | * @adapter: address of board private structure |
475 | **/ | 475 | **/ |
476 | static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | 476 | static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, |
477 | struct ixgbe_ring *rx_ring, | 477 | struct ixgbe_ring *rx_ring, |
478 | int cleaned_count) | 478 | int cleaned_count) |
479 | { | 479 | { |
480 | struct net_device *netdev = adapter->netdev; | 480 | struct net_device *netdev = adapter->netdev; |
481 | struct pci_dev *pdev = adapter->pdev; | 481 | struct pci_dev *pdev = adapter->pdev; |
482 | union ixgbe_adv_rx_desc *rx_desc; | 482 | union ixgbe_adv_rx_desc *rx_desc; |
483 | struct ixgbe_rx_buffer *bi; | 483 | struct ixgbe_rx_buffer *bi; |
484 | unsigned int i; | 484 | unsigned int i; |
485 | unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN; | 485 | unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN; |
486 | 486 | ||
487 | i = rx_ring->next_to_use; | 487 | i = rx_ring->next_to_use; |
488 | bi = &rx_ring->rx_buffer_info[i]; | 488 | bi = &rx_ring->rx_buffer_info[i]; |
@@ -498,8 +498,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
498 | goto no_buffers; | 498 | goto no_buffers; |
499 | } | 499 | } |
500 | bi->page_dma = pci_map_page(pdev, bi->page, 0, | 500 | bi->page_dma = pci_map_page(pdev, bi->page, 0, |
501 | PAGE_SIZE, | 501 | PAGE_SIZE, |
502 | PCI_DMA_FROMDEVICE); | 502 | PCI_DMA_FROMDEVICE); |
503 | } | 503 | } |
504 | 504 | ||
505 | if (!bi->skb) { | 505 | if (!bi->skb) { |
@@ -535,6 +535,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, | |||
535 | i = 0; | 535 | i = 0; |
536 | bi = &rx_ring->rx_buffer_info[i]; | 536 | bi = &rx_ring->rx_buffer_info[i]; |
537 | } | 537 | } |
538 | |||
538 | no_buffers: | 539 | no_buffers: |
539 | if (rx_ring->next_to_use != i) { | 540 | if (rx_ring->next_to_use != i) { |
540 | rx_ring->next_to_use = i; | 541 | rx_ring->next_to_use = i; |
@@ -552,9 +553,19 @@ no_buffers: | |||
552 | } | 553 | } |
553 | } | 554 | } |
554 | 555 | ||
556 | static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) | ||
557 | { | ||
558 | return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; | ||
559 | } | ||
560 | |||
561 | static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) | ||
562 | { | ||
563 | return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; | ||
564 | } | ||
565 | |||
555 | static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, | 566 | static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, |
556 | struct ixgbe_ring *rx_ring, | 567 | struct ixgbe_ring *rx_ring, |
557 | int *work_done, int work_to_do) | 568 | int *work_done, int work_to_do) |
558 | { | 569 | { |
559 | struct net_device *netdev = adapter->netdev; | 570 | struct net_device *netdev = adapter->netdev; |
560 | struct pci_dev *pdev = adapter->pdev; | 571 | struct pci_dev *pdev = adapter->pdev; |
@@ -562,36 +573,35 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, | |||
562 | struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; | 573 | struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; |
563 | struct sk_buff *skb; | 574 | struct sk_buff *skb; |
564 | unsigned int i; | 575 | unsigned int i; |
565 | u32 upper_len, len, staterr; | 576 | u32 len, staterr; |
566 | u16 hdr_info; | 577 | u16 hdr_info; |
567 | bool cleaned = false; | 578 | bool cleaned = false; |
568 | int cleaned_count = 0; | 579 | int cleaned_count = 0; |
569 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | 580 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; |
570 | 581 | ||
571 | i = rx_ring->next_to_clean; | 582 | i = rx_ring->next_to_clean; |
572 | upper_len = 0; | ||
573 | rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); | 583 | rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); |
574 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); | 584 | staterr = le32_to_cpu(rx_desc->wb.upper.status_error); |
575 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | 585 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; |
576 | 586 | ||
577 | while (staterr & IXGBE_RXD_STAT_DD) { | 587 | while (staterr & IXGBE_RXD_STAT_DD) { |
588 | u32 upper_len = 0; | ||
578 | if (*work_done >= work_to_do) | 589 | if (*work_done >= work_to_do) |
579 | break; | 590 | break; |
580 | (*work_done)++; | 591 | (*work_done)++; |
581 | 592 | ||
582 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 593 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { |
583 | hdr_info = | 594 | hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); |
584 | le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info); | 595 | len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> |
585 | len = | 596 | IXGBE_RXDADV_HDRBUFLEN_SHIFT; |
586 | ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> | ||
587 | IXGBE_RXDADV_HDRBUFLEN_SHIFT); | ||
588 | if (hdr_info & IXGBE_RXDADV_SPH) | 597 | if (hdr_info & IXGBE_RXDADV_SPH) |
589 | adapter->rx_hdr_split++; | 598 | adapter->rx_hdr_split++; |
590 | if (len > IXGBE_RX_HDR_SIZE) | 599 | if (len > IXGBE_RX_HDR_SIZE) |
591 | len = IXGBE_RX_HDR_SIZE; | 600 | len = IXGBE_RX_HDR_SIZE; |
592 | upper_len = le16_to_cpu(rx_desc->wb.upper.length); | 601 | upper_len = le16_to_cpu(rx_desc->wb.upper.length); |
593 | } else | 602 | } else { |
594 | len = le16_to_cpu(rx_desc->wb.upper.length); | 603 | len = le16_to_cpu(rx_desc->wb.upper.length); |
604 | } | ||
595 | 605 | ||
596 | cleaned = true; | 606 | cleaned = true; |
597 | skb = rx_buffer_info->skb; | 607 | skb = rx_buffer_info->skb; |
@@ -600,8 +610,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, | |||
600 | 610 | ||
601 | if (len && !skb_shinfo(skb)->nr_frags) { | 611 | if (len && !skb_shinfo(skb)->nr_frags) { |
602 | pci_unmap_single(pdev, rx_buffer_info->dma, | 612 | pci_unmap_single(pdev, rx_buffer_info->dma, |
603 | adapter->rx_buf_len + NET_IP_ALIGN, | 613 | rx_ring->rx_buf_len + NET_IP_ALIGN, |
604 | PCI_DMA_FROMDEVICE); | 614 | PCI_DMA_FROMDEVICE); |
605 | skb_put(skb, len); | 615 | skb_put(skb, len); |
606 | } | 616 | } |
607 | 617 | ||
@@ -1415,7 +1425,7 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph, | |||
1415 | union ixgbe_adv_rx_desc *rx_desc = priv; | 1425 | union ixgbe_adv_rx_desc *rx_desc = priv; |
1416 | 1426 | ||
1417 | /* Verify that this is a valid IPv4 TCP packet */ | 1427 | /* Verify that this is a valid IPv4 TCP packet */ |
1418 | if (!(rx_desc->wb.lower.lo_dword.pkt_info & | 1428 | if (!(ixgbe_get_pkt_info(rx_desc) & |
1419 | (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP))) | 1429 | (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP))) |
1420 | return -1; | 1430 | return -1; |
1421 | 1431 | ||
@@ -1442,10 +1452,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
1442 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | 1452 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
1443 | int i, j; | 1453 | int i, j; |
1444 | u32 rdlen, rxctrl, rxcsum; | 1454 | u32 rdlen, rxctrl, rxcsum; |
1445 | u32 random[10]; | 1455 | static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, |
1456 | 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, | ||
1457 | 0x6A3E67EA, 0x14364D17, 0x3BED200D}; | ||
1446 | u32 fctrl, hlreg0; | 1458 | u32 fctrl, hlreg0; |
1447 | u32 pages; | 1459 | u32 pages; |
1448 | u32 reta = 0, mrqc, srrctl; | 1460 | u32 reta = 0, mrqc, srrctl; |
1461 | int rx_buf_len; | ||
1449 | 1462 | ||
1450 | /* Decide whether to use packet split mode or not */ | 1463 | /* Decide whether to use packet split mode or not */ |
1451 | if (netdev->mtu > ETH_DATA_LEN) | 1464 | if (netdev->mtu > ETH_DATA_LEN) |
@@ -1455,12 +1468,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
1455 | 1468 | ||
1456 | /* Set the RX buffer length according to the mode */ | 1469 | /* Set the RX buffer length according to the mode */ |
1457 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 1470 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { |
1458 | adapter->rx_buf_len = IXGBE_RX_HDR_SIZE; | 1471 | rx_buf_len = IXGBE_RX_HDR_SIZE; |
1459 | } else { | 1472 | } else { |
1460 | if (netdev->mtu <= ETH_DATA_LEN) | 1473 | if (netdev->mtu <= ETH_DATA_LEN) |
1461 | adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; | 1474 | rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
1462 | else | 1475 | else |
1463 | adapter->rx_buf_len = ALIGN(max_frame, 1024); | 1476 | rx_buf_len = ALIGN(max_frame, 1024); |
1464 | } | 1477 | } |
1465 | 1478 | ||
1466 | fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); | 1479 | fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); |
@@ -1490,12 +1503,11 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
1490 | } else { | 1503 | } else { |
1491 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; | 1504 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; |
1492 | 1505 | ||
1493 | if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) | 1506 | if (rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) |
1494 | srrctl |= | 1507 | srrctl |= |
1495 | IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; | 1508 | IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
1496 | else | 1509 | else |
1497 | srrctl |= | 1510 | srrctl |= rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
1498 | adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; | ||
1499 | } | 1511 | } |
1500 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl); | 1512 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl); |
1501 | 1513 | ||
@@ -1508,13 +1520,15 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
1508 | * the Base and Length of the Rx Descriptor Ring */ | 1520 | * the Base and Length of the Rx Descriptor Ring */ |
1509 | for (i = 0; i < adapter->num_rx_queues; i++) { | 1521 | for (i = 0; i < adapter->num_rx_queues; i++) { |
1510 | rdba = adapter->rx_ring[i].dma; | 1522 | rdba = adapter->rx_ring[i].dma; |
1511 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK)); | 1523 | j = adapter->rx_ring[i].reg_idx; |
1512 | IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32)); | 1524 | IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK)); |
1513 | IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen); | 1525 | IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); |
1514 | IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0); | 1526 | IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen); |
1515 | IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0); | 1527 | IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); |
1516 | adapter->rx_ring[i].head = IXGBE_RDH(i); | 1528 | IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); |
1517 | adapter->rx_ring[i].tail = IXGBE_RDT(i); | 1529 | adapter->rx_ring[i].head = IXGBE_RDH(j); |
1530 | adapter->rx_ring[i].tail = IXGBE_RDT(j); | ||
1531 | adapter->rx_ring[i].rx_buf_len = rx_buf_len; | ||
1518 | } | 1532 | } |
1519 | 1533 | ||
1520 | /* Intitial LRO Settings */ | 1534 | /* Intitial LRO Settings */ |
@@ -1541,22 +1555,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
1541 | } | 1555 | } |
1542 | 1556 | ||
1543 | /* Fill out hash function seeds */ | 1557 | /* Fill out hash function seeds */ |
1544 | /* XXX use a random constant here to glue certain flows */ | ||
1545 | get_random_bytes(&random[0], 40); | ||
1546 | for (i = 0; i < 10; i++) | 1558 | for (i = 0; i < 10; i++) |
1547 | IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]); | 1559 | IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); |
1548 | 1560 | ||
1549 | mrqc = IXGBE_MRQC_RSSEN | 1561 | mrqc = IXGBE_MRQC_RSSEN |
1550 | /* Perform hash on these packet types */ | 1562 | /* Perform hash on these packet types */ |
1551 | | IXGBE_MRQC_RSS_FIELD_IPV4 | 1563 | | IXGBE_MRQC_RSS_FIELD_IPV4 |
1552 | | IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 1564 | | IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
1553 | | IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 1565 | | IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
1554 | | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 1566 | | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
1555 | | IXGBE_MRQC_RSS_FIELD_IPV6_EX | 1567 | | IXGBE_MRQC_RSS_FIELD_IPV6_EX |
1556 | | IXGBE_MRQC_RSS_FIELD_IPV6 | 1568 | | IXGBE_MRQC_RSS_FIELD_IPV6 |
1557 | | IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 1569 | | IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
1558 | | IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 1570 | | IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
1559 | | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; | 1571 | | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; |
1560 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); | 1572 | IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); |
1561 | } | 1573 | } |
1562 | 1574 | ||
@@ -1926,7 +1938,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, | |||
1926 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | 1938 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; |
1927 | if (rx_buffer_info->dma) { | 1939 | if (rx_buffer_info->dma) { |
1928 | pci_unmap_single(pdev, rx_buffer_info->dma, | 1940 | pci_unmap_single(pdev, rx_buffer_info->dma, |
1929 | adapter->rx_buf_len, | 1941 | rx_ring->rx_buf_len, |
1930 | PCI_DMA_FROMDEVICE); | 1942 | PCI_DMA_FROMDEVICE); |
1931 | rx_buffer_info->dma = 0; | 1943 | rx_buffer_info->dma = 0; |
1932 | } | 1944 | } |