author     Alexander Duyck <alexander.h.duyck@intel.com>   2009-10-05 02:33:08 -0400
committer  David S. Miller <davem@davemloft.net>           2009-10-06 17:59:19 -0400
commit     68d480c4defb69d834e75fd0be9069a8447afe36 (patch)
tree       39c82b303e5bb71904d48a146e27308f578dba43 /drivers
parent     26ad91783c489486d3fd1a6932e5bdab9d404a38 (diff)
igb: make use of the UTA to allow for promiscuous mode filter
In order to support features such as VLAN tag stripping when SR-IOV is
enabled, any given packet must match at least one filter. However, when
promiscuous mode is enabled on the PF, the traffic routed to it may not
match any filter and is just sent to the PF by default. To make certain
that this traffic is still processed, we can set all bits in the UTA
registers to create a pseudo promiscuous mode filter that accepts all
packets.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
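The idea behind the patch can be illustrated outside the driver. The sketch below is not igb code: the register array, the hash function, and the acceptance check are simplified stand-ins invented for this example (the real 82576 derives the UTA index from specific destination-address bits), but it shows why writing ~0 to every UTA register turns the table into a filter that every unicast address hits, which a pool with the VMOLR ROPE bit set then accepts.

```c
/*
 * Stand-alone illustration of the "pseudo promiscuous" UTA trick.
 * NOT driver code: uta_hash() and pool_accepts() are simplified
 * stand-ins for the 82576 behaviour, used only to show the effect
 * of setting every bit in the Unicast Table Array.
 */
#include <stdint.h>
#include <stdio.h>

#define UTA_REG_COUNT 128                /* 128 x 32-bit registers = 4096 hash bits */

static uint32_t uta[UTA_REG_COUNT];      /* simulated Unicast Table Array */

/* Simplified hash: fold the destination MAC into a 12-bit UTA index.
 * The real hardware derives the index from specific address bits. */
static unsigned int uta_hash(const uint8_t mac[6])
{
	return (((unsigned int)mac[4] << 8) | mac[5]) & 0xFFF;
}

/* Mirrors what igb_set_uta() does: set every hash bit. */
static void set_uta_promiscuous(void)
{
	for (int i = 0; i < UTA_REG_COUNT; i++)
		uta[i] = ~0u;
}

/* A pool with VMOLR.ROPE set accepts a frame whose UTA hash bit is set. */
static int pool_accepts(const uint8_t mac[6])
{
	unsigned int idx = uta_hash(mac);

	return (uta[idx >> 5] >> (idx & 0x1F)) & 1;
}

int main(void)
{
	const uint8_t some_addr[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("before: accepted=%d\n", pool_accepts(some_addr)); /* 0: no filter hit */
	set_uta_promiscuous();
	printf("after:  accepted=%d\n", pool_accepts(some_addr)); /* 1: pseudo promiscuous */
	return 0;
}
```

In the driver itself this is what the new igb_set_uta() does with array_wr32(E1000_UTA, i, ~0), while igb_set_rx_mode() sets or clears E1000_VMOLR_ROPE for the PF's pool depending on the promiscuous flags.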
Diffstat (limited to 'drivers')
 drivers/net/igb/e1000_82575.c |   5
 drivers/net/igb/e1000_hw.h    |   1
 drivers/net/igb/e1000_regs.h  |   1
 drivers/net/igb/igb_main.c    | 203
 4 files changed, 163 insertions, 47 deletions
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 78971815bbc..b8a88a8b393 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -875,6 +875,11 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
 	for (i = 0; i < mac->mta_reg_count; i++)
 		array_wr32(E1000_MTA, i, 0);
 
+	/* Zero out the Unicast HASH table */
+	hw_dbg("Zeroing the UTA\n");
+	for (i = 0; i < mac->uta_reg_count; i++)
+		array_wr32(E1000_UTA, i, 0);
+
 	/* Setup link and flow control */
 	ret_val = igb_setup_link(hw);
 
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index b1e0c0613a9..7b7898bc934 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -340,6 +340,7 @@ struct e1000_mac_info {
 	u16 ifs_ratio;
 	u16 ifs_step_size;
 	u16 mta_reg_count;
+	u16 uta_reg_count;
 
 	/* Maximum size of the MTA register table in all supported adapters */
 	#define MAX_MTA_REG 128
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 345d1442d6d..76c338929f6 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -331,6 +331,7 @@ enum {
 #define E1000_QDE      0x02408  /* Queue Drop Enable - RW */
 #define E1000_DTXSWC   0x03500  /* DMA Tx Switch Control - RW */
 #define E1000_RPLOLR   0x05AF0  /* Replication Offload - RW */
+#define E1000_UTA      0x0A000  /* Unicast Table Array - RW */
 #define E1000_IOVTCL   0x05BBC  /* IOV Control Register */
 /* These act per VF so an array friendly macro is used */
 #define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index bb0aacd9961..fdbe33228d6 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -106,6 +106,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
 static struct net_device_stats *igb_get_stats(struct net_device *);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
+static void igb_set_uta(struct igb_adapter *adapter);
 static irqreturn_t igb_intr(int irq, void *);
 static irqreturn_t igb_intr_msi(int irq, void *);
 static irqreturn_t igb_msix_other(int irq, void *);
@@ -141,7 +142,6 @@ static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
 
 	reg_data = rd32(E1000_VMOLR(vfn));
 	reg_data |= E1000_VMOLR_BAM |	 /* Accept broadcast */
-		    E1000_VMOLR_ROPE |   /* Accept packets matched in UTA */
 		    E1000_VMOLR_ROMPE |  /* Accept packets matched in MTA */
 		    E1000_VMOLR_AUPE |   /* Accept untagged packets */
 		    E1000_VMOLR_STRVLAN; /* Strip vlan tags */
@@ -2286,6 +2286,9 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 	/* Set the default pool for the PF's first queue */
 	igb_configure_vt_default_pool(adapter);
 
+	/* set UTA to appropriate mode */
+	igb_set_uta(adapter);
+
 	/* set the correct pool for the PF default MAC address in entry 0 */
 	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
 	                 adapter->vfs_allocated_count);
@@ -2521,44 +2524,72 @@ static int igb_set_mac(struct net_device *netdev, void *p)
 }
 
 /**
- * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * igb_write_mc_addr_list - write multicast addresses to MTA
  * @netdev: network interface device structure
  *
- * The set_rx_mode entry point is called whenever the unicast or multicast
- * address lists or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper unicast, multicast,
- * promiscuous mode, and all-multi behavior.
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ *          0 on no addresses written
+ *          X on writing X addresses to MTA
 **/
-static void igb_set_rx_mode(struct net_device *netdev)
+static int igb_write_mc_addr_list(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	unsigned int rar_entries = hw->mac.rar_entry_count -
-	                           (adapter->vfs_allocated_count + 1);
 	struct dev_mc_list *mc_ptr = netdev->mc_list;
-	u8 *mta_list = NULL;
-	u32 rctl;
+	u8 *mta_list;
+	u32 vmolr = 0;
 	int i;
 
-	/* Check for Promiscuous and All Multicast modes */
-	rctl = rd32(E1000_RCTL);
+	if (!netdev->mc_count) {
+		/* nothing to program, so clear mc list */
+		igb_update_mc_addr_list(hw, NULL, 0);
+		igb_restore_vf_multicasts(adapter);
+		return 0;
+	}
 
-	if (netdev->flags & IFF_PROMISC) {
-		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
-		rctl &= ~E1000_RCTL_VFE;
-	} else {
-		if (netdev->flags & IFF_ALLMULTI)
-			rctl |= E1000_RCTL_MPE;
-		else
-			rctl &= ~E1000_RCTL_MPE;
+	mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
+	if (!mta_list)
+		return -ENOMEM;
 
-		if (netdev->uc.count > rar_entries)
-			rctl |= E1000_RCTL_UPE;
-		else
-			rctl &= ~E1000_RCTL_UPE;
-		rctl |= E1000_RCTL_VFE;
+	/* set vmolr receive overflow multicast bit */
+	vmolr |= E1000_VMOLR_ROMPE;
+
+	/* The shared function expects a packed array of only addresses. */
+	mc_ptr = netdev->mc_list;
+
+	for (i = 0; i < netdev->mc_count; i++) {
+		if (!mc_ptr)
+			break;
+		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
+		mc_ptr = mc_ptr->next;
 	}
-	wr32(E1000_RCTL, rctl);
+	igb_update_mc_addr_list(hw, mta_list, i);
+	kfree(mta_list);
+
+	return netdev->mc_count;
+}
+
+/**
+ * igb_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ *          0 on no addresses written
+ *          X on writing X addresses to the RAR table
+ **/
+static int igb_write_uc_addr_list(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int vfn = adapter->vfs_allocated_count;
+	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
+	int count = 0;
+
+	/* return ENOMEM indicating insufficient memory for addresses */
+	if (netdev->uc.count > rar_entries)
+		return -ENOMEM;
 
 	if (netdev->uc.count && rar_entries) {
 		struct netdev_hw_addr *ha;
@@ -2567,7 +2598,8 @@ static void igb_set_rx_mode(struct net_device *netdev)
 				break;
 			igb_rar_set_qsel(adapter, ha->addr,
 			                 rar_entries--,
-			                 adapter->vfs_allocated_count);
+			                 vfn);
+			count++;
 		}
 	}
 	/* write the addresses in reverse order to avoid write combining */
@@ -2577,29 +2609,79 @@ static void igb_set_rx_mode(struct net_device *netdev)
 	}
 	wrfl();
 
-	if (!netdev->mc_count) {
-		/* nothing to program, so clear mc list */
-		igb_update_mc_addr_list(hw, NULL, 0);
-		igb_restore_vf_multicasts(adapter);
-		return;
+	return count;
+}
+
+/**
+ * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast or multicast
+ * address lists or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void igb_set_rx_mode(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int vfn = adapter->vfs_allocated_count;
+	u32 rctl, vmolr = 0;
+	int count;
+
+	/* Check for Promiscuous and All Multicast modes */
+	rctl = rd32(E1000_RCTL);
+
+	/* clear the effected bits */
+	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			rctl |= E1000_RCTL_MPE;
+			vmolr |= E1000_VMOLR_MPME;
+		} else {
+			/*
+			 * Write addresses to the MTA, if the attempt fails
+			 * then we should just turn on promiscous mode so
+			 * that we can at least receive multicast traffic
+			 */
+			count = igb_write_mc_addr_list(netdev);
+			if (count < 0) {
+				rctl |= E1000_RCTL_MPE;
+				vmolr |= E1000_VMOLR_MPME;
+			} else if (count) {
+				vmolr |= E1000_VMOLR_ROMPE;
+			}
+		}
+		/*
+		 * Write addresses to available RAR registers, if there is not
+		 * sufficient space to store all the addresses then enable
+		 * unicast promiscous mode
+		 */
+		count = igb_write_uc_addr_list(netdev);
+		if (count < 0) {
+			rctl |= E1000_RCTL_UPE;
+			vmolr |= E1000_VMOLR_ROPE;
+		}
+		rctl |= E1000_RCTL_VFE;
 	}
+	wr32(E1000_RCTL, rctl);
 
-	mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
-	if (!mta_list) {
-		dev_err(&adapter->pdev->dev,
-			"failed to allocate multicast filter list\n");
+	/*
+	 * In order to support SR-IOV and eventually VMDq it is necessary to set
+	 * the VMOLR to enable the appropriate modes.  Without this workaround
+	 * we will have issues with VLAN tag stripping not being done for frames
+	 * that are only arriving because we are the default pool
+	 */
+	if (hw->mac.type < e1000_82576)
 		return;
-	}
 
-	/* The shared function expects a packed array of only addresses. */
-	for (i = 0; i < netdev->mc_count; i++) {
-		if (!mc_ptr)
-			break;
-		memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
-		mc_ptr = mc_ptr->next;
-	}
-	igb_update_mc_addr_list(hw, mta_list, i);
-	kfree(mta_list);
+	vmolr |= rd32(E1000_VMOLR(vfn)) &
+	         ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+	wr32(E1000_VMOLR(vfn), vmolr);
 	igb_restore_vf_multicasts(adapter);
 }
 
@@ -4264,6 +4346,33 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
 }
 
 /**
+ * igb_set_uta - Set unicast filter table address
+ * @adapter: board private structure
+ *
+ * The unicast table address is a register array of 32-bit registers.
+ * The table is meant to be used in a way similar to how the MTA is used
+ * however due to certain limitations in the hardware it is necessary to
+ * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscous
+ * enable bit to allow vlan tag stripping when promiscous mode is enabled
+ **/
+static void igb_set_uta(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int i;
+
+	/* The UTA table only exists on 82576 hardware and newer */
+	if (hw->mac.type < e1000_82576)
+		return;
+
+	/* we only need to do this if VMDq is enabled */
+	if (!adapter->vfs_allocated_count)
+		return;
+
+	for (i = 0; i < hw->mac.uta_reg_count; i++)
+		array_wr32(E1000_UTA, i, ~0);
+}
+
+/**
  * igb_intr_msi - Interrupt Handler
  * @irq: interrupt number
  * @data: pointer to a network interface device structure