author    Alexander Duyck <alexander.h.duyck@intel.com>    2009-10-05 02:32:49 -0400
committer David S. Miller <davem@davemloft.net>            2009-10-06 17:59:18 -0400
commit    26ad91783c489486d3fd1a6932e5bdab9d404a38 (patch)
tree      8f081e578f90cc9a4e3a5199c980ef0ea2c63bca /drivers/net/igb
parent    bf6f7a928d313ddecb0a16ea60fa6b45ac1414a7 (diff)
igb: add combined function for setting rar and pool bits
This patch adds igb_rar_set_qsel(), which sets the MAC address and the pool bits for a given MAC address in the receive address register (RAR) table.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/igb')
 -rw-r--r--  drivers/net/igb/igb_main.c | 63
 1 file changed, 40 insertions(+), 23 deletions(-)
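The hunks below all follow the same pattern: each igb_rar_set() + igb_set_rah_pool() pair becomes a single igb_rar_set_qsel() call, with the pool argument being adapter->vfs_allocated_count for PF-owned entries and the VF number for VF-owned entries. The user-space sketch below mocks the register accessors against a plain array to show that the old two-step sequence and the new combined helper leave the same RAL/RAH contents; rd32()/wr32(), the register indices and the E1000_RAH_* values here are illustrative stand-ins, not the driver's real macros or necessarily the exact hardware values.

/*
 * Sketch only: compare the old path (igb_rar_set + igb_set_rah_pool) with
 * the combined igb_rar_set_qsel introduced by this patch, using a mocked
 * register file.  Constants and accessors are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define RAL(i)              (2 * (i))        /* mock register index */
#define RAH(i)              (2 * (i) + 1)
#define E1000_RAH_AV        0x80000000u      /* address valid bit (assumed) */
#define E1000_RAH_POOL_1    0x00040000u      /* first pool bit (assumed) */
#define E1000_RAH_POOL_MASK 0x03FC0000u      /* pool bit field (assumed) */

static uint32_t regs[32];
static uint32_t rd32(int r)             { return regs[r]; }
static void     wr32(int r, uint32_t v) { regs[r] = v; }

static void pack_addr(const uint8_t *a, uint32_t *lo, uint32_t *hi)
{
        /* hardware wants the MAC little endian: bytes 0-3 in RAL, 4-5 in RAH */
        *lo = (uint32_t)a[0] | ((uint32_t)a[1] << 8) |
              ((uint32_t)a[2] << 16) | ((uint32_t)a[3] << 24);
        *hi = (uint32_t)a[4] | ((uint32_t)a[5] << 8);
}

/* old path: write the address, then read-modify-write the pool bits */
static void old_style(const uint8_t *a, int entry, int pool)
{
        uint32_t lo, hi, rah;

        pack_addr(a, &lo, &hi);
        wr32(RAL(entry), lo);
        wr32(RAH(entry), hi | E1000_RAH_AV);   /* like igb_rar_set()      */
        rah = rd32(RAH(entry));                /* like igb_set_rah_pool() */
        rah &= ~E1000_RAH_POOL_MASK;
        rah |= E1000_RAH_POOL_1 << pool;
        wr32(RAH(entry), rah);
}

/* new path: one helper builds RAL and RAH, pool bit included, in one go */
static void new_style(const uint8_t *a, int entry, int qsel)
{
        uint32_t lo, hi;

        pack_addr(a, &lo, &hi);
        hi |= E1000_RAH_AV | (E1000_RAH_POOL_1 << qsel);
        wr32(RAL(entry), lo);
        wr32(RAH(entry), hi);
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        uint32_t old_lo, old_hi;

        old_style(mac, 0, 2);              /* e.g. PF behind 2 VFs -> pool 2 */
        old_lo = regs[RAL(0)];
        old_hi = regs[RAH(0)];
        new_style(mac, 0, 2);
        printf("old RAL/RAH %08x/%08x, new RAL/RAH %08x/%08x\n",
               (unsigned)old_lo, (unsigned)old_hi,
               (unsigned)regs[RAL(0)], (unsigned)regs[RAH(0)]);
        return 0;
}

Either way the registers end up identical; what the patch removes is the intermediate read-modify-write of RAH (and, incidentally, the stray double semicolon in the old helper).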
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 714c3a4a44ef..bb0aacd9961e 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -127,10 +127,10 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
 static void igb_vlan_rx_add_vid(struct net_device *, u16);
 static void igb_vlan_rx_kill_vid(struct net_device *, u16);
 static void igb_restore_vlan(struct igb_adapter *);
+static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
 static void igb_ping_all_vfs(struct igb_adapter *);
 static void igb_msg_task(struct igb_adapter *);
 static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
-static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
 static void igb_vmm_control(struct igb_adapter *);
 static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
@@ -168,16 +168,6 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
         return 0;
 }
 
-static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
-{
-        u32 reg_data;
-
-        reg_data = rd32(E1000_RAH(entry));
-        reg_data &= ~E1000_RAH_POOL_MASK;
-        reg_data |= E1000_RAH_POOL_1 << pool;;
-        wr32(E1000_RAH(entry), reg_data);
-}
-
 #ifdef CONFIG_PM
 static int igb_suspend(struct pci_dev *, pm_message_t);
 static int igb_resume(struct pci_dev *);
@@ -982,7 +972,6 @@ int igb_up(struct igb_adapter *adapter)
         igb_configure_msix(adapter);
 
         igb_vmm_control(adapter);
-        igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
         igb_set_vmolr(hw, adapter->vfs_allocated_count);
 
         /* Clear any pending interrupts. */
@@ -1769,7 +1758,6 @@ static int igb_open(struct net_device *netdev)
         igb_configure(adapter);
 
         igb_vmm_control(adapter);
-        igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
         igb_set_vmolr(hw, adapter->vfs_allocated_count);
 
         err = igb_request_irq(adapter);
@@ -2298,6 +2286,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
         /* Set the default pool for the PF's first queue */
         igb_configure_vt_default_pool(adapter);
 
+        /* set the correct pool for the PF default MAC address in entry 0 */
+        igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
+                         adapter->vfs_allocated_count);
+
         igb_rlpml_set(adapter);
 
         /* Enable Receives */
@@ -2521,8 +2513,9 @@ static int igb_set_mac(struct net_device *netdev, void *p)
         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 
-        igb_rar_set(hw, hw->mac.addr, 0);
-        igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
+        /* set the correct pool for the new PF MAC address in entry 0 */
+        igb_rar_set_qsel(adapter, hw->mac.addr, 0,
+                         adapter->vfs_allocated_count);
 
         return 0;
 }
@@ -2572,10 +2565,9 @@ static void igb_set_rx_mode(struct net_device *netdev)
                 list_for_each_entry(ha, &netdev->uc.list, list) {
                         if (!rar_entries)
                                 break;
-                        igb_rar_set(hw, ha->addr, rar_entries);
-                        igb_set_rah_pool(hw, adapter->vfs_allocated_count,
-                                         rar_entries);
-                        rar_entries--;
+                        igb_rar_set_qsel(adapter, ha->addr,
+                                         rar_entries--,
+                                         adapter->vfs_allocated_count);
                 }
         }
         /* write the addresses in reverse order to avoid write combining */
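One subtle point in the igb_set_rx_mode() hunk above: the standalone rar_entries-- statement is folded into the call as a post-decrement argument, which still passes the current entry index and only decrements afterwards, so behaviour is unchanged. A minimal, self-contained illustration of that equivalence (the entry count and helper name are made up for the example):

#include <stdio.h>

static void program_entry(int entry)
{
        printf("programming RAR entry %d\n", entry);
}

int main(void)
{
        int rar_entries = 15;

        program_entry(rar_entries--);                 /* uses 15 */
        printf("next free entry: %d\n", rar_entries); /* now 14 */
        return 0;
}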
@@ -4142,8 +4134,7 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
         igb_vf_reset_event(adapter, vf);
 
         /* set vf mac address */
-        igb_rar_set(hw, vf_mac, rar_entry);
-        igb_set_rah_pool(hw, vf, rar_entry);
+        igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
 
         /* enable transmit and receive for vf */
         reg = rd32(E1000_VFTE);
@@ -5532,6 +5523,33 @@ static void igb_io_resume(struct pci_dev *pdev)
         igb_get_hw_control(adapter);
 }
 
+static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
+                             u8 qsel)
+{
+        u32 rar_low, rar_high;
+        struct e1000_hw *hw = &adapter->hw;
+
+        /* HW expects these in little endian so we reverse the byte order
+         * from network order (big endian) to little endian
+         */
+        rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+                   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+        rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+        /* Indicate to hardware the Address is Valid. */
+        rar_high |= E1000_RAH_AV;
+
+        if (hw->mac.type == e1000_82575)
+                rar_high |= E1000_RAH_POOL_1 * qsel;
+        else
+                rar_high |= E1000_RAH_POOL_1 << qsel;
+
+        wr32(E1000_RAL(index), rar_low);
+        wrfl();
+        wr32(E1000_RAH(index), rar_high);
+        wrfl();
+}
+
 static int igb_set_vf_mac(struct igb_adapter *adapter,
                           int vf, unsigned char *mac_addr)
 {
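Two details of the new helper are worth spelling out. The first four MAC bytes go into RAL and the last two into the low bits of RAH, since the hardware wants the address little endian. For the pool bits, the 82575 branch multiplies by E1000_RAH_POOL_1 while later parts shift by qsel, which reads as the 82575 storing a pool number in that field and 82576-class parts storing a one-bit-per-pool bitmap; that interpretation, and the constant value in the sketch below, are my reading of the diff rather than something stated in it.

/* Sketch of the qsel -> RAH pool-field encoding used above.
 * E1000_RAH_POOL_1 is an assumed value for illustration. */
#include <stdint.h>
#include <stdio.h>

#define E1000_RAH_POOL_1 0x00040000u   /* lowest pool bit (assumed) */

int main(void)
{
        uint8_t qsel = 2;   /* e.g. a PF behind two VFs, or VF number 2 */

        /* 82575: the field carries the pool number itself */
        uint32_t rah_82575 = E1000_RAH_POOL_1 * qsel;
        /* 82576 and later: the field is a bitmap, one bit per pool */
        uint32_t rah_later = E1000_RAH_POOL_1 << qsel;

        printf("82575 pool field: %08x, later parts: %08x\n",
               (unsigned)rah_82575, (unsigned)rah_later);
        return 0;
}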
@@ -5542,8 +5560,7 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
 
         memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
 
-        igb_rar_set(hw, mac_addr, rar_entry);
-        igb_set_rah_pool(hw, vf, rar_entry);
+        igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
 
         return 0;
 }