diff options
61 files changed, 1515 insertions, 725 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index d47839184a06..b0cb29d4cc01 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -54,8 +54,8 @@ | |||
54 | 54 | ||
55 | #define DRV_MODULE_NAME "bnx2" | 55 | #define DRV_MODULE_NAME "bnx2" |
56 | #define PFX DRV_MODULE_NAME ": " | 56 | #define PFX DRV_MODULE_NAME ": " |
57 | #define DRV_MODULE_VERSION "2.0.0" | 57 | #define DRV_MODULE_VERSION "2.0.1" |
58 | #define DRV_MODULE_RELDATE "April 2, 2009" | 58 | #define DRV_MODULE_RELDATE "May 6, 2009" |
59 | #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-4.6.16.fw" | 59 | #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-4.6.16.fw" |
60 | #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-4.6.16.fw" | 60 | #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-4.6.16.fw" |
61 | #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-4.6.17.fw" | 61 | #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-4.6.17.fw" |
@@ -2600,6 +2600,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi) | |||
2600 | /* Tell compiler that status block fields can change. */ | 2600 | /* Tell compiler that status block fields can change. */ |
2601 | barrier(); | 2601 | barrier(); |
2602 | cons = *bnapi->hw_tx_cons_ptr; | 2602 | cons = *bnapi->hw_tx_cons_ptr; |
2603 | barrier(); | ||
2603 | if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT)) | 2604 | if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT)) |
2604 | cons++; | 2605 | cons++; |
2605 | return cons; | 2606 | return cons; |
@@ -2879,6 +2880,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi) | |||
2879 | /* Tell compiler that status block fields can change. */ | 2880 | /* Tell compiler that status block fields can change. */ |
2880 | barrier(); | 2881 | barrier(); |
2881 | cons = *bnapi->hw_rx_cons_ptr; | 2882 | cons = *bnapi->hw_rx_cons_ptr; |
2883 | barrier(); | ||
2882 | if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)) | 2884 | if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)) |
2883 | cons++; | 2885 | cons++; |
2884 | return cons; | 2886 | return cons; |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 553a89919778..46d312bedfb8 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -1706,10 +1706,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave | |||
1706 | * Called with RTNL | 1706 | * Called with RTNL |
1707 | */ | 1707 | */ |
1708 | int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) | 1708 | int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) |
1709 | __releases(&bond->curr_slave_lock) | ||
1710 | __releases(&bond->lock) | ||
1711 | __acquires(&bond->lock) | 1709 | __acquires(&bond->lock) |
1712 | __acquires(&bond->curr_slave_lock) | 1710 | __releases(&bond->lock) |
1713 | { | 1711 | { |
1714 | struct bonding *bond = netdev_priv(bond_dev); | 1712 | struct bonding *bond = netdev_priv(bond_dev); |
1715 | struct sockaddr *sa = addr; | 1713 | struct sockaddr *sa = addr; |
@@ -1745,9 +1743,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) | |||
1745 | } | 1743 | } |
1746 | } | 1744 | } |
1747 | 1745 | ||
1748 | write_unlock_bh(&bond->curr_slave_lock); | ||
1749 | read_unlock(&bond->lock); | ||
1750 | |||
1751 | if (swap_slave) { | 1746 | if (swap_slave) { |
1752 | alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave); | 1747 | alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave); |
1753 | alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave); | 1748 | alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave); |
@@ -1755,16 +1750,15 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) | |||
1755 | alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr, | 1750 | alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr, |
1756 | bond->alb_info.rlb_enabled); | 1751 | bond->alb_info.rlb_enabled); |
1757 | 1752 | ||
1753 | read_lock(&bond->lock); | ||
1758 | alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); | 1754 | alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); |
1759 | if (bond->alb_info.rlb_enabled) { | 1755 | if (bond->alb_info.rlb_enabled) { |
1760 | /* inform clients mac address has changed */ | 1756 | /* inform clients mac address has changed */ |
1761 | rlb_req_update_slave_clients(bond, bond->curr_active_slave); | 1757 | rlb_req_update_slave_clients(bond, bond->curr_active_slave); |
1762 | } | 1758 | } |
1759 | read_unlock(&bond->lock); | ||
1763 | } | 1760 | } |
1764 | 1761 | ||
1765 | read_lock(&bond->lock); | ||
1766 | write_lock_bh(&bond->curr_slave_lock); | ||
1767 | |||
1768 | return 0; | 1762 | return 0; |
1769 | } | 1763 | } |
1770 | 1764 | ||
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e3af662b0559..815191dd03c3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -3459,8 +3459,28 @@ static void bond_destroy_proc_dir(void) | |||
3459 | bond_proc_dir = NULL; | 3459 | bond_proc_dir = NULL; |
3460 | } | 3460 | } |
3461 | } | 3461 | } |
3462 | |||
3463 | #else /* !CONFIG_PROC_FS */ | ||
3464 | |||
3465 | static int bond_create_proc_entry(struct bonding *bond)
3466 | {
3467 | 	return 0;
3468 | }
3468 | |||
3469 | static void bond_remove_proc_entry(struct bonding *bond) | ||
3470 | { | ||
3471 | } | ||
3472 | |||
3473 | static void bond_create_proc_dir(void) | ||
3474 | { | ||
3475 | } | ||
3476 | |||
3477 | static void bond_destroy_proc_dir(void) | ||
3478 | { | ||
3479 | } | ||
3480 | |||
3462 | #endif /* CONFIG_PROC_FS */ | 3481 | #endif /* CONFIG_PROC_FS */ |
3463 | 3482 | ||
3483 | |||
3464 | /*-------------------------- netdev event handling --------------------------*/ | 3484 | /*-------------------------- netdev event handling --------------------------*/ |
3465 | 3485 | ||
3466 | /* | 3486 | /* |
@@ -3468,10 +3488,8 @@ static void bond_destroy_proc_dir(void) | |||
3468 | */ | 3488 | */ |
3469 | static int bond_event_changename(struct bonding *bond) | 3489 | static int bond_event_changename(struct bonding *bond) |
3470 | { | 3490 | { |
3471 | #ifdef CONFIG_PROC_FS | ||
3472 | bond_remove_proc_entry(bond); | 3491 | bond_remove_proc_entry(bond); |
3473 | bond_create_proc_entry(bond); | 3492 | bond_create_proc_entry(bond); |
3474 | #endif | ||
3475 | down_write(&(bonding_rwsem)); | 3493 | down_write(&(bonding_rwsem)); |
3476 | bond_destroy_sysfs_entry(bond); | 3494 | bond_destroy_sysfs_entry(bond); |
3477 | bond_create_sysfs_entry(bond); | 3495 | bond_create_sysfs_entry(bond); |
@@ -4637,9 +4655,7 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params) | |||
4637 | NETIF_F_HW_VLAN_RX | | 4655 | NETIF_F_HW_VLAN_RX | |
4638 | NETIF_F_HW_VLAN_FILTER); | 4656 | NETIF_F_HW_VLAN_FILTER); |
4639 | 4657 | ||
4640 | #ifdef CONFIG_PROC_FS | ||
4641 | bond_create_proc_entry(bond); | 4658 | bond_create_proc_entry(bond); |
4642 | #endif | ||
4643 | list_add_tail(&bond->bond_list, &bond_dev_list); | 4659 | list_add_tail(&bond->bond_list, &bond_dev_list); |
4644 | 4660 | ||
4645 | return 0; | 4661 | return 0; |
@@ -4677,9 +4693,7 @@ static void bond_deinit(struct net_device *bond_dev) | |||
4677 | 4693 | ||
4678 | bond_work_cancel_all(bond); | 4694 | bond_work_cancel_all(bond); |
4679 | 4695 | ||
4680 | #ifdef CONFIG_PROC_FS | ||
4681 | bond_remove_proc_entry(bond); | 4696 | bond_remove_proc_entry(bond); |
4682 | #endif | ||
4683 | } | 4697 | } |
4684 | 4698 | ||
4685 | /* Unregister and free all bond devices. | 4699 | /* Unregister and free all bond devices. |
@@ -4698,9 +4712,7 @@ static void bond_free_all(void) | |||
4698 | bond_destroy(bond); | 4712 | bond_destroy(bond); |
4699 | } | 4713 | } |
4700 | 4714 | ||
4701 | #ifdef CONFIG_PROC_FS | ||
4702 | bond_destroy_proc_dir(); | 4715 | bond_destroy_proc_dir(); |
4703 | #endif | ||
4704 | } | 4716 | } |
4705 | 4717 | ||
4706 | /*------------------------- Module initialization ---------------------------*/ | 4718 | /*------------------------- Module initialization ---------------------------*/ |
@@ -5196,9 +5208,7 @@ static int __init bonding_init(void) | |||
5196 | goto out; | 5208 | goto out; |
5197 | } | 5209 | } |
5198 | 5210 | ||
5199 | #ifdef CONFIG_PROC_FS | ||
5200 | bond_create_proc_dir(); | 5211 | bond_create_proc_dir(); |
5201 | #endif | ||
5202 | 5212 | ||
5203 | init_rwsem(&bonding_rwsem); | 5213 | init_rwsem(&bonding_rwsem); |
5204 | 5214 | ||
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 71d4fe15976a..9a32d0c73cb3 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -2646,6 +2646,8 @@ static void e1000_watchdog(unsigned long data) | |||
2646 | * (Do the reset outside of interrupt context). */ | 2646 | * (Do the reset outside of interrupt context). */ |
2647 | adapter->tx_timeout_count++; | 2647 | adapter->tx_timeout_count++; |
2648 | schedule_work(&adapter->reset_task); | 2648 | schedule_work(&adapter->reset_task); |
2649 | /* return immediately since reset is imminent */ | ||
2650 | return; | ||
2649 | } | 2651 | } |
2650 | } | 2652 | } |
2651 | 2653 | ||
@@ -3739,7 +3741,7 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
3739 | struct e1000_hw *hw = &adapter->hw; | 3741 | struct e1000_hw *hw = &adapter->hw; |
3740 | u32 rctl, icr = er32(ICR); | 3742 | u32 rctl, icr = er32(ICR); |
3741 | 3743 | ||
3742 | if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags))) | 3744 | if (unlikely((!icr) || test_bit(__E1000_DOWN, &adapter->flags))) |
3743 | return IRQ_NONE; /* Not our interrupt */ | 3745 | return IRQ_NONE; /* Not our interrupt */ |
3744 | 3746 | ||
3745 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is | 3747 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index da6b37e05bea..ccaaee0951cf 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -3651,6 +3651,8 @@ link_up: | |||
3651 | */ | 3651 | */ |
3652 | adapter->tx_timeout_count++; | 3652 | adapter->tx_timeout_count++; |
3653 | schedule_work(&adapter->reset_task); | 3653 | schedule_work(&adapter->reset_task); |
3654 | /* return immediately since reset is imminent */ | ||
3655 | return; | ||
3654 | } | 3656 | } |
3655 | } | 3657 | } |
3656 | 3658 | ||
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 6e317caf429c..16a41389575a 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | #define DRV_NAME "ehea" | 42 | #define DRV_NAME "ehea" |
43 | #define DRV_VERSION "EHEA_0100" | 43 | #define DRV_VERSION "EHEA_0101" |
44 | 44 | ||
45 | /* eHEA capability flags */ | 45 | /* eHEA capability flags */ |
46 | #define DLPAR_PORT_ADD_REM 1 | 46 | #define DLPAR_PORT_ADD_REM 1 |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 37a190d85fcd..147c4b088fb3 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -545,14 +545,17 @@ static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array, | |||
545 | x &= (arr_len - 1); | 545 | x &= (arr_len - 1); |
546 | 546 | ||
547 | pref = skb_array[x]; | 547 | pref = skb_array[x]; |
548 | prefetchw(pref); | 548 | if (pref) { |
549 | prefetchw(pref + EHEA_CACHE_LINE); | 549 | prefetchw(pref); |
550 | 550 | prefetchw(pref + EHEA_CACHE_LINE); | |
551 | pref = (skb_array[x]->data); | 551 | |
552 | prefetch(pref); | 552 | pref = (skb_array[x]->data); |
553 | prefetch(pref + EHEA_CACHE_LINE); | 553 | prefetch(pref); |
554 | prefetch(pref + EHEA_CACHE_LINE * 2); | 554 | prefetch(pref + EHEA_CACHE_LINE); |
555 | prefetch(pref + EHEA_CACHE_LINE * 3); | 555 | prefetch(pref + EHEA_CACHE_LINE * 2); |
556 | prefetch(pref + EHEA_CACHE_LINE * 3); | ||
557 | } | ||
558 | |||
556 | skb = skb_array[skb_index]; | 559 | skb = skb_array[skb_index]; |
557 | skb_array[skb_index] = NULL; | 560 | skb_array[skb_index] = NULL; |
558 | return skb; | 561 | return skb; |
@@ -569,12 +572,14 @@ static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array, | |||
569 | x &= (arr_len - 1); | 572 | x &= (arr_len - 1); |
570 | 573 | ||
571 | pref = skb_array[x]; | 574 | pref = skb_array[x]; |
572 | prefetchw(pref); | 575 | if (pref) { |
573 | prefetchw(pref + EHEA_CACHE_LINE); | 576 | prefetchw(pref); |
577 | prefetchw(pref + EHEA_CACHE_LINE); | ||
574 | 578 | ||
575 | pref = (skb_array[x]->data); | 579 | pref = (skb_array[x]->data); |
576 | prefetchw(pref); | 580 | prefetchw(pref); |
577 | prefetchw(pref + EHEA_CACHE_LINE); | 581 | prefetchw(pref + EHEA_CACHE_LINE); |
582 | } | ||
578 | 583 | ||
579 | skb = skb_array[wqe_index]; | 584 | skb = skb_array[wqe_index]; |
580 | skb_array[wqe_index] = NULL; | 585 | skb_array[wqe_index] = NULL; |
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index 4e8464b9df2e..154c5acc6fce 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h | |||
@@ -238,7 +238,6 @@ struct igb_adapter { | |||
238 | u64 hw_csum_err; | 238 | u64 hw_csum_err; |
239 | u64 hw_csum_good; | 239 | u64 hw_csum_good; |
240 | u32 alloc_rx_buff_failed; | 240 | u32 alloc_rx_buff_failed; |
241 | bool rx_csum; | ||
242 | u32 gorc; | 241 | u32 gorc; |
243 | u64 gorc_old; | 242 | u64 gorc_old; |
244 | u16 rx_ps_hdr_size; | 243 | u16 rx_ps_hdr_size; |
@@ -286,6 +285,7 @@ struct igb_adapter { | |||
286 | #define IGB_FLAG_DCA_ENABLED (1 << 1) | 285 | #define IGB_FLAG_DCA_ENABLED (1 << 1) |
287 | #define IGB_FLAG_QUAD_PORT_A (1 << 2) | 286 | #define IGB_FLAG_QUAD_PORT_A (1 << 2) |
288 | #define IGB_FLAG_NEED_CTX_IDX (1 << 3) | 287 | #define IGB_FLAG_NEED_CTX_IDX (1 << 3) |
288 | #define IGB_FLAG_RX_CSUM_DISABLED (1 << 4) | ||
289 | 289 | ||
290 | enum e1000_state_t { | 290 | enum e1000_state_t { |
291 | __IGB_TESTING, | 291 | __IGB_TESTING, |
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index b1367ce6586e..b8551a57dd3f 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c | |||
@@ -275,13 +275,17 @@ static int igb_set_pauseparam(struct net_device *netdev, | |||
275 | static u32 igb_get_rx_csum(struct net_device *netdev) | 275 | static u32 igb_get_rx_csum(struct net_device *netdev) |
276 | { | 276 | { |
277 | struct igb_adapter *adapter = netdev_priv(netdev); | 277 | struct igb_adapter *adapter = netdev_priv(netdev); |
278 | return adapter->rx_csum; | 278 | return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED); |
279 | } | 279 | } |
280 | 280 | ||
281 | static int igb_set_rx_csum(struct net_device *netdev, u32 data) | 281 | static int igb_set_rx_csum(struct net_device *netdev, u32 data) |
282 | { | 282 | { |
283 | struct igb_adapter *adapter = netdev_priv(netdev); | 283 | struct igb_adapter *adapter = netdev_priv(netdev); |
284 | adapter->rx_csum = data; | 284 | |
285 | if (data) | ||
286 | adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED; | ||
287 | else | ||
288 | adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED; | ||
285 | 289 | ||
286 | return 0; | 290 | return 0; |
287 | } | 291 | } |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index bca7e9f76be4..ffd731539997 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -1395,8 +1395,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1395 | 1395 | ||
1396 | igb_validate_mdi_setting(hw); | 1396 | igb_validate_mdi_setting(hw); |
1397 | 1397 | ||
1398 | adapter->rx_csum = 1; | ||
1399 | |||
1400 | /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, | 1398 | /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, |
1401 | * enable the ACPI Magic Packet filter | 1399 | * enable the ACPI Magic Packet filter |
1402 | */ | 1400 | */ |
@@ -2012,7 +2010,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter) | |||
2012 | struct e1000_hw *hw = &adapter->hw; | 2010 | struct e1000_hw *hw = &adapter->hw; |
2013 | u32 rctl; | 2011 | u32 rctl; |
2014 | u32 srrctl = 0; | 2012 | u32 srrctl = 0; |
2015 | int i, j; | 2013 | int i; |
2016 | 2014 | ||
2017 | rctl = rd32(E1000_RCTL); | 2015 | rctl = rd32(E1000_RCTL); |
2018 | 2016 | ||
@@ -2077,8 +2075,6 @@ static void igb_setup_rctl(struct igb_adapter *adapter) | |||
2077 | if (adapter->vfs_allocated_count) { | 2075 | if (adapter->vfs_allocated_count) { |
2078 | u32 vmolr; | 2076 | u32 vmolr; |
2079 | 2077 | ||
2080 | j = adapter->rx_ring[0].reg_idx; | ||
2081 | |||
2082 | /* set all queue drop enable bits */ | 2078 | /* set all queue drop enable bits */ |
2083 | wr32(E1000_QDE, ALL_QUEUES); | 2079 | wr32(E1000_QDE, ALL_QUEUES); |
2084 | srrctl |= E1000_SRRCTL_DROP_EN; | 2080 | srrctl |= E1000_SRRCTL_DROP_EN; |
@@ -2086,16 +2082,16 @@ static void igb_setup_rctl(struct igb_adapter *adapter) | |||
2086 | /* disable queue 0 to prevent tail write w/o re-config */ | 2082 | /* disable queue 0 to prevent tail write w/o re-config */ |
2087 | wr32(E1000_RXDCTL(0), 0); | 2083 | wr32(E1000_RXDCTL(0), 0); |
2088 | 2084 | ||
2089 | vmolr = rd32(E1000_VMOLR(j)); | 2085 | vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count)); |
2090 | if (rctl & E1000_RCTL_LPE) | 2086 | if (rctl & E1000_RCTL_LPE) |
2091 | vmolr |= E1000_VMOLR_LPE; | 2087 | vmolr |= E1000_VMOLR_LPE; |
2092 | if (adapter->num_rx_queues > 0) | 2088 | if (adapter->num_rx_queues > 1) |
2093 | vmolr |= E1000_VMOLR_RSSE; | 2089 | vmolr |= E1000_VMOLR_RSSE; |
2094 | wr32(E1000_VMOLR(j), vmolr); | 2090 | wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr); |
2095 | } | 2091 | } |
2096 | 2092 | ||
2097 | for (i = 0; i < adapter->num_rx_queues; i++) { | 2093 | for (i = 0; i < adapter->num_rx_queues; i++) { |
2098 | j = adapter->rx_ring[i].reg_idx; | 2094 | int j = adapter->rx_ring[i].reg_idx; |
2099 | wr32(E1000_SRRCTL(j), srrctl); | 2095 | wr32(E1000_SRRCTL(j), srrctl); |
2100 | } | 2096 | } |
2101 | 2097 | ||
@@ -2249,13 +2245,12 @@ static void igb_configure_rx(struct igb_adapter *adapter) | |||
2249 | rxcsum = rd32(E1000_RXCSUM); | 2245 | rxcsum = rd32(E1000_RXCSUM); |
2250 | /* Disable raw packet checksumming */ | 2246 | /* Disable raw packet checksumming */ |
2251 | rxcsum |= E1000_RXCSUM_PCSD; | 2247 | rxcsum |= E1000_RXCSUM_PCSD; |
2252 | /* Don't need to set TUOFL or IPOFL, they default to 1 */ | 2248 | |
2253 | if (!adapter->rx_csum) | 2249 | if (adapter->hw.mac.type == e1000_82576) |
2254 | rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL); | ||
2255 | else if (adapter->hw.mac.type == e1000_82576) | ||
2256 | /* Enable Receive Checksum Offload for SCTP */ | 2250 | /* Enable Receive Checksum Offload for SCTP */ |
2257 | rxcsum |= E1000_RXCSUM_CRCOFL; | 2251 | rxcsum |= E1000_RXCSUM_CRCOFL; |
2258 | 2252 | ||
2253 | /* Don't need to set TUOFL or IPOFL, they default to 1 */ | ||
2259 | wr32(E1000_RXCSUM, rxcsum); | 2254 | wr32(E1000_RXCSUM, rxcsum); |
2260 | 2255 | ||
2261 | /* Set the default pool for the PF's first queue */ | 2256 | /* Set the default pool for the PF's first queue */ |
@@ -2714,6 +2709,8 @@ link_up: | |||
2714 | * (Do the reset outside of interrupt context). */ | 2709 | * (Do the reset outside of interrupt context). */ |
2715 | adapter->tx_timeout_count++; | 2710 | adapter->tx_timeout_count++; |
2716 | schedule_work(&adapter->reset_task); | 2711 | schedule_work(&adapter->reset_task); |
2712 | /* return immediately since reset is imminent */ | ||
2713 | return; | ||
2717 | } | 2714 | } |
2718 | } | 2715 | } |
2719 | 2716 | ||
@@ -4455,7 +4452,8 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, | |||
4455 | skb->ip_summed = CHECKSUM_NONE; | 4452 | skb->ip_summed = CHECKSUM_NONE; |
4456 | 4453 | ||
4457 | /* Ignore Checksum bit is set or checksum is disabled through ethtool */ | 4454 | /* Ignore Checksum bit is set or checksum is disabled through ethtool */ |
4458 | if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum) | 4455 | if ((status_err & E1000_RXD_STAT_IXSM) || |
4456 | (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED)) | ||
4459 | return; | 4457 | return; |
4460 | /* TCP/UDP checksum error bit is set */ | 4458 | /* TCP/UDP checksum error bit is set */ |
4461 | if (status_err & | 4459 | if (status_err & |
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c index 1dcaa6905312..ee17a097d1ca 100644 --- a/drivers/net/igbvf/ethtool.c +++ b/drivers/net/igbvf/ethtool.c | |||
@@ -133,6 +133,24 @@ static int igbvf_set_pauseparam(struct net_device *netdev, | |||
133 | return -EOPNOTSUPP; | 133 | return -EOPNOTSUPP; |
134 | } | 134 | } |
135 | 135 | ||
136 | static u32 igbvf_get_rx_csum(struct net_device *netdev) | ||
137 | { | ||
138 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
139 | return !(adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED); | ||
140 | } | ||
141 | |||
142 | static int igbvf_set_rx_csum(struct net_device *netdev, u32 data) | ||
143 | { | ||
144 | struct igbvf_adapter *adapter = netdev_priv(netdev); | ||
145 | |||
146 | if (data) | ||
147 | adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED; | ||
148 | else | ||
149 | adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED; | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
136 | static u32 igbvf_get_tx_csum(struct net_device *netdev) | 154 | static u32 igbvf_get_tx_csum(struct net_device *netdev) |
137 | { | 155 | { |
138 | return ((netdev->features & NETIF_F_IP_CSUM) != 0); | 156 | return ((netdev->features & NETIF_F_IP_CSUM) != 0); |
@@ -150,8 +168,6 @@ static int igbvf_set_tx_csum(struct net_device *netdev, u32 data) | |||
150 | static int igbvf_set_tso(struct net_device *netdev, u32 data) | 168 | static int igbvf_set_tso(struct net_device *netdev, u32 data) |
151 | { | 169 | { |
152 | struct igbvf_adapter *adapter = netdev_priv(netdev); | 170 | struct igbvf_adapter *adapter = netdev_priv(netdev); |
153 | int i; | ||
154 | struct net_device *v_netdev; | ||
155 | 171 | ||
156 | if (data) { | 172 | if (data) { |
157 | netdev->features |= NETIF_F_TSO; | 173 | netdev->features |= NETIF_F_TSO; |
@@ -159,24 +175,10 @@ static int igbvf_set_tso(struct net_device *netdev, u32 data) | |||
159 | } else { | 175 | } else { |
160 | netdev->features &= ~NETIF_F_TSO; | 176 | netdev->features &= ~NETIF_F_TSO; |
161 | netdev->features &= ~NETIF_F_TSO6; | 177 | netdev->features &= ~NETIF_F_TSO6; |
162 | /* disable TSO on all VLANs if they're present */ | ||
163 | if (!adapter->vlgrp) | ||
164 | goto tso_out; | ||
165 | for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { | ||
166 | v_netdev = vlan_group_get_device(adapter->vlgrp, i); | ||
167 | if (!v_netdev) | ||
168 | continue; | ||
169 | |||
170 | v_netdev->features &= ~NETIF_F_TSO; | ||
171 | v_netdev->features &= ~NETIF_F_TSO6; | ||
172 | vlan_group_set_device(adapter->vlgrp, i, v_netdev); | ||
173 | } | ||
174 | } | 178 | } |
175 | 179 | ||
176 | tso_out: | ||
177 | dev_info(&adapter->pdev->dev, "TSO is %s\n", | 180 | dev_info(&adapter->pdev->dev, "TSO is %s\n", |
178 | data ? "Enabled" : "Disabled"); | 181 | data ? "Enabled" : "Disabled"); |
179 | adapter->flags |= FLAG_TSO_FORCE; | ||
180 | return 0; | 182 | return 0; |
181 | } | 183 | } |
182 | 184 | ||
@@ -517,6 +519,8 @@ static const struct ethtool_ops igbvf_ethtool_ops = { | |||
517 | .set_ringparam = igbvf_set_ringparam, | 519 | .set_ringparam = igbvf_set_ringparam, |
518 | .get_pauseparam = igbvf_get_pauseparam, | 520 | .get_pauseparam = igbvf_get_pauseparam, |
519 | .set_pauseparam = igbvf_set_pauseparam, | 521 | .set_pauseparam = igbvf_set_pauseparam, |
522 | .get_rx_csum = igbvf_get_rx_csum, | ||
523 | .set_rx_csum = igbvf_set_rx_csum, | ||
520 | .get_tx_csum = igbvf_get_tx_csum, | 524 | .get_tx_csum = igbvf_get_tx_csum, |
521 | .set_tx_csum = igbvf_set_tx_csum, | 525 | .set_tx_csum = igbvf_set_tx_csum, |
522 | .get_sg = ethtool_op_get_sg, | 526 | .get_sg = ethtool_op_get_sg, |
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h index 4bff35e46871..2ad6cd756539 100644 --- a/drivers/net/igbvf/igbvf.h +++ b/drivers/net/igbvf/igbvf.h | |||
@@ -286,11 +286,7 @@ struct igbvf_info { | |||
286 | }; | 286 | }; |
287 | 287 | ||
288 | /* hardware capability, feature, and workaround flags */ | 288 | /* hardware capability, feature, and workaround flags */ |
289 | #define FLAG_HAS_HW_VLAN_FILTER (1 << 0) | 289 | #define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) |
290 | #define FLAG_HAS_JUMBO_FRAMES (1 << 1) | ||
291 | #define FLAG_MSI_ENABLED (1 << 2) | ||
292 | #define FLAG_RX_CSUM_ENABLED (1 << 3) | ||
293 | #define FLAG_TSO_FORCE (1 << 4) | ||
294 | 290 | ||
295 | #define IGBVF_RX_DESC_ADV(R, i) \ | 291 | #define IGBVF_RX_DESC_ADV(R, i) \ |
296 | (&((((R).desc))[i].rx_desc)) | 292 | (&((((R).desc))[i].rx_desc)) |
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index b774666ad3cf..44a8eef03a74 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c | |||
@@ -58,8 +58,7 @@ static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); | |||
58 | 58 | ||
59 | static struct igbvf_info igbvf_vf_info = { | 59 | static struct igbvf_info igbvf_vf_info = { |
60 | .mac = e1000_vfadapt, | 60 | .mac = e1000_vfadapt, |
61 | .flags = FLAG_HAS_JUMBO_FRAMES | 61 | .flags = 0, |
62 | | FLAG_RX_CSUM_ENABLED, | ||
63 | .pba = 10, | 62 | .pba = 10, |
64 | .init_ops = e1000_init_function_pointers_vf, | 63 | .init_ops = e1000_init_function_pointers_vf, |
65 | }; | 64 | }; |
@@ -107,8 +106,10 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, | |||
107 | skb->ip_summed = CHECKSUM_NONE; | 106 | skb->ip_summed = CHECKSUM_NONE; |
108 | 107 | ||
109 | /* Ignore Checksum bit is set or checksum is disabled through ethtool */ | 108 | /* Ignore Checksum bit is set or checksum is disabled through ethtool */ |
110 | if ((status_err & E1000_RXD_STAT_IXSM)) | 109 | if ((status_err & E1000_RXD_STAT_IXSM) || |
110 | (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED)) | ||
111 | return; | 111 | return; |
112 | |||
112 | /* TCP/UDP checksum error bit is set */ | 113 | /* TCP/UDP checksum error bit is set */ |
113 | if (status_err & | 114 | if (status_err & |
114 | (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { | 115 | (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { |
@@ -116,6 +117,7 @@ static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, | |||
116 | adapter->hw_csum_err++; | 117 | adapter->hw_csum_err++; |
117 | return; | 118 | return; |
118 | } | 119 | } |
120 | |||
119 | /* It must be a TCP or UDP packet with a valid checksum */ | 121 | /* It must be a TCP or UDP packet with a valid checksum */ |
120 | if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) | 122 | if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) |
121 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 123 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -2351,15 +2353,6 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) | |||
2351 | return -EINVAL; | 2353 | return -EINVAL; |
2352 | } | 2354 | } |
2353 | 2355 | ||
2354 | /* Jumbo frame size limits */ | ||
2355 | if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { | ||
2356 | if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { | ||
2357 | dev_err(&adapter->pdev->dev, | ||
2358 | "Jumbo Frames not supported.\n"); | ||
2359 | return -EINVAL; | ||
2360 | } | ||
2361 | } | ||
2362 | |||
2363 | #define MAX_STD_JUMBO_FRAME_SIZE 9234 | 2356 | #define MAX_STD_JUMBO_FRAME_SIZE 9234 |
2364 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { | 2357 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { |
2365 | dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); | 2358 | dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index cb9ecc48f6d0..04cb81a739c2 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -1140,6 +1140,8 @@ ixgb_watchdog(unsigned long data) | |||
1140 | * to get done, so reset controller to flush Tx. | 1140 | * to get done, so reset controller to flush Tx. |
1141 | * (Do the reset outside of interrupt context). */ | 1141 | * (Do the reset outside of interrupt context). */ |
1142 | schedule_work(&adapter->tx_timeout_task); | 1142 | schedule_work(&adapter->tx_timeout_task); |
1143 | /* return immediately since reset is imminent */ | ||
1144 | return; | ||
1143 | } | 1145 | } |
1144 | } | 1146 | } |
1145 | 1147 | ||
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 4b44a8efac8c..d743d0ed5c2e 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h | |||
@@ -187,6 +187,7 @@ struct ixgbe_q_vector { | |||
187 | u8 tx_itr; | 187 | u8 tx_itr; |
188 | u8 rx_itr; | 188 | u8 rx_itr; |
189 | u32 eitr; | 189 | u32 eitr; |
190 | u32 v_idx; /* vector index in list */ | ||
190 | }; | 191 | }; |
191 | 192 | ||
192 | /* Helper macros to switch between ints/sec and what the register uses. | 193 | /* Helper macros to switch between ints/sec and what the register uses. |
@@ -230,7 +231,7 @@ struct ixgbe_adapter { | |||
230 | struct vlan_group *vlgrp; | 231 | struct vlan_group *vlgrp; |
231 | u16 bd_number; | 232 | u16 bd_number; |
232 | struct work_struct reset_task; | 233 | struct work_struct reset_task; |
233 | struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS]; | 234 | struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; |
234 | char name[MAX_MSIX_COUNT][IFNAMSIZ + 9]; | 235 | char name[MAX_MSIX_COUNT][IFNAMSIZ + 9]; |
235 | struct ixgbe_dcb_config dcb_cfg; | 236 | struct ixgbe_dcb_config dcb_cfg; |
236 | struct ixgbe_dcb_config temp_dcb_cfg; | 237 | struct ixgbe_dcb_config temp_dcb_cfg; |
@@ -367,10 +368,8 @@ extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *) | |||
367 | extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); | 368 | extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); |
368 | extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); | 369 | extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); |
369 | extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); | 370 | extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); |
370 | extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter); | ||
371 | extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); | 371 | extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); |
372 | void ixgbe_napi_add_all(struct ixgbe_adapter *adapter); | 372 | extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); |
373 | void ixgbe_napi_del_all(struct ixgbe_adapter *adapter); | ||
374 | extern void ixgbe_write_eitr(struct ixgbe_adapter *, int, u32); | 373 | extern void ixgbe_write_eitr(struct ixgbe_adapter *, int, u32); |
375 | 374 | ||
376 | #endif /* _IXGBE_H_ */ | 375 | #endif /* _IXGBE_H_ */ |
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index bd0a0c276952..99e0c106e671 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c | |||
@@ -124,13 +124,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) | |||
124 | 124 | ||
125 | if (netif_running(netdev)) | 125 | if (netif_running(netdev)) |
126 | netdev->netdev_ops->ndo_stop(netdev); | 126 | netdev->netdev_ops->ndo_stop(netdev); |
127 | ixgbe_reset_interrupt_capability(adapter); | 127 | ixgbe_clear_interrupt_scheme(adapter); |
128 | ixgbe_napi_del_all(adapter); | ||
129 | INIT_LIST_HEAD(&netdev->napi_list); | ||
130 | kfree(adapter->tx_ring); | ||
131 | kfree(adapter->rx_ring); | ||
132 | adapter->tx_ring = NULL; | ||
133 | adapter->rx_ring = NULL; | ||
134 | 128 | ||
135 | adapter->hw.fc.requested_mode = ixgbe_fc_pfc; | 129 | adapter->hw.fc.requested_mode = ixgbe_fc_pfc; |
136 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | 130 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; |
@@ -144,13 +138,7 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) | |||
144 | adapter->hw.fc.requested_mode = ixgbe_fc_default; | 138 | adapter->hw.fc.requested_mode = ixgbe_fc_default; |
145 | if (netif_running(netdev)) | 139 | if (netif_running(netdev)) |
146 | netdev->netdev_ops->ndo_stop(netdev); | 140 | netdev->netdev_ops->ndo_stop(netdev); |
147 | ixgbe_reset_interrupt_capability(adapter); | 141 | ixgbe_clear_interrupt_scheme(adapter); |
148 | ixgbe_napi_del_all(adapter); | ||
149 | INIT_LIST_HEAD(&netdev->napi_list); | ||
150 | kfree(adapter->tx_ring); | ||
151 | kfree(adapter->rx_ring); | ||
152 | adapter->tx_ring = NULL; | ||
153 | adapter->rx_ring = NULL; | ||
154 | 142 | ||
155 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | 143 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; |
156 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | 144 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; |
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index d822c92058c3..c0167d617b1e 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c | |||
@@ -1114,7 +1114,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, | |||
1114 | } | 1114 | } |
1115 | 1115 | ||
1116 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { | 1116 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { |
1117 | struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; | 1117 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; |
1118 | if (q_vector->txr_count && !q_vector->rxr_count) | 1118 | if (q_vector->txr_count && !q_vector->rxr_count) |
1119 | /* tx vector gets half the rate */ | 1119 | /* tx vector gets half the rate */ |
1120 | q_vector->eitr = (adapter->eitr_param >> 1); | 1120 | q_vector->eitr = (adapter->eitr_param >> 1); |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index be5eabce9e35..efb175b1e438 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -461,6 +461,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) | |||
461 | **/ | 461 | **/ |
462 | static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, | 462 | static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, |
463 | struct sk_buff *skb, u8 status, | 463 | struct sk_buff *skb, u8 status, |
464 | struct ixgbe_ring *ring, | ||
464 | union ixgbe_adv_rx_desc *rx_desc) | 465 | union ixgbe_adv_rx_desc *rx_desc) |
465 | { | 466 | { |
466 | struct ixgbe_adapter *adapter = q_vector->adapter; | 467 | struct ixgbe_adapter *adapter = q_vector->adapter; |
@@ -468,7 +469,7 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, | |||
468 | bool is_vlan = (status & IXGBE_RXD_STAT_VP); | 469 | bool is_vlan = (status & IXGBE_RXD_STAT_VP); |
469 | u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); | 470 | u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); |
470 | 471 | ||
471 | skb_record_rx_queue(skb, q_vector - &adapter->q_vector[0]); | 472 | skb_record_rx_queue(skb, ring->queue_index); |
472 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { | 473 | if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { |
473 | if (adapter->vlgrp && is_vlan && (tag != 0)) | 474 | if (adapter->vlgrp && is_vlan && (tag != 0)) |
474 | vlan_gro_receive(napi, adapter->vlgrp, tag, skb); | 475 | vlan_gro_receive(napi, adapter->vlgrp, tag, skb); |
@@ -782,7 +783,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
782 | total_rx_packets++; | 783 | total_rx_packets++; |
783 | 784 | ||
784 | skb->protocol = eth_type_trans(skb, adapter->netdev); | 785 | skb->protocol = eth_type_trans(skb, adapter->netdev); |
785 | ixgbe_receive_skb(q_vector, skb, staterr, rx_desc); | 786 | ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); |
786 | 787 | ||
787 | next_desc: | 788 | next_desc: |
788 | rx_desc->wb.upper.status_error = 0; | 789 | rx_desc->wb.upper.status_error = 0; |
@@ -835,7 +836,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) | |||
835 | * corresponding register. | 836 | * corresponding register. |
836 | */ | 837 | */ |
837 | for (v_idx = 0; v_idx < q_vectors; v_idx++) { | 838 | for (v_idx = 0; v_idx < q_vectors; v_idx++) { |
838 | q_vector = &adapter->q_vector[v_idx]; | 839 | q_vector = adapter->q_vector[v_idx]; |
839 | /* XXX for_each_bit(...) */ | 840 | /* XXX for_each_bit(...) */ |
840 | r_idx = find_first_bit(q_vector->rxr_idx, | 841 | r_idx = find_first_bit(q_vector->rxr_idx, |
841 | adapter->num_rx_queues); | 842 | adapter->num_rx_queues); |
@@ -984,8 +985,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) | |||
984 | struct ixgbe_adapter *adapter = q_vector->adapter; | 985 | struct ixgbe_adapter *adapter = q_vector->adapter; |
985 | u32 new_itr; | 986 | u32 new_itr; |
986 | u8 current_itr, ret_itr; | 987 | u8 current_itr, ret_itr; |
987 | int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) / | 988 | int i, r_idx, v_idx = q_vector->v_idx; |
988 | sizeof(struct ixgbe_q_vector); | ||
989 | struct ixgbe_ring *rx_ring, *tx_ring; | 989 | struct ixgbe_ring *rx_ring, *tx_ring; |
990 | 990 | ||
991 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); | 991 | r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); |
@@ -1303,19 +1303,21 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) | |||
1303 | static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, | 1303 | static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, |
1304 | int r_idx) | 1304 | int r_idx) |
1305 | { | 1305 | { |
1306 | a->q_vector[v_idx].adapter = a; | 1306 | struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; |
1307 | set_bit(r_idx, a->q_vector[v_idx].rxr_idx); | 1307 | |
1308 | a->q_vector[v_idx].rxr_count++; | 1308 | set_bit(r_idx, q_vector->rxr_idx); |
1309 | a->rx_ring[r_idx].v_idx = 1 << v_idx; | 1309 | q_vector->rxr_count++; |
1310 | a->rx_ring[r_idx].v_idx = (u64)1 << v_idx; | ||
1310 | } | 1311 | } |
1311 | 1312 | ||
1312 | static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, | 1313 | static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, |
1313 | int r_idx) | 1314 | int t_idx) |
1314 | { | 1315 | { |
1315 | a->q_vector[v_idx].adapter = a; | 1316 | struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; |
1316 | set_bit(r_idx, a->q_vector[v_idx].txr_idx); | 1317 | |
1317 | a->q_vector[v_idx].txr_count++; | 1318 | set_bit(t_idx, q_vector->txr_idx); |
1318 | a->tx_ring[r_idx].v_idx = 1 << v_idx; | 1319 | q_vector->txr_count++; |
1320 | a->tx_ring[t_idx].v_idx = (u64)1 << v_idx; | ||
1319 | } | 1321 | } |
1320 | 1322 | ||
1321 | /** | 1323 | /** |
@@ -1411,7 +1413,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |||
1411 | (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ | 1413 | (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ |
1412 | &ixgbe_msix_clean_many) | 1414 | &ixgbe_msix_clean_many) |
1413 | for (vector = 0; vector < q_vectors; vector++) { | 1415 | for (vector = 0; vector < q_vectors; vector++) { |
1414 | handler = SET_HANDLER(&adapter->q_vector[vector]); | 1416 | handler = SET_HANDLER(adapter->q_vector[vector]); |
1415 | 1417 | ||
1416 | if(handler == &ixgbe_msix_clean_rx) { | 1418 | if(handler == &ixgbe_msix_clean_rx) { |
1417 | sprintf(adapter->name[vector], "%s-%s-%d", | 1419 | sprintf(adapter->name[vector], "%s-%s-%d", |
@@ -1427,7 +1429,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |||
1427 | 1429 | ||
1428 | err = request_irq(adapter->msix_entries[vector].vector, | 1430 | err = request_irq(adapter->msix_entries[vector].vector, |
1429 | handler, 0, adapter->name[vector], | 1431 | handler, 0, adapter->name[vector], |
1430 | &(adapter->q_vector[vector])); | 1432 | adapter->q_vector[vector]); |
1431 | if (err) { | 1433 | if (err) { |
1432 | DPRINTK(PROBE, ERR, | 1434 | DPRINTK(PROBE, ERR, |
1433 | "request_irq failed for MSIX interrupt " | 1435 | "request_irq failed for MSIX interrupt " |
@@ -1450,7 +1452,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) | |||
1450 | free_queue_irqs: | 1452 | free_queue_irqs: |
1451 | for (i = vector - 1; i >= 0; i--) | 1453 | for (i = vector - 1; i >= 0; i--) |
1452 | free_irq(adapter->msix_entries[--vector].vector, | 1454 | free_irq(adapter->msix_entries[--vector].vector, |
1453 | &(adapter->q_vector[i])); | 1455 | adapter->q_vector[i]); |
1454 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | 1456 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; |
1455 | pci_disable_msix(adapter->pdev); | 1457 | pci_disable_msix(adapter->pdev); |
1456 | kfree(adapter->msix_entries); | 1458 | kfree(adapter->msix_entries); |
@@ -1461,7 +1463,7 @@ out: | |||
1461 | 1463 | ||
1462 | static void ixgbe_set_itr(struct ixgbe_adapter *adapter) | 1464 | static void ixgbe_set_itr(struct ixgbe_adapter *adapter) |
1463 | { | 1465 | { |
1464 | struct ixgbe_q_vector *q_vector = adapter->q_vector; | 1466 | struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; |
1465 | u8 current_itr; | 1467 | u8 current_itr; |
1466 | u32 new_itr = q_vector->eitr; | 1468 | u32 new_itr = q_vector->eitr; |
1467 | struct ixgbe_ring *rx_ring = &adapter->rx_ring[0]; | 1469 | struct ixgbe_ring *rx_ring = &adapter->rx_ring[0]; |
@@ -1539,6 +1541,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
1539 | struct net_device *netdev = data; | 1541 | struct net_device *netdev = data; |
1540 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 1542 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1541 | struct ixgbe_hw *hw = &adapter->hw; | 1543 | struct ixgbe_hw *hw = &adapter->hw; |
1544 | struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; | ||
1542 | u32 eicr; | 1545 | u32 eicr; |
1543 | 1546 | ||
1544 | /* | 1547 | /* |
@@ -1566,13 +1569,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data) | |||
1566 | 1569 | ||
1567 | ixgbe_check_fan_failure(adapter, eicr); | 1570 | ixgbe_check_fan_failure(adapter, eicr); |
1568 | 1571 | ||
1569 | if (napi_schedule_prep(&adapter->q_vector[0].napi)) { | 1572 | if (napi_schedule_prep(&(q_vector->napi))) { |
1570 | adapter->tx_ring[0].total_packets = 0; | 1573 | adapter->tx_ring[0].total_packets = 0; |
1571 | adapter->tx_ring[0].total_bytes = 0; | 1574 | adapter->tx_ring[0].total_bytes = 0; |
1572 | adapter->rx_ring[0].total_packets = 0; | 1575 | adapter->rx_ring[0].total_packets = 0; |
1573 | adapter->rx_ring[0].total_bytes = 0; | 1576 | adapter->rx_ring[0].total_bytes = 0; |
1574 | /* would disable interrupts here but EIAM disabled it */ | 1577 | /* would disable interrupts here but EIAM disabled it */ |
1575 | __napi_schedule(&adapter->q_vector[0].napi); | 1578 | __napi_schedule(&(q_vector->napi)); |
1576 | } | 1579 | } |
1577 | 1580 | ||
1578 | return IRQ_HANDLED; | 1581 | return IRQ_HANDLED; |
@@ -1583,7 +1586,7 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) | |||
1583 | int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | 1586 | int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; |
1584 | 1587 | ||
1585 | for (i = 0; i < q_vectors; i++) { | 1588 | for (i = 0; i < q_vectors; i++) { |
1586 | struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; | 1589 | struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; |
1587 | bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); | 1590 | bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); |
1588 | bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); | 1591 | bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); |
1589 | q_vector->rxr_count = 0; | 1592 | q_vector->rxr_count = 0; |
@@ -1634,7 +1637,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) | |||
1634 | i--; | 1637 | i--; |
1635 | for (; i >= 0; i--) { | 1638 | for (; i >= 0; i--) { |
1636 | free_irq(adapter->msix_entries[i].vector, | 1639 | free_irq(adapter->msix_entries[i].vector, |
1637 | &(adapter->q_vector[i])); | 1640 | adapter->q_vector[i]); |
1638 | } | 1641 | } |
1639 | 1642 | ||
1640 | ixgbe_reset_q_vectors(adapter); | 1643 | ixgbe_reset_q_vectors(adapter); |
@@ -1737,7 +1740,18 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) | |||
1737 | unsigned long mask; | 1740 | unsigned long mask; |
1738 | 1741 | ||
1739 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { | 1742 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
1740 | queue0 = index; | 1743 | if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { |
1744 | int dcb_i = adapter->ring_feature[RING_F_DCB].indices; | ||
1745 | if (dcb_i == 8) | ||
1746 | queue0 = index >> 4; | ||
1747 | else if (dcb_i == 4) | ||
1748 | queue0 = index >> 5; | ||
1749 | else | ||
1750 | dev_err(&adapter->pdev->dev, "Invalid DCB " | ||
1751 | "configuration\n"); | ||
1752 | } else { | ||
1753 | queue0 = index; | ||
1754 | } | ||
1741 | } else { | 1755 | } else { |
1742 | mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask; | 1756 | mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask; |
1743 | queue0 = index & mask; | 1757 | queue0 = index & mask; |
@@ -1751,28 +1765,20 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) | |||
1751 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; | 1765 | srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; |
1752 | srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; | 1766 | srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; |
1753 | 1767 | ||
1768 | srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & | ||
1769 | IXGBE_SRRCTL_BSIZEHDR_MASK; | ||
1770 | |||
1754 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | 1771 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { |
1755 | u16 bufsz = IXGBE_RXBUFFER_2048; | 1772 | #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER |
1756 | /* grow the amount we can receive on large page machines */ | 1773 | srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
1757 | if (bufsz < (PAGE_SIZE / 2)) | 1774 | #else |
1758 | bufsz = (PAGE_SIZE / 2); | 1775 | srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
1759 | /* cap the bufsz at our largest descriptor size */ | 1776 | #endif |
1760 | bufsz = min((u16)IXGBE_MAX_RXBUFFER, bufsz); | ||
1761 | |||
1762 | srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; | ||
1763 | srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; | 1777 | srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; |
1764 | srrctl |= ((IXGBE_RX_HDR_SIZE << | ||
1765 | IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & | ||
1766 | IXGBE_SRRCTL_BSIZEHDR_MASK); | ||
1767 | } else { | 1778 | } else { |
1779 | srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> | ||
1780 | IXGBE_SRRCTL_BSIZEPKT_SHIFT; | ||
1768 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; | 1781 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; |
1769 | |||
1770 | if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) | ||
1771 | srrctl |= IXGBE_RXBUFFER_2048 >> | ||
1772 | IXGBE_SRRCTL_BSIZEPKT_SHIFT; | ||
1773 | else | ||
1774 | srrctl |= rx_ring->rx_buf_len >> | ||
1775 | IXGBE_SRRCTL_BSIZEPKT_SHIFT; | ||
1776 | } | 1782 | } |
1777 | 1783 | ||
1778 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); | 1784 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); |
@@ -1812,7 +1818,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) | |||
1812 | u32 psrtype = IXGBE_PSRTYPE_TCPHDR | | 1818 | u32 psrtype = IXGBE_PSRTYPE_TCPHDR | |
1813 | IXGBE_PSRTYPE_UDPHDR | | 1819 | IXGBE_PSRTYPE_UDPHDR | |
1814 | IXGBE_PSRTYPE_IPV4HDR | | 1820 | IXGBE_PSRTYPE_IPV4HDR | |
1815 | IXGBE_PSRTYPE_IPV6HDR; | 1821 | IXGBE_PSRTYPE_IPV6HDR | |
1822 | IXGBE_PSRTYPE_L2HDR; | ||
1816 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); | 1823 | IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); |
1817 | } | 1824 | } |
1818 | } else { | 1825 | } else { |
@@ -2135,7 +2142,7 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) | |||
2135 | 2142 | ||
2136 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { | 2143 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { |
2137 | struct napi_struct *napi; | 2144 | struct napi_struct *napi; |
2138 | q_vector = &adapter->q_vector[q_idx]; | 2145 | q_vector = adapter->q_vector[q_idx]; |
2139 | if (!q_vector->rxr_count) | 2146 | if (!q_vector->rxr_count) |
2140 | continue; | 2147 | continue; |
2141 | napi = &q_vector->napi; | 2148 | napi = &q_vector->napi; |
@@ -2158,7 +2165,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) | |||
2158 | q_vectors = 1; | 2165 | q_vectors = 1; |
2159 | 2166 | ||
2160 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { | 2167 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { |
2161 | q_vector = &adapter->q_vector[q_idx]; | 2168 | q_vector = adapter->q_vector[q_idx]; |
2162 | if (!q_vector->rxr_count) | 2169 | if (!q_vector->rxr_count) |
2163 | continue; | 2170 | continue; |
2164 | napi_disable(&q_vector->napi); | 2171 | napi_disable(&q_vector->napi); |
@@ -2451,6 +2458,17 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) | |||
2451 | ixgbe_irq_enable(adapter); | 2458 | ixgbe_irq_enable(adapter); |
2452 | 2459 | ||
2453 | /* | 2460 | /* |
2461 | * If this adapter has a fan, check to see if we had a failure | ||
2462 | * before we enabled the interrupt. | ||
2463 | */ | ||
2464 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { | ||
2465 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); | ||
2466 | if (esdp & IXGBE_ESDP_SDP1) | ||
2467 | DPRINTK(DRV, CRIT, | ||
2468 | "Fan has stopped, replace the adapter\n"); | ||
2469 | } | ||
2470 | |||
2471 | /* | ||
2454 | * For hot-pluggable SFP+ devices, a new SFP+ module may have | 2472 | * For hot-pluggable SFP+ devices, a new SFP+ module may have |
2455 | * arrived before interrupts were enabled. We need to kick off | 2473 | * arrived before interrupts were enabled. We need to kick off |
2456 | * the SFP+ module setup first, then try to bring up link. | 2474 | * the SFP+ module setup first, then try to bring up link. |
@@ -2498,8 +2516,6 @@ int ixgbe_up(struct ixgbe_adapter *adapter) | |||
2498 | /* hardware has been reset, we need to reload some things */ | 2516 | /* hardware has been reset, we need to reload some things */ |
2499 | ixgbe_configure(adapter); | 2517 | ixgbe_configure(adapter); |
2500 | 2518 | ||
2501 | ixgbe_napi_add_all(adapter); | ||
2502 | |||
2503 | return ixgbe_up_complete(adapter); | 2519 | return ixgbe_up_complete(adapter); |
2504 | } | 2520 | } |
2505 | 2521 | ||
@@ -2877,9 +2893,6 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, | |||
2877 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | 2893 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; |
2878 | kfree(adapter->msix_entries); | 2894 | kfree(adapter->msix_entries); |
2879 | adapter->msix_entries = NULL; | 2895 | adapter->msix_entries = NULL; |
2880 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | ||
2881 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | ||
2882 | ixgbe_set_num_queues(adapter); | ||
2883 | } else { | 2896 | } else { |
2884 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ | 2897 | adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ |
2885 | /* | 2898 | /* |
@@ -3103,31 +3116,20 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) | |||
3103 | * mean we disable MSI-X capabilities of the adapter. */ | 3116 | * mean we disable MSI-X capabilities of the adapter. */ |
3104 | adapter->msix_entries = kcalloc(v_budget, | 3117 | adapter->msix_entries = kcalloc(v_budget, |
3105 | sizeof(struct msix_entry), GFP_KERNEL); | 3118 | sizeof(struct msix_entry), GFP_KERNEL); |
3106 | if (!adapter->msix_entries) { | 3119 | if (adapter->msix_entries) { |
3107 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; | 3120 | for (vector = 0; vector < v_budget; vector++) |
3108 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; | 3121 | adapter->msix_entries[vector].entry = vector; |
3109 | ixgbe_set_num_queues(adapter); | ||
3110 | kfree(adapter->tx_ring); | ||
3111 | kfree(adapter->rx_ring); | ||
3112 | err = ixgbe_alloc_queues(adapter); | ||
3113 | if (err) { | ||
3114 | DPRINTK(PROBE, ERR, "Unable to allocate memory " | ||
3115 | "for queues\n"); | ||
3116 | goto out; | ||
3117 | } | ||
3118 | |||
3119 | goto try_msi; | ||
3120 | } | ||
3121 | 3122 | ||
3122 | for (vector = 0; vector < v_budget; vector++) | 3123 | ixgbe_acquire_msix_vectors(adapter, v_budget); |
3123 | adapter->msix_entries[vector].entry = vector; | ||
3124 | 3124 | ||
3125 | ixgbe_acquire_msix_vectors(adapter, v_budget); | 3125 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) |
3126 | goto out; | ||
3127 | } | ||
3126 | 3128 | ||
3127 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) | 3129 | adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; |
3128 | goto out; | 3130 | adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; |
3131 | ixgbe_set_num_queues(adapter); | ||
3129 | 3132 | ||
3130 | try_msi: | ||
3131 | err = pci_enable_msi(adapter->pdev); | 3133 | err = pci_enable_msi(adapter->pdev); |
3132 | if (!err) { | 3134 | if (!err) { |
3133 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; | 3135 | adapter->flags |= IXGBE_FLAG_MSI_ENABLED; |
@@ -3142,6 +3144,87 @@ out: | |||
3142 | return err; | 3144 | return err; |
3143 | } | 3145 | } |
3144 | 3146 | ||
3147 | /** | ||
3148 | * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors | ||
3149 | * @adapter: board private structure to initialize | ||
3150 | * | ||
3151 | * We allocate one q_vector per queue interrupt. If allocation fails we | ||
3152 | * return -ENOMEM. | ||
3153 | **/ | ||
3154 | static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | ||
3155 | { | ||
3156 | int q_idx, num_q_vectors; | ||
3157 | struct ixgbe_q_vector *q_vector; | ||
3158 | int napi_vectors; | ||
3159 | int (*poll)(struct napi_struct *, int); | ||
3160 | |||
3161 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
3162 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
3163 | napi_vectors = adapter->num_rx_queues; | ||
3164 | poll = &ixgbe_clean_rxonly; | ||
3165 | } else { | ||
3166 | num_q_vectors = 1; | ||
3167 | napi_vectors = 1; | ||
3168 | poll = &ixgbe_poll; | ||
3169 | } | ||
3170 | |||
3171 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { | ||
3172 | q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL); | ||
3173 | if (!q_vector) | ||
3174 | goto err_out; | ||
3175 | q_vector->adapter = adapter; | ||
3176 | q_vector->v_idx = q_idx; | ||
3177 | q_vector->eitr = adapter->eitr_param; | ||
3178 | if (q_idx < napi_vectors) | ||
3179 | netif_napi_add(adapter->netdev, &q_vector->napi, | ||
3180 | (*poll), 64); | ||
3181 | adapter->q_vector[q_idx] = q_vector; | ||
3182 | } | ||
3183 | |||
3184 | return 0; | ||
3185 | |||
3186 | err_out: | ||
3187 | while (q_idx) { | ||
3188 | q_idx--; | ||
3189 | q_vector = adapter->q_vector[q_idx]; | ||
3190 | netif_napi_del(&q_vector->napi); | ||
3191 | kfree(q_vector); | ||
3192 | adapter->q_vector[q_idx] = NULL; | ||
3193 | } | ||
3194 | return -ENOMEM; | ||
3195 | } | ||
3196 | |||
3197 | /** | ||
3198 | * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors | ||
3199 | * @adapter: board private structure to initialize | ||
3200 | * | ||
3201 | * This function frees the memory allocated to the q_vectors. In addition if | ||
3202 | * NAPI is enabled it will delete any references to the NAPI struct prior | ||
3203 | * to freeing the q_vector. | ||
3204 | **/ | ||
3205 | static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) | ||
3206 | { | ||
3207 | int q_idx, num_q_vectors; | ||
3208 | int napi_vectors; | ||
3209 | |||
3210 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
3211 | num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
3212 | napi_vectors = adapter->num_rx_queues; | ||
3213 | } else { | ||
3214 | num_q_vectors = 1; | ||
3215 | napi_vectors = 1; | ||
3216 | } | ||
3217 | |||
3218 | for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { | ||
3219 | struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx]; | ||
3220 | |||
3221 | adapter->q_vector[q_idx] = NULL; | ||
3222 | if (q_idx < napi_vectors) | ||
3223 | netif_napi_del(&q_vector->napi); | ||
3224 | kfree(q_vector); | ||
3225 | } | ||
3226 | } | ||
3227 | |||
3145 | void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) | 3228 | void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) |
3146 | { | 3229 | { |
3147 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | 3230 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { |
@@ -3173,18 +3256,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | |||
3173 | /* Number of supported queues */ | 3256 | /* Number of supported queues */ |
3174 | ixgbe_set_num_queues(adapter); | 3257 | ixgbe_set_num_queues(adapter); |
3175 | 3258 | ||
3176 | err = ixgbe_alloc_queues(adapter); | ||
3177 | if (err) { | ||
3178 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); | ||
3179 | goto err_alloc_queues; | ||
3180 | } | ||
3181 | |||
3182 | err = ixgbe_set_interrupt_capability(adapter); | 3259 | err = ixgbe_set_interrupt_capability(adapter); |
3183 | if (err) { | 3260 | if (err) { |
3184 | DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); | 3261 | DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n"); |
3185 | goto err_set_interrupt; | 3262 | goto err_set_interrupt; |
3186 | } | 3263 | } |
3187 | 3264 | ||
3265 | err = ixgbe_alloc_q_vectors(adapter); | ||
3266 | if (err) { | ||
3267 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queue " | ||
3268 | "vectors\n"); | ||
3269 | goto err_alloc_q_vectors; | ||
3270 | } | ||
3271 | |||
3272 | err = ixgbe_alloc_queues(adapter); | ||
3273 | if (err) { | ||
3274 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); | ||
3275 | goto err_alloc_queues; | ||
3276 | } | ||
3277 | |||
3188 | DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " | 3278 | DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " |
3189 | "Tx Queue count = %u\n", | 3279 | "Tx Queue count = %u\n", |
3190 | (adapter->num_rx_queues > 1) ? "Enabled" : | 3280 | (adapter->num_rx_queues > 1) ? "Enabled" : |
@@ -3194,11 +3284,30 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | |||
3194 | 3284 | ||
3195 | return 0; | 3285 | return 0; |
3196 | 3286 | ||
3287 | err_alloc_queues: | ||
3288 | ixgbe_free_q_vectors(adapter); | ||
3289 | err_alloc_q_vectors: | ||
3290 | ixgbe_reset_interrupt_capability(adapter); | ||
3197 | err_set_interrupt: | 3291 | err_set_interrupt: |
3292 | return err; | ||
3293 | } | ||
3294 | |||
3295 | /** | ||
3296 | * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings | ||
3297 | * @adapter: board private structure to clear interrupt scheme on | ||
3298 | * | ||
3299 | * We go through and clear interrupt specific resources and reset the structure | ||
3300 | * to pre-load conditions | ||
3301 | **/ | ||
3302 | void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | ||
3303 | { | ||
3198 | kfree(adapter->tx_ring); | 3304 | kfree(adapter->tx_ring); |
3199 | kfree(adapter->rx_ring); | 3305 | kfree(adapter->rx_ring); |
3200 | err_alloc_queues: | 3306 | adapter->tx_ring = NULL; |
3201 | return err; | 3307 | adapter->rx_ring = NULL; |
3308 | |||
3309 | ixgbe_free_q_vectors(adapter); | ||
3310 | ixgbe_reset_interrupt_capability(adapter); | ||
3202 | } | 3311 | } |
3203 | 3312 | ||
3204 | /** | 3313 | /** |
@@ -3284,9 +3393,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) | |||
3284 | adapter->ring_feature[RING_F_RSS].indices = rss; | 3393 | adapter->ring_feature[RING_F_RSS].indices = rss; |
3285 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; | 3394 | adapter->flags |= IXGBE_FLAG_RSS_ENABLED; |
3286 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; | 3395 | adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; |
3287 | if (hw->mac.type == ixgbe_mac_82598EB) | 3396 | if (hw->mac.type == ixgbe_mac_82598EB) { |
3397 | if (hw->device_id == IXGBE_DEV_ID_82598AT) | ||
3398 | adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; | ||
3288 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; | 3399 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; |
3289 | else if (hw->mac.type == ixgbe_mac_82599EB) { | 3400 | } else if (hw->mac.type == ixgbe_mac_82599EB) { |
3290 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; | 3401 | adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; |
3291 | adapter->flags |= IXGBE_FLAG_RSC_CAPABLE; | 3402 | adapter->flags |= IXGBE_FLAG_RSC_CAPABLE; |
3292 | adapter->flags |= IXGBE_FLAG_RSC_ENABLED; | 3403 | adapter->flags |= IXGBE_FLAG_RSC_ENABLED; |
@@ -3619,8 +3730,6 @@ static int ixgbe_open(struct net_device *netdev) | |||
3619 | 3730 | ||
3620 | ixgbe_configure(adapter); | 3731 | ixgbe_configure(adapter); |
3621 | 3732 | ||
3622 | ixgbe_napi_add_all(adapter); | ||
3623 | |||
3624 | err = ixgbe_request_irq(adapter); | 3733 | err = ixgbe_request_irq(adapter); |
3625 | if (err) | 3734 | if (err) |
3626 | goto err_req_irq; | 3735 | goto err_req_irq; |
@@ -3672,55 +3781,6 @@ static int ixgbe_close(struct net_device *netdev) | |||
3672 | return 0; | 3781 | return 0; |
3673 | } | 3782 | } |
3674 | 3783 | ||
3675 | /** | ||
3676 | * ixgbe_napi_add_all - prep napi structs for use | ||
3677 | * @adapter: private struct | ||
3678 | * | ||
3679 | * helper function to napi_add each possible q_vector->napi | ||
3680 | */ | ||
3681 | void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) | ||
3682 | { | ||
3683 | int q_idx, q_vectors; | ||
3684 | struct net_device *netdev = adapter->netdev; | ||
3685 | int (*poll)(struct napi_struct *, int); | ||
3686 | |||
3687 | /* check if we already have our netdev->napi_list populated */ | ||
3688 | if (&netdev->napi_list != netdev->napi_list.next) | ||
3689 | return; | ||
3690 | |||
3691 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | ||
3692 | poll = &ixgbe_clean_rxonly; | ||
3693 | /* Only enable as many vectors as we have rx queues. */ | ||
3694 | q_vectors = adapter->num_rx_queues; | ||
3695 | } else { | ||
3696 | poll = &ixgbe_poll; | ||
3697 | /* only one q_vector for legacy modes */ | ||
3698 | q_vectors = 1; | ||
3699 | } | ||
3700 | |||
3701 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { | ||
3702 | struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx]; | ||
3703 | netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); | ||
3704 | } | ||
3705 | } | ||
3706 | |||
3707 | void ixgbe_napi_del_all(struct ixgbe_adapter *adapter) | ||
3708 | { | ||
3709 | int q_idx; | ||
3710 | int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; | ||
3711 | |||
3712 | /* legacy and MSI only use one vector */ | ||
3713 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | ||
3714 | q_vectors = 1; | ||
3715 | |||
3716 | for (q_idx = 0; q_idx < q_vectors; q_idx++) { | ||
3717 | struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx]; | ||
3718 | if (!q_vector->rxr_count) | ||
3719 | continue; | ||
3720 | netif_napi_del(&q_vector->napi); | ||
3721 | } | ||
3722 | } | ||
3723 | |||
3724 | #ifdef CONFIG_PM | 3784 | #ifdef CONFIG_PM |
3725 | static int ixgbe_resume(struct pci_dev *pdev) | 3785 | static int ixgbe_resume(struct pci_dev *pdev) |
3726 | { | 3786 | { |
@@ -3730,7 +3790,8 @@ static int ixgbe_resume(struct pci_dev *pdev) | |||
3730 | 3790 | ||
3731 | pci_set_power_state(pdev, PCI_D0); | 3791 | pci_set_power_state(pdev, PCI_D0); |
3732 | pci_restore_state(pdev); | 3792 | pci_restore_state(pdev); |
3733 | err = pci_enable_device(pdev); | 3793 | |
3794 | err = pci_enable_device_mem(pdev); | ||
3734 | if (err) { | 3795 | if (err) { |
3735 | printk(KERN_ERR "ixgbe: Cannot enable PCI device from " | 3796 | printk(KERN_ERR "ixgbe: Cannot enable PCI device from " |
3736 | "suspend\n"); | 3797 | "suspend\n"); |
@@ -3782,11 +3843,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) | |||
3782 | ixgbe_free_all_tx_resources(adapter); | 3843 | ixgbe_free_all_tx_resources(adapter); |
3783 | ixgbe_free_all_rx_resources(adapter); | 3844 | ixgbe_free_all_rx_resources(adapter); |
3784 | } | 3845 | } |
3785 | ixgbe_reset_interrupt_capability(adapter); | 3846 | ixgbe_clear_interrupt_scheme(adapter); |
3786 | ixgbe_napi_del_all(adapter); | ||
3787 | INIT_LIST_HEAD(&netdev->napi_list); | ||
3788 | kfree(adapter->tx_ring); | ||
3789 | kfree(adapter->rx_ring); | ||
3790 | 3847 | ||
3791 | #ifdef CONFIG_PM | 3848 | #ifdef CONFIG_PM |
3792 | retval = pci_save_state(pdev); | 3849 | retval = pci_save_state(pdev); |
@@ -4000,7 +4057,7 @@ static void ixgbe_watchdog(unsigned long data) | |||
4000 | int i; | 4057 | int i; |
4001 | 4058 | ||
4002 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) | 4059 | for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) |
4003 | eics |= (1 << i); | 4060 | eics |= ((u64)1 << i); |
4004 | 4061 | ||
4005 | /* Cause software interrupt to ensure rx rings are cleaned */ | 4062 | /* Cause software interrupt to ensure rx rings are cleaned */ |
4006 | switch (hw->mac.type) { | 4063 | switch (hw->mac.type) { |
@@ -4739,7 +4796,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4739 | int i, err, pci_using_dac; | 4796 | int i, err, pci_using_dac; |
4740 | u32 part_num, eec; | 4797 | u32 part_num, eec; |
4741 | 4798 | ||
4742 | err = pci_enable_device(pdev); | 4799 | err = pci_enable_device_mem(pdev); |
4743 | if (err) | 4800 | if (err) |
4744 | return err; | 4801 | return err; |
4745 | 4802 | ||
@@ -4759,9 +4816,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4759 | pci_using_dac = 0; | 4816 | pci_using_dac = 0; |
4760 | } | 4817 | } |
4761 | 4818 | ||
4762 | err = pci_request_regions(pdev, ixgbe_driver_name); | 4819 | err = pci_request_selected_regions(pdev, pci_select_bars(pdev, |
4820 | IORESOURCE_MEM), ixgbe_driver_name); | ||
4763 | if (err) { | 4821 | if (err) { |
4764 | dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); | 4822 | dev_err(&pdev->dev, |
4823 | "pci_request_selected_regions failed 0x%x\n", err); | ||
4765 | goto err_pci_reg; | 4824 | goto err_pci_reg; |
4766 | } | 4825 | } |
4767 | 4826 | ||
@@ -4869,6 +4928,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
4869 | if (err) | 4928 | if (err) |
4870 | goto err_sw_init; | 4929 | goto err_sw_init; |
4871 | 4930 | ||
4931 | /* | ||
4932 | * If there is a fan on this device and it has failed log the | ||
4933 | * failure. | ||
4934 | */ | ||
4935 | if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { | ||
4936 | u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); | ||
4937 | if (esdp & IXGBE_ESDP_SDP1) | ||
4938 | DPRINTK(PROBE, CRIT, | ||
4939 | "Fan has stopped, replace the adapter\n"); | ||
4940 | } | ||
4941 | |||
4872 | /* reset_hw fills in the perm_addr as well */ | 4942 | /* reset_hw fills in the perm_addr as well */ |
4873 | err = hw->mac.ops.reset_hw(hw); | 4943 | err = hw->mac.ops.reset_hw(hw); |
4874 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { | 4944 | if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { |
@@ -5012,8 +5082,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
5012 | err_register: | 5082 | err_register: |
5013 | ixgbe_release_hw_control(adapter); | 5083 | ixgbe_release_hw_control(adapter); |
5014 | err_hw_init: | 5084 | err_hw_init: |
5085 | ixgbe_clear_interrupt_scheme(adapter); | ||
5015 | err_sw_init: | 5086 | err_sw_init: |
5016 | ixgbe_reset_interrupt_capability(adapter); | ||
5017 | err_eeprom: | 5087 | err_eeprom: |
5018 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); | 5088 | clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); |
5019 | del_timer_sync(&adapter->sfp_timer); | 5089 | del_timer_sync(&adapter->sfp_timer); |
@@ -5024,7 +5094,8 @@ err_eeprom: | |||
5024 | err_ioremap: | 5094 | err_ioremap: |
5025 | free_netdev(netdev); | 5095 | free_netdev(netdev); |
5026 | err_alloc_etherdev: | 5096 | err_alloc_etherdev: |
5027 | pci_release_regions(pdev); | 5097 | pci_release_selected_regions(pdev, pci_select_bars(pdev, |
5098 | IORESOURCE_MEM)); | ||
5028 | err_pci_reg: | 5099 | err_pci_reg: |
5029 | err_dma: | 5100 | err_dma: |
5030 | pci_disable_device(pdev); | 5101 | pci_disable_device(pdev); |
@@ -5071,16 +5142,15 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) | |||
5071 | if (netdev->reg_state == NETREG_REGISTERED) | 5142 | if (netdev->reg_state == NETREG_REGISTERED) |
5072 | unregister_netdev(netdev); | 5143 | unregister_netdev(netdev); |
5073 | 5144 | ||
5074 | ixgbe_reset_interrupt_capability(adapter); | 5145 | ixgbe_clear_interrupt_scheme(adapter); |
5075 | 5146 | ||
5076 | ixgbe_release_hw_control(adapter); | 5147 | ixgbe_release_hw_control(adapter); |
5077 | 5148 | ||
5078 | iounmap(adapter->hw.hw_addr); | 5149 | iounmap(adapter->hw.hw_addr); |
5079 | pci_release_regions(pdev); | 5150 | pci_release_selected_regions(pdev, pci_select_bars(pdev, |
5151 | IORESOURCE_MEM)); | ||
5080 | 5152 | ||
5081 | DPRINTK(PROBE, INFO, "complete\n"); | 5153 | DPRINTK(PROBE, INFO, "complete\n"); |
5082 | kfree(adapter->tx_ring); | ||
5083 | kfree(adapter->rx_ring); | ||
5084 | 5154 | ||
5085 | free_netdev(netdev); | 5155 | free_netdev(netdev); |
5086 | 5156 | ||
@@ -5108,6 +5178,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, | |||
5108 | 5178 | ||
5109 | netif_device_detach(netdev); | 5179 | netif_device_detach(netdev); |
5110 | 5180 | ||
5181 | if (state == pci_channel_io_perm_failure) | ||
5182 | return PCI_ERS_RESULT_DISCONNECT; | ||
5183 | |||
5111 | if (netif_running(netdev)) | 5184 | if (netif_running(netdev)) |
5112 | ixgbe_down(adapter); | 5185 | ixgbe_down(adapter); |
5113 | pci_disable_device(pdev); | 5186 | pci_disable_device(pdev); |
@@ -5129,7 +5202,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) | |||
5129 | pci_ers_result_t result; | 5202 | pci_ers_result_t result; |
5130 | int err; | 5203 | int err; |
5131 | 5204 | ||
5132 | if (pci_enable_device(pdev)) { | 5205 | if (pci_enable_device_mem(pdev)) { |
5133 | DPRINTK(PROBE, ERR, | 5206 | DPRINTK(PROBE, ERR, |
5134 | "Cannot re-enable PCI device after reset.\n"); | 5207 | "Cannot re-enable PCI device after reset.\n"); |
5135 | result = PCI_ERS_RESULT_DISCONNECT; | 5208 | result = PCI_ERS_RESULT_DISCONNECT; |
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index e49e8af59eda..9fd79a05ff0f 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h | |||
@@ -1604,6 +1604,7 @@ | |||
1604 | #define IXGBE_PSRTYPE_UDPHDR 0x00000020 | 1604 | #define IXGBE_PSRTYPE_UDPHDR 0x00000020 |
1605 | #define IXGBE_PSRTYPE_IPV4HDR 0x00000100 | 1605 | #define IXGBE_PSRTYPE_IPV4HDR 0x00000100 |
1606 | #define IXGBE_PSRTYPE_IPV6HDR 0x00000200 | 1606 | #define IXGBE_PSRTYPE_IPV6HDR 0x00000200 |
1607 | #define IXGBE_PSRTYPE_L2HDR 0x00001000 | ||
1607 | 1608 | ||
1608 | /* SRRCTL bit definitions */ | 1609 | /* SRRCTL bit definitions */ |
1609 | #define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ | 1610 | #define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ |
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c index efbae4b8398e..a0c578585a50 100644 --- a/drivers/net/lasi_82596.c +++ b/drivers/net/lasi_82596.c | |||
@@ -161,12 +161,12 @@ lan_init_chip(struct parisc_device *dev) | |||
161 | 161 | ||
162 | if (!dev->irq) { | 162 | if (!dev->irq) { |
163 | printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", | 163 | printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", |
164 | __FILE__, dev->hpa.start); | 164 | __FILE__, (unsigned long)dev->hpa.start); |
165 | return -ENODEV; | 165 | return -ENODEV; |
166 | } | 166 | } |
167 | 167 | ||
168 | printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa.start, | 168 | printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", |
169 | dev->irq); | 169 | (unsigned long)dev->hpa.start, dev->irq); |
170 | 170 | ||
171 | netdevice = alloc_etherdev(sizeof(struct i596_private)); | 171 | netdevice = alloc_etherdev(sizeof(struct i596_private)); |
172 | if (!netdevice) | 172 | if (!netdevice) |
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index a400d7115f78..d5838528791f 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -88,7 +88,24 @@ static char mv643xx_eth_driver_version[] = "1.4"; | |||
88 | #define MAC_ADDR_LOW 0x0014 | 88 | #define MAC_ADDR_LOW 0x0014 |
89 | #define MAC_ADDR_HIGH 0x0018 | 89 | #define MAC_ADDR_HIGH 0x0018 |
90 | #define SDMA_CONFIG 0x001c | 90 | #define SDMA_CONFIG 0x001c |
91 | #define TX_BURST_SIZE_16_64BIT 0x01000000 | ||
92 | #define TX_BURST_SIZE_4_64BIT 0x00800000 | ||
93 | #define BLM_TX_NO_SWAP 0x00000020 | ||
94 | #define BLM_RX_NO_SWAP 0x00000010 | ||
95 | #define RX_BURST_SIZE_16_64BIT 0x00000008 | ||
96 | #define RX_BURST_SIZE_4_64BIT 0x00000004 | ||
91 | #define PORT_SERIAL_CONTROL 0x003c | 97 | #define PORT_SERIAL_CONTROL 0x003c |
98 | #define SET_MII_SPEED_TO_100 0x01000000 | ||
99 | #define SET_GMII_SPEED_TO_1000 0x00800000 | ||
100 | #define SET_FULL_DUPLEX_MODE 0x00200000 | ||
101 | #define MAX_RX_PACKET_9700BYTE 0x000a0000 | ||
102 | #define DISABLE_AUTO_NEG_SPEED_GMII 0x00002000 | ||
103 | #define DO_NOT_FORCE_LINK_FAIL 0x00000400 | ||
104 | #define SERIAL_PORT_CONTROL_RESERVED 0x00000200 | ||
105 | #define DISABLE_AUTO_NEG_FOR_FLOW_CTRL 0x00000008 | ||
106 | #define DISABLE_AUTO_NEG_FOR_DUPLEX 0x00000004 | ||
107 | #define FORCE_LINK_PASS 0x00000002 | ||
108 | #define SERIAL_PORT_ENABLE 0x00000001 | ||
92 | #define PORT_STATUS 0x0044 | 109 | #define PORT_STATUS 0x0044 |
93 | #define TX_FIFO_EMPTY 0x00000400 | 110 | #define TX_FIFO_EMPTY 0x00000400 |
94 | #define TX_IN_PROGRESS 0x00000080 | 111 | #define TX_IN_PROGRESS 0x00000080 |
@@ -106,7 +123,9 @@ static char mv643xx_eth_driver_version[] = "1.4"; | |||
106 | #define TX_BW_BURST 0x005c | 123 | #define TX_BW_BURST 0x005c |
107 | #define INT_CAUSE 0x0060 | 124 | #define INT_CAUSE 0x0060 |
108 | #define INT_TX_END 0x07f80000 | 125 | #define INT_TX_END 0x07f80000 |
126 | #define INT_TX_END_0 0x00080000 | ||
109 | #define INT_RX 0x000003fc | 127 | #define INT_RX 0x000003fc |
128 | #define INT_RX_0 0x00000004 | ||
110 | #define INT_EXT 0x00000002 | 129 | #define INT_EXT 0x00000002 |
111 | #define INT_CAUSE_EXT 0x0064 | 130 | #define INT_CAUSE_EXT 0x0064 |
112 | #define INT_EXT_LINK_PHY 0x00110000 | 131 | #define INT_EXT_LINK_PHY 0x00110000 |
@@ -135,15 +154,8 @@ static char mv643xx_eth_driver_version[] = "1.4"; | |||
135 | 154 | ||
136 | 155 | ||
137 | /* | 156 | /* |
138 | * SDMA configuration register. | 157 | * SDMA configuration register default value. |
139 | */ | 158 | */ |
140 | #define RX_BURST_SIZE_4_64BIT (2 << 1) | ||
141 | #define RX_BURST_SIZE_16_64BIT (4 << 1) | ||
142 | #define BLM_RX_NO_SWAP (1 << 4) | ||
143 | #define BLM_TX_NO_SWAP (1 << 5) | ||
144 | #define TX_BURST_SIZE_4_64BIT (2 << 22) | ||
145 | #define TX_BURST_SIZE_16_64BIT (4 << 22) | ||
146 | |||
147 | #if defined(__BIG_ENDIAN) | 159 | #if defined(__BIG_ENDIAN) |
148 | #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ | 160 | #define PORT_SDMA_CONFIG_DEFAULT_VALUE \ |
149 | (RX_BURST_SIZE_4_64BIT | \ | 161 | (RX_BURST_SIZE_4_64BIT | \ |
@@ -160,22 +172,11 @@ static char mv643xx_eth_driver_version[] = "1.4"; | |||
160 | 172 | ||
161 | 173 | ||
162 | /* | 174 | /* |
163 | * Port serial control register. | 175 | * Misc definitions. |
164 | */ | 176 | */ |
165 | #define SET_MII_SPEED_TO_100 (1 << 24) | 177 | #define DEFAULT_RX_QUEUE_SIZE 128 |
166 | #define SET_GMII_SPEED_TO_1000 (1 << 23) | 178 | #define DEFAULT_TX_QUEUE_SIZE 256 |
167 | #define SET_FULL_DUPLEX_MODE (1 << 21) | 179 | #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) |
168 | #define MAX_RX_PACKET_9700BYTE (5 << 17) | ||
169 | #define DISABLE_AUTO_NEG_SPEED_GMII (1 << 13) | ||
170 | #define DO_NOT_FORCE_LINK_FAIL (1 << 10) | ||
171 | #define SERIAL_PORT_CONTROL_RESERVED (1 << 9) | ||
172 | #define DISABLE_AUTO_NEG_FOR_FLOW_CTRL (1 << 3) | ||
173 | #define DISABLE_AUTO_NEG_FOR_DUPLEX (1 << 2) | ||
174 | #define FORCE_LINK_PASS (1 << 1) | ||
175 | #define SERIAL_PORT_ENABLE (1 << 0) | ||
176 | |||
177 | #define DEFAULT_RX_QUEUE_SIZE 128 | ||
178 | #define DEFAULT_TX_QUEUE_SIZE 256 | ||
179 | 180 | ||
180 | 181 | ||
181 | /* | 182 | /* |
@@ -393,6 +394,7 @@ struct mv643xx_eth_private { | |||
393 | struct work_struct tx_timeout_task; | 394 | struct work_struct tx_timeout_task; |
394 | 395 | ||
395 | struct napi_struct napi; | 396 | struct napi_struct napi; |
397 | u32 int_mask; | ||
396 | u8 oom; | 398 | u8 oom; |
397 | u8 work_link; | 399 | u8 work_link; |
398 | u8 work_tx; | 400 | u8 work_tx; |
@@ -651,23 +653,20 @@ static int rxq_refill(struct rx_queue *rxq, int budget) | |||
651 | refilled = 0; | 653 | refilled = 0; |
652 | while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { | 654 | while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { |
653 | struct sk_buff *skb; | 655 | struct sk_buff *skb; |
654 | int unaligned; | ||
655 | int rx; | 656 | int rx; |
656 | struct rx_desc *rx_desc; | 657 | struct rx_desc *rx_desc; |
657 | 658 | ||
658 | skb = __skb_dequeue(&mp->rx_recycle); | 659 | skb = __skb_dequeue(&mp->rx_recycle); |
659 | if (skb == NULL) | 660 | if (skb == NULL) |
660 | skb = dev_alloc_skb(mp->skb_size + | 661 | skb = dev_alloc_skb(mp->skb_size); |
661 | dma_get_cache_alignment() - 1); | ||
662 | 662 | ||
663 | if (skb == NULL) { | 663 | if (skb == NULL) { |
664 | mp->oom = 1; | 664 | mp->oom = 1; |
665 | goto oom; | 665 | goto oom; |
666 | } | 666 | } |
667 | 667 | ||
668 | unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1); | 668 | if (SKB_DMA_REALIGN) |
669 | if (unaligned) | 669 | skb_reserve(skb, SKB_DMA_REALIGN); |
670 | skb_reserve(skb, dma_get_cache_alignment() - unaligned); | ||
671 | 670 | ||
672 | refilled++; | 671 | refilled++; |
673 | rxq->rx_desc_count++; | 672 | rxq->rx_desc_count++; |
@@ -966,8 +965,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) | |||
966 | if (skb != NULL) { | 965 | if (skb != NULL) { |
967 | if (skb_queue_len(&mp->rx_recycle) < | 966 | if (skb_queue_len(&mp->rx_recycle) < |
968 | mp->rx_ring_size && | 967 | mp->rx_ring_size && |
969 | skb_recycle_check(skb, mp->skb_size + | 968 | skb_recycle_check(skb, mp->skb_size)) |
970 | dma_get_cache_alignment() - 1)) | ||
971 | __skb_queue_head(&mp->rx_recycle, skb); | 969 | __skb_queue_head(&mp->rx_recycle, skb); |
972 | else | 970 | else |
973 | dev_kfree_skb(skb); | 971 | dev_kfree_skb(skb); |
@@ -1807,7 +1805,6 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev) | |||
1807 | if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { | 1805 | if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { |
1808 | int port_num; | 1806 | int port_num; |
1809 | u32 accept; | 1807 | u32 accept; |
1810 | int i; | ||
1811 | 1808 | ||
1812 | oom: | 1809 | oom: |
1813 | port_num = mp->port_num; | 1810 | port_num = mp->port_num; |
@@ -2064,15 +2061,16 @@ static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) | |||
2064 | u32 int_cause; | 2061 | u32 int_cause; |
2065 | u32 int_cause_ext; | 2062 | u32 int_cause_ext; |
2066 | 2063 | ||
2067 | int_cause = rdlp(mp, INT_CAUSE) & (INT_TX_END | INT_RX | INT_EXT); | 2064 | int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; |
2068 | if (int_cause == 0) | 2065 | if (int_cause == 0) |
2069 | return 0; | 2066 | return 0; |
2070 | 2067 | ||
2071 | int_cause_ext = 0; | 2068 | int_cause_ext = 0; |
2072 | if (int_cause & INT_EXT) | 2069 | if (int_cause & INT_EXT) { |
2070 | int_cause &= ~INT_EXT; | ||
2073 | int_cause_ext = rdlp(mp, INT_CAUSE_EXT); | 2071 | int_cause_ext = rdlp(mp, INT_CAUSE_EXT); |
2072 | } | ||
2074 | 2073 | ||
2075 | int_cause &= INT_TX_END | INT_RX; | ||
2076 | if (int_cause) { | 2074 | if (int_cause) { |
2077 | wrlp(mp, INT_CAUSE, ~int_cause); | 2075 | wrlp(mp, INT_CAUSE, ~int_cause); |
2078 | mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & | 2076 | mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & |
@@ -2179,6 +2177,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) | |||
2179 | if (mp->work_link) { | 2177 | if (mp->work_link) { |
2180 | mp->work_link = 0; | 2178 | mp->work_link = 0; |
2181 | handle_link_event(mp); | 2179 | handle_link_event(mp); |
2180 | work_done++; | ||
2182 | continue; | 2181 | continue; |
2183 | } | 2182 | } |
2184 | 2183 | ||
@@ -2217,7 +2216,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget) | |||
2217 | if (mp->oom) | 2216 | if (mp->oom) |
2218 | mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); | 2217 | mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); |
2219 | napi_complete(napi); | 2218 | napi_complete(napi); |
2220 | wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT); | 2219 | wrlp(mp, INT_MASK, mp->int_mask); |
2221 | } | 2220 | } |
2222 | 2221 | ||
2223 | return work_done; | 2222 | return work_done; |
@@ -2338,6 +2337,14 @@ static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) | |||
2338 | * size field are ignored by the hardware. | 2337 | * size field are ignored by the hardware. |
2339 | */ | 2338 | */ |
2340 | mp->skb_size = (skb_size + 7) & ~7; | 2339 | mp->skb_size = (skb_size + 7) & ~7; |
2340 | |||
2341 | /* | ||
2342 | * If NET_SKB_PAD is smaller than a cache line, | ||
2343 | * netdev_alloc_skb() will cause skb->data to be misaligned | ||
2344 | * to a cache line boundary. If this is the case, include | ||
2345 | * some extra space to allow re-aligning the data area. | ||
2346 | */ | ||
2347 | mp->skb_size += SKB_DMA_REALIGN; | ||
2341 | } | 2348 | } |
2342 | 2349 | ||
2343 | static int mv643xx_eth_open(struct net_device *dev) | 2350 | static int mv643xx_eth_open(struct net_device *dev) |
@@ -2363,6 +2370,8 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
2363 | 2370 | ||
2364 | skb_queue_head_init(&mp->rx_recycle); | 2371 | skb_queue_head_init(&mp->rx_recycle); |
2365 | 2372 | ||
2373 | mp->int_mask = INT_EXT; | ||
2374 | |||
2366 | for (i = 0; i < mp->rxq_count; i++) { | 2375 | for (i = 0; i < mp->rxq_count; i++) { |
2367 | err = rxq_init(mp, i); | 2376 | err = rxq_init(mp, i); |
2368 | if (err) { | 2377 | if (err) { |
@@ -2372,6 +2381,7 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
2372 | } | 2381 | } |
2373 | 2382 | ||
2374 | rxq_refill(mp->rxq + i, INT_MAX); | 2383 | rxq_refill(mp->rxq + i, INT_MAX); |
2384 | mp->int_mask |= INT_RX_0 << i; | ||
2375 | } | 2385 | } |
2376 | 2386 | ||
2377 | if (mp->oom) { | 2387 | if (mp->oom) { |
@@ -2386,12 +2396,13 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
2386 | txq_deinit(mp->txq + i); | 2396 | txq_deinit(mp->txq + i); |
2387 | goto out_free; | 2397 | goto out_free; |
2388 | } | 2398 | } |
2399 | mp->int_mask |= INT_TX_END_0 << i; | ||
2389 | } | 2400 | } |
2390 | 2401 | ||
2391 | port_start(mp); | 2402 | port_start(mp); |
2392 | 2403 | ||
2393 | wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); | 2404 | wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); |
2394 | wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT); | 2405 | wrlp(mp, INT_MASK, mp->int_mask); |
2395 | 2406 | ||
2396 | return 0; | 2407 | return 0; |
2397 | 2408 | ||
@@ -2535,7 +2546,7 @@ static void mv643xx_eth_netpoll(struct net_device *dev) | |||
2535 | 2546 | ||
2536 | mv643xx_eth_irq(dev->irq, dev); | 2547 | mv643xx_eth_irq(dev->irq, dev); |
2537 | 2548 | ||
2538 | wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT); | 2549 | wrlp(mp, INT_MASK, mp->int_mask); |
2539 | } | 2550 | } |
2540 | #endif | 2551 | #endif |
2541 | 2552 | ||
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 8dacfbb003e2..9350c8663fd8 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -1019,8 +1019,8 @@ typedef struct { | |||
1019 | #define NETXEN_MAC_DEL 2 | 1019 | #define NETXEN_MAC_DEL 2 |
1020 | 1020 | ||
1021 | typedef struct nx_mac_list_s { | 1021 | typedef struct nx_mac_list_s { |
1022 | struct nx_mac_list_s *next; | 1022 | struct list_head list; |
1023 | uint8_t mac_addr[MAX_ADDR_LEN]; | 1023 | uint8_t mac_addr[ETH_ALEN+2]; |
1024 | } nx_mac_list_t; | 1024 | } nx_mac_list_t; |
1025 | 1025 | ||
1026 | /* | 1026 | /* |
@@ -1213,7 +1213,7 @@ struct netxen_adapter { | |||
1213 | 1213 | ||
1214 | struct net_device *netdev; | 1214 | struct net_device *netdev; |
1215 | struct pci_dev *pdev; | 1215 | struct pci_dev *pdev; |
1216 | nx_mac_list_t *mac_list; | 1216 | struct list_head mac_list; |
1217 | 1217 | ||
1218 | u32 curr_window; | 1218 | u32 curr_window; |
1219 | u32 crb_win; | 1219 | u32 crb_win; |
@@ -1264,9 +1264,10 @@ struct netxen_adapter { | |||
1264 | 1264 | ||
1265 | int (*enable_phy_interrupts) (struct netxen_adapter *); | 1265 | int (*enable_phy_interrupts) (struct netxen_adapter *); |
1266 | int (*disable_phy_interrupts) (struct netxen_adapter *); | 1266 | int (*disable_phy_interrupts) (struct netxen_adapter *); |
1267 | int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t); | 1267 | int (*macaddr_set) (struct netxen_adapter *, u8 *); |
1268 | int (*set_mtu) (struct netxen_adapter *, int); | 1268 | int (*set_mtu) (struct netxen_adapter *, int); |
1269 | int (*set_promisc) (struct netxen_adapter *, u32); | 1269 | int (*set_promisc) (struct netxen_adapter *, u32); |
1270 | void (*set_multi) (struct net_device *); | ||
1270 | int (*phy_read) (struct netxen_adapter *, long reg, u32 *); | 1271 | int (*phy_read) (struct netxen_adapter *, long reg, u32 *); |
1271 | int (*phy_write) (struct netxen_adapter *, long reg, u32 val); | 1272 | int (*phy_write) (struct netxen_adapter *, long reg, u32 val); |
1272 | int (*init_port) (struct netxen_adapter *, int); | 1273 | int (*init_port) (struct netxen_adapter *, int); |
@@ -1331,6 +1332,9 @@ int netxen_niu_gbe_phy_write(struct netxen_adapter *adapter, | |||
1331 | int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); | 1332 | int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu); |
1332 | int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu); | 1333 | int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu); |
1333 | 1334 | ||
1335 | int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr); | ||
1336 | int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr); | ||
1337 | |||
1334 | #define NXRD32(adapter, off) \ | 1338 | #define NXRD32(adapter, off) \ |
1335 | (adapter->hw_read_wx(adapter, off)) | 1339 | (adapter->hw_read_wx(adapter, off)) |
1336 | #define NXWR32(adapter, off, val) \ | 1340 | #define NXWR32(adapter, off, val) \ |
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 9f5ced3eaf9d..86c9e78ec39e 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c | |||
@@ -321,27 +321,6 @@ static unsigned crb_hub_agt[64] = | |||
321 | 321 | ||
322 | #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ | 322 | #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ |
323 | 323 | ||
324 | int netxen_nic_set_mac(struct net_device *netdev, void *p) | ||
325 | { | ||
326 | struct netxen_adapter *adapter = netdev_priv(netdev); | ||
327 | struct sockaddr *addr = p; | ||
328 | |||
329 | if (netif_running(netdev)) | ||
330 | return -EBUSY; | ||
331 | |||
332 | if (!is_valid_ether_addr(addr->sa_data)) | ||
333 | return -EADDRNOTAVAIL; | ||
334 | |||
335 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
336 | |||
337 | /* For P3, MAC addr is not set in NIU */ | ||
338 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) | ||
339 | if (adapter->macaddr_set) | ||
340 | adapter->macaddr_set(adapter, addr->sa_data); | ||
341 | |||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | #define NETXEN_UNICAST_ADDR(port, index) \ | 324 | #define NETXEN_UNICAST_ADDR(port, index) \ |
346 | (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8)) | 325 | (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8)) |
347 | #define NETXEN_MCAST_ADDR(port, index) \ | 326 | #define NETXEN_MCAST_ADDR(port, index) \ |
@@ -470,45 +449,6 @@ void netxen_p2_nic_set_multi(struct net_device *netdev) | |||
470 | netxen_nic_set_mcast_addr(adapter, index, null_addr); | 449 | netxen_nic_set_mcast_addr(adapter, index, null_addr); |
471 | } | 450 | } |
472 | 451 | ||
473 | static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, | ||
474 | u8 *addr, nx_mac_list_t **add_list, nx_mac_list_t **del_list) | ||
475 | { | ||
476 | nx_mac_list_t *cur, *prev; | ||
477 | |||
478 | /* if in del_list, move it to adapter->mac_list */ | ||
479 | for (cur = *del_list, prev = NULL; cur;) { | ||
480 | if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) { | ||
481 | if (prev == NULL) | ||
482 | *del_list = cur->next; | ||
483 | else | ||
484 | prev->next = cur->next; | ||
485 | cur->next = adapter->mac_list; | ||
486 | adapter->mac_list = cur; | ||
487 | return 0; | ||
488 | } | ||
489 | prev = cur; | ||
490 | cur = cur->next; | ||
491 | } | ||
492 | |||
493 | /* make sure to add each mac address only once */ | ||
494 | for (cur = adapter->mac_list; cur; cur = cur->next) { | ||
495 | if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) | ||
496 | return 0; | ||
497 | } | ||
498 | /* not in del_list, create new entry and add to add_list */ | ||
499 | cur = kmalloc(sizeof(*cur), in_atomic()? GFP_ATOMIC : GFP_KERNEL); | ||
500 | if (cur == NULL) { | ||
501 | printk(KERN_ERR "%s: cannot allocate memory. MAC filtering may" | ||
502 | "not work properly from now.\n", __func__); | ||
503 | return -1; | ||
504 | } | ||
505 | |||
506 | memcpy(cur->mac_addr, addr, ETH_ALEN); | ||
507 | cur->next = *add_list; | ||
508 | *add_list = cur; | ||
509 | return 0; | ||
510 | } | ||
511 | |||
512 | static int | 452 | static int |
513 | netxen_send_cmd_descs(struct netxen_adapter *adapter, | 453 | netxen_send_cmd_descs(struct netxen_adapter *adapter, |
514 | struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) | 454 | struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) |
@@ -526,7 +466,7 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, | |||
526 | producer = tx_ring->producer; | 466 | producer = tx_ring->producer; |
527 | consumer = tx_ring->sw_consumer; | 467 | consumer = tx_ring->sw_consumer; |
528 | 468 | ||
529 | if (nr_desc > find_diff_among(producer, consumer, tx_ring->num_desc)) { | 469 | if (nr_desc >= find_diff_among(producer, consumer, tx_ring->num_desc)) { |
530 | netif_tx_unlock_bh(adapter->netdev); | 470 | netif_tx_unlock_bh(adapter->netdev); |
531 | return -EBUSY; | 471 | return -EBUSY; |
532 | } | 472 | } |
@@ -555,14 +495,12 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, | |||
555 | return 0; | 495 | return 0; |
556 | } | 496 | } |
557 | 497 | ||
558 | static int nx_p3_sre_macaddr_change(struct net_device *dev, | 498 | static int |
559 | u8 *addr, unsigned op) | 499 | nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op) |
560 | { | 500 | { |
561 | struct netxen_adapter *adapter = netdev_priv(dev); | ||
562 | nx_nic_req_t req; | 501 | nx_nic_req_t req; |
563 | nx_mac_req_t *mac_req; | 502 | nx_mac_req_t *mac_req; |
564 | u64 word; | 503 | u64 word; |
565 | int rv; | ||
566 | 504 | ||
567 | memset(&req, 0, sizeof(nx_nic_req_t)); | 505 | memset(&req, 0, sizeof(nx_nic_req_t)); |
568 | req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23); | 506 | req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23); |
@@ -574,28 +512,51 @@ static int nx_p3_sre_macaddr_change(struct net_device *dev, | |||
574 | mac_req->op = op; | 512 | mac_req->op = op; |
575 | memcpy(mac_req->mac_addr, addr, 6); | 513 | memcpy(mac_req->mac_addr, addr, 6); |
576 | 514 | ||
577 | rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); | 515 | return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); |
578 | if (rv != 0) { | 516 | } |
579 | printk(KERN_ERR "ERROR. Could not send mac update\n"); | 517 | |
580 | return rv; | 518 | static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, |
519 | u8 *addr, struct list_head *del_list) | ||
520 | { | ||
521 | struct list_head *head; | ||
522 | nx_mac_list_t *cur; | ||
523 | |||
524 | /* look up if already exists */ | ||
525 | list_for_each(head, del_list) { | ||
526 | cur = list_entry(head, nx_mac_list_t, list); | ||
527 | |||
528 | if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) { | ||
529 | list_move_tail(head, &adapter->mac_list); | ||
530 | return 0; | ||
531 | } | ||
581 | } | 532 | } |
582 | 533 | ||
583 | return 0; | 534 | cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC); |
535 | if (cur == NULL) { | ||
536 | printk(KERN_ERR "%s: failed to add mac address filter\n", | ||
537 | adapter->netdev->name); | ||
538 | return -ENOMEM; | ||
539 | } | ||
540 | memcpy(cur->mac_addr, addr, ETH_ALEN); | ||
541 | list_add_tail(&cur->list, &adapter->mac_list); | ||
542 | return nx_p3_sre_macaddr_change(adapter, | ||
543 | cur->mac_addr, NETXEN_MAC_ADD); | ||
584 | } | 544 | } |
585 | 545 | ||
586 | void netxen_p3_nic_set_multi(struct net_device *netdev) | 546 | void netxen_p3_nic_set_multi(struct net_device *netdev) |
587 | { | 547 | { |
588 | struct netxen_adapter *adapter = netdev_priv(netdev); | 548 | struct netxen_adapter *adapter = netdev_priv(netdev); |
589 | nx_mac_list_t *cur, *next, *del_list, *add_list = NULL; | ||
590 | struct dev_mc_list *mc_ptr; | 549 | struct dev_mc_list *mc_ptr; |
591 | u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; | 550 | u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; |
592 | u32 mode = VPORT_MISS_MODE_DROP; | 551 | u32 mode = VPORT_MISS_MODE_DROP; |
552 | LIST_HEAD(del_list); | ||
553 | struct list_head *head; | ||
554 | nx_mac_list_t *cur; | ||
593 | 555 | ||
594 | del_list = adapter->mac_list; | 556 | list_splice_tail_init(&adapter->mac_list, &del_list); |
595 | adapter->mac_list = NULL; | ||
596 | 557 | ||
597 | nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list); | 558 | nx_p3_nic_add_mac(adapter, netdev->dev_addr, &del_list); |
598 | nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list); | 559 | nx_p3_nic_add_mac(adapter, bcast_addr, &del_list); |
599 | 560 | ||
600 | if (netdev->flags & IFF_PROMISC) { | 561 | if (netdev->flags & IFF_PROMISC) { |
601 | mode = VPORT_MISS_MODE_ACCEPT_ALL; | 562 | mode = VPORT_MISS_MODE_ACCEPT_ALL; |
@@ -611,25 +572,20 @@ void netxen_p3_nic_set_multi(struct net_device *netdev) | |||
611 | if (netdev->mc_count > 0) { | 572 | if (netdev->mc_count > 0) { |
612 | for (mc_ptr = netdev->mc_list; mc_ptr; | 573 | for (mc_ptr = netdev->mc_list; mc_ptr; |
613 | mc_ptr = mc_ptr->next) { | 574 | mc_ptr = mc_ptr->next) { |
614 | nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, | 575 | nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list); |
615 | &add_list, &del_list); | ||
616 | } | 576 | } |
617 | } | 577 | } |
618 | 578 | ||
619 | send_fw_cmd: | 579 | send_fw_cmd: |
620 | adapter->set_promisc(adapter, mode); | 580 | adapter->set_promisc(adapter, mode); |
621 | for (cur = del_list; cur;) { | 581 | head = &del_list; |
622 | nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL); | 582 | while (!list_empty(head)) { |
623 | next = cur->next; | 583 | cur = list_entry(head->next, nx_mac_list_t, list); |
584 | |||
585 | nx_p3_sre_macaddr_change(adapter, | ||
586 | cur->mac_addr, NETXEN_MAC_DEL); | ||
587 | list_del(&cur->list); | ||
624 | kfree(cur); | 588 | kfree(cur); |
625 | cur = next; | ||
626 | } | ||
627 | for (cur = add_list; cur;) { | ||
628 | nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_ADD); | ||
629 | next = cur->next; | ||
630 | cur->next = adapter->mac_list; | ||
631 | adapter->mac_list = cur; | ||
632 | cur = next; | ||
633 | } | 589 | } |
634 | } | 590 | } |
635 | 591 | ||
@@ -654,17 +610,25 @@ int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) | |||
654 | 610 | ||
655 | void netxen_p3_free_mac_list(struct netxen_adapter *adapter) | 611 | void netxen_p3_free_mac_list(struct netxen_adapter *adapter) |
656 | { | 612 | { |
657 | nx_mac_list_t *cur, *next; | 613 | nx_mac_list_t *cur; |
658 | 614 | struct list_head *head = &adapter->mac_list; | |
659 | cur = adapter->mac_list; | 615 | |
660 | 616 | while (!list_empty(head)) { | |
661 | while (cur) { | 617 | cur = list_entry(head->next, nx_mac_list_t, list); |
662 | next = cur->next; | 618 | nx_p3_sre_macaddr_change(adapter, |
619 | cur->mac_addr, NETXEN_MAC_DEL); | ||
620 | list_del(&cur->list); | ||
663 | kfree(cur); | 621 | kfree(cur); |
664 | cur = next; | ||
665 | } | 622 | } |
666 | } | 623 | } |
667 | 624 | ||
625 | int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) | ||
626 | { | ||
627 | /* assuming caller has already copied new addr to netdev */ | ||
628 | netxen_p3_nic_set_multi(adapter->netdev); | ||
629 | return 0; | ||
630 | } | ||
631 | |||
668 | #define NETXEN_CONFIG_INTR_COALESCE 3 | 632 | #define NETXEN_CONFIG_INTR_COALESCE 3 |
669 | 633 | ||
670 | /* | 634 | /* |
@@ -752,7 +716,7 @@ int netxen_linkevent_request(struct netxen_adapter *adapter, int enable) | |||
752 | 716 | ||
753 | word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); | 717 | word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16); |
754 | req.req_hdr = cpu_to_le64(word); | 718 | req.req_hdr = cpu_to_le64(word); |
755 | req.words[0] = cpu_to_le64(enable); | 719 | req.words[0] = cpu_to_le64(enable | (enable << 8)); |
756 | 720 | ||
757 | rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); | 721 | rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); |
758 | if (rv != 0) { | 722 | if (rv != 0) { |
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h index f20c96591a87..d4e833339781 100644 --- a/drivers/net/netxen/netxen_nic_hw.h +++ b/drivers/net/netxen/netxen_nic_hw.h | |||
@@ -42,8 +42,6 @@ struct netxen_adapter; | |||
42 | 42 | ||
43 | void netxen_nic_set_link_parameters(struct netxen_adapter *adapter); | 43 | void netxen_nic_set_link_parameters(struct netxen_adapter *adapter); |
44 | 44 | ||
45 | typedef u8 netxen_ethernet_macaddr_t[6]; | ||
46 | |||
47 | /* Nibble or Byte mode for phy interface (GbE mode only) */ | 45 | /* Nibble or Byte mode for phy interface (GbE mode only) */ |
48 | 46 | ||
49 | #define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1) | 47 | #define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1) |
@@ -395,14 +393,6 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, | |||
395 | int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, | 393 | int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, |
396 | u32 mode); | 394 | u32 mode); |
397 | 395 | ||
398 | /* set the MAC address for a given MAC */ | ||
399 | int netxen_niu_macaddr_set(struct netxen_adapter *adapter, | ||
400 | netxen_ethernet_macaddr_t addr); | ||
401 | |||
402 | /* XG version */ | ||
403 | int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter, | ||
404 | netxen_ethernet_macaddr_t addr); | ||
405 | |||
406 | /* Generic enable for GbE ports. Will detect the speed of the link. */ | 396 | /* Generic enable for GbE ports. Will detect the speed of the link. */ |
407 | int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port); | 397 | int netxen_niu_gbe_init_port(struct netxen_adapter *adapter, int port); |
408 | 398 | ||
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index d18216779a09..4a51c31330da 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -319,13 +319,15 @@ err_out: | |||
319 | 319 | ||
320 | void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) | 320 | void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) |
321 | { | 321 | { |
322 | adapter->macaddr_set = netxen_p2_nic_set_mac_addr; | ||
323 | adapter->set_multi = netxen_p2_nic_set_multi; | ||
324 | |||
322 | switch (adapter->ahw.port_type) { | 325 | switch (adapter->ahw.port_type) { |
323 | case NETXEN_NIC_GBE: | 326 | case NETXEN_NIC_GBE: |
324 | adapter->enable_phy_interrupts = | 327 | adapter->enable_phy_interrupts = |
325 | netxen_niu_gbe_enable_phy_interrupts; | 328 | netxen_niu_gbe_enable_phy_interrupts; |
326 | adapter->disable_phy_interrupts = | 329 | adapter->disable_phy_interrupts = |
327 | netxen_niu_gbe_disable_phy_interrupts; | 330 | netxen_niu_gbe_disable_phy_interrupts; |
328 | adapter->macaddr_set = netxen_niu_macaddr_set; | ||
329 | adapter->set_mtu = netxen_nic_set_mtu_gb; | 331 | adapter->set_mtu = netxen_nic_set_mtu_gb; |
330 | adapter->set_promisc = netxen_niu_set_promiscuous_mode; | 332 | adapter->set_promisc = netxen_niu_set_promiscuous_mode; |
331 | adapter->phy_read = netxen_niu_gbe_phy_read; | 333 | adapter->phy_read = netxen_niu_gbe_phy_read; |
@@ -339,7 +341,6 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) | |||
339 | netxen_niu_xgbe_enable_phy_interrupts; | 341 | netxen_niu_xgbe_enable_phy_interrupts; |
340 | adapter->disable_phy_interrupts = | 342 | adapter->disable_phy_interrupts = |
341 | netxen_niu_xgbe_disable_phy_interrupts; | 343 | netxen_niu_xgbe_disable_phy_interrupts; |
342 | adapter->macaddr_set = netxen_niu_xg_macaddr_set; | ||
343 | adapter->set_mtu = netxen_nic_set_mtu_xgb; | 344 | adapter->set_mtu = netxen_nic_set_mtu_xgb; |
344 | adapter->init_port = netxen_niu_xg_init_port; | 345 | adapter->init_port = netxen_niu_xg_init_port; |
345 | adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode; | 346 | adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode; |
@@ -353,6 +354,8 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) | |||
353 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { | 354 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { |
354 | adapter->set_mtu = nx_fw_cmd_set_mtu; | 355 | adapter->set_mtu = nx_fw_cmd_set_mtu; |
355 | adapter->set_promisc = netxen_p3_nic_set_promisc; | 356 | adapter->set_promisc = netxen_p3_nic_set_promisc; |
357 | adapter->macaddr_set = netxen_p3_nic_set_mac_addr; | ||
358 | adapter->set_multi = netxen_p3_nic_set_multi; | ||
356 | } | 359 | } |
357 | } | 360 | } |
358 | 361 | ||
@@ -1319,10 +1322,11 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) | |||
1319 | break; | 1322 | break; |
1320 | } | 1323 | } |
1321 | 1324 | ||
1322 | if (count) { | 1325 | tx_ring->sw_consumer = sw_consumer; |
1323 | tx_ring->sw_consumer = sw_consumer; | 1326 | |
1327 | if (count && netif_running(netdev)) { | ||
1324 | smp_mb(); | 1328 | smp_mb(); |
1325 | if (netif_queue_stopped(netdev) && netif_running(netdev)) { | 1329 | if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) { |
1326 | netif_tx_lock(netdev); | 1330 | netif_tx_lock(netdev); |
1327 | netif_wake_queue(netdev); | 1331 | netif_wake_queue(netdev); |
1328 | smp_mb(); | 1332 | smp_mb(); |
@@ -1450,7 +1454,6 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, | |||
1450 | rds_ring->producer = producer; | 1454 | rds_ring->producer = producer; |
1451 | NXWR32(adapter, rds_ring->crb_rcv_producer, | 1455 | NXWR32(adapter, rds_ring->crb_rcv_producer, |
1452 | (producer - 1) & (rds_ring->num_desc - 1)); | 1456 | (producer - 1) & (rds_ring->num_desc - 1)); |
1453 | wmb(); | ||
1454 | } | 1457 | } |
1455 | spin_unlock(&rds_ring->lock); | 1458 | spin_unlock(&rds_ring->lock); |
1456 | } | 1459 | } |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 5d79c19a6ec0..83dadfd78c3c 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -442,20 +442,38 @@ netxen_read_mac_addr(struct netxen_adapter *adapter) | |||
442 | 442 | ||
443 | if (!is_valid_ether_addr(netdev->perm_addr)) | 443 | if (!is_valid_ether_addr(netdev->perm_addr)) |
444 | dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); | 444 | dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr); |
445 | else | ||
446 | adapter->macaddr_set(adapter, netdev->dev_addr); | ||
447 | 445 | ||
448 | return 0; | 446 | return 0; |
449 | } | 447 | } |
450 | 448 | ||
449 | int netxen_nic_set_mac(struct net_device *netdev, void *p) | ||
450 | { | ||
451 | struct netxen_adapter *adapter = netdev_priv(netdev); | ||
452 | struct sockaddr *addr = p; | ||
453 | |||
454 | if (!is_valid_ether_addr(addr->sa_data)) | ||
455 | return -EINVAL; | ||
456 | |||
457 | if (netif_running(netdev)) { | ||
458 | netif_device_detach(netdev); | ||
459 | netxen_napi_disable(adapter); | ||
460 | } | ||
461 | |||
462 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
463 | adapter->macaddr_set(adapter, addr->sa_data); | ||
464 | |||
465 | if (netif_running(netdev)) { | ||
466 | netif_device_attach(netdev); | ||
467 | netxen_napi_enable(adapter); | ||
468 | } | ||
469 | return 0; | ||
470 | } | ||
471 | |||
451 | static void netxen_set_multicast_list(struct net_device *dev) | 472 | static void netxen_set_multicast_list(struct net_device *dev) |
452 | { | 473 | { |
453 | struct netxen_adapter *adapter = netdev_priv(dev); | 474 | struct netxen_adapter *adapter = netdev_priv(dev); |
454 | 475 | ||
455 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | 476 | adapter->set_multi(dev); |
456 | netxen_p3_nic_set_multi(dev); | ||
457 | else | ||
458 | netxen_p2_nic_set_multi(dev); | ||
459 | } | 477 | } |
460 | 478 | ||
461 | static const struct net_device_ops netxen_netdev_ops = { | 479 | static const struct net_device_ops netxen_netdev_ops = { |
@@ -782,16 +800,13 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) | |||
782 | netxen_nic_driver_name, adapter->portnum); | 800 | netxen_nic_driver_name, adapter->portnum); |
783 | return err; | 801 | return err; |
784 | } | 802 | } |
785 | adapter->macaddr_set(adapter, netdev->dev_addr); | 803 | if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) |
786 | 804 | adapter->macaddr_set(adapter, netdev->dev_addr); | |
787 | netxen_nic_set_link_parameters(adapter); | ||
788 | 805 | ||
789 | netxen_set_multicast_list(netdev); | 806 | adapter->set_multi(netdev); |
790 | if (adapter->set_mtu) | 807 | adapter->set_mtu(adapter, netdev->mtu); |
791 | adapter->set_mtu(adapter, netdev->mtu); | ||
792 | 808 | ||
793 | adapter->ahw.linkup = 0; | 809 | adapter->ahw.linkup = 0; |
794 | mod_timer(&adapter->watchdog_timer, jiffies); | ||
795 | 810 | ||
796 | netxen_napi_enable(adapter); | 811 | netxen_napi_enable(adapter); |
797 | 812 | ||
@@ -800,6 +815,10 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev) | |||
800 | 815 | ||
801 | if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) | 816 | if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) |
802 | netxen_linkevent_request(adapter, 1); | 817 | netxen_linkevent_request(adapter, 1); |
818 | else | ||
819 | netxen_nic_set_link_parameters(adapter); | ||
820 | |||
821 | mod_timer(&adapter->watchdog_timer, jiffies); | ||
803 | 822 | ||
804 | return 0; | 823 | return 0; |
805 | } | 824 | } |
@@ -809,11 +828,15 @@ netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) | |||
809 | { | 828 | { |
810 | netif_carrier_off(netdev); | 829 | netif_carrier_off(netdev); |
811 | netif_stop_queue(netdev); | 830 | netif_stop_queue(netdev); |
831 | smp_mb(); | ||
812 | netxen_napi_disable(adapter); | 832 | netxen_napi_disable(adapter); |
813 | 833 | ||
814 | if (adapter->stop_port) | 834 | if (adapter->stop_port) |
815 | adapter->stop_port(adapter); | 835 | adapter->stop_port(adapter); |
816 | 836 | ||
837 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
838 | netxen_p3_free_mac_list(adapter); | ||
839 | |||
817 | netxen_release_tx_buffers(adapter); | 840 | netxen_release_tx_buffers(adapter); |
818 | 841 | ||
819 | FLUSH_SCHEDULED_WORK(); | 842 | FLUSH_SCHEDULED_WORK(); |
@@ -960,6 +983,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
960 | 983 | ||
961 | rwlock_init(&adapter->adapter_lock); | 984 | rwlock_init(&adapter->adapter_lock); |
962 | spin_lock_init(&adapter->tx_clean_lock); | 985 | spin_lock_init(&adapter->tx_clean_lock); |
986 | INIT_LIST_HEAD(&adapter->mac_list); | ||
963 | 987 | ||
964 | err = netxen_setup_pci_map(adapter); | 988 | err = netxen_setup_pci_map(adapter); |
965 | if (err) | 989 | if (err) |
@@ -1113,9 +1137,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev) | |||
1113 | 1137 | ||
1114 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { | 1138 | if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { |
1115 | netxen_nic_detach(adapter); | 1139 | netxen_nic_detach(adapter); |
1116 | |||
1117 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
1118 | netxen_p3_free_mac_list(adapter); | ||
1119 | } | 1140 | } |
1120 | 1141 | ||
1121 | if (adapter->portnum == 0) | 1142 | if (adapter->portnum == 0) |
@@ -1340,7 +1361,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |||
1340 | producer = tx_ring->producer; | 1361 | producer = tx_ring->producer; |
1341 | smp_mb(); | 1362 | smp_mb(); |
1342 | consumer = tx_ring->sw_consumer; | 1363 | consumer = tx_ring->sw_consumer; |
1343 | if ((no_of_desc+2) > find_diff_among(producer, consumer, num_txd)) { | 1364 | if ((no_of_desc+2) >= find_diff_among(producer, consumer, num_txd)) { |
1344 | netif_stop_queue(netdev); | 1365 | netif_stop_queue(netdev); |
1345 | smp_mb(); | 1366 | smp_mb(); |
1346 | return NETDEV_TX_BUSY; | 1367 | return NETDEV_TX_BUSY; |
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c index 5e2698bf575a..5941c79be723 100644 --- a/drivers/net/netxen/netxen_nic_niu.c +++ b/drivers/net/netxen/netxen_nic_niu.c | |||
@@ -402,76 +402,6 @@ int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) | |||
402 | return 0; | 402 | return 0; |
403 | } | 403 | } |
404 | 404 | ||
405 | /* | ||
406 | * Return the current station MAC address. | ||
407 | * Note that the passed-in value must already be in network byte order. | ||
408 | */ | ||
409 | static int netxen_niu_macaddr_get(struct netxen_adapter *adapter, | ||
410 | netxen_ethernet_macaddr_t * addr) | ||
411 | { | ||
412 | u32 stationhigh; | ||
413 | u32 stationlow; | ||
414 | int phy = adapter->physical_port; | ||
415 | u8 val[8]; | ||
416 | |||
417 | if (addr == NULL) | ||
418 | return -EINVAL; | ||
419 | if ((phy < 0) || (phy > 3)) | ||
420 | return -EINVAL; | ||
421 | |||
422 | stationhigh = NXRD32(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy)); | ||
423 | stationlow = NXRD32(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy)); | ||
424 | ((__le32 *)val)[1] = cpu_to_le32(stationhigh); | ||
425 | ((__le32 *)val)[0] = cpu_to_le32(stationlow); | ||
426 | |||
427 | memcpy(addr, val + 2, 6); | ||
428 | |||
429 | return 0; | ||
430 | } | ||
431 | |||
432 | /* | ||
433 | * Set the station MAC address. | ||
434 | * Note that the passed-in value must already be in network byte order. | ||
435 | */ | ||
436 | int netxen_niu_macaddr_set(struct netxen_adapter *adapter, | ||
437 | netxen_ethernet_macaddr_t addr) | ||
438 | { | ||
439 | u8 temp[4]; | ||
440 | u32 val; | ||
441 | int phy = adapter->physical_port; | ||
442 | unsigned char mac_addr[6]; | ||
443 | int i; | ||
444 | |||
445 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
446 | return 0; | ||
447 | |||
448 | for (i = 0; i < 10; i++) { | ||
449 | temp[0] = temp[1] = 0; | ||
450 | memcpy(temp + 2, addr, 2); | ||
451 | val = le32_to_cpu(*(__le32 *)temp); | ||
452 | if (NXWR32(adapter, NETXEN_NIU_GB_STATION_ADDR_1(phy), val)) | ||
453 | return -EIO; | ||
454 | |||
455 | memcpy(temp, ((u8 *) addr) + 2, sizeof(__le32)); | ||
456 | val = le32_to_cpu(*(__le32 *)temp); | ||
457 | if (NXWR32(adapter, NETXEN_NIU_GB_STATION_ADDR_0(phy), val)) | ||
458 | return -2; | ||
459 | |||
460 | netxen_niu_macaddr_get(adapter, | ||
461 | (netxen_ethernet_macaddr_t *) mac_addr); | ||
462 | if (memcmp(mac_addr, addr, 6) == 0) | ||
463 | break; | ||
464 | } | ||
465 | |||
466 | if (i == 10) { | ||
467 | printk(KERN_ERR "%s: cannot set Mac addr for %s\n", | ||
468 | netxen_nic_driver_name, adapter->netdev->name); | ||
469 | printk(KERN_ERR "MAC address set: %pM.\n", addr); | ||
470 | printk(KERN_ERR "MAC address get: %pM.\n", mac_addr); | ||
471 | } | ||
472 | return 0; | ||
473 | } | ||
474 | |||
475 | /* Disable a GbE interface */ | 405 | /* Disable a GbE interface */ |
476 | int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter) | 406 | int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter) |
477 | { | 407 | { |
@@ -561,57 +491,6 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, | |||
561 | return 0; | 491 | return 0; |
562 | } | 492 | } |
563 | 493 | ||
564 | /* | ||
565 | * Set the MAC address for an XG port | ||
566 | * Note that the passed-in value must already be in network byte order. | ||
567 | */ | ||
568 | int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter, | ||
569 | netxen_ethernet_macaddr_t addr) | ||
570 | { | ||
571 | int phy = adapter->physical_port; | ||
572 | u8 temp[4]; | ||
573 | u32 val; | ||
574 | |||
575 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | ||
576 | return 0; | ||
577 | |||
578 | if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS)) | ||
579 | return -EIO; | ||
580 | |||
581 | temp[0] = temp[1] = 0; | ||
582 | switch (phy) { | ||
583 | case 0: | ||
584 | memcpy(temp + 2, addr, 2); | ||
585 | val = le32_to_cpu(*(__le32 *)temp); | ||
586 | if (NXWR32(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_1, val)) | ||
587 | return -EIO; | ||
588 | |||
589 | memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32)); | ||
590 | val = le32_to_cpu(*(__le32 *)temp); | ||
591 | if (NXWR32(adapter, NETXEN_NIU_XGE_STATION_ADDR_0_HI, val)) | ||
592 | return -EIO; | ||
593 | break; | ||
594 | |||
595 | case 1: | ||
596 | memcpy(temp + 2, addr, 2); | ||
597 | val = le32_to_cpu(*(__le32 *)temp); | ||
598 | if (NXWR32(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_1, val)) | ||
599 | return -EIO; | ||
600 | |||
601 | memcpy(&temp, ((u8 *) addr) + 2, sizeof(__le32)); | ||
602 | val = le32_to_cpu(*(__le32 *)temp); | ||
603 | if (NXWR32(adapter, NETXEN_NIU_XG1_STATION_ADDR_0_HI, val)) | ||
604 | return -EIO; | ||
605 | break; | ||
606 | |||
607 | default: | ||
608 | printk(KERN_ERR "Unknown port %d\n", phy); | ||
609 | break; | ||
610 | } | ||
611 | |||
612 | return 0; | ||
613 | } | ||
614 | |||
615 | int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, | 494 | int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, |
616 | u32 mode) | 495 | u32 mode) |
617 | { | 496 | { |
@@ -636,3 +515,36 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, | |||
636 | 515 | ||
637 | return 0; | 516 | return 0; |
638 | } | 517 | } |
518 | |||
519 | int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr) | ||
520 | { | ||
521 | u32 mac_hi, mac_lo; | ||
522 | u32 reg_hi, reg_lo; | ||
523 | |||
524 | u8 phy = adapter->physical_port; | ||
525 | u8 phy_count = (adapter->ahw.port_type == NETXEN_NIC_XGBE) ? | ||
526 | NETXEN_NIU_MAX_XG_PORTS : NETXEN_NIU_MAX_GBE_PORTS; | ||
527 | |||
528 | if (phy >= phy_count) | ||
529 | return -EINVAL; | ||
530 | |||
531 | mac_lo = ((u32)addr[0] << 16) | ((u32)addr[1] << 24); | ||
532 | mac_hi = addr[2] | ((u32)addr[3] << 8) | | ||
533 | ((u32)addr[4] << 16) | ((u32)addr[5] << 24); | ||
534 | |||
535 | if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { | ||
536 | reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy); | ||
537 | reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy); | ||
538 | } else { | ||
539 | reg_lo = NETXEN_NIU_GB_STATION_ADDR_1(phy); | ||
540 | reg_hi = NETXEN_NIU_GB_STATION_ADDR_0(phy); | ||
541 | } | ||
542 | |||
543 | /* write twice to flush */ | ||
544 | if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) | ||
545 | return -EIO; | ||
546 | if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi)) | ||
547 | return -EIO; | ||
548 | |||
549 | return 0; | ||
550 | } | ||
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 6f97b47d74a6..1508b124e3d8 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -49,8 +49,8 @@ | |||
49 | #include <asm/processor.h> | 49 | #include <asm/processor.h> |
50 | 50 | ||
51 | #define DRV_NAME "r6040" | 51 | #define DRV_NAME "r6040" |
52 | #define DRV_VERSION "0.22" | 52 | #define DRV_VERSION "0.23" |
53 | #define DRV_RELDATE "25Mar2009" | 53 | #define DRV_RELDATE "05May2009" |
54 | 54 | ||
55 | /* PHY CHIP Address */ | 55 | /* PHY CHIP Address */ |
56 | #define PHY1_ADDR 1 /* For MAC1 */ | 56 | #define PHY1_ADDR 1 /* For MAC1 */ |
@@ -742,6 +742,14 @@ static int r6040_up(struct net_device *dev) | |||
742 | struct r6040_private *lp = netdev_priv(dev); | 742 | struct r6040_private *lp = netdev_priv(dev); |
743 | void __iomem *ioaddr = lp->base; | 743 | void __iomem *ioaddr = lp->base; |
744 | int ret; | 744 | int ret; |
745 | u16 val; | ||
746 | |||
747 | /* Check presence of a second PHY */ | ||
748 | val = r6040_phy_read(ioaddr, lp->phy_addr, 2); | ||
749 | if (val == 0xFFFF) { | ||
750 | printk(KERN_ERR DRV_NAME " no second PHY attached\n"); | ||
751 | return -EIO; | ||
752 | } | ||
745 | 753 | ||
746 | /* Initialise and alloc RX/TX buffers */ | 754 | /* Initialise and alloc RX/TX buffers */ |
747 | r6040_init_txbufs(dev); | 755 | r6040_init_txbufs(dev); |
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 5113b26fc2d9..3cff84078a9e 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c | |||
@@ -1976,7 +1976,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) | |||
1976 | retval = -ENODEV; | 1976 | retval = -ENODEV; |
1977 | goto out_0; | 1977 | goto out_0; |
1978 | } | 1978 | } |
1979 | res_size = res->end - res->start; | 1979 | res_size = res->end - res->start + 1; |
1980 | 1980 | ||
1981 | irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 1981 | irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
1982 | if (!irq_res) { | 1982 | if (!irq_res) { |
@@ -2109,12 +2109,58 @@ out_0: | |||
2109 | return retval; | 2109 | return retval; |
2110 | } | 2110 | } |
2111 | 2111 | ||
2112 | #ifdef CONFIG_PM | ||
2113 | /* This implementation assumes the devices remains powered on its VDDVARIO | ||
2114 | * pins during suspend. */ | ||
2115 | |||
2116 | static int smsc911x_suspend(struct platform_device *pdev, pm_message_t state) | ||
2117 | { | ||
2118 | struct net_device *dev = platform_get_drvdata(pdev); | ||
2119 | struct smsc911x_data *pdata = netdev_priv(dev); | ||
2120 | |||
2121 | /* enable wake on LAN, energy detection and the external PME | ||
2122 | * signal. */ | ||
2123 | smsc911x_reg_write(pdata, PMT_CTRL, | ||
2124 | PMT_CTRL_PM_MODE_D1_ | PMT_CTRL_WOL_EN_ | | ||
2125 | PMT_CTRL_ED_EN_ | PMT_CTRL_PME_EN_); | ||
2126 | |||
2127 | return 0; | ||
2128 | } | ||
2129 | |||
2130 | static int smsc911x_resume(struct platform_device *pdev) | ||
2131 | { | ||
2132 | struct net_device *dev = platform_get_drvdata(pdev); | ||
2133 | struct smsc911x_data *pdata = netdev_priv(dev); | ||
2134 | unsigned int to = 100; | ||
2135 | |||
2136 | /* Note 3.11 from the datasheet: | ||
2137 | * "When the LAN9220 is in a power saving state, a write of any | ||
2138 | * data to the BYTE_TEST register will wake-up the device." | ||
2139 | */ | ||
2140 | smsc911x_reg_write(pdata, BYTE_TEST, 0); | ||
2141 | |||
2142 | /* poll the READY bit in PMT_CTRL. Any other access to the device is | ||
2143 | * forbidden while this bit isn't set. Try for 100ms and return -EIO | ||
2144 | * if it failed. */ | ||
2145 | while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) | ||
2146 | udelay(1000); | ||
2147 | |||
2148 | return (to == 0) ? -EIO : 0; | ||
2149 | } | ||
2150 | |||
2151 | #else | ||
2152 | #define smsc911x_suspend NULL | ||
2153 | #define smsc911x_resume NULL | ||
2154 | #endif | ||
2155 | |||
2112 | static struct platform_driver smsc911x_driver = { | 2156 | static struct platform_driver smsc911x_driver = { |
2113 | .probe = smsc911x_drv_probe, | 2157 | .probe = smsc911x_drv_probe, |
2114 | .remove = smsc911x_drv_remove, | 2158 | .remove = smsc911x_drv_remove, |
2115 | .driver = { | 2159 | .driver = { |
2116 | .name = SMSC_CHIPNAME, | 2160 | .name = SMSC_CHIPNAME, |
2117 | }, | 2161 | }, |
2162 | .suspend = smsc911x_suspend, | ||
2163 | .resume = smsc911x_resume, | ||
2118 | }; | 2164 | }; |
2119 | 2165 | ||
2120 | /* Entry point for loading the module */ | 2166 | /* Entry point for loading the module */ |
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index e00b5b1f6743..3717569828bf 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
@@ -180,6 +180,20 @@ config USB_NET_CDCETHER | |||
180 | IEEE 802 "local assignment" bit is set in the address, a "usbX" | 180 | IEEE 802 "local assignment" bit is set in the address, a "usbX" |
181 | name is used instead. | 181 | name is used instead. |
182 | 182 | ||
183 | config USB_NET_CDC_EEM | ||
184 | tristate "CDC EEM support" | ||
185 | depends on USB_USBNET && EXPERIMENTAL | ||
186 | help | ||
187 | This option supports devices conforming to the Communication Device | ||
188 | Class (CDC) Ethernet Emulation Model, a specification that's easy to | ||
189 | implement in device firmware. The CDC EEM specifications are available | ||
190 | from <http://www.usb.org/>. | ||
191 | |||
192 | This driver creates an interface named "ethX", where X depends on | ||
193 | what other networking devices you have in use. However, if the | ||
194 | IEEE 802 "local assignment" bit is set in the address, a "usbX" | ||
195 | name is used instead. | ||
196 | |||
183 | config USB_NET_DM9601 | 197 | config USB_NET_DM9601 |
184 | tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" | 198 | tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" |
185 | depends on USB_USBNET | 199 | depends on USB_USBNET |
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index f4402a06e52c..b870b0b1cbe0 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile | |||
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150) += rtl8150.o | |||
9 | obj-$(CONFIG_USB_HSO) += hso.o | 9 | obj-$(CONFIG_USB_HSO) += hso.o |
10 | obj-$(CONFIG_USB_NET_AX8817X) += asix.o | 10 | obj-$(CONFIG_USB_NET_AX8817X) += asix.o |
11 | obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o | 11 | obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o |
12 | obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o | ||
12 | obj-$(CONFIG_USB_NET_DM9601) += dm9601.o | 13 | obj-$(CONFIG_USB_NET_DM9601) += dm9601.o |
13 | obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o | 14 | obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o |
14 | obj-$(CONFIG_USB_NET_GL620A) += gl620a.o | 15 | obj-$(CONFIG_USB_NET_GL620A) += gl620a.o |
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c new file mode 100644 index 000000000000..80e01778dd3b --- /dev/null +++ b/drivers/net/usb/cdc_eem.c | |||
@@ -0,0 +1,381 @@ | |||
1 | /* | ||
2 | * USB CDC EEM network interface driver | ||
3 | * Copyright (C) 2009 Oberthur Technologies | ||
4 | * by Omar Laazimani, Olivier Condemine | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | #include <linux/module.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/netdevice.h> | ||
24 | #include <linux/etherdevice.h> | ||
25 | #include <linux/ctype.h> | ||
26 | #include <linux/ethtool.h> | ||
27 | #include <linux/workqueue.h> | ||
28 | #include <linux/mii.h> | ||
29 | #include <linux/usb.h> | ||
30 | #include <linux/crc32.h> | ||
31 | #include <linux/usb/cdc.h> | ||
32 | #include <linux/usb/usbnet.h> | ||
33 | |||
34 | |||
35 | /* | ||
36 | * This driver is an implementation of the CDC "Ethernet Emulation | ||
37 | * Model" (EEM) specification, which encapsulates Ethernet frames | ||
38 | * for transport over USB using a simpler USB device model than the | ||
39 | * previous CDC "Ethernet Control Model" (ECM, or "CDC Ethernet"). | ||
40 | * | ||
41 | * For details, see www.usb.org/developers/devclass_docs/CDC_EEM10.pdf | ||
42 | * | ||
43 | * This version has been tested with GIGAntIC WuaoW SIM Smart Card on 2.6.24, | ||
44 | * 2.6.27 and 2.6.30rc2 kernel. | ||
45 | * It has also been validated on Openmoko Om 2008.12 (based on 2.6.24 kernel). | ||
46 | * build on 23-April-2009 | ||
47 | */ | ||
48 | |||
49 | #define EEM_HEAD 2 /* 2 byte header */ | ||
50 | |||
51 | /*-------------------------------------------------------------------------*/ | ||
52 | |||
53 | static void eem_linkcmd_complete(struct urb *urb) | ||
54 | { | ||
55 | dev_kfree_skb(urb->context); | ||
56 | usb_free_urb(urb); | ||
57 | } | ||
58 | |||
59 | static void eem_linkcmd(struct usbnet *dev, struct sk_buff *skb) | ||
60 | { | ||
61 | struct urb *urb; | ||
62 | int status; | ||
63 | |||
64 | urb = usb_alloc_urb(0, GFP_ATOMIC); | ||
65 | if (!urb) | ||
66 | goto fail; | ||
67 | |||
68 | usb_fill_bulk_urb(urb, dev->udev, dev->out, | ||
69 | skb->data, skb->len, eem_linkcmd_complete, skb); | ||
70 | |||
71 | status = usb_submit_urb(urb, GFP_ATOMIC); | ||
72 | if (status) { | ||
73 | usb_free_urb(urb); | ||
74 | fail: | ||
75 | dev_kfree_skb(skb); | ||
76 | devwarn(dev, "link cmd failure\n"); | ||
77 | return; | ||
78 | } | ||
79 | } | ||
80 | |||
81 | static int eem_bind(struct usbnet *dev, struct usb_interface *intf) | ||
82 | { | ||
83 | int status = 0; | ||
84 | |||
85 | status = usbnet_get_endpoints(dev, intf); | ||
86 | if (status < 0) { | ||
87 | usb_set_intfdata(intf, NULL); | ||
88 | usb_driver_release_interface(driver_of(intf), intf); | ||
89 | return status; | ||
90 | } | ||
91 | |||
92 | /* no jumbogram (16K) support for now */ | ||
93 | |||
94 | dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN; | ||
95 | |||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * EEM permits packing multiple Ethernet frames into USB transfers | ||
101 | * (a "bundle"), but for TX we don't try to do that. | ||
102 | */ | ||
103 | static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb, | ||
104 | gfp_t flags) | ||
105 | { | ||
106 | struct sk_buff *skb2 = NULL; | ||
107 | u16 len = skb->len; | ||
108 | u32 crc = 0; | ||
109 | int padlen = 0; | ||
110 | |||
111 | /* When ((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket) is | ||
112 | * zero, stick two bytes of zero length EEM packet on the end. | ||
113 | * Else the framework would add invalid single byte padding, | ||
114 | * since it can't know whether ZLPs will be handled right by | ||
115 | * all the relevant hardware and software. | ||
116 | */ | ||
117 | if (!((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket)) | ||
118 | padlen += 2; | ||
119 | |||
120 | if (!skb_cloned(skb)) { | ||
121 | int headroom = skb_headroom(skb); | ||
122 | int tailroom = skb_tailroom(skb); | ||
123 | |||
124 | if ((tailroom >= ETH_FCS_LEN + padlen) | ||
125 | && (headroom >= EEM_HEAD)) | ||
126 | goto done; | ||
127 | |||
128 | if ((headroom + tailroom) | ||
129 | > (EEM_HEAD + ETH_FCS_LEN + padlen)) { | ||
130 | skb->data = memmove(skb->head + | ||
131 | EEM_HEAD, | ||
132 | skb->data, | ||
133 | skb->len); | ||
134 | skb_set_tail_pointer(skb, len); | ||
135 | goto done; | ||
136 | } | ||
137 | } | ||
138 | |||
139 | skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags); | ||
140 | if (!skb2) | ||
141 | return NULL; | ||
142 | |||
143 | dev_kfree_skb_any(skb); | ||
144 | skb = skb2; | ||
145 | |||
146 | done: | ||
147 | /* we don't use the "no Ethernet CRC" option */ | ||
148 | crc = crc32_le(~0, skb->data, skb->len); | ||
149 | crc = ~crc; | ||
150 | |||
151 | put_unaligned_le32(crc, skb_put(skb, 4)); | ||
152 | |||
153 | /* EEM packet header format: | ||
154 | * b0..13: length of ethernet frame | ||
155 | * b14: bmCRC (1 == valid Ethernet CRC) | ||
156 | * b15: bmType (0 == data) | ||
157 | */ | ||
158 | len = skb->len; | ||
159 | put_unaligned_le16(BIT(14) | len, skb_push(skb, 2)); | ||
160 | |||
161 | /* Bundle a zero length EEM packet if needed */ | ||
162 | if (padlen) | ||
163 | put_unaligned_le16(0, skb_put(skb, 2)); | ||
164 | |||
165 | return skb; | ||
166 | } | ||
167 | |||
168 | static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | ||
169 | { | ||
170 | /* | ||
171 | * Our task here is to strip off framing, leaving skb with one | ||
172 | * data frame for the usbnet framework code to process. But we | ||
173 | * may have received multiple EEM payloads, or command payloads. | ||
174 | * So we must process _everything_ as if it's a header, except | ||
175 | * maybe the last data payload | ||
176 | * | ||
177 | * REVISIT the framework needs updating so that when we consume | ||
178 | * all payloads (the last or only message was a command, or a | ||
179 | * zero length EEM packet) that is not accounted as an rx_error. | ||
180 | */ | ||
181 | do { | ||
182 | struct sk_buff *skb2 = NULL; | ||
183 | u16 header; | ||
184 | u16 len = 0; | ||
185 | |||
186 | /* incomplete EEM header? */ | ||
187 | if (skb->len < EEM_HEAD) | ||
188 | return 0; | ||
189 | |||
190 | /* | ||
191 | * EEM packet header format: | ||
192 | * b0..14: EEM type dependant (Data or Command) | ||
193 | * b15: bmType | ||
194 | */ | ||
195 | header = get_unaligned_le16(skb->data); | ||
196 | skb_pull(skb, EEM_HEAD); | ||
197 | |||
198 | /* | ||
199 | * The bmType bit helps to denote when EEM | ||
200 | * packet is data or command : | ||
201 | * bmType = 0 : EEM data payload | ||
202 | * bmType = 1 : EEM (link) command | ||
203 | */ | ||
204 | if (header & BIT(15)) { | ||
205 | u16 bmEEMCmd; | ||
206 | |||
207 | /* | ||
208 | * EEM (link) command packet: | ||
209 | * b0..10: bmEEMCmdParam | ||
210 | * b11..13: bmEEMCmd | ||
211 | * b14: bmReserved (must be 0) | ||
212 | * b15: 1 (EEM command) | ||
213 | */ | ||
214 | if (header & BIT(14)) { | ||
215 | devdbg(dev, "reserved command %04x\n", header); | ||
216 | continue; | ||
217 | } | ||
218 | |||
219 | bmEEMCmd = (header >> 11) & 0x7; | ||
220 | switch (bmEEMCmd) { | ||
221 | |||
222 | /* Responding to echo requests is mandatory. */ | ||
223 | case 0: /* Echo command */ | ||
224 | len = header & 0x7FF; | ||
225 | |||
226 | /* bogus command? */ | ||
227 | if (skb->len < len) | ||
228 | return 0; | ||
229 | |||
230 | skb2 = skb_clone(skb, GFP_ATOMIC); | ||
231 | if (unlikely(!skb2)) | ||
232 | goto next; | ||
233 | skb_trim(skb2, len); | ||
234 | put_unaligned_le16(BIT(15) | (1 << 11) | len, | ||
235 | skb_push(skb2, 2)); | ||
236 | eem_linkcmd(dev, skb2); | ||
237 | break; | ||
238 | |||
239 | /* | ||
240 | * Host may choose to ignore hints. | ||
241 | * - suspend: peripheral ready to suspend | ||
242 | * - response: suggest N millisec polling | ||
243 | * - response complete: suggest N sec polling | ||
244 | */ | ||
245 | case 2: /* Suspend hint */ | ||
246 | case 3: /* Response hint */ | ||
247 | case 4: /* Response complete hint */ | ||
248 | continue; | ||
249 | |||
250 | /* | ||
251 | * Hosts should never receive host-to-peripheral | ||
252 | * or reserved command codes; or responses to an | ||
253 | * echo command we didn't send. | ||
254 | */ | ||
255 | case 1: /* Echo response */ | ||
256 | case 5: /* Tickle */ | ||
257 | default: /* reserved */ | ||
258 | devwarn(dev, "unexpected link command %d\n", | ||
259 | bmEEMCmd); | ||
260 | continue; | ||
261 | } | ||
262 | |||
263 | } else { | ||
264 | u32 crc, crc2; | ||
265 | int is_last; | ||
266 | |||
267 | /* zero length EEM packet? */ | ||
268 | if (header == 0) | ||
269 | continue; | ||
270 | |||
271 | /* | ||
272 | * EEM data packet header : | ||
273 | * b0..13: length of ethernet frame | ||
274 | * b14: bmCRC | ||
275 | * b15: 0 (EEM data) | ||
276 | */ | ||
277 | len = header & 0x3FFF; | ||
278 | |||
279 | /* bogus EEM payload? */ | ||
280 | if (skb->len < len) | ||
281 | return 0; | ||
282 | |||
283 | /* bogus ethernet frame? */ | ||
284 | if (len < (ETH_HLEN + ETH_FCS_LEN)) | ||
285 | goto next; | ||
286 | |||
287 | /* | ||
288 | * Treat the last payload differently: framework | ||
289 | * code expects our "fixup" to have stripped off | ||
290 | * headers, so "skb" is a data packet (or error). | ||
291 | * Else if it's not the last payload, keep "skb" | ||
292 | * for further processing. | ||
293 | */ | ||
294 | is_last = (len == skb->len); | ||
295 | if (is_last) | ||
296 | skb2 = skb; | ||
297 | else { | ||
298 | skb2 = skb_clone(skb, GFP_ATOMIC); | ||
299 | if (unlikely(!skb2)) | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | crc = get_unaligned_le32(skb2->data | ||
304 | + len - ETH_FCS_LEN); | ||
305 | skb_trim(skb2, len - ETH_FCS_LEN); | ||
306 | |||
307 | /* | ||
308 | * The bmCRC helps to denote when the CRC field in | ||
309 | * the Ethernet frame contains a calculated CRC: | ||
310 | * bmCRC = 1 : CRC is calculated | ||
311 | * bmCRC = 0 : CRC = 0xDEADBEEF | ||
312 | */ | ||
313 | if (header & BIT(14)) | ||
314 | crc2 = ~crc32_le(~0, skb2->data, len); | ||
315 | else | ||
316 | crc2 = 0xdeadbeef; | ||
317 | |||
318 | if (is_last) | ||
319 | return crc == crc2; | ||
320 | |||
321 | if (unlikely(crc != crc2)) { | ||
322 | dev->stats.rx_errors++; | ||
323 | dev_kfree_skb_any(skb2); | ||
324 | } else | ||
325 | usbnet_skb_return(dev, skb2); | ||
326 | } | ||
327 | |||
328 | next: | ||
329 | skb_pull(skb, len); | ||
330 | } while (skb->len); | ||
331 | |||
332 | return 1; | ||
333 | } | ||
334 | |||
335 | static const struct driver_info eem_info = { | ||
336 | .description = "CDC EEM Device", | ||
337 | .flags = FLAG_ETHER, | ||
338 | .bind = eem_bind, | ||
339 | .rx_fixup = eem_rx_fixup, | ||
340 | .tx_fixup = eem_tx_fixup, | ||
341 | }; | ||
342 | |||
343 | /*-------------------------------------------------------------------------*/ | ||
344 | |||
345 | static const struct usb_device_id products[] = { | ||
346 | { | ||
347 | USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_EEM, | ||
348 | USB_CDC_PROTO_EEM), | ||
349 | .driver_info = (unsigned long) &eem_info, | ||
350 | }, | ||
351 | { | ||
352 | /* EMPTY == end of list */ | ||
353 | }, | ||
354 | }; | ||
355 | MODULE_DEVICE_TABLE(usb, products); | ||
356 | |||
357 | static struct usb_driver eem_driver = { | ||
358 | .name = "cdc_eem", | ||
359 | .id_table = products, | ||
360 | .probe = usbnet_probe, | ||
361 | .disconnect = usbnet_disconnect, | ||
362 | .suspend = usbnet_suspend, | ||
363 | .resume = usbnet_resume, | ||
364 | }; | ||
365 | |||
366 | |||
367 | static int __init eem_init(void) | ||
368 | { | ||
369 | return usb_register(&eem_driver); | ||
370 | } | ||
371 | module_init(eem_init); | ||
372 | |||
373 | static void __exit eem_exit(void) | ||
374 | { | ||
375 | usb_deregister(&eem_driver); | ||
376 | } | ||
377 | module_exit(eem_exit); | ||
378 | |||
379 | MODULE_AUTHOR("Omar Laazimani <omar.oberthur@gmail.com>"); | ||
380 | MODULE_DESCRIPTION("USB CDC EEM"); | ||
381 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 28dd225269ad..0eb939c40ac1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c | |||
@@ -719,6 +719,14 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, | |||
719 | { | 719 | { |
720 | unsigned long flags; | 720 | unsigned long flags; |
721 | int ret = 0; | 721 | int ret = 0; |
722 | __le16 key_flags = 0; | ||
723 | |||
724 | key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); | ||
725 | key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); | ||
726 | key_flags &= ~STA_KEY_FLG_INVALID; | ||
727 | |||
728 | if (sta_id == priv->hw_params.bcast_sta_id) | ||
729 | key_flags |= STA_KEY_MULTICAST_MSK; | ||
722 | 730 | ||
723 | keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; | 731 | keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; |
724 | keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; | 732 | keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; |
@@ -738,6 +746,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv, | |||
738 | WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, | 746 | WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, |
739 | "no space for a new key"); | 747 | "no space for a new key"); |
740 | 748 | ||
749 | priv->stations[sta_id].sta.key.key_flags = key_flags; | ||
750 | |||
751 | |||
741 | /* This copy is acutally not needed: we get the key with each TX */ | 752 | /* This copy is acutally not needed: we get the key with each TX */ |
742 | memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); | 753 | memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); |
743 | 754 | ||
@@ -754,9 +765,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv, | |||
754 | { | 765 | { |
755 | u8 sta_id = IWL_INVALID_STATION; | 766 | u8 sta_id = IWL_INVALID_STATION; |
756 | unsigned long flags; | 767 | unsigned long flags; |
757 | __le16 key_flags = 0; | ||
758 | int i; | 768 | int i; |
759 | DECLARE_MAC_BUF(mac); | ||
760 | 769 | ||
761 | sta_id = priv->cfg->ops->smgmt->find_station(priv, addr); | 770 | sta_id = priv->cfg->ops->smgmt->find_station(priv, addr); |
762 | if (sta_id == IWL_INVALID_STATION) { | 771 | if (sta_id == IWL_INVALID_STATION) { |
@@ -771,16 +780,8 @@ void iwl_update_tkip_key(struct iwl_priv *priv, | |||
771 | return; | 780 | return; |
772 | } | 781 | } |
773 | 782 | ||
774 | key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); | ||
775 | key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); | ||
776 | key_flags &= ~STA_KEY_FLG_INVALID; | ||
777 | |||
778 | if (sta_id == priv->hw_params.bcast_sta_id) | ||
779 | key_flags |= STA_KEY_MULTICAST_MSK; | ||
780 | |||
781 | spin_lock_irqsave(&priv->sta_lock, flags); | 783 | spin_lock_irqsave(&priv->sta_lock, flags); |
782 | 784 | ||
783 | priv->stations[sta_id].sta.key.key_flags = key_flags; | ||
784 | priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; | 785 | priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; |
785 | 786 | ||
786 | for (i = 0; i < 5; i++) | 787 | for (i = 0; i < 5; i++) |
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 9f5191a84a13..f6c1489a0c4a 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -1460,7 +1460,6 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx | |||
1460 | rxq->bd = NULL; | 1460 | rxq->bd = NULL; |
1461 | rxq->rb_stts = NULL; | 1461 | rxq->rb_stts = NULL; |
1462 | } | 1462 | } |
1463 | EXPORT_SYMBOL(iwl3945_rx_queue_free); | ||
1464 | 1463 | ||
1465 | 1464 | ||
1466 | /* Convert linear signal-to-noise ratio into dB */ | 1465 | /* Convert linear signal-to-noise ratio into dB */ |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 0254741bece0..c01c1196d45e 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -2460,6 +2460,8 @@ static void __devinit quirk_i82576_sriov(struct pci_dev *dev) | |||
2460 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov); | 2460 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c9, quirk_i82576_sriov); |
2461 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov); | 2461 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov); |
2462 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); | 2462 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov); |
2463 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov); | ||
2464 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov); | ||
2463 | 2465 | ||
2464 | #endif /* CONFIG_PCI_IOV */ | 2466 | #endif /* CONFIG_PCI_IOV */ |
2465 | 2467 | ||
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index a1f17abba7dc..3d7a6687d247 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h | |||
@@ -182,6 +182,33 @@ static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2], | |||
182 | return compare_ether_addr(addr1, addr2); | 182 | return compare_ether_addr(addr1, addr2); |
183 | #endif | 183 | #endif |
184 | } | 184 | } |
185 | |||
186 | /** | ||
187 | * is_etherdev_addr - Tell if given Ethernet address belongs to the device. | ||
188 | * @dev: Pointer to a device structure | ||
189 | * @addr: Pointer to a six-byte array containing the Ethernet address | ||
190 | * | ||
191 | * Compare passed address with all addresses of the device. Return true if the | ||
192 | * address if one of the device addresses. | ||
193 | * | ||
194 | * Note that this function calls compare_ether_addr_64bits() so take care of | ||
195 | * the right padding. | ||
196 | */ | ||
197 | static inline bool is_etherdev_addr(const struct net_device *dev, | ||
198 | const u8 addr[6 + 2]) | ||
199 | { | ||
200 | struct netdev_hw_addr *ha; | ||
201 | int res = 1; | ||
202 | |||
203 | rcu_read_lock(); | ||
204 | for_each_dev_addr(dev, ha) { | ||
205 | res = compare_ether_addr_64bits(addr, ha->addr); | ||
206 | if (!res) | ||
207 | break; | ||
208 | } | ||
209 | rcu_read_unlock(); | ||
210 | return !res; | ||
211 | } | ||
185 | #endif /* __KERNEL__ */ | 212 | #endif /* __KERNEL__ */ |
186 | 213 | ||
187 | /** | 214 | /** |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ff42aba403c5..2af89b662cad 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -39,6 +39,7 @@ | |||
39 | 39 | ||
40 | #include <linux/device.h> | 40 | #include <linux/device.h> |
41 | #include <linux/percpu.h> | 41 | #include <linux/percpu.h> |
42 | #include <linux/rculist.h> | ||
42 | #include <linux/dmaengine.h> | 43 | #include <linux/dmaengine.h> |
43 | #include <linux/workqueue.h> | 44 | #include <linux/workqueue.h> |
44 | 45 | ||
@@ -210,6 +211,16 @@ struct dev_addr_list | |||
210 | #define dmi_users da_users | 211 | #define dmi_users da_users |
211 | #define dmi_gusers da_gusers | 212 | #define dmi_gusers da_gusers |
212 | 213 | ||
214 | struct netdev_hw_addr { | ||
215 | struct list_head list; | ||
216 | unsigned char addr[MAX_ADDR_LEN]; | ||
217 | unsigned char type; | ||
218 | #define NETDEV_HW_ADDR_T_LAN 1 | ||
219 | #define NETDEV_HW_ADDR_T_SAN 2 | ||
220 | #define NETDEV_HW_ADDR_T_SLAVE 3 | ||
221 | struct rcu_head rcu_head; | ||
222 | }; | ||
223 | |||
213 | struct hh_cache | 224 | struct hh_cache |
214 | { | 225 | { |
215 | struct hh_cache *hh_next; /* Next entry */ | 226 | struct hh_cache *hh_next; /* Next entry */ |
@@ -784,8 +795,11 @@ struct net_device | |||
784 | */ | 795 | */ |
785 | unsigned long last_rx; /* Time of last Rx */ | 796 | unsigned long last_rx; /* Time of last Rx */ |
786 | /* Interface address info used in eth_type_trans() */ | 797 | /* Interface address info used in eth_type_trans() */ |
787 | unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast | 798 | unsigned char *dev_addr; /* hw address, (before bcast |
788 | because most packets are unicast) */ | 799 | because most packets are |
800 | unicast) */ | ||
801 | |||
802 | struct list_head dev_addr_list; /* list of device hw addresses */ | ||
789 | 803 | ||
790 | unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ | 804 | unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ |
791 | 805 | ||
@@ -1791,6 +1805,13 @@ static inline void netif_addr_unlock_bh(struct net_device *dev) | |||
1791 | spin_unlock_bh(&dev->addr_list_lock); | 1805 | spin_unlock_bh(&dev->addr_list_lock); |
1792 | } | 1806 | } |
1793 | 1807 | ||
1808 | /* | ||
1809 | * dev_addr_list walker. Should be used only for read access. Call with | ||
1810 | * rcu_read_lock held. | ||
1811 | */ | ||
1812 | #define for_each_dev_addr(dev, ha) \ | ||
1813 | list_for_each_entry_rcu(ha, &dev->dev_addr_list, list) | ||
1814 | |||
1794 | /* These functions live elsewhere (drivers/net/net_init.c, but related) */ | 1815 | /* These functions live elsewhere (drivers/net/net_init.c, but related) */ |
1795 | 1816 | ||
1796 | extern void ether_setup(struct net_device *dev); | 1817 | extern void ether_setup(struct net_device *dev); |
@@ -1803,6 +1824,19 @@ extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
1803 | alloc_netdev_mq(sizeof_priv, name, setup, 1) | 1824 | alloc_netdev_mq(sizeof_priv, name, setup, 1) |
1804 | extern int register_netdev(struct net_device *dev); | 1825 | extern int register_netdev(struct net_device *dev); |
1805 | extern void unregister_netdev(struct net_device *dev); | 1826 | extern void unregister_netdev(struct net_device *dev); |
1827 | |||
1828 | /* Functions used for device addresses handling */ | ||
1829 | extern int dev_addr_add(struct net_device *dev, unsigned char *addr, | ||
1830 | unsigned char addr_type); | ||
1831 | extern int dev_addr_del(struct net_device *dev, unsigned char *addr, | ||
1832 | unsigned char addr_type); | ||
1833 | extern int dev_addr_add_multiple(struct net_device *to_dev, | ||
1834 | struct net_device *from_dev, | ||
1835 | unsigned char addr_type); | ||
1836 | extern int dev_addr_del_multiple(struct net_device *to_dev, | ||
1837 | struct net_device *from_dev, | ||
1838 | unsigned char addr_type); | ||
1839 | |||
1806 | /* Functions used for secondary unicast and multicast support */ | 1840 | /* Functions used for secondary unicast and multicast support */ |
1807 | extern void dev_set_rx_mode(struct net_device *dev); | 1841 | extern void dev_set_rx_mode(struct net_device *dev); |
1808 | extern void __dev_set_rx_mode(struct net_device *dev); | 1842 | extern void __dev_set_rx_mode(struct net_device *dev); |
diff --git a/include/linux/netfilter/xt_LED.h b/include/linux/netfilter/xt_LED.h index 4c91a0d770d0..f5509e7524d3 100644 --- a/include/linux/netfilter/xt_LED.h +++ b/include/linux/netfilter/xt_LED.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _XT_LED_H | 1 | #ifndef _XT_LED_H |
2 | #define _XT_LED_H | 2 | #define _XT_LED_H |
3 | 3 | ||
4 | #include <linux/types.h> | ||
5 | |||
4 | struct xt_led_info { | 6 | struct xt_led_info { |
5 | char id[27]; /* Unique ID for this trigger in the LED class */ | 7 | char id[27]; /* Unique ID for this trigger in the LED class */ |
6 | __u8 always_blink; /* Blink even if the LED is already on */ | 8 | __u8 always_blink; /* Blink even if the LED is already on */ |
diff --git a/include/linux/netfilter/xt_cluster.h b/include/linux/netfilter/xt_cluster.h index 5e0a0d07b526..886682656f09 100644 --- a/include/linux/netfilter/xt_cluster.h +++ b/include/linux/netfilter/xt_cluster.h | |||
@@ -12,4 +12,6 @@ struct xt_cluster_match_info { | |||
12 | u_int32_t flags; | 12 | u_int32_t flags; |
13 | }; | 13 | }; |
14 | 14 | ||
15 | #define XT_CLUSTER_NODES_MAX 32 | ||
16 | |||
15 | #endif /* _XT_CLUSTER_MATCH_H */ | 17 | #endif /* _XT_CLUSTER_MATCH_H */ |
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h index 3c86ed25a04c..c24124a42ce5 100644 --- a/include/linux/usb/cdc.h +++ b/include/linux/usb/cdc.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #define USB_CDC_SUBCLASS_DMM 0x09 | 17 | #define USB_CDC_SUBCLASS_DMM 0x09 |
18 | #define USB_CDC_SUBCLASS_MDLM 0x0a | 18 | #define USB_CDC_SUBCLASS_MDLM 0x0a |
19 | #define USB_CDC_SUBCLASS_OBEX 0x0b | 19 | #define USB_CDC_SUBCLASS_OBEX 0x0b |
20 | #define USB_CDC_SUBCLASS_EEM 0x0c | ||
20 | 21 | ||
21 | #define USB_CDC_PROTO_NONE 0 | 22 | #define USB_CDC_PROTO_NONE 0 |
22 | 23 | ||
@@ -28,6 +29,8 @@ | |||
28 | #define USB_CDC_ACM_PROTO_AT_CDMA 6 | 29 | #define USB_CDC_ACM_PROTO_AT_CDMA 6 |
29 | #define USB_CDC_ACM_PROTO_VENDOR 0xff | 30 | #define USB_CDC_ACM_PROTO_VENDOR 0xff |
30 | 31 | ||
32 | #define USB_CDC_PROTO_EEM 7 | ||
33 | |||
31 | /*-------------------------------------------------------------------------*/ | 34 | /*-------------------------------------------------------------------------*/ |
32 | 35 | ||
33 | /* | 36 | /* |
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index be5bd713d2c9..73aead222b32 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h | |||
@@ -457,6 +457,7 @@ int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count); | |||
457 | 457 | ||
458 | int hci_register_sysfs(struct hci_dev *hdev); | 458 | int hci_register_sysfs(struct hci_dev *hdev); |
459 | void hci_unregister_sysfs(struct hci_dev *hdev); | 459 | void hci_unregister_sysfs(struct hci_dev *hdev); |
460 | void hci_conn_init_sysfs(struct hci_conn *conn); | ||
460 | void hci_conn_add_sysfs(struct hci_conn *conn); | 461 | void hci_conn_add_sysfs(struct hci_conn *conn); |
461 | void hci_conn_del_sysfs(struct hci_conn *conn); | 462 | void hci_conn_del_sysfs(struct hci_conn *conn); |
462 | 463 | ||
diff --git a/include/net/tcp.h b/include/net/tcp.h index b55b4891029e..19f4150f4d4d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <net/ip.h> | 41 | #include <net/ip.h> |
42 | #include <net/tcp_states.h> | 42 | #include <net/tcp_states.h> |
43 | #include <net/inet_ecn.h> | 43 | #include <net/inet_ecn.h> |
44 | #include <net/dst.h> | ||
44 | 45 | ||
45 | #include <linux/seq_file.h> | 46 | #include <linux/seq_file.h> |
46 | 47 | ||
@@ -543,6 +544,17 @@ static inline void tcp_fast_path_check(struct sock *sk) | |||
543 | tcp_fast_path_on(tp); | 544 | tcp_fast_path_on(tp); |
544 | } | 545 | } |
545 | 546 | ||
547 | /* Compute the actual rto_min value */ | ||
548 | static inline u32 tcp_rto_min(struct sock *sk) | ||
549 | { | ||
550 | struct dst_entry *dst = __sk_dst_get(sk); | ||
551 | u32 rto_min = TCP_RTO_MIN; | ||
552 | |||
553 | if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) | ||
554 | rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); | ||
555 | return rto_min; | ||
556 | } | ||
557 | |||
546 | /* Compute the actual receive window we are currently advertising. | 558 | /* Compute the actual receive window we are currently advertising. |
547 | * Rcv_nxt can be after the window if our peer push more data | 559 | * Rcv_nxt can be after the window if our peer push more data |
548 | * than the offered window. | 560 | * than the offered window. |
@@ -890,30 +902,32 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb) | |||
890 | { | 902 | { |
891 | struct tcp_sock *tp = tcp_sk(sk); | 903 | struct tcp_sock *tp = tcp_sk(sk); |
892 | 904 | ||
893 | if (!sysctl_tcp_low_latency && tp->ucopy.task) { | 905 | if (sysctl_tcp_low_latency || !tp->ucopy.task) |
894 | __skb_queue_tail(&tp->ucopy.prequeue, skb); | 906 | return 0; |
895 | tp->ucopy.memory += skb->truesize; | 907 | |
896 | if (tp->ucopy.memory > sk->sk_rcvbuf) { | 908 | __skb_queue_tail(&tp->ucopy.prequeue, skb); |
897 | struct sk_buff *skb1; | 909 | tp->ucopy.memory += skb->truesize; |
898 | 910 | if (tp->ucopy.memory > sk->sk_rcvbuf) { | |
899 | BUG_ON(sock_owned_by_user(sk)); | 911 | struct sk_buff *skb1; |
900 | 912 | ||
901 | while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) { | 913 | BUG_ON(sock_owned_by_user(sk)); |
902 | sk_backlog_rcv(sk, skb1); | 914 | |
903 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED); | 915 | while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) { |
904 | } | 916 | sk_backlog_rcv(sk, skb1); |
905 | 917 | NET_INC_STATS_BH(sock_net(sk), | |
906 | tp->ucopy.memory = 0; | 918 | LINUX_MIB_TCPPREQUEUEDROPPED); |
907 | } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { | ||
908 | wake_up_interruptible(sk->sk_sleep); | ||
909 | if (!inet_csk_ack_scheduled(sk)) | ||
910 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, | ||
911 | (3 * TCP_RTO_MIN) / 4, | ||
912 | TCP_RTO_MAX); | ||
913 | } | 919 | } |
914 | return 1; | 920 | |
921 | tp->ucopy.memory = 0; | ||
922 | } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { | ||
923 | wake_up_interruptible_poll(sk->sk_sleep, | ||
924 | POLLIN | POLLRDNORM | POLLRDBAND); | ||
925 | if (!inet_csk_ack_scheduled(sk)) | ||
926 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, | ||
927 | (3 * tcp_rto_min(sk)) / 4, | ||
928 | TCP_RTO_MAX); | ||
915 | } | 929 | } |
916 | return 0; | 930 | return 1; |
917 | } | 931 | } |
918 | 932 | ||
919 | 933 | ||
diff --git a/net/Kconfig b/net/Kconfig index ce77db4fcec8..c19f549c8e74 100644 --- a/net/Kconfig +++ b/net/Kconfig | |||
@@ -119,12 +119,6 @@ menuconfig NETFILTER | |||
119 | <file:Documentation/Changes> under "iptables" for the location of | 119 | <file:Documentation/Changes> under "iptables" for the location of |
120 | these packages. | 120 | these packages. |
121 | 121 | ||
122 | Make sure to say N to "Fast switching" below if you intend to say Y | ||
123 | here, as Fast switching currently bypasses netfilter. | ||
124 | |||
125 | Chances are that you should say Y here if you compile a kernel which | ||
126 | will run as a router and N for regular hosts. If unsure, say N. | ||
127 | |||
128 | if NETFILTER | 122 | if NETFILTER |
129 | 123 | ||
130 | config NETFILTER_DEBUG | 124 | config NETFILTER_DEBUG |
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 375f4b4f7f79..61309b26f271 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c | |||
@@ -248,6 +248,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) | |||
248 | if (hdev->notify) | 248 | if (hdev->notify) |
249 | hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); | 249 | hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); |
250 | 250 | ||
251 | hci_conn_init_sysfs(conn); | ||
252 | |||
251 | tasklet_enable(&hdev->tx_task); | 253 | tasklet_enable(&hdev->tx_task); |
252 | 254 | ||
253 | return conn; | 255 | return conn; |
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index b7c51082ddeb..a05d45eb3ba1 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
@@ -9,7 +9,7 @@ | |||
9 | struct class *bt_class = NULL; | 9 | struct class *bt_class = NULL; |
10 | EXPORT_SYMBOL_GPL(bt_class); | 10 | EXPORT_SYMBOL_GPL(bt_class); |
11 | 11 | ||
12 | static struct workqueue_struct *bluetooth; | 12 | static struct workqueue_struct *bt_workq; |
13 | 13 | ||
14 | static inline char *link_typetostr(int type) | 14 | static inline char *link_typetostr(int type) |
15 | { | 15 | { |
@@ -88,9 +88,12 @@ static struct device_type bt_link = { | |||
88 | static void add_conn(struct work_struct *work) | 88 | static void add_conn(struct work_struct *work) |
89 | { | 89 | { |
90 | struct hci_conn *conn = container_of(work, struct hci_conn, work_add); | 90 | struct hci_conn *conn = container_of(work, struct hci_conn, work_add); |
91 | struct hci_dev *hdev = conn->hdev; | ||
92 | |||
93 | /* ensure previous del is complete */ | ||
94 | flush_work(&conn->work_del); | ||
91 | 95 | ||
92 | /* ensure previous add/del is complete */ | 96 | dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); |
93 | flush_workqueue(bluetooth); | ||
94 | 97 | ||
95 | if (device_add(&conn->dev) < 0) { | 98 | if (device_add(&conn->dev) < 0) { |
96 | BT_ERR("Failed to register connection device"); | 99 | BT_ERR("Failed to register connection device"); |
@@ -98,27 +101,6 @@ static void add_conn(struct work_struct *work) | |||
98 | } | 101 | } |
99 | } | 102 | } |
100 | 103 | ||
101 | void hci_conn_add_sysfs(struct hci_conn *conn) | ||
102 | { | ||
103 | struct hci_dev *hdev = conn->hdev; | ||
104 | |||
105 | BT_DBG("conn %p", conn); | ||
106 | |||
107 | conn->dev.type = &bt_link; | ||
108 | conn->dev.class = bt_class; | ||
109 | conn->dev.parent = &hdev->dev; | ||
110 | |||
111 | dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); | ||
112 | |||
113 | dev_set_drvdata(&conn->dev, conn); | ||
114 | |||
115 | device_initialize(&conn->dev); | ||
116 | |||
117 | INIT_WORK(&conn->work_add, add_conn); | ||
118 | |||
119 | queue_work(bluetooth, &conn->work_add); | ||
120 | } | ||
121 | |||
122 | /* | 104 | /* |
123 | * The rfcomm tty device will possibly retain even when conn | 105 | * The rfcomm tty device will possibly retain even when conn |
124 | * is down, and sysfs doesn't support move zombie device, | 106 | * is down, and sysfs doesn't support move zombie device, |
@@ -134,8 +116,11 @@ static void del_conn(struct work_struct *work) | |||
134 | struct hci_conn *conn = container_of(work, struct hci_conn, work_del); | 116 | struct hci_conn *conn = container_of(work, struct hci_conn, work_del); |
135 | struct hci_dev *hdev = conn->hdev; | 117 | struct hci_dev *hdev = conn->hdev; |
136 | 118 | ||
137 | /* ensure previous add/del is complete */ | 119 | /* ensure previous add is complete */ |
138 | flush_workqueue(bluetooth); | 120 | flush_work(&conn->work_add); |
121 | |||
122 | if (!device_is_registered(&conn->dev)) | ||
123 | return; | ||
139 | 124 | ||
140 | while (1) { | 125 | while (1) { |
141 | struct device *dev; | 126 | struct device *dev; |
@@ -152,16 +137,36 @@ static void del_conn(struct work_struct *work) | |||
152 | hci_dev_put(hdev); | 137 | hci_dev_put(hdev); |
153 | } | 138 | } |
154 | 139 | ||
155 | void hci_conn_del_sysfs(struct hci_conn *conn) | 140 | void hci_conn_init_sysfs(struct hci_conn *conn) |
156 | { | 141 | { |
142 | struct hci_dev *hdev = conn->hdev; | ||
143 | |||
157 | BT_DBG("conn %p", conn); | 144 | BT_DBG("conn %p", conn); |
158 | 145 | ||
159 | if (!device_is_registered(&conn->dev)) | 146 | conn->dev.type = &bt_link; |
160 | return; | 147 | conn->dev.class = bt_class; |
148 | conn->dev.parent = &hdev->dev; | ||
149 | |||
150 | dev_set_drvdata(&conn->dev, conn); | ||
151 | |||
152 | device_initialize(&conn->dev); | ||
161 | 153 | ||
154 | INIT_WORK(&conn->work_add, add_conn); | ||
162 | INIT_WORK(&conn->work_del, del_conn); | 155 | INIT_WORK(&conn->work_del, del_conn); |
156 | } | ||
157 | |||
158 | void hci_conn_add_sysfs(struct hci_conn *conn) | ||
159 | { | ||
160 | BT_DBG("conn %p", conn); | ||
161 | |||
162 | queue_work(bt_workq, &conn->work_add); | ||
163 | } | ||
164 | |||
165 | void hci_conn_del_sysfs(struct hci_conn *conn) | ||
166 | { | ||
167 | BT_DBG("conn %p", conn); | ||
163 | 168 | ||
164 | queue_work(bluetooth, &conn->work_del); | 169 | queue_work(bt_workq, &conn->work_del); |
165 | } | 170 | } |
166 | 171 | ||
167 | static inline char *host_typetostr(int type) | 172 | static inline char *host_typetostr(int type) |
@@ -438,13 +443,13 @@ void hci_unregister_sysfs(struct hci_dev *hdev) | |||
438 | 443 | ||
439 | int __init bt_sysfs_init(void) | 444 | int __init bt_sysfs_init(void) |
440 | { | 445 | { |
441 | bluetooth = create_singlethread_workqueue("bluetooth"); | 446 | bt_workq = create_singlethread_workqueue("bluetooth"); |
442 | if (!bluetooth) | 447 | if (!bt_workq) |
443 | return -ENOMEM; | 448 | return -ENOMEM; |
444 | 449 | ||
445 | bt_class = class_create(THIS_MODULE, "bluetooth"); | 450 | bt_class = class_create(THIS_MODULE, "bluetooth"); |
446 | if (IS_ERR(bt_class)) { | 451 | if (IS_ERR(bt_class)) { |
447 | destroy_workqueue(bluetooth); | 452 | destroy_workqueue(bt_workq); |
448 | return PTR_ERR(bt_class); | 453 | return PTR_ERR(bt_class); |
449 | } | 454 | } |
450 | 455 | ||
@@ -453,7 +458,7 @@ int __init bt_sysfs_init(void) | |||
453 | 458 | ||
454 | void bt_sysfs_cleanup(void) | 459 | void bt_sysfs_cleanup(void) |
455 | { | 460 | { |
456 | destroy_workqueue(bluetooth); | 461 | destroy_workqueue(bt_workq); |
457 | 462 | ||
458 | class_destroy(bt_class); | 463 | class_destroy(bt_class); |
459 | } | 464 | } |
diff --git a/net/core/dev.c b/net/core/dev.c index 3c8073fe970a..637ea71b0a0d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -3434,6 +3434,252 @@ void dev_set_rx_mode(struct net_device *dev) | |||
3434 | netif_addr_unlock_bh(dev); | 3434 | netif_addr_unlock_bh(dev); |
3435 | } | 3435 | } |
3436 | 3436 | ||
3437 | /* hw addresses list handling functions */ | ||
3438 | |||
3439 | static int __hw_addr_add(struct list_head *list, unsigned char *addr, | ||
3440 | int addr_len, unsigned char addr_type) | ||
3441 | { | ||
3442 | struct netdev_hw_addr *ha; | ||
3443 | int alloc_size; | ||
3444 | |||
3445 | if (addr_len > MAX_ADDR_LEN) | ||
3446 | return -EINVAL; | ||
3447 | |||
3448 | alloc_size = sizeof(*ha); | ||
3449 | if (alloc_size < L1_CACHE_BYTES) | ||
3450 | alloc_size = L1_CACHE_BYTES; | ||
3451 | ha = kmalloc(alloc_size, GFP_ATOMIC); | ||
3452 | if (!ha) | ||
3453 | return -ENOMEM; | ||
3454 | memcpy(ha->addr, addr, addr_len); | ||
3455 | ha->type = addr_type; | ||
3456 | list_add_tail_rcu(&ha->list, list); | ||
3457 | return 0; | ||
3458 | } | ||
3459 | |||
3460 | static void ha_rcu_free(struct rcu_head *head) | ||
3461 | { | ||
3462 | struct netdev_hw_addr *ha; | ||
3463 | |||
3464 | ha = container_of(head, struct netdev_hw_addr, rcu_head); | ||
3465 | kfree(ha); | ||
3466 | } | ||
3467 | |||
3468 | static int __hw_addr_del_ii(struct list_head *list, unsigned char *addr, | ||
3469 | int addr_len, unsigned char addr_type, | ||
3470 | int ignore_index) | ||
3471 | { | ||
3472 | struct netdev_hw_addr *ha; | ||
3473 | int i = 0; | ||
3474 | |||
3475 | list_for_each_entry(ha, list, list) { | ||
3476 | if (i++ != ignore_index && | ||
3477 | !memcmp(ha->addr, addr, addr_len) && | ||
3478 | (ha->type == addr_type || !addr_type)) { | ||
3479 | list_del_rcu(&ha->list); | ||
3480 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
3481 | return 0; | ||
3482 | } | ||
3483 | } | ||
3484 | return -ENOENT; | ||
3485 | } | ||
3486 | |||
3487 | static int __hw_addr_add_multiple_ii(struct list_head *to_list, | ||
3488 | struct list_head *from_list, | ||
3489 | int addr_len, unsigned char addr_type, | ||
3490 | int ignore_index) | ||
3491 | { | ||
3492 | int err; | ||
3493 | struct netdev_hw_addr *ha, *ha2; | ||
3494 | unsigned char type; | ||
3495 | |||
3496 | list_for_each_entry(ha, from_list, list) { | ||
3497 | type = addr_type ? addr_type : ha->type; | ||
3498 | err = __hw_addr_add(to_list, ha->addr, addr_len, type); | ||
3499 | if (err) | ||
3500 | goto unroll; | ||
3501 | } | ||
3502 | return 0; | ||
3503 | |||
3504 | unroll: | ||
3505 | list_for_each_entry(ha2, from_list, list) { | ||
3506 | if (ha2 == ha) | ||
3507 | break; | ||
3508 | type = addr_type ? addr_type : ha2->type; | ||
3509 | __hw_addr_del_ii(to_list, ha2->addr, addr_len, type, | ||
3510 | ignore_index); | ||
3511 | } | ||
3512 | return err; | ||
3513 | } | ||
3514 | |||
3515 | static void __hw_addr_del_multiple_ii(struct list_head *to_list, | ||
3516 | struct list_head *from_list, | ||
3517 | int addr_len, unsigned char addr_type, | ||
3518 | int ignore_index) | ||
3519 | { | ||
3520 | struct netdev_hw_addr *ha; | ||
3521 | unsigned char type; | ||
3522 | |||
3523 | list_for_each_entry(ha, from_list, list) { | ||
3524 | type = addr_type ? addr_type : ha->type; | ||
3525 | __hw_addr_del_ii(to_list, ha->addr, addr_len, addr_type, | ||
3526 | ignore_index); | ||
3527 | } | ||
3528 | } | ||
3529 | |||
3530 | static void __hw_addr_flush(struct list_head *list) | ||
3531 | { | ||
3532 | struct netdev_hw_addr *ha, *tmp; | ||
3533 | |||
3534 | list_for_each_entry_safe(ha, tmp, list, list) { | ||
3535 | list_del_rcu(&ha->list); | ||
3536 | call_rcu(&ha->rcu_head, ha_rcu_free); | ||
3537 | } | ||
3538 | } | ||
3539 | |||
3540 | /* Device addresses handling functions */ | ||
3541 | |||
3542 | static void dev_addr_flush(struct net_device *dev) | ||
3543 | { | ||
3544 | /* rtnl_mutex must be held here */ | ||
3545 | |||
3546 | __hw_addr_flush(&dev->dev_addr_list); | ||
3547 | dev->dev_addr = NULL; | ||
3548 | } | ||
3549 | |||
3550 | static int dev_addr_init(struct net_device *dev) | ||
3551 | { | ||
3552 | unsigned char addr[MAX_ADDR_LEN]; | ||
3553 | struct netdev_hw_addr *ha; | ||
3554 | int err; | ||
3555 | |||
3556 | /* rtnl_mutex must be held here */ | ||
3557 | |||
3558 | INIT_LIST_HEAD(&dev->dev_addr_list); | ||
3559 | memset(addr, 0, sizeof(*addr)); | ||
3560 | err = __hw_addr_add(&dev->dev_addr_list, addr, sizeof(*addr), | ||
3561 | NETDEV_HW_ADDR_T_LAN); | ||
3562 | if (!err) { | ||
3563 | /* | ||
3564 | * Get the first (previously created) address from the list | ||
3565 | * and set dev_addr pointer to this location. | ||
3566 | */ | ||
3567 | ha = list_first_entry(&dev->dev_addr_list, | ||
3568 | struct netdev_hw_addr, list); | ||
3569 | dev->dev_addr = ha->addr; | ||
3570 | } | ||
3571 | return err; | ||
3572 | } | ||
3573 | |||
3574 | /** | ||
3575 | * dev_addr_add - Add a device address | ||
3576 | * @dev: device | ||
3577 | * @addr: address to add | ||
3578 | * @addr_type: address type | ||
3579 | * | ||
3580 | * Add a device address to the device or increase the reference count if | ||
3581 | * it already exists. | ||
3582 | * | ||
3583 | * The caller must hold the rtnl_mutex. | ||
3584 | */ | ||
3585 | int dev_addr_add(struct net_device *dev, unsigned char *addr, | ||
3586 | unsigned char addr_type) | ||
3587 | { | ||
3588 | int err; | ||
3589 | |||
3590 | ASSERT_RTNL(); | ||
3591 | |||
3592 | err = __hw_addr_add(&dev->dev_addr_list, addr, dev->addr_len, | ||
3593 | addr_type); | ||
3594 | if (!err) | ||
3595 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
3596 | return err; | ||
3597 | } | ||
3598 | EXPORT_SYMBOL(dev_addr_add); | ||
3599 | |||
3600 | /** | ||
3601 | * dev_addr_del - Release a device address. | ||
3602 | * @dev: device | ||
3603 | * @addr: address to delete | ||
3604 | * @addr_type: address type | ||
3605 | * | ||
3606 | * Release reference to a device address and remove it from the device | ||
3607 | * if the reference count drops to zero. | ||
3608 | * | ||
3609 | * The caller must hold the rtnl_mutex. | ||
3610 | */ | ||
3611 | int dev_addr_del(struct net_device *dev, unsigned char *addr, | ||
3612 | unsigned char addr_type) | ||
3613 | { | ||
3614 | int err; | ||
3615 | |||
3616 | ASSERT_RTNL(); | ||
3617 | |||
3618 | err = __hw_addr_del_ii(&dev->dev_addr_list, addr, dev->addr_len, | ||
3619 | addr_type, 0); | ||
3620 | if (!err) | ||
3621 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
3622 | return err; | ||
3623 | } | ||
3624 | EXPORT_SYMBOL(dev_addr_del); | ||
3625 | |||
3626 | /** | ||
3627 | * dev_addr_add_multiple - Add device addresses from another device | ||
3628 | * @to_dev: device to which addresses will be added | ||
3629 | * @from_dev: device from which addresses will be added | ||
3630 | * @addr_type: address type - 0 means type will be used from from_dev | ||
3631 | * | ||
3632 | * Add device addresses of the one device to another. | ||
3633 | * |||
3634 | * The caller must hold the rtnl_mutex. | ||
3635 | */ | ||
3636 | int dev_addr_add_multiple(struct net_device *to_dev, | ||
3637 | struct net_device *from_dev, | ||
3638 | unsigned char addr_type) | ||
3639 | { | ||
3640 | int err; | ||
3641 | |||
3642 | ASSERT_RTNL(); | ||
3643 | |||
3644 | if (from_dev->addr_len != to_dev->addr_len) | ||
3645 | return -EINVAL; | ||
3646 | err = __hw_addr_add_multiple_ii(&to_dev->dev_addr_list, | ||
3647 | &from_dev->dev_addr_list, | ||
3648 | to_dev->addr_len, addr_type, 0); | ||
3649 | if (!err) | ||
3650 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
3651 | return err; | ||
3652 | } | ||
3653 | EXPORT_SYMBOL(dev_addr_add_multiple); | ||
3654 | |||
3655 | /** | ||
3656 | * dev_addr_del_multiple - Delete device addresses by another device | ||
3657 | * @to_dev: device where the addresses will be deleted | ||
3658 | * @from_dev: device whose addresses will be used to delete addresses |||
3659 | * @addr_type: address type - 0 means type will be used from from_dev |||
3660 | * | ||
3661 | * Deletes addresses in to device by the list of addresses in from device. | ||
3662 | * | ||
3663 | * The caller must hold the rtnl_mutex. | ||
3664 | */ | ||
3665 | int dev_addr_del_multiple(struct net_device *to_dev, | ||
3666 | struct net_device *from_dev, | ||
3667 | unsigned char addr_type) | ||
3668 | { | ||
3669 | ASSERT_RTNL(); | ||
3670 | |||
3671 | if (from_dev->addr_len != to_dev->addr_len) | ||
3672 | return -EINVAL; | ||
3673 | __hw_addr_del_multiple_ii(&to_dev->dev_addr_list, | ||
3674 | &from_dev->dev_addr_list, | ||
3675 | to_dev->addr_len, addr_type, 0); | ||
3676 | call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); | ||
3677 | return 0; | ||
3678 | } | ||
3679 | EXPORT_SYMBOL(dev_addr_del_multiple); | ||
3680 | |||
3681 | /* unicast and multicast addresses handling functions */ | ||
3682 | |||
3437 | int __dev_addr_delete(struct dev_addr_list **list, int *count, | 3683 | int __dev_addr_delete(struct dev_addr_list **list, int *count, |
3438 | void *addr, int alen, int glbl) | 3684 | void *addr, int alen, int glbl) |
3439 | { | 3685 | { |
@@ -4776,6 +5022,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, | |||
4776 | 5022 | ||
4777 | dev->gso_max_size = GSO_MAX_SIZE; | 5023 | dev->gso_max_size = GSO_MAX_SIZE; |
4778 | 5024 | ||
5025 | dev_addr_init(dev); | ||
4779 | netdev_init_queues(dev); | 5026 | netdev_init_queues(dev); |
4780 | 5027 | ||
4781 | INIT_LIST_HEAD(&dev->napi_list); | 5028 | INIT_LIST_HEAD(&dev->napi_list); |
@@ -4801,6 +5048,9 @@ void free_netdev(struct net_device *dev) | |||
4801 | 5048 | ||
4802 | kfree(dev->_tx); | 5049 | kfree(dev->_tx); |
4803 | 5050 | ||
5051 | /* Flush device addresses */ | ||
5052 | dev_addr_flush(dev); | ||
5053 | |||
4804 | list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) | 5054 | list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) |
4805 | netif_napi_del(p); | 5055 | netif_napi_del(p); |
4806 | 5056 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f091a5a845c1..d152394b2611 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -502,7 +502,9 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size) | |||
502 | shinfo->gso_segs = 0; | 502 | shinfo->gso_segs = 0; |
503 | shinfo->gso_type = 0; | 503 | shinfo->gso_type = 0; |
504 | shinfo->ip6_frag_id = 0; | 504 | shinfo->ip6_frag_id = 0; |
505 | shinfo->tx_flags.flags = 0; | ||
505 | shinfo->frag_list = NULL; | 506 | shinfo->frag_list = NULL; |
507 | memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps)); | ||
506 | 508 | ||
507 | memset(skb, 0, offsetof(struct sk_buff, tail)); | 509 | memset(skb, 0, offsetof(struct sk_buff, tail)); |
508 | skb->data = skb->head + NET_SKB_PAD; | 510 | skb->data = skb->head + NET_SKB_PAD; |
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 8554d0ea1719..68a8d892c711 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -49,19 +49,22 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw, | |||
49 | inet_twsk_put(tw); | 49 | inet_twsk_put(tw); |
50 | } | 50 | } |
51 | 51 | ||
52 | void inet_twsk_put(struct inet_timewait_sock *tw) | 52 | static noinline void inet_twsk_free(struct inet_timewait_sock *tw) |
53 | { | 53 | { |
54 | if (atomic_dec_and_test(&tw->tw_refcnt)) { | 54 | struct module *owner = tw->tw_prot->owner; |
55 | struct module *owner = tw->tw_prot->owner; | 55 | twsk_destructor((struct sock *)tw); |
56 | twsk_destructor((struct sock *)tw); | ||
57 | #ifdef SOCK_REFCNT_DEBUG | 56 | #ifdef SOCK_REFCNT_DEBUG |
58 | printk(KERN_DEBUG "%s timewait_sock %p released\n", | 57 | pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw); |
59 | tw->tw_prot->name, tw); | ||
60 | #endif | 58 | #endif |
61 | release_net(twsk_net(tw)); | 59 | release_net(twsk_net(tw)); |
62 | kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw); | 60 | kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw); |
63 | module_put(owner); | 61 | module_put(owner); |
64 | } | 62 | } |
63 | |||
64 | void inet_twsk_put(struct inet_timewait_sock *tw) | ||
65 | { | ||
66 | if (atomic_dec_and_test(&tw->tw_refcnt)) | ||
67 | inet_twsk_free(tw); | ||
65 | } | 68 | } |
66 | EXPORT_SYMBOL_GPL(inet_twsk_put); | 69 | EXPORT_SYMBOL_GPL(inet_twsk_put); |
67 | 70 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 56dcf97a97fb..eeb8a92aa416 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -597,16 +597,6 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) | |||
597 | tcp_grow_window(sk, skb); | 597 | tcp_grow_window(sk, skb); |
598 | } | 598 | } |
599 | 599 | ||
600 | static u32 tcp_rto_min(struct sock *sk) | ||
601 | { | ||
602 | struct dst_entry *dst = __sk_dst_get(sk); | ||
603 | u32 rto_min = TCP_RTO_MIN; | ||
604 | |||
605 | if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) | ||
606 | rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); | ||
607 | return rto_min; | ||
608 | } | ||
609 | |||
610 | /* Called to compute a smoothed rtt estimate. The data fed to this | 600 | /* Called to compute a smoothed rtt estimate. The data fed to this |
611 | * routine either comes from timestamps, or from segments that were | 601 | * routine either comes from timestamps, or from segments that were |
612 | * known _not_ to have been retransmitted [see Karn/Partridge | 602 | * known _not_ to have been retransmitted [see Karn/Partridge |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index bda74e8aed7e..fc79e3416288 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1593,7 +1593,7 @@ process: | |||
1593 | #endif | 1593 | #endif |
1594 | { | 1594 | { |
1595 | if (!tcp_prequeue(sk, skb)) | 1595 | if (!tcp_prequeue(sk, skb)) |
1596 | ret = tcp_v4_do_rcv(sk, skb); | 1596 | ret = tcp_v4_do_rcv(sk, skb); |
1597 | } | 1597 | } |
1598 | } else | 1598 | } else |
1599 | sk_add_backlog(sk, skb); | 1599 | sk_add_backlog(sk, skb); |
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c index 14e6724d5672..91490ad9302c 100644 --- a/net/ipv6/netfilter/ip6t_ipv6header.c +++ b/net/ipv6/netfilter/ip6t_ipv6header.c | |||
@@ -50,14 +50,14 @@ ipv6header_mt6(const struct sk_buff *skb, const struct xt_match_param *par) | |||
50 | struct ipv6_opt_hdr _hdr; | 50 | struct ipv6_opt_hdr _hdr; |
51 | int hdrlen; | 51 | int hdrlen; |
52 | 52 | ||
53 | /* Is there enough space for the next ext header? */ | ||
54 | if (len < (int)sizeof(struct ipv6_opt_hdr)) | ||
55 | return false; | ||
56 | /* No more exthdr -> evaluate */ | 53 | /* No more exthdr -> evaluate */ |
57 | if (nexthdr == NEXTHDR_NONE) { | 54 | if (nexthdr == NEXTHDR_NONE) { |
58 | temp |= MASK_NONE; | 55 | temp |= MASK_NONE; |
59 | break; | 56 | break; |
60 | } | 57 | } |
58 | /* Is there enough space for the next ext header? */ | ||
59 | if (len < (int)sizeof(struct ipv6_opt_hdr)) | ||
60 | return false; | ||
61 | /* ESP -> evaluate */ | 61 | /* ESP -> evaluate */ |
62 | if (nexthdr == NEXTHDR_ESP) { | 62 | if (nexthdr == NEXTHDR_ESP) { |
63 | temp |= MASK_ESP; | 63 | temp |= MASK_ESP; |
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 5196006b35e0..efaf38349731 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c | |||
@@ -478,7 +478,7 @@ minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) | |||
478 | return NULL; | 478 | return NULL; |
479 | 479 | ||
480 | for (i = 0; i < IEEE80211_NUM_BANDS; i++) { | 480 | for (i = 0; i < IEEE80211_NUM_BANDS; i++) { |
481 | sband = hw->wiphy->bands[hw->conf.channel->band]; | 481 | sband = hw->wiphy->bands[i]; |
482 | if (sband->n_bitrates > max_rates) | 482 | if (sband->n_bitrates > max_rates) |
483 | max_rates = sband->n_bitrates; | 483 | max_rates = sband->n_bitrates; |
484 | } | 484 | } |
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index 2b33e3a7ee7d..6704fb55c6b2 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c | |||
@@ -319,13 +319,44 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband, | |||
319 | struct ieee80211_sta *sta, void *priv_sta) | 319 | struct ieee80211_sta *sta, void *priv_sta) |
320 | { | 320 | { |
321 | struct rc_pid_sta_info *spinfo = priv_sta; | 321 | struct rc_pid_sta_info *spinfo = priv_sta; |
322 | struct rc_pid_info *pinfo = priv; | ||
323 | struct rc_pid_rateinfo *rinfo = pinfo->rinfo; | ||
322 | struct sta_info *si; | 324 | struct sta_info *si; |
325 | int i, j, tmp; | ||
326 | bool s; | ||
323 | 327 | ||
324 | /* TODO: This routine should consider using RSSI from previous packets | 328 | /* TODO: This routine should consider using RSSI from previous packets |
325 | * as we need to have IEEE 802.1X auth succeed immediately after assoc.. | 329 | * as we need to have IEEE 802.1X auth succeed immediately after assoc.. |
326 | * Until that method is implemented, we will use the lowest supported | 330 | * Until that method is implemented, we will use the lowest supported |
327 | * rate as a workaround. */ | 331 | * rate as a workaround. */ |
328 | 332 | ||
333 | /* Sort the rates. This is optimized for the most common case (i.e. | ||
334 | * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed | ||
335 | * mapping too. */ | ||
336 | for (i = 0; i < sband->n_bitrates; i++) { | ||
337 | rinfo[i].index = i; | ||
338 | rinfo[i].rev_index = i; | ||
339 | if (RC_PID_FAST_START) | ||
340 | rinfo[i].diff = 0; | ||
341 | else | ||
342 | rinfo[i].diff = i * pinfo->norm_offset; | ||
343 | } | ||
344 | for (i = 1; i < sband->n_bitrates; i++) { | ||
345 | s = 0; | ||
346 | for (j = 0; j < sband->n_bitrates - i; j++) | ||
347 | if (unlikely(sband->bitrates[rinfo[j].index].bitrate > | ||
348 | sband->bitrates[rinfo[j + 1].index].bitrate)) { | ||
349 | tmp = rinfo[j].index; | ||
350 | rinfo[j].index = rinfo[j + 1].index; | ||
351 | rinfo[j + 1].index = tmp; | ||
352 | rinfo[rinfo[j].index].rev_index = j; | ||
353 | rinfo[rinfo[j + 1].index].rev_index = j + 1; | ||
354 | s = 1; | ||
355 | } | ||
356 | if (!s) | ||
357 | break; | ||
358 | } | ||
359 | |||
329 | spinfo->txrate_idx = rate_lowest_index(sband, sta); | 360 | spinfo->txrate_idx = rate_lowest_index(sband, sta); |
330 | /* HACK */ | 361 | /* HACK */ |
331 | si = container_of(sta, struct sta_info, sta); | 362 | si = container_of(sta, struct sta_info, sta); |
@@ -338,21 +369,22 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw, | |||
338 | struct rc_pid_info *pinfo; | 369 | struct rc_pid_info *pinfo; |
339 | struct rc_pid_rateinfo *rinfo; | 370 | struct rc_pid_rateinfo *rinfo; |
340 | struct ieee80211_supported_band *sband; | 371 | struct ieee80211_supported_band *sband; |
341 | int i, j, tmp; | 372 | int i, max_rates = 0; |
342 | bool s; | ||
343 | #ifdef CONFIG_MAC80211_DEBUGFS | 373 | #ifdef CONFIG_MAC80211_DEBUGFS |
344 | struct rc_pid_debugfs_entries *de; | 374 | struct rc_pid_debugfs_entries *de; |
345 | #endif | 375 | #endif |
346 | 376 | ||
347 | sband = hw->wiphy->bands[hw->conf.channel->band]; | ||
348 | |||
349 | pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); | 377 | pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); |
350 | if (!pinfo) | 378 | if (!pinfo) |
351 | return NULL; | 379 | return NULL; |
352 | 380 | ||
353 | /* We can safely assume that sband won't change unless we get | 381 | for (i = 0; i < IEEE80211_NUM_BANDS; i++) { |
354 | * reinitialized. */ | 382 | sband = hw->wiphy->bands[i]; |
355 | rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC); | 383 | if (sband->n_bitrates > max_rates) |
384 | max_rates = sband->n_bitrates; | ||
385 | } | ||
386 | |||
387 | rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC); | ||
356 | if (!rinfo) { | 388 | if (!rinfo) { |
357 | kfree(pinfo); | 389 | kfree(pinfo); |
358 | return NULL; | 390 | return NULL; |
@@ -370,33 +402,6 @@ static void *rate_control_pid_alloc(struct ieee80211_hw *hw, | |||
370 | pinfo->rinfo = rinfo; | 402 | pinfo->rinfo = rinfo; |
371 | pinfo->oldrate = 0; | 403 | pinfo->oldrate = 0; |
372 | 404 | ||
373 | /* Sort the rates. This is optimized for the most common case (i.e. | ||
374 | * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed | ||
375 | * mapping too. */ | ||
376 | for (i = 0; i < sband->n_bitrates; i++) { | ||
377 | rinfo[i].index = i; | ||
378 | rinfo[i].rev_index = i; | ||
379 | if (RC_PID_FAST_START) | ||
380 | rinfo[i].diff = 0; | ||
381 | else | ||
382 | rinfo[i].diff = i * pinfo->norm_offset; | ||
383 | } | ||
384 | for (i = 1; i < sband->n_bitrates; i++) { | ||
385 | s = 0; | ||
386 | for (j = 0; j < sband->n_bitrates - i; j++) | ||
387 | if (unlikely(sband->bitrates[rinfo[j].index].bitrate > | ||
388 | sband->bitrates[rinfo[j + 1].index].bitrate)) { | ||
389 | tmp = rinfo[j].index; | ||
390 | rinfo[j].index = rinfo[j + 1].index; | ||
391 | rinfo[j + 1].index = tmp; | ||
392 | rinfo[rinfo[j].index].rev_index = j; | ||
393 | rinfo[rinfo[j + 1].index].rev_index = j + 1; | ||
394 | s = 1; | ||
395 | } | ||
396 | if (!s) | ||
397 | break; | ||
398 | } | ||
399 | |||
400 | #ifdef CONFIG_MAC80211_DEBUGFS | 405 | #ifdef CONFIG_MAC80211_DEBUGFS |
401 | de = &pinfo->dentries; | 406 | de = &pinfo->dentries; |
402 | de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR, | 407 | de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR, |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 36e8e2de980c..5f9a8d7af83d 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -792,7 +792,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) | |||
792 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 792 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
793 | 793 | ||
794 | /* internal error, why is TX_FRAGMENTED set? */ | 794 | /* internal error, why is TX_FRAGMENTED set? */ |
795 | if (WARN_ON(skb->len <= frag_threshold)) | 795 | if (WARN_ON(skb->len + FCS_LEN <= frag_threshold)) |
796 | return TX_DROP; | 796 | return TX_DROP; |
797 | 797 | ||
798 | /* | 798 | /* |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index f13fc57e1ecb..c523f0b8cee5 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -1186,28 +1186,6 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[]) | |||
1186 | return 0; | 1186 | return 0; |
1187 | } | 1187 | } |
1188 | 1188 | ||
1189 | static inline void | ||
1190 | ctnetlink_event_report(struct nf_conn *ct, u32 pid, int report) | ||
1191 | { | ||
1192 | unsigned int events = 0; | ||
1193 | |||
1194 | if (test_bit(IPS_EXPECTED_BIT, &ct->status)) | ||
1195 | events |= IPCT_RELATED; | ||
1196 | else | ||
1197 | events |= IPCT_NEW; | ||
1198 | |||
1199 | nf_conntrack_event_report(IPCT_STATUS | | ||
1200 | IPCT_HELPER | | ||
1201 | IPCT_REFRESH | | ||
1202 | IPCT_PROTOINFO | | ||
1203 | IPCT_NATSEQADJ | | ||
1204 | IPCT_MARK | | ||
1205 | events, | ||
1206 | ct, | ||
1207 | pid, | ||
1208 | report); | ||
1209 | } | ||
1210 | |||
1211 | static struct nf_conn * | 1189 | static struct nf_conn * |
1212 | ctnetlink_create_conntrack(struct nlattr *cda[], | 1190 | ctnetlink_create_conntrack(struct nlattr *cda[], |
1213 | struct nf_conntrack_tuple *otuple, | 1191 | struct nf_conntrack_tuple *otuple, |
@@ -1373,6 +1351,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
1373 | err = -ENOENT; | 1351 | err = -ENOENT; |
1374 | if (nlh->nlmsg_flags & NLM_F_CREATE) { | 1352 | if (nlh->nlmsg_flags & NLM_F_CREATE) { |
1375 | struct nf_conn *ct; | 1353 | struct nf_conn *ct; |
1354 | enum ip_conntrack_events events; | ||
1376 | 1355 | ||
1377 | ct = ctnetlink_create_conntrack(cda, &otuple, | 1356 | ct = ctnetlink_create_conntrack(cda, &otuple, |
1378 | &rtuple, u3); | 1357 | &rtuple, u3); |
@@ -1383,9 +1362,18 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
1383 | err = 0; | 1362 | err = 0; |
1384 | nf_conntrack_get(&ct->ct_general); | 1363 | nf_conntrack_get(&ct->ct_general); |
1385 | spin_unlock_bh(&nf_conntrack_lock); | 1364 | spin_unlock_bh(&nf_conntrack_lock); |
1386 | ctnetlink_event_report(ct, | 1365 | if (test_bit(IPS_EXPECTED_BIT, &ct->status)) |
1387 | NETLINK_CB(skb).pid, | 1366 | events = IPCT_RELATED; |
1388 | nlmsg_report(nlh)); | 1367 | else |
1368 | events = IPCT_NEW; | ||
1369 | |||
1370 | nf_conntrack_event_report(IPCT_STATUS | | ||
1371 | IPCT_HELPER | | ||
1372 | IPCT_PROTOINFO | | ||
1373 | IPCT_NATSEQADJ | | ||
1374 | IPCT_MARK | events, | ||
1375 | ct, NETLINK_CB(skb).pid, | ||
1376 | nlmsg_report(nlh)); | ||
1389 | nf_ct_put(ct); | 1377 | nf_ct_put(ct); |
1390 | } else | 1378 | } else |
1391 | spin_unlock_bh(&nf_conntrack_lock); | 1379 | spin_unlock_bh(&nf_conntrack_lock); |
@@ -1404,9 +1392,13 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, | |||
1404 | if (err == 0) { | 1392 | if (err == 0) { |
1405 | nf_conntrack_get(&ct->ct_general); | 1393 | nf_conntrack_get(&ct->ct_general); |
1406 | spin_unlock_bh(&nf_conntrack_lock); | 1394 | spin_unlock_bh(&nf_conntrack_lock); |
1407 | ctnetlink_event_report(ct, | 1395 | nf_conntrack_event_report(IPCT_STATUS | |
1408 | NETLINK_CB(skb).pid, | 1396 | IPCT_HELPER | |
1409 | nlmsg_report(nlh)); | 1397 | IPCT_PROTOINFO | |
1398 | IPCT_NATSEQADJ | | ||
1399 | IPCT_MARK, | ||
1400 | ct, NETLINK_CB(skb).pid, | ||
1401 | nlmsg_report(nlh)); | ||
1410 | nf_ct_put(ct); | 1402 | nf_ct_put(ct); |
1411 | } else | 1403 | } else |
1412 | spin_unlock_bh(&nf_conntrack_lock); | 1404 | spin_unlock_bh(&nf_conntrack_lock); |
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index 6c4847662b85..69a639f35403 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c | |||
@@ -135,7 +135,13 @@ static bool xt_cluster_mt_checkentry(const struct xt_mtchk_param *par) | |||
135 | { | 135 | { |
136 | struct xt_cluster_match_info *info = par->matchinfo; | 136 | struct xt_cluster_match_info *info = par->matchinfo; |
137 | 137 | ||
138 | if (info->node_mask >= (1 << info->total_nodes)) { | 138 | if (info->total_nodes > XT_CLUSTER_NODES_MAX) { |
139 | printk(KERN_ERR "xt_cluster: you have exceeded the maximum " | ||
140 | "number of cluster nodes (%u > %u)\n", | ||
141 | info->total_nodes, XT_CLUSTER_NODES_MAX); | ||
142 | return false; | ||
143 | } | ||
144 | if (info->node_mask >= (1ULL << info->total_nodes)) { | ||
139 | printk(KERN_ERR "xt_cluster: this node mask cannot be " | 145 | printk(KERN_ERR "xt_cluster: this node mask cannot be " |
140 | "higher than the total number of nodes\n"); | 146 | "higher than the total number of nodes\n"); |
141 | return false; | 147 | return false; |
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index 92cfc9d7e3b9..69188e8358b4 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c | |||
@@ -51,7 +51,7 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt) | |||
51 | u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1; | 51 | u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1; |
52 | 52 | ||
53 | if (sch->ops == &bfifo_qdisc_ops) | 53 | if (sch->ops == &bfifo_qdisc_ops) |
54 | limit *= qdisc_dev(sch)->mtu; | 54 | limit *= psched_mtu(qdisc_dev(sch)); |
55 | 55 | ||
56 | q->limit = limit; | 56 | q->limit = limit; |
57 | } else { | 57 | } else { |
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c index 5d149c1b5f0d..9ad4d893a566 100644 --- a/net/wimax/op-msg.c +++ b/net/wimax/op-msg.c | |||
@@ -149,7 +149,8 @@ struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev, | |||
149 | } | 149 | } |
150 | result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg); | 150 | result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg); |
151 | if (result < 0) { | 151 | if (result < 0) { |
152 | dev_err(dev, "no memory to add payload in attribute\n"); | 152 | dev_err(dev, "no memory to add payload (msg %p size %zu) in " |
153 | "attribute: %d\n", msg, size, result); | ||
153 | goto error_nla_put; | 154 | goto error_nla_put; |
154 | } | 155 | } |
155 | genlmsg_end(skb, genl_msg); | 156 | genlmsg_end(skb, genl_msg); |
@@ -299,10 +300,10 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name, | |||
299 | struct sk_buff *skb; | 300 | struct sk_buff *skb; |
300 | 301 | ||
301 | skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags); | 302 | skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags); |
302 | if (skb == NULL) | 303 | if (IS_ERR(skb)) |
303 | goto error_msg_new; | 304 | result = PTR_ERR(skb); |
304 | result = wimax_msg_send(wimax_dev, skb); | 305 | else |
305 | error_msg_new: | 306 | result = wimax_msg_send(wimax_dev, skb); |
306 | return result; | 307 | return result; |
307 | } | 308 | } |
308 | EXPORT_SYMBOL_GPL(wimax_msg); | 309 | EXPORT_SYMBOL_GPL(wimax_msg); |
diff --git a/net/wimax/stack.c b/net/wimax/stack.c index a0ee76b52510..933e1422b09f 100644 --- a/net/wimax/stack.c +++ b/net/wimax/stack.c | |||
@@ -338,8 +338,21 @@ out: | |||
338 | */ | 338 | */ |
339 | void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) | 339 | void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state) |
340 | { | 340 | { |
341 | /* | ||
342 | * A driver cannot take the wimax_dev out of the | ||
343 | * __WIMAX_ST_NULL state unless by calling wimax_dev_add(). If | ||
344 | * the wimax_dev's state is still NULL, we ignore any request | ||
345 | * to change its state because it means it hasn't been yet | ||
346 | * registered. | ||
347 | * | ||
348 | * There is no need to complain about it, as routines that | ||
349 | * call this might be shared from different code paths that | ||
350 | * are called before or after wimax_dev_add() has done its | ||
351 | * job. | ||
352 | */ | ||
341 | mutex_lock(&wimax_dev->mutex); | 353 | mutex_lock(&wimax_dev->mutex); |
342 | __wimax_state_change(wimax_dev, new_state); | 354 | if (wimax_dev->state > __WIMAX_ST_NULL) |
355 | __wimax_state_change(wimax_dev, new_state); | ||
343 | mutex_unlock(&wimax_dev->mutex); | 356 | mutex_unlock(&wimax_dev->mutex); |
344 | return; | 357 | return; |
345 | } | 358 | } |
@@ -376,7 +389,7 @@ EXPORT_SYMBOL_GPL(wimax_state_get); | |||
376 | void wimax_dev_init(struct wimax_dev *wimax_dev) | 389 | void wimax_dev_init(struct wimax_dev *wimax_dev) |
377 | { | 390 | { |
378 | INIT_LIST_HEAD(&wimax_dev->id_table_node); | 391 | INIT_LIST_HEAD(&wimax_dev->id_table_node); |
379 | __wimax_state_set(wimax_dev, WIMAX_ST_UNINITIALIZED); | 392 | __wimax_state_set(wimax_dev, __WIMAX_ST_NULL); |
380 | mutex_init(&wimax_dev->mutex); | 393 | mutex_init(&wimax_dev->mutex); |
381 | mutex_init(&wimax_dev->mutex_reset); | 394 | mutex_init(&wimax_dev->mutex_reset); |
382 | } | 395 | } |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 9fea910204db..537af62ec42b 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -906,6 +906,7 @@ EXPORT_SYMBOL(freq_reg_info); | |||
906 | int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth, | 906 | int freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 *bandwidth, |
907 | const struct ieee80211_reg_rule **reg_rule) | 907 | const struct ieee80211_reg_rule **reg_rule) |
908 | { | 908 | { |
909 | assert_cfg80211_lock(); | ||
909 | return freq_reg_info_regd(wiphy, center_freq, | 910 | return freq_reg_info_regd(wiphy, center_freq, |
910 | bandwidth, reg_rule, NULL); | 911 | bandwidth, reg_rule, NULL); |
911 | } | 912 | } |
@@ -1134,7 +1135,8 @@ static bool reg_is_world_roaming(struct wiphy *wiphy) | |||
1134 | if (is_world_regdom(cfg80211_regdomain->alpha2) || | 1135 | if (is_world_regdom(cfg80211_regdomain->alpha2) || |
1135 | (wiphy->regd && is_world_regdom(wiphy->regd->alpha2))) | 1136 | (wiphy->regd && is_world_regdom(wiphy->regd->alpha2))) |
1136 | return true; | 1137 | return true; |
1137 | if (last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && | 1138 | if (last_request && |
1139 | last_request->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && | ||
1138 | wiphy->custom_regulatory) | 1140 | wiphy->custom_regulatory) |
1139 | return true; | 1141 | return true; |
1140 | return false; | 1142 | return false; |
@@ -1143,6 +1145,12 @@ static bool reg_is_world_roaming(struct wiphy *wiphy) | |||
1143 | /* Reap the advantages of previously found beacons */ | 1145 | /* Reap the advantages of previously found beacons */ |
1144 | static void reg_process_beacons(struct wiphy *wiphy) | 1146 | static void reg_process_beacons(struct wiphy *wiphy) |
1145 | { | 1147 | { |
1148 | /* | ||
1149 | * Means we are just firing up cfg80211, so no beacons would | ||
1150 | * have been processed yet. | ||
1151 | */ | ||
1152 | if (!last_request) | ||
1153 | return; | ||
1146 | if (!reg_is_world_roaming(wiphy)) | 1154 | if (!reg_is_world_roaming(wiphy)) |
1147 | return; | 1155 | return; |
1148 | wiphy_update_beacon_reg(wiphy); | 1156 | wiphy_update_beacon_reg(wiphy); |
@@ -1177,6 +1185,8 @@ static void handle_channel_custom(struct wiphy *wiphy, | |||
1177 | struct ieee80211_supported_band *sband; | 1185 | struct ieee80211_supported_band *sband; |
1178 | struct ieee80211_channel *chan; | 1186 | struct ieee80211_channel *chan; |
1179 | 1187 | ||
1188 | assert_cfg80211_lock(); | ||
1189 | |||
1180 | sband = wiphy->bands[band]; | 1190 | sband = wiphy->bands[band]; |
1181 | BUG_ON(chan_idx >= sband->n_channels); | 1191 | BUG_ON(chan_idx >= sband->n_channels); |
1182 | chan = &sband->channels[chan_idx]; | 1192 | chan = &sband->channels[chan_idx]; |
@@ -1215,10 +1225,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy, | |||
1215 | const struct ieee80211_regdomain *regd) | 1225 | const struct ieee80211_regdomain *regd) |
1216 | { | 1226 | { |
1217 | enum ieee80211_band band; | 1227 | enum ieee80211_band band; |
1228 | |||
1229 | mutex_lock(&cfg80211_mutex); | ||
1218 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { | 1230 | for (band = 0; band < IEEE80211_NUM_BANDS; band++) { |
1219 | if (wiphy->bands[band]) | 1231 | if (wiphy->bands[band]) |
1220 | handle_band_custom(wiphy, band, regd); | 1232 | handle_band_custom(wiphy, band, regd); |
1221 | } | 1233 | } |
1234 | mutex_unlock(&cfg80211_mutex); | ||
1222 | } | 1235 | } |
1223 | EXPORT_SYMBOL(wiphy_apply_custom_regulatory); | 1236 | EXPORT_SYMBOL(wiphy_apply_custom_regulatory); |
1224 | 1237 | ||
@@ -1424,7 +1437,7 @@ new_request: | |||
1424 | return call_crda(last_request->alpha2); | 1437 | return call_crda(last_request->alpha2); |
1425 | } | 1438 | } |
1426 | 1439 | ||
1427 | /* This currently only processes user and driver regulatory hints */ | 1440 | /* This processes *all* regulatory hints */ |
1428 | static void reg_process_hint(struct regulatory_request *reg_request) | 1441 | static void reg_process_hint(struct regulatory_request *reg_request) |
1429 | { | 1442 | { |
1430 | int r = 0; | 1443 | int r = 0; |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 10b4887dfa6b..df59440290e5 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -393,6 +393,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
393 | memcpy(ies, res->pub.information_elements, ielen); | 393 | memcpy(ies, res->pub.information_elements, ielen); |
394 | found->ies_allocated = true; | 394 | found->ies_allocated = true; |
395 | found->pub.information_elements = ies; | 395 | found->pub.information_elements = ies; |
396 | found->pub.len_information_elements = ielen; | ||
396 | } | 397 | } |
397 | } | 398 | } |
398 | } | 399 | } |