author    Linus Torvalds <torvalds@linux-foundation.org>  2010-10-29 17:17:12 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-10-29 17:17:12 -0400
commit    1840897ab5d39b2e510c610ee262ded79919e718 (patch)
tree      0b7fe95e3eda357d35b0d017f2b678b652307827
parent    d56f84e7e317c69adefb2454a3d538a6d7e11e4b (diff)
parent    a4765fa7bfb92d5b9de19a503674b6624f95a7ae (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (34 commits)
  b43: Fix warning at drivers/mmc/core/core.c:237 in mmc_wait_for_cmd
  mac80211: fix failure to check kmalloc return value in key_key_read
  libertas: Fix sd8686 firmware reload
  ath9k: Fix incorrect access of rate flags in RC
  netfilter: xt_socket: Make tproto signed in socket_mt6_v1().
  stmmac: enable/disable rx/tx in the core with a single write.
  net: atarilance - flags should be unsigned long
  netxen: fix kdump
  pktgen: Limit how much data we copy onto the stack.
  net: Limit socket I/O iovec total length to INT_MAX.
  USB: gadget: fix ethernet gadget crash in gether_setup
  fib: Fix fib zone and its hash leak on namespace stop
  cxgb3: Fix panic in free_tx_desc()
  cxgb3: fix crash due to manipulating queues before registration
  8390: Don't oops on starting dev queue
  dccp ccid-2: Stop polling
  dccp: Refine the wait-for-ccid mechanism
  dccp: Extend CCID packet dequeueing interface
  dccp: Return-value convention of hc_tx_send_packet()
  igbvf: fix panic on load
  ...
-rw-r--r--  drivers/net/Kconfig                       |   1
-rw-r--r--  drivers/net/atarilance.c                  |   2
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c            |   2
-rw-r--r--  drivers/net/cxgb3/sge.c                   |   4
-rw-r--r--  drivers/net/e1000e/82571.c                |  38
-rw-r--r--  drivers/net/e1000e/e1000.h                |   3
-rw-r--r--  drivers/net/e1000e/netdev.c               |  29
-rw-r--r--  drivers/net/igb/igb_main.c                |   1
-rw-r--r--  drivers/net/igbvf/netdev.c                |   8
-rw-r--r--  drivers/net/ixgb/ixgb_main.c              |   1
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.c             |  39
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.h             |   5
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.c       |   5
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.h       |   3
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c            |  12
-rw-r--r--  drivers/net/lib8390.c                     |   1
-rw-r--r--  drivers/net/netxen/netxen_nic_ctx.c       |  15
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c      |   7
-rw-r--r--  drivers/net/stmmac/stmmac_main.c          |  40
-rw-r--r--  drivers/net/wireless/ath/ath5k/attach.c   |  17
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h    |   2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c  |  10
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c     |  31
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c       |   2
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c     |  15
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c     |  18
-rw-r--r--  drivers/net/wireless/b43/sdio.c           |   2
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c   |  32
-rw-r--r--  drivers/usb/gadget/u_ether.c              |   2
-rw-r--r--  include/linux/dccp.h                      |   4
-rw-r--r--  include/linux/socket.h                    |   2
-rw-r--r--  include/net/ip_fib.h                      |   2
-rw-r--r--  net/compat.c                              |  10
-rw-r--r--  net/core/iovec.c                          |  20
-rw-r--r--  net/core/pktgen.c                         |   7
-rw-r--r--  net/dccp/ccid.h                           |  34
-rw-r--r--  net/dccp/ccids/ccid2.c                    |  23
-rw-r--r--  net/dccp/ccids/ccid2.h                    |   5
-rw-r--r--  net/dccp/ccids/ccid3.c                    |  12
-rw-r--r--  net/dccp/dccp.h                           |   5
-rw-r--r--  net/dccp/output.c                         | 209
-rw-r--r--  net/dccp/proto.c                          |  21
-rw-r--r--  net/dccp/timer.c                          |  27
-rw-r--r--  net/ipv4/fib_frontend.c                   |   2
-rw-r--r--  net/ipv4/fib_hash.c                       |  18
-rw-r--r--  net/ipv4/fib_trie.c                       |   5
-rw-r--r--  net/mac80211/debugfs_key.c                |   6
-rw-r--r--  net/mac80211/main.c                       |   5
-rw-r--r--  net/netfilter/xt_socket.c                 |   7
49 files changed, 525 insertions, 246 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9334539ebf75..f6668cdaac85 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2541,6 +2541,7 @@ source "drivers/net/stmmac/Kconfig"
 config PCH_GBE
 	tristate "PCH Gigabit Ethernet"
 	depends on PCI
+	select MII
 	---help---
 	  This is a gigabit ethernet driver for Topcliff PCH.
 	  Topcliff PCH is the platform controller hub that is used in Intel's
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 3134e5326231..8cb27cb7bca1 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -407,7 +407,7 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag,
 				    int writeflag)
 {
 	int	ret;
-	long	flags;
+	unsigned long	flags;
 	long	*vbr, save_berr;
 
 	local_irq_save(flags);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 4e3c12371aae..407d4e272075 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -3301,7 +3301,6 @@ static int __devinit init_one(struct pci_dev *pdev,
 		pi->rx_offload = T3_RX_CSUM | T3_LRO;
 		pi->port_id = i;
 		netif_carrier_off(netdev);
-		netif_tx_stop_all_queues(netdev);
 		netdev->irq = pdev->irq;
 		netdev->mem_start = mmio_start;
 		netdev->mem_end = mmio_start + mmio_len - 1;
@@ -3342,6 +3341,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 				adapter->name = adapter->port[i]->name;
 
 			__set_bit(i, &adapter->registered_device_map);
+			netif_tx_stop_all_queues(adapter->port[i]);
 		}
 	}
 	if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 5d72bda54389..f9f6645b2e61 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -296,8 +296,10 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
 		if (d->skb) {	/* an SGL is present */
 			if (need_unmap)
 				unmap_skb(d->skb, q, cidx, pdev);
-			if (d->eop)
+			if (d->eop) {
 				kfree_skb(d->skb);
+				d->skb = NULL;
+			}
 		}
 		++d;
 		if (++cidx == q->size) {
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index ca663f19d7df..7236f1a53ba0 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -52,6 +52,10 @@
 			  (ID_LED_DEF1_DEF2))
 
 #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define E1000_BASE1000T_STATUS          10
+#define E1000_IDLE_ERROR_COUNT_MASK     0xFF
+#define E1000_RECEIVE_ERROR_COUNTER     21
+#define E1000_RECEIVE_ERROR_MAX         0xFFFF
 
 #define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
 
@@ -1243,6 +1247,39 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
 }
 
 /**
+ * e1000_check_phy_82574 - check 82574 phy hung state
+ * @hw: pointer to the HW structure
+ *
+ * Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+	u16 status_1kbt = 0;
+	u16 receive_errors = 0;
+	bool phy_hung = false;
+	s32 ret_val = 0;
+
+	/*
+	 * Read PHY Receive Error counter first, if its is max - all F's then
+	 * read the Base1000T status register If both are max then PHY is hung.
+	 */
+	ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
+
+	if (ret_val)
+		goto out;
+	if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
+		ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
+		if (ret_val)
+			goto out;
+		if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+		    E1000_IDLE_ERROR_COUNT_MASK)
+			phy_hung = true;
+	}
+out:
+	return phy_hung;
+}
+
+/**
  * e1000_setup_link_82571 - Setup flow control and link settings
  * @hw: pointer to the HW structure
  *
@@ -1859,6 +1896,7 @@ struct e1000_info e1000_82574_info = {
 				  | FLAG_HAS_SMART_POWER_DOWN
 				  | FLAG_HAS_AMT
 				  | FLAG_HAS_CTRLEXT_ON_LOAD,
+	.flags2			= FLAG2_CHECK_PHY_HANG,
 	.pba			= 36,
 	.max_hw_frame_size	= DEFAULT_JUMBO,
 	.get_variants		= e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index cee882dd67bf..fdc67fead4ea 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -397,6 +397,7 @@ struct e1000_adapter {
 	struct work_struct print_hang_task;
 
 	bool idle_check;
+	int phy_hang_count;
 };
 
 struct e1000_info {
@@ -454,6 +455,7 @@ struct e1000_info {
 #define FLAG2_HAS_EEE                     (1 << 5)
 #define FLAG2_DMA_BURST                   (1 << 6)
 #define FLAG2_DISABLE_AIM                 (1 << 8)
+#define FLAG2_CHECK_PHY_HANG              (1 << 9)
 
 #define E1000_RX_DESC_PS(R, i)	    \
 	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -631,6 +633,7 @@ extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
 extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
 extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
 extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+extern bool e1000_check_phy_82574(struct e1000_hw *hw);
 
 static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 {
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index ec8cf3f51423..c4ca1629f532 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4098,6 +4098,25 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
 	}
 }
 
+static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/*
+	 * With 82574 controllers, PHY needs to be checked periodically
+	 * for hung state and reset, if two calls return true
+	 */
+	if (e1000_check_phy_82574(hw))
+		adapter->phy_hang_count++;
+	else
+		adapter->phy_hang_count = 0;
+
+	if (adapter->phy_hang_count > 1) {
+		adapter->phy_hang_count = 0;
+		schedule_work(&adapter->reset_task);
+	}
+}
+
 /**
  * e1000_watchdog - Timer Call-back
  * @data: pointer to adapter cast into an unsigned long
@@ -4333,6 +4352,9 @@ link_up:
 	if (e1000e_get_laa_state_82571(hw))
 		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
 
+	if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
+		e1000e_check_82574_phy_workaround(adapter);
+
 	/* Reset the timer */
 	if (!test_bit(__E1000_DOWN, &adapter->state))
 		mod_timer(&adapter->watchdog_timer,
@@ -4860,8 +4882,11 @@ static void e1000_reset_task(struct work_struct *work)
 	struct e1000_adapter *adapter;
 	adapter = container_of(work, struct e1000_adapter, reset_task);
 
-	e1000e_dump(adapter);
-	e_err("Reset adapter\n");
+	if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+	      (adapter->flags & FLAG_RX_RESTART_NOW))) {
+		e1000e_dump(adapter);
+		e_err("Reset adapter\n");
+	}
 	e1000e_reinit_locked(adapter);
 }
 
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 14db09e2fa8b..892d196f17ac 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4107,7 +4107,6 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 				    struct igb_ring *tx_ring)
 {
-	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
 	int tso = 0, count;
 	u32 tx_flags = 0;
 	u16 first;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index ebfaa68ee630..28af019c97bb 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2783,15 +2783,15 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
 	/* reset the hardware with the new settings */
 	igbvf_reset(adapter);
 
-	/* tell the stack to leave us alone until igbvf_open() is called */
-	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
-
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
 	if (err)
 		goto err_hw_init;
 
+	/* tell the stack to leave us alone until igbvf_open() is called */
+	netif_carrier_off(netdev);
+	netif_stop_queue(netdev);
+
 	igbvf_print_device_info(adapter);
 
 	igbvf_initialize_last_counter_stats(adapter);
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 666207a9c039..caa8192fff2a 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -533,6 +533,7 @@ ixgb_remove(struct pci_dev *pdev)
 	pci_release_regions(pdev);
 
 	free_netdev(netdev);
+	pci_disable_device(pdev);
 }
 
 /**
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 8bb9ddb6dffe..0d44c6470ca3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -43,9 +43,12 @@
  * ixgbe_dcb_check_config().
  */
 s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
-				   u8 direction)
+				   int max_frame, u8 direction)
 {
 	struct tc_bw_alloc *p;
+	int min_credit;
+	int min_multiplier;
+	int min_percent = 100;
 	s32 ret_val = 0;
 	/* Initialization values default for Tx settings */
 	u32 credit_refill       = 0;
@@ -59,6 +62,31 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
 		goto out;
 	}
 
+	min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+			DCB_CREDIT_QUANTUM;
+
+	/* Find smallest link percentage */
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		p = &dcb_config->tc_config[i].path[direction];
+		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+		link_percentage = p->bwg_percent;
+
+		link_percentage = (link_percentage * bw_percent) / 100;
+
+		if (link_percentage && link_percentage < min_percent)
+			min_percent = link_percentage;
+	}
+
+	/*
+	 * The ratio between traffic classes will control the bandwidth
+	 * percentages seen on the wire. To calculate this ratio we use
+	 * a multiplier. It is required that the refill credits must be
+	 * larger than the max frame size so here we find the smallest
+	 * multiplier that will allow all bandwidth percentages to be
+	 * greater than the max frame size.
+	 */
+	min_multiplier = (min_credit / min_percent) + 1;
+
 	/* Find out the link percentage for each TC first */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
 		p = &dcb_config->tc_config[i].path[direction];
@@ -73,8 +101,9 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
 		/* Save link_percentage for reference */
 		p->link_percent = (u8)link_percentage;
 
-		/* Calculate credit refill and save it */
-		credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
+		/* Calculate credit refill ratio using multiplier */
+		credit_refill = min(link_percentage * min_multiplier,
+				    MAX_CREDIT_REFILL);
 		p->data_credits_refill = (u16)credit_refill;
 
 		/* Calculate maximum credit for the TC */
@@ -85,8 +114,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
 		 * of a TC is too small, the maximum credit may not be
 		 * enough to send out a jumbo frame in data plane arbitration.
 		 */
-		if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO))
-			credit_max = MINIMUM_CREDIT_FOR_JUMBO;
+		if (credit_max && (credit_max < min_credit))
+			credit_max = min_credit;
 
 		if (direction == DCB_TX_CONFIG) {
 			/*
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index eb1059f09da0..0208a87b129e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -150,15 +150,14 @@ struct ixgbe_dcb_config {
 /* DCB driver APIs */
 
 /* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8);
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8);
 
 /* DCB hw initialization */
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
 
 /* DCB definitions for credit calculation */
+#define DCB_CREDIT_QUANTUM	64   /* DCB Quantum */
 #define MAX_CREDIT_REFILL       511  /* 0x1FF * 64B = 32704B */
-#define MINIMUM_CREDIT_REFILL   5    /* 5*64B = 320B */
-#define MINIMUM_CREDIT_FOR_JUMBO 145  /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
 #define DCB_MAX_TSO_SIZE        (32*1024) /* MAX TSO packet size supported in DCB mode */
 #define MINIMUM_CREDIT_FOR_TSO  (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
 #define MAX_CREDIT              4095 /* Maximum credit supported: 256KB * 1204 / 64B */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 67c219f86c3a..05f224715073 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -397,6 +397,11 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
 	reg &= ~IXGBE_RTTDCS_ARBDIS;
 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
 
+	/* Enable Security TX Buffer IFG for DCB */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	reg |= IXGBE_SECTX_DCB;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
 	return 0;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 18d7fbf6c292..3841649fb954 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -95,6 +95,9 @@
 
 #define IXGBE_TXPBTHRESH_DCB    0xA        /* THRESH value for DCB mode */
 
+/* SECTXMINIFG DCB */
+#define IXGBE_SECTX_DCB		0x00001F00 /* DCB TX Buffer IFG */
+
 
 /* DCB hardware-specific driver APIs */
 
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index f85631263af8..2bd3eb4ee5a1 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3347,6 +3347,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	u32 txdctl;
 	int i, j;
 
@@ -3359,8 +3360,15 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		netif_set_gso_max_size(adapter->netdev, 32768);
 
-	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
-	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
+#ifdef CONFIG_FCOE
+	if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+		max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+				       DCB_TX_CONFIG);
+	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+				       DCB_RX_CONFIG);
 
 	/* reconfigure the hardware */
 	ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 316bb70775b1..e7030ceb178b 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -1077,7 +1077,6 @@ static void __NS8390_init(struct net_device *dev, int startp)
 	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
 	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
 
-	netif_start_queue(dev);
 	ei_local->tx1 = ei_local->tx2 = 0;
 	ei_local->txing = 0;
 
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 12612127a087..f7d06cbc70ae 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -255,19 +255,6 @@ out_free_rq:
 }
 
 static void
-nx_fw_cmd_reset_ctx(struct netxen_adapter *adapter)
-{
-
-	netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
-			adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
-			NX_CDRP_CMD_DESTROY_RX_CTX);
-
-	netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
-			adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
-			NX_CDRP_CMD_DESTROY_TX_CTX);
-}
-
-static void
 nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
 {
 	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -698,8 +685,6 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
 	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
 		if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
 			goto done;
-		if (reset_devices)
-			nx_fw_cmd_reset_ctx(adapter);
 		err = nx_fw_cmd_create_rx_ctx(adapter);
 		if (err)
 			goto err_out_free;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 50820beac3aa..35ae1aa12896 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1356,6 +1356,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		break;
 	}
 
+	if (reset_devices) {
+		if (adapter->portnum == 0) {
+			NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
+			adapter->need_fw_reset = 1;
+		}
+	}
+
 	err = netxen_start_firmware(adapter);
 	if (err)
 		goto err_out_decr_ref;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 823b9e6431d5..06bc6034ce81 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -337,33 +337,19 @@ static int stmmac_init_phy(struct net_device *dev)
 	return 0;
 }
 
-static inline void stmmac_mac_enable_rx(void __iomem *ioaddr)
+static inline void stmmac_enable_mac(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value |= MAC_RNABLE_RX;
-	/* Set the RE (receive enable bit into the MAC CTRL register). */
-	writel(value, ioaddr + MAC_CTRL_REG);
-}
 
-static inline void stmmac_mac_enable_tx(void __iomem *ioaddr)
-{
-	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value |= MAC_ENABLE_TX;
-	/* Set the TE (transmit enable bit into the MAC CTRL register). */
+	value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
 	writel(value, ioaddr + MAC_CTRL_REG);
 }
 
-static inline void stmmac_mac_disable_rx(void __iomem *ioaddr)
+static inline void stmmac_disable_mac(void __iomem *ioaddr)
 {
 	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value &= ~MAC_RNABLE_RX;
-	writel(value, ioaddr + MAC_CTRL_REG);
-}
 
-static inline void stmmac_mac_disable_tx(void __iomem *ioaddr)
-{
-	u32 value = readl(ioaddr + MAC_CTRL_REG);
-	value &= ~MAC_ENABLE_TX;
+	value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
 	writel(value, ioaddr + MAC_CTRL_REG);
 }
 
@@ -857,8 +843,7 @@ static int stmmac_open(struct net_device *dev)
 	writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
 
 	/* Enable the MAC Rx/Tx */
-	stmmac_mac_enable_rx(priv->ioaddr);
-	stmmac_mac_enable_tx(priv->ioaddr);
+	stmmac_enable_mac(priv->ioaddr);
 
 	/* Set the HW DMA mode and the COE */
 	stmmac_dma_operation_mode(priv);
@@ -928,9 +913,8 @@ static int stmmac_release(struct net_device *dev)
 	/* Release and free the Rx/Tx resources */
 	free_dma_desc_resources(priv);
 
-	/* Disable the MAC core */
-	stmmac_mac_disable_tx(priv->ioaddr);
-	stmmac_mac_disable_rx(priv->ioaddr);
+	/* Disable the MAC Rx/Tx */
+	stmmac_disable_mac(priv->ioaddr);
 
 	netif_carrier_off(dev);
 
@@ -1787,8 +1771,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
 	priv->hw->dma->stop_rx(priv->ioaddr);
 	priv->hw->dma->stop_tx(priv->ioaddr);
 
-	stmmac_mac_disable_rx(priv->ioaddr);
-	stmmac_mac_disable_tx(priv->ioaddr);
+	stmmac_disable_mac(priv->ioaddr);
 
 	netif_carrier_off(ndev);
 
@@ -1839,13 +1822,11 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
 					dis_ic);
 		priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
 
-		stmmac_mac_disable_tx(priv->ioaddr);
-
 		/* Enable Power down mode by programming the PMT regs */
 		if (device_can_wakeup(priv->device))
 			priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
 		else
-			stmmac_mac_disable_rx(priv->ioaddr);
+			stmmac_disable_mac(priv->ioaddr);
 	} else {
 		priv->shutdown = 1;
 		/* Although this can appear slightly redundant it actually
@@ -1886,8 +1867,7 @@ static int stmmac_resume(struct platform_device *pdev)
 	netif_device_attach(dev);
 
 	/* Enable the MAC and DMA */
-	stmmac_mac_enable_rx(priv->ioaddr);
-	stmmac_mac_enable_tx(priv->ioaddr);
+	stmmac_enable_mac(priv->ioaddr);
 	priv->hw->dma->start_tx(priv->ioaddr);
 	priv->hw->dma->start_rx(priv->ioaddr);
 
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index cd0b14a0a93a..fbe8aca975d8 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -139,12 +139,12 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 	/* Fill the ath5k_hw struct with the needed functions */
 	ret = ath5k_hw_init_desc_functions(ah);
 	if (ret)
-		goto err_free;
+		goto err;
 
 	/* Bring device out of sleep and reset its units */
 	ret = ath5k_hw_nic_wakeup(ah, 0, true);
 	if (ret)
-		goto err_free;
+		goto err;
 
 	/* Get MAC, PHY and RADIO revisions */
 	ah->ah_mac_srev = srev;
@@ -234,7 +234,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 		} else {
 			ATH5K_ERR(sc, "Couldn't identify radio revision.\n");
 			ret = -ENODEV;
-			goto err_free;
+			goto err;
 		}
 	}
 
@@ -244,7 +244,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 	    (srev < AR5K_SREV_AR2425)) {
 		ATH5K_ERR(sc, "Device not yet supported.\n");
 		ret = -ENODEV;
-		goto err_free;
+		goto err;
 	}
 
 	/*
@@ -252,7 +252,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 	 */
 	ret = ath5k_hw_post(ah);
 	if (ret)
-		goto err_free;
+		goto err;
 
 	/* Enable pci core retry fix on Hainan (5213A) and later chips */
 	if (srev >= AR5K_SREV_AR5213A)
@@ -265,7 +265,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 	ret = ath5k_eeprom_init(ah);
 	if (ret) {
 		ATH5K_ERR(sc, "unable to init EEPROM\n");
-		goto err_free;
+		goto err;
 	}
 
 	ee = &ah->ah_capabilities.cap_eeprom;
@@ -307,7 +307,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 	if (ret) {
 		ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n",
 			sc->pdev->device);
-		goto err_free;
+		goto err;
 	}
 
 	/* Crypto settings */
@@ -341,8 +341,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
 	ath5k_hw_set_ledstate(ah, AR5K_LED_INIT);
 
 	return 0;
-err_free:
-	kfree(ah);
+err:
 	return ret;
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 973c919fdd27..9b8e7e3fcebd 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -310,7 +310,7 @@ struct ath_rx {
 	u8 rxotherant;
 	u32 *rxlink;
 	unsigned int rxfilter;
-	spinlock_t rxflushlock;
+	spinlock_t pcu_lock;
 	spinlock_t rxbuflock;
 	struct list_head rxbuf;
 	struct ath_descdma rxdma;
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 728d904c74d7..6576f683dba0 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -801,10 +801,16 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
 	}
 	kfree(buf);
 
-	if ((hif_dev->device_id == 0x7010) || (hif_dev->device_id == 0x7015))
+	switch (hif_dev->device_id) {
+	case 0x7010:
+	case 0x7015:
+	case 0x9018:
 		firm_offset = AR7010_FIRMWARE_TEXT;
-	else
+		break;
+	default:
 		firm_offset = AR9271_FIRMWARE_TEXT;
+		break;
+	}
 
 	/*
 	 * Issue FW download complete command to firmware.
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c6ec800d7a6b..b52f1cf8a603 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -241,6 +241,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 	 */
 	ath9k_hw_set_interrupts(ah, 0);
 	ath_drain_all_txq(sc, false);
+
+	spin_lock_bh(&sc->rx.pcu_lock);
+
 	stopped = ath_stoprecv(sc);
 
 	/* XXX: do not flush receive queue here. We don't want
@@ -268,6 +271,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 			  "reset status %d\n",
 			  channel->center_freq, r);
 		spin_unlock_bh(&sc->sc_resetlock);
+		spin_unlock_bh(&sc->rx.pcu_lock);
 		goto ps_restore;
 	}
 	spin_unlock_bh(&sc->sc_resetlock);
@@ -276,9 +280,12 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to restart recv logic\n");
 		r = -EIO;
+		spin_unlock_bh(&sc->rx.pcu_lock);
 		goto ps_restore;
 	}
 
+	spin_unlock_bh(&sc->rx.pcu_lock);
+
 	ath_update_txpow(sc);
 	ath9k_hw_set_interrupts(ah, ah->imask);
 
@@ -613,7 +620,7 @@ void ath9k_tasklet(unsigned long data)
 	rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
 
 	if (status & rxmask) {
-		spin_lock_bh(&sc->rx.rxflushlock);
+		spin_lock_bh(&sc->rx.pcu_lock);
 
 		/* Check for high priority Rx first */
 		if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
@@ -621,7 +628,7 @@ void ath9k_tasklet(unsigned long data)
 			ath_rx_tasklet(sc, 0, true);
 
 		ath_rx_tasklet(sc, 0, false);
-		spin_unlock_bh(&sc->rx.rxflushlock);
+		spin_unlock_bh(&sc->rx.pcu_lock);
 	}
 
 	if (status & ATH9K_INT_TX) {
@@ -876,6 +883,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
 	if (!ah->curchan)
 		ah->curchan = ath_get_curchannel(sc, sc->hw);
 
+	spin_lock_bh(&sc->rx.pcu_lock);
 	spin_lock_bh(&sc->sc_resetlock);
 	r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
 	if (r) {
@@ -890,8 +898,10 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
 	if (ath_startrecv(sc) != 0) {
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to restart recv logic\n");
+		spin_unlock_bh(&sc->rx.pcu_lock);
 		return;
 	}
+	spin_unlock_bh(&sc->rx.pcu_lock);
 
 	if (sc->sc_flags & SC_OP_BEACONS)
 		ath_beacon_config(sc, NULL);	/* restart beacons */
@@ -930,6 +940,9 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
 	ath9k_hw_set_interrupts(ah, 0);
 
 	ath_drain_all_txq(sc, false);	/* clear pending tx frames */
+
+	spin_lock_bh(&sc->rx.pcu_lock);
+
 	ath_stoprecv(sc);		/* turn off frame recv */
 	ath_flushrecv(sc);		/* flush recv queue */
 
@@ -947,6 +960,9 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
 	spin_unlock_bh(&sc->sc_resetlock);
 
 	ath9k_hw_phy_disable(ah);
+
+	spin_unlock_bh(&sc->rx.pcu_lock);
+
 	ath9k_hw_configpcipowersave(ah, 1, 1);
 	ath9k_ps_restore(sc);
 	ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
@@ -966,6 +982,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
 
 	ath9k_hw_set_interrupts(ah, 0);
 	ath_drain_all_txq(sc, retry_tx);
+
+	spin_lock_bh(&sc->rx.pcu_lock);
+
 	ath_stoprecv(sc);
 	ath_flushrecv(sc);
 
@@ -980,6 +999,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to start recv logic\n");
 
+	spin_unlock_bh(&sc->rx.pcu_lock);
+
 	/*
 	 * We may be doing a reset in response to a request
 	 * that changes the channel so update any state that
@@ -1142,6 +1163,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
 	 * be followed by initialization of the appropriate bits
 	 * and then setup of the interrupt mask.
 	 */
+	spin_lock_bh(&sc->rx.pcu_lock);
 	spin_lock_bh(&sc->sc_resetlock);
 	r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
 	if (r) {
@@ -1150,6 +1172,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
 			  "(freq %u MHz)\n", r,
 			  curchan->center_freq);
 		spin_unlock_bh(&sc->sc_resetlock);
+		spin_unlock_bh(&sc->rx.pcu_lock);
 		goto mutex_unlock;
 	}
 	spin_unlock_bh(&sc->sc_resetlock);
@@ -1171,8 +1194,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
 		ath_print(common, ATH_DBG_FATAL,
 			  "Unable to start recv logic\n");
 		r = -EIO;
+		spin_unlock_bh(&sc->rx.pcu_lock);
 		goto mutex_unlock;
 	}
+	spin_unlock_bh(&sc->rx.pcu_lock);
 
 	/* Setup our intr mask. */
 	ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
@@ -1371,12 +1396,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 	 * before setting the invalid flag. */
 	ath9k_hw_set_interrupts(ah, 0);
 
+	spin_lock_bh(&sc->rx.pcu_lock);
 	if (!(sc->sc_flags & SC_OP_INVALID)) {
 		ath_drain_all_txq(sc, false);
 		ath_stoprecv(sc);
 		ath9k_hw_phy_disable(ah);
 	} else
 		sc->rx.rxlink = NULL;
+	spin_unlock_bh(&sc->rx.pcu_lock);
 
 	/* disable HAL and put h/w to sleep */
 	ath9k_hw_disable(ah);
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 0cee90cf8dc9..89978d71617f 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -527,7 +527,7 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
 	for (i = 0; i < rateset->rs_nrates; i++) {
 		for (j = 0; j < rate_table->rate_cnt; j++) {
 			u32 phy = rate_table->info[j].phy;
-			u16 rate_flags = rate_table->info[i].rate_flags;
+			u16 rate_flags = rate_table->info[j].rate_flags;
 			u8 rate = rateset->rs_rates[i];
 			u8 dot11rate = rate_table->info[j].dot11rate;
 
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index fe73fc50082a..fddb0129bb57 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -297,19 +297,17 @@ static void ath_edma_start_recv(struct ath_softc *sc)
 	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
 			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
 
-	spin_unlock_bh(&sc->rx.rxbuflock);
-
 	ath_opmode_init(sc);
 
 	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
+
+	spin_unlock_bh(&sc->rx.rxbuflock);
 }
 
 static void ath_edma_stop_recv(struct ath_softc *sc)
 {
-	spin_lock_bh(&sc->rx.rxbuflock);
 	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
 	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
-	spin_unlock_bh(&sc->rx.rxbuflock);
 }
 
 int ath_rx_init(struct ath_softc *sc, int nbufs)
@@ -319,7 +317,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 	struct ath_buf *bf;
 	int error = 0;
 
-	spin_lock_init(&sc->rx.rxflushlock);
+	spin_lock_init(&sc->rx.pcu_lock);
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
 	spin_lock_init(&sc->rx.rxbuflock);
 
@@ -506,10 +504,11 @@ int ath_startrecv(struct ath_softc *sc)
 	ath9k_hw_rxena(ah);
 
 start_recv:
-	spin_unlock_bh(&sc->rx.rxbuflock);
 	ath_opmode_init(sc);
 	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
 
+	spin_unlock_bh(&sc->rx.rxbuflock);
+
 	return 0;
 }
 
@@ -518,6 +517,7 @@ bool ath_stoprecv(struct ath_softc *sc)
 	struct ath_hw *ah = sc->sc_ah;
 	bool stopped;
 
+	spin_lock_bh(&sc->rx.rxbuflock);
 	ath9k_hw_stoppcurecv(ah);
 	ath9k_hw_setrxfilter(ah, 0);
 	stopped = ath9k_hw_stopdmarecv(ah);
@@ -526,19 +526,18 @@ bool ath_stoprecv(struct ath_softc *sc)
 		ath_edma_stop_recv(sc);
 	else
 		sc->rx.rxlink = NULL;
+	spin_unlock_bh(&sc->rx.rxbuflock);
 
 	return stopped;
 }
 
 void ath_flushrecv(struct ath_softc *sc)
 {
-	spin_lock_bh(&sc->rx.rxflushlock);
 	sc->sc_flags |= SC_OP_RXFLUSH;
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
 		ath_rx_tasklet(sc, 1, true);
 	ath_rx_tasklet(sc, 1, false);
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
-	spin_unlock_bh(&sc->rx.rxflushlock);
 }
 
 static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 30ef2dfc1ed2..f2ade2402ce2 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1089,15 +1089,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 	txq->axq_tx_inprogress = false;
 	spin_unlock_bh(&txq->axq_lock);
 
-	/* flush any pending frames if aggregation is enabled */
-	if (sc->sc_flags & SC_OP_TXAGGR) {
-		if (!retry_tx) {
-			spin_lock_bh(&txq->axq_lock);
-			ath_txq_drain_pending_buffers(sc, txq);
-			spin_unlock_bh(&txq->axq_lock);
-		}
-	}
-
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 		spin_lock_bh(&txq->axq_lock);
 		while (!list_empty(&txq->txq_fifo_pending)) {
@@ -1118,6 +1109,15 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 		}
 		spin_unlock_bh(&txq->axq_lock);
 	}
+
+	/* flush any pending frames if aggregation is enabled */
+	if (sc->sc_flags & SC_OP_TXAGGR) {
+		if (!retry_tx) {
+			spin_lock_bh(&txq->axq_lock);
+			ath_txq_drain_pending_buffers(sc, txq);
+			spin_unlock_bh(&txq->axq_lock);
+		}
+	}
 }
 
 void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 45933cf8e8c2..9a55338d957f 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -175,7 +175,9 @@ static void b43_sdio_remove(struct sdio_func *func)
 	struct b43_sdio *sdio = sdio_get_drvdata(func);
 
 	ssb_bus_unregister(&sdio->ssb);
+	sdio_claim_host(func);
 	sdio_disable_func(func);
+	sdio_release_host(func);
 	kfree(sdio);
 	sdio_set_drvdata(func, NULL);
 }
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 296fd00a5129..e5685dc317a8 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -684,18 +684,40 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
 
 	lbs_deb_enter(LBS_DEB_SDIO);
 
+	/*
+	 * Disable interrupts
+	 */
+	sdio_claim_host(card->func);
+	sdio_writeb(card->func, 0x00, IF_SDIO_H_INT_MASK, &ret);
+	sdio_release_host(card->func);
+
 	sdio_claim_host(card->func);
 	scratch = if_sdio_read_scratch(card, &ret);
 	sdio_release_host(card->func);
 
+	lbs_deb_sdio("firmware status = %#x\n", scratch);
+	lbs_deb_sdio("scratch ret = %d\n", ret);
+
 	if (ret)
 		goto out;
 
-	lbs_deb_sdio("firmware status = %#x\n", scratch);
 
+	/*
+	 * The manual clearly describes that FEDC is the right code to use
+	 * to detect firmware presence, but for SD8686 it is not that simple.
+	 * Scratch is also used to store the RX packet length, so we lose
+	 * the FEDC value early on. So we use a non-zero check in order
+	 * to validate firmware presence.
+	 * Additionally, the SD8686 in the Gumstix always has the high scratch
+	 * bit set, even when the firmware is not loaded. So we have to
+	 * exclude that from the test.
+	 */
 	if (scratch == IF_SDIO_FIRMWARE_OK) {
 		lbs_deb_sdio("firmware already loaded\n");
 		goto success;
+	} else if ((card->model == MODEL_8686) && (scratch & 0x7fff)) {
+		lbs_deb_sdio("firmware may be running\n");
+		goto success;
 	}
 
 	ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name,
@@ -709,10 +731,14 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
 	if (ret)
 		goto out;
 
+	lbs_deb_sdio("Helper firmware loaded\n");
+
 	ret = if_sdio_prog_real(card, mainfw);
 	if (ret)
 		goto out;
 
+	lbs_deb_sdio("Firmware loaded\n");
+
 success:
 	sdio_claim_host(card->func);
 	sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE);
@@ -1042,8 +1068,6 @@ static int if_sdio_probe(struct sdio_func *func,
 	priv->exit_deep_sleep = if_sdio_exit_deep_sleep;
 	priv->reset_deep_sleep_wakeup = if_sdio_reset_deep_sleep_wakeup;
 
-	priv->fw_ready = 1;
-
 	sdio_claim_host(func);
 
 	/*
@@ -1064,6 +1088,8 @@ static int if_sdio_probe(struct sdio_func *func,
 	if (ret)
 		goto reclaim;
 
+	priv->fw_ready = 1;
+
 	/*
 	 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
 	 */
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 6bb876d65252..cb23355f52d3 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -797,7 +797,6 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
 	 * - iff DATA transfer is active, carrier is "on"
 	 * - tx queueing enabled if open *and* carrier is "on"
 	 */
-	netif_stop_queue(net);
 	netif_carrier_off(net);
 
 	dev->gadget = g;
@@ -812,6 +811,7 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
 		INFO(dev, "MAC %pM\n", net->dev_addr);
 		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
 
+		netif_stop_queue(net);
 		the_dev = dev;
 	}
 
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 7187bd8a75f6..749f01ccd26e 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -462,7 +462,8 @@ struct dccp_ackvec;
  * @dccps_hc_rx_insert_options - receiver wants to add options when acking
  * @dccps_hc_tx_insert_options - sender wants to add options when sending
  * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
- * @dccps_xmit_timer - timer for when CCID is not ready to send
+ * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
+ * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
  * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
  */
 struct dccp_sock {
@@ -502,6 +503,7 @@ struct dccp_sock {
 	__u8				dccps_hc_rx_insert_options:1;
 	__u8				dccps_hc_tx_insert_options:1;
 	__u8				dccps_server_timewait:1;
+	struct tasklet_struct		dccps_xmitlet;
 	struct timer_list		dccps_xmit_timer;
 };
 
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 5146b50202ce..86b652fabf6e 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
 					   int offset,
 					   unsigned int len, __wsum *csump);
 
-extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
+extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
 extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
 			     int offset, int len);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index ba3666d31766..07bdb5e9e8ac 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -158,6 +158,8 @@ extern int fib_table_flush(struct fib_table *table);
 extern void fib_table_select_default(struct fib_table *table,
 				     const struct flowi *flp,
 				     struct fib_result *res);
+extern void fib_free_table(struct fib_table *tb);
+
 
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES
diff --git a/net/compat.c b/net/compat.c
index 63d260e81472..3649d5895361 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -41,10 +41,12 @@ static inline int iov_from_user_compat_to_kern(struct iovec *kiov,
 		compat_size_t len;
 
 		if (get_user(len, &uiov32->iov_len) ||
-		    get_user(buf, &uiov32->iov_base)) {
-			tot_len = -EFAULT;
-			break;
-		}
+		    get_user(buf, &uiov32->iov_base))
+			return -EFAULT;
+
+		if (len > INT_MAX - tot_len)
+			len = INT_MAX - tot_len;
+
 		tot_len += len;
 		kiov->iov_base = compat_ptr(buf);
 		kiov->iov_len = (__kernel_size_t) len;
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 72aceb1fe4fa..c40f27e7d208 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,10 +35,9 @@
  * in any case.
  */
 
-long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
 {
-	int size, ct;
-	long err;
+	int size, ct, err;
 
 	if (m->msg_namelen) {
 		if (mode == VERIFY_READ) {
@@ -62,14 +61,13 @@ long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
 	err = 0;
 
 	for (ct = 0; ct < m->msg_iovlen; ct++) {
-		err += iov[ct].iov_len;
-		/*
-		 * Goal is not to verify user data, but to prevent returning
-		 * negative value, which is interpreted as errno.
-		 * Overflow is still possible, but it is harmless.
-		 */
-		if (err < 0)
-			return -EMSGSIZE;
+		size_t len = iov[ct].iov_len;
+
+		if (len > INT_MAX - err) {
+			len = INT_MAX - err;
+			iov[ct].iov_len = len;
+		}
+		err += len;
 	}
 
 	return err;
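
For illustration only, a minimal userspace sketch of the clamping idea applied in the two hunks above: the summed iovec length is capped at INT_MAX, shrinking oversized segments in place, so the total can never wrap into a negative int. The function name and the use of the userspace struct iovec are assumptions for the sketch, not kernel API.

    #include <limits.h>
    #include <stdio.h>
    #include <sys/uio.h>

    /*
     * Sum iovec lengths while clamping the total to INT_MAX, shrinking
     * oversized segments in place so the result always fits in an int.
     */
    static int total_iov_len_capped(struct iovec *iov, size_t n)
    {
    	int total = 0;
    	size_t i;

    	for (i = 0; i < n; i++) {
    		size_t len = iov[i].iov_len;

    		if (len > (size_t)(INT_MAX - total)) {
    			len = INT_MAX - total;
    			iov[i].iov_len = len;	/* truncate the excess */
    		}
    		total += len;
    	}
    	return total;
    }

    int main(void)
    {
    	char a[16], b[16];
    	struct iovec iov[2] = {
    		{ .iov_base = a, .iov_len = sizeof(a) },
    		{ .iov_base = b, .iov_len = sizeof(b) },
    	};

    	printf("total=%d\n", total_iov_len_capped(iov, 2));
    	return 0;
    }
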
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 679b797d06b1..fbce4b05a53e 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -887,10 +887,11 @@ static ssize_t pktgen_if_write(struct file *file,
 	i += len;
 
 	if (debug) {
-		char tb[count + 1];
-		if (copy_from_user(tb, user_buffer, count))
+		size_t copy = min(count, 1023);
+		char tb[copy + 1];
+		if (copy_from_user(tb, user_buffer, copy))
 			return -EFAULT;
-		tb[count] = 0;
+		tb[copy] = 0;
 		printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name,
 		       (unsigned long)count, tb);
 	}
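
A hedged userspace sketch of the same stack-limiting pattern: instead of sizing a variable-length array from an unbounded user-supplied count, copy at most a fixed number of bytes and NUL-terminate. The fixed 1024-byte buffer here replaces the kernel's bounded VLA and the names are illustrative, not the pktgen code itself.

    #include <stdio.h>
    #include <string.h>

    /*
     * Copy at most 1023 bytes of an arbitrarily long input into a bounded
     * buffer and NUL-terminate it before printing, so a huge count can no
     * longer blow up the stack the way an unbounded VLA would.
     */
    static void debug_dump(const char *user_buffer, size_t count)
    {
    	size_t copy = count < 1023 ? count : 1023;
    	char tb[1024];

    	memcpy(tb, user_buffer, copy);
    	tb[copy] = '\0';
    	printf("pktgen: %lu bytes -:%s:-\n", (unsigned long)count, tb);
    }

    int main(void)
    {
    	debug_dump("pkt_size 60", strlen("pkt_size 60"));
    	return 0;
    }
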
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 117fb093dcaf..75c3582a7678 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -134,13 +134,41 @@ static inline int ccid_get_current_tx_ccid(struct dccp_sock *dp)
 extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
 extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
 
+/*
+ * Congestion control of queued data packets via CCID decision.
+ *
+ * The TX CCID performs its congestion-control by indicating whether and when a
+ * queued packet may be sent, using the return code of ccid_hc_tx_send_packet().
+ * The following modes are supported via the symbolic constants below:
+ *  - timer-based pacing (CCID returns a delay value in milliseconds);
+ *  - autonomous dequeueing (CCID internally schedules dccps_xmitlet).
+ */
+
+enum ccid_dequeueing_decision {
+	CCID_PACKET_SEND_AT_ONCE = 0x00000,		/* "green light": no delay */
+	CCID_PACKET_DELAY_MAX = 0x0FFFF,		/* maximum delay in msecs */
+	CCID_PACKET_DELAY = 0x10000,			/* CCID msec-delay mode */
+	CCID_PACKET_WILL_DEQUEUE_LATER = 0x20000,	/* CCID autonomous mode */
+	CCID_PACKET_ERR = 0xF0000,			/* error condition */
+};
+
+static inline int ccid_packet_dequeue_eval(const int return_code)
+{
+	if (return_code < 0)
+		return CCID_PACKET_ERR;
+	if (return_code == 0)
+		return CCID_PACKET_SEND_AT_ONCE;
+	if (return_code <= CCID_PACKET_DELAY_MAX)
+		return CCID_PACKET_DELAY;
+	return return_code;
+}
+
 static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk,
 					 struct sk_buff *skb)
 {
-	int rc = 0;
 	if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL)
-		rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb);
-	return rc;
+		return ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb);
+	return CCID_PACKET_SEND_AT_ONCE;
 }
 
 static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk,
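
For reference, a small standalone C sketch of how a caller evaluates the CCID return code under the new convention added above: negative values are errors, zero means send now, 1..0xFFFF is a millisecond delay, and the "dequeue later" constant passes through unchanged. The enum and function names below are userspace stand-ins, not the kernel header.

    #include <stdio.h>

    /* Userspace replica of the dequeueing decision mapping. */
    enum dequeue_decision {
    	PACKET_SEND_AT_ONCE       = 0x00000,
    	PACKET_DELAY_MAX          = 0x0FFFF,
    	PACKET_DELAY              = 0x10000,
    	PACKET_WILL_DEQUEUE_LATER = 0x20000,
    	PACKET_ERR                = 0xF0000,
    };

    static int packet_dequeue_eval(int return_code)
    {
    	if (return_code < 0)
    		return PACKET_ERR;		/* error condition */
    	if (return_code == 0)
    		return PACKET_SEND_AT_ONCE;	/* no delay */
    	if (return_code <= PACKET_DELAY_MAX)
    		return PACKET_DELAY;		/* msec-delay mode */
    	return return_code;			/* e.g. dequeue-later */
    }

    int main(void)
    {
    	int samples[] = { -22, 0, 250, PACKET_WILL_DEQUEUE_LATER };
    	unsigned i;

    	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
    		printf("rc=%d -> decision=0x%05x\n", samples[i],
    		       packet_dequeue_eval(samples[i]));
    	return 0;
    }
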
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index d850e291f87c..6576eae9e779 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -78,12 +78,9 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
 
 static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 {
-	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
-	if (hc->tx_pipe < hc->tx_cwnd)
-		return 0;
-
-	return 1; /* XXX CCID should dequeue when ready instead of polling */
+	if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
+		return CCID_PACKET_WILL_DEQUEUE_LATER;
+	return CCID_PACKET_SEND_AT_ONCE;
 }
 
 static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
@@ -115,6 +112,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
@@ -129,8 +127,6 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 	if (hc->tx_rto > DCCP_RTO_MAX)
 		hc->tx_rto = DCCP_RTO_MAX;
 
-	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
-
 	/* adjust pipe, cwnd etc */
 	hc->tx_ssthresh = hc->tx_cwnd / 2;
 	if (hc->tx_ssthresh < 2)
@@ -146,6 +142,12 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
 	hc->tx_rpseq = 0;
 	hc->tx_rpdupack = -1;
 	ccid2_change_l_ack_ratio(sk, 1);
+
+	/* if we were blocked before, we may now send cwnd=1 packet */
+	if (sender_was_blocked)
+		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+	/* restart backed-off timer */
+	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
 out:
 	bh_unlock_sock(sk);
 	sock_put(sk);
@@ -434,6 +436,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
 	u64 ackno, seqno;
 	struct ccid2_seq *seqp;
 	unsigned char *vector;
@@ -631,6 +634,10 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 		sk_stop_timer(sk, &hc->tx_rtotimer);
 	else
 		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
+
+	/* check if incoming Acks allow pending packets to be sent */
+	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
+		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
 }
 
 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 9731c2dc1487..25cb6b216eda 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -81,6 +81,11 @@ struct ccid2_hc_tx_sock {
 	u64 tx_high_ack;
 };
 
+static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc)
+{
+	return hc->tx_pipe >= hc->tx_cwnd;
+}
+
 struct ccid2_hc_rx_sock {
 	int rx_data;
 };
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 3060a60ed5ab..3d604e1349c0 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -268,11 +268,11 @@ out:
 	sock_put(sk);
 }
 
-/*
- * returns
- * > 0: delay (in msecs) that should pass before actually sending
- * = 0: can send immediately
- * < 0: error condition; do not send packet
+/**
+ * ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets
+ * @skb: next packet candidate to send on @sk
+ * This function uses the convention of ccid_packet_dequeue_eval() and
+ * returns a millisecond-delay value between 0 and t_mbi = 64000 msec.
  */
 static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 {
@@ -348,7 +348,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 
 	/* set the nominal send time for the next following packet */
 	hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi);
-	return 0;
+	return CCID_PACKET_SEND_AT_ONCE;
 }
 
 static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 3eb264b60823..a8ed459508b2 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -243,8 +243,9 @@ extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 extern void dccp_send_sync(struct sock *sk, const u64 seq,
 			   const enum dccp_pkt_type pkt_type);
 
-extern void dccp_write_xmit(struct sock *sk, int block);
+extern void dccp_write_xmit(struct sock *sk);
 extern void dccp_write_space(struct sock *sk);
+extern void dccp_flush_write_queue(struct sock *sk, long *time_budget);
 
 extern void dccp_init_xmit_timers(struct sock *sk);
 static inline void dccp_clear_xmit_timers(struct sock *sk)
diff --git a/net/dccp/output.c b/net/dccp/output.c
index a988fe9ffcba..45b91853f5ae 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -209,108 +209,150 @@ void dccp_write_space(struct sock *sk)
 }
 
 /**
- * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
+ * dccp_wait_for_ccid - Await CCID send permission
  * @sk: socket to wait for
- * @skb: current skb to pass on for waiting
- * @delay: sleep timeout in milliseconds (> 0)
- * This function is called by default when the socket is closed, and
- * when a non-zero linger time is set on the socket. For consistency
+ * @delay: timeout in jiffies
+ * This is used by CCIDs which need to delay the send time in process context.
  */
-static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
+static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
 {
-	struct dccp_sock *dp = dccp_sk(sk);
 	DEFINE_WAIT(wait);
-	unsigned long jiffdelay;
-	int rc;
+	long remaining;
+
+	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+	sk->sk_write_pending++;
+	release_sock(sk);
+
+	remaining = schedule_timeout(delay);
+
+	lock_sock(sk);
+	sk->sk_write_pending--;
+	finish_wait(sk_sleep(sk), &wait);
+
+	if (signal_pending(current) || sk->sk_err)
+		return -1;
+	return remaining;
+}
+
+/**
+ * dccp_xmit_packet - Send data packet under control of CCID
+ * Transmits next-queued payload and informs CCID to account for the packet.
+ */
+static void dccp_xmit_packet(struct sock *sk)
+{
+	int err, len;
+	struct dccp_sock *dp = dccp_sk(sk);
+	struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue);
 
-	do {
-		dccp_pr_debug("delayed send by %d msec\n", delay);
-		jiffdelay = msecs_to_jiffies(delay);
+	if (unlikely(skb == NULL))
+		return;
+	len = skb->len;
 
-		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+	if (sk->sk_state == DCCP_PARTOPEN) {
+		const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
+		/*
+		 * See 8.1.5 - Handshake Completion.
+		 *
+		 * For robustness we resend Confirm options until the client has
+		 * entered OPEN. During the initial feature negotiation, the MPS
+		 * is smaller than usual, reduced by the Change/Confirm options.
+		 */
+		if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
+			DCCP_WARN("Payload too large (%d) for featneg.\n", len);
+			dccp_send_ack(sk);
+			dccp_feat_list_purge(&dp->dccps_featneg);
+		}
 
-		sk->sk_write_pending++;
-		release_sock(sk);
-		schedule_timeout(jiffdelay);
-		lock_sock(sk);
-		sk->sk_write_pending--;
+		inet_csk_schedule_ack(sk);
+		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+					  inet_csk(sk)->icsk_rto,
+					  DCCP_RTO_MAX);
+		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
+	} else if (dccp_ack_pending(sk)) {
+		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
+	} else {
+		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
+	}
+
+	err = dccp_transmit_skb(sk, skb);
+	if (err)
+		dccp_pr_debug("transmit_skb() returned err=%d\n", err);
+	/*
+	 * Register this one as sent even if an error occurred. To the remote
+	 * end a local packet drop is indistinguishable from network loss, i.e.
+	 * any local drop will eventually be reported via receiver feedback.
+	 */
+	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
+}
 
-	if (sk->sk_err)
-		goto do_error;
-	if (signal_pending(current))
-		goto do_interrupted;
+/**
+ * dccp_flush_write_queue - Drain queue at end of connection
+ * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
+ * happen that the TX queue is not empty at the end of a connection. We give the
+ * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
+ * returns with a non-empty write queue, it will be purged later.
+ */
+void dccp_flush_write_queue(struct sock *sk, long *time_budget)
+{
+	struct dccp_sock *dp = dccp_sk(sk);
+	struct sk_buff *skb;
+	long delay, rc;
 
+	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
 		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
-	} while ((delay = rc) > 0);
-out:
-	finish_wait(sk_sleep(sk), &wait);
-	return rc;
-
-do_error:
-	rc = -EPIPE;
-	goto out;
-do_interrupted:
-	rc = -EINTR;
-	goto out;
+
+		switch (ccid_packet_dequeue_eval(rc)) {
+		case CCID_PACKET_WILL_DEQUEUE_LATER:
+			/*
+			 * If the CCID determines when to send, the next sending
+			 * time is unknown or the CCID may not even send again
+			 * (e.g. remote host crashes or lost Ack packets).
+			 */
+			DCCP_WARN("CCID did not manage to send all packets\n");
+			return;
+		case CCID_PACKET_DELAY:
+			delay = msecs_to_jiffies(rc);
+			if (delay > *time_budget)
+				return;
+			rc = dccp_wait_for_ccid(sk, delay);
+			if (rc < 0)
+				return;
+			*time_budget -= (delay - rc);
+			/* check again if we can send now */
+			break;
+		case CCID_PACKET_SEND_AT_ONCE:
+			dccp_xmit_packet(sk);
+			break;
+		case CCID_PACKET_ERR:
+			skb_dequeue(&sk->sk_write_queue);
+			kfree_skb(skb);
+			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
+		}
+	}
 }
 
-void dccp_write_xmit(struct sock *sk, int block)
+void dccp_write_xmit(struct sock *sk)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct sk_buff *skb;
 
 	while ((skb = skb_peek(&sk->sk_write_queue))) {
-		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
-
-		if (err > 0) {
-			if (!block) {
-				sk_reset_timer(sk, &dp->dccps_xmit_timer,
-						msecs_to_jiffies(err)+jiffies);
-				break;
-			} else
-				err = dccp_wait_for_ccid(sk, skb, err);
-			if (err && err != -EINTR)
-				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
-		}
+		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
 
-		skb_dequeue(&sk->sk_write_queue);
-		if (err == 0) {
-			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
-			const int len = skb->len;
-
-			if (sk->sk_state == DCCP_PARTOPEN) {
-				const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
-				/*
-				 * See 8.1.5 - Handshake Completion.
-				 *
-				 * For robustness we resend Confirm options until the client has
-				 * entered OPEN. During the initial feature negotiation, the MPS
-				 * is smaller than usual, reduced by the Change/Confirm options.
-				 */
-				if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
-					DCCP_WARN("Payload too large (%d) for featneg.\n", len);
-					dccp_send_ack(sk);
-					dccp_feat_list_purge(&dp->dccps_featneg);
-				}
-
-				inet_csk_schedule_ack(sk);
-				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-							  inet_csk(sk)->icsk_rto,
-							  DCCP_RTO_MAX);
-				dcb->dccpd_type = DCCP_PKT_DATAACK;
-			} else if (dccp_ack_pending(sk))
-				dcb->dccpd_type = DCCP_PKT_DATAACK;
-			else
-				dcb->dccpd_type = DCCP_PKT_DATA;
-
-			err = dccp_transmit_skb(sk, skb);
-			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
-			if (err)
-				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
-					 err);
-		} else {
-			dccp_pr_debug("packet discarded due to err=%d\n", err);
+		switch (ccid_packet_dequeue_eval(rc)) {
+		case CCID_PACKET_WILL_DEQUEUE_LATER:
+			return;
+		case CCID_PACKET_DELAY:
+			sk_reset_timer(sk, &dp->dccps_xmit_timer,
+				       jiffies + msecs_to_jiffies(rc));
+			return;
+		case CCID_PACKET_SEND_AT_ONCE:
+			dccp_xmit_packet(sk);
+			break;
+		case CCID_PACKET_ERR:
+			skb_dequeue(&sk->sk_write_queue);
 			kfree_skb(skb);
+			dccp_pr_debug("packet discarded due to err=%d\n", rc);
 		}
 	}
 }
@@ -622,7 +664,6 @@ void dccp_send_close(struct sock *sk, const int active)
 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;
 
 	if (active) {
-		dccp_write_xmit(sk, 1);
 		dccp_skb_entail(sk, skb);
 		dccp_transmit_skb(sk, skb_clone(skb, prio));
 		/*
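
A much-simplified standalone sketch of the time-budget bookkeeping that dccp_flush_write_queue() introduces above: drain only while budget remains, charge the budget for time actually slept, and give up (leaving the rest for a later purge) when the granted delay exceeds what is left. The queue and the CCID are reduced here to a counter and a stub; every name below is illustrative, not the kernel code.

    #include <stdio.h>

    /* Stand-in for the CCID delay path; returns the unused remainder. */
    static long wait_for_permission(long delay)
    {
    	(void)delay;	/* pretend the full delay elapsed */
    	return 0;
    }

    static void flush_queue(int *queued, long *time_budget)
    {
    	while (*time_budget > 0 && *queued > 0) {
    		long delay = 5;	/* per-packet delay granted by "the CCID" */
    		long remaining;

    		if (delay > *time_budget)
    			return;	/* give up; caller purges what is left */

    		remaining = wait_for_permission(delay);
    		*time_budget -= (delay - remaining);
    		(*queued)--;	/* packet released into the network */
    	}
    }

    int main(void)
    {
    	int queued = 10;
    	long budget = 30;

    	flush_queue(&queued, &budget);
    	printf("still queued: %d, budget left: %ld\n", queued, budget);
    	return 0;
    }
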
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 7e5fc04eb6d1..ef343d53fcea 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -726,7 +726,13 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		goto out_discard;
 
 	skb_queue_tail(&sk->sk_write_queue, skb);
-	dccp_write_xmit(sk,0);
+	/*
+	 * The xmit_timer is set if the TX CCID is rate-based and will expire
+	 * when congestion control permits to release further packets into the
+	 * network. Window-based CCIDs do not use this timer.
+	 */
+	if (!timer_pending(&dp->dccps_xmit_timer))
+		dccp_write_xmit(sk);
 out_release:
 	release_sock(sk);
 	return rc ? : len;
@@ -951,9 +957,22 @@ void dccp_close(struct sock *sk, long timeout)
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
 	} else if (sk->sk_state != DCCP_CLOSED) {
+		/*
+		 * Normal connection termination. May need to wait if there are
+		 * still packets in the TX queue that are delayed by the CCID.
+		 */
+		dccp_flush_write_queue(sk, &timeout);
 		dccp_terminate_connection(sk);
 	}
 
+	/*
+	 * Flush write queue. This may be necessary in several cases:
+	 * - we have been closed by the peer but still have application data;
+	 * - abortive termination (unread data or zero linger time),
+	 * - normal termination but queue could not be flushed within time limit
+	 */
+	__skb_queue_purge(&sk->sk_write_queue);
+
 	sk_stream_wait_close(sk, timeout);
 
 adjudge_to_death:
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 1a9aa05d4dc4..7587870b7040 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -237,32 +237,35 @@ out:
 	sock_put(sk);
 }
 
-/* Transmit-delay timer: used by the CCIDs to delay actual send time */
-static void dccp_write_xmit_timer(unsigned long data)
+/**
+ * dccp_write_xmitlet - Workhorse for CCID packet dequeueing interface
+ * See the comments above %ccid_dequeueing_decision for supported modes.
+ */
+static void dccp_write_xmitlet(unsigned long data)
 {
 	struct sock *sk = (struct sock *)data;
-	struct dccp_sock *dp = dccp_sk(sk);
 
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk))
-		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
+		sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
 	else
-		dccp_write_xmit(sk, 0);
+		dccp_write_xmit(sk);
 	bh_unlock_sock(sk);
-	sock_put(sk);
 }
 
-static void dccp_init_write_xmit_timer(struct sock *sk)
+static void dccp_write_xmit_timer(unsigned long data)
 {
-	struct dccp_sock *dp = dccp_sk(sk);
-
-	setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
-			(unsigned long)sk);
+	dccp_write_xmitlet(data);
+	sock_put((struct sock *)data);
 }
 
 void dccp_init_xmit_timers(struct sock *sk)
 {
-	dccp_init_write_xmit_timer(sk);
+	struct dccp_sock *dp = dccp_sk(sk);
+
+	tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk);
+	setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
+		    (unsigned long)sk);
 	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
 				  &dccp_keepalive_timer);
 }
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 36e27c2107de..eb6f69a8f27a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1052,7 +1052,7 @@ static void ip_fib_net_exit(struct net *net)
 		hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
 			hlist_del(node);
 			fib_table_flush(tb);
-			kfree(tb);
+			fib_free_table(tb);
 		}
 	}
 	kfree(net->ipv4.fib_table_hash);
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index b232375a0b75..b3acb0417b21 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -716,6 +716,24 @@ int fib_table_flush(struct fib_table *tb)
 	return found;
 }
 
+void fib_free_table(struct fib_table *tb)
+{
+	struct fn_hash *table = (struct fn_hash *) tb->tb_data;
+	struct fn_zone *fz, *next;
+
+	next = table->fn_zone_list;
+	while (next != NULL) {
+		fz = next;
+		next = fz->fz_next;
+
+		if (fz->fz_hash != fz->fz_embedded_hash)
+			fz_hash_free(fz->fz_hash, fz->fz_divisor);
+
+		kfree(fz);
+	}
+
+	kfree(tb);
+}
 
 static inline int
 fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
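
The shape of the new fib_free_table() for the hash backend is a classic "free a singly linked list, but never free embedded storage" walk. A hedged userspace sketch of that pattern follows; the struct layout and names are illustrative only and do not mirror the kernel's fn_zone/fn_hash.

    #include <stdlib.h>

    /* Illustrative structures only -- not the kernel's fn_zone/fn_hash layout. */
    struct zone {
    	struct zone *next;
    	void *hash;		/* may alias embedded_storage below */
    	void *embedded_storage;
    };

    struct table {
    	struct zone *zone_list;
    };

    /*
     * Free every zone, releasing a separately allocated hash but never the
     * embedded one, then free the table itself.
     */
    static void table_free(struct table *tb)
    {
    	struct zone *fz, *next = tb->zone_list;

    	while (next != NULL) {
    		fz = next;
    		next = fz->next;

    		if (fz->hash != fz->embedded_storage)
    			free(fz->hash);
    		free(fz);
    	}
    	free(tb);
    }

    int main(void)
    {
    	struct table *tb = calloc(1, sizeof(*tb));

    	if (tb)
    		table_free(tb);	/* empty list: just frees the table */
    	return 0;
    }
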
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index b14450895102..200eb538fbb3 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1797,6 +1797,11 @@ int fib_table_flush(struct fib_table *tb)
 	return found;
 }
 
+void fib_free_table(struct fib_table *tb)
+{
+	kfree(tb);
+}
+
 void fib_table_select_default(struct fib_table *tb,
 			      const struct flowi *flp,
 			      struct fib_result *res)
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 4aa47d074a79..1243d1db5c59 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -203,9 +203,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
 			    size_t count, loff_t *ppos)
 {
 	struct ieee80211_key *key = file->private_data;
-	int i, res, bufsize = 2 * key->conf.keylen + 2;
+	int i, bufsize = 2 * key->conf.keylen + 2;
 	char *buf = kmalloc(bufsize, GFP_KERNEL);
 	char *p = buf;
+	ssize_t res;
+
+	if (!buf)
+		return -ENOMEM;
 
 	for (i = 0; i < key->conf.keylen; i++)
 		p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
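
A small hedged userspace sketch of the pattern the fix above enforces: check the allocation before hex-formatting into it, and size the buffer for the hex digits plus a newline and terminator. The helper name and the use of malloc/snprintf are stand-ins for the kernel's kmalloc/scnprintf, not the mac80211 code itself.

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Hex-encode a key into a freshly allocated buffer and bail out cleanly
     * when the allocation fails (a caller would map NULL to -ENOMEM).
     */
    static char *format_key(const unsigned char *key, size_t keylen)
    {
    	size_t bufsize = 2 * keylen + 2;	/* hex digits + '\n' + NUL */
    	char *buf = malloc(bufsize);
    	char *p = buf;
    	size_t i;

    	if (!buf)
    		return NULL;

    	for (i = 0; i < keylen; i++)
    		p += snprintf(p, bufsize - (size_t)(p - buf), "%02x", key[i]);
    	snprintf(p, bufsize - (size_t)(p - buf), "\n");
    	return buf;
    }

    int main(void)
    {
    	const unsigned char key[] = { 0xde, 0xad, 0xbe, 0xef };
    	char *s = format_key(key, sizeof(key));

    	if (s) {
    		fputs(s, stdout);
    		free(s);
    	}
    	return 0;
    }
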
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 6b322fa681f5..107a0cbe52ac 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -677,10 +677,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	/*
 	 * Calculate scan IE length -- we need this to alloc
 	 * memory and to subtract from the driver limit. It
-	 * includes the (extended) supported rates and HT
+	 * includes the DS Params, (extended) supported rates, and HT
 	 * information -- SSID is the driver's responsibility.
 	 */
-	local->scan_ies_len = 4 + max_bitrates; /* (ext) supp rates */
+	local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ +
+		3 /* DS Params */;
 	if (supp_ht)
 		local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);
 
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index d94a858dc52a..00d6ae838303 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -195,7 +195,7 @@ socket_mt4_v1(const struct sk_buff *skb, struct xt_action_param *par)
 static int
 extract_icmp6_fields(const struct sk_buff *skb,
 		     unsigned int outside_hdrlen,
-		     u8 *protocol,
+		     int *protocol,
 		     struct in6_addr **raddr,
 		     struct in6_addr **laddr,
 		     __be16 *rport,
@@ -252,8 +252,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
 	struct sock *sk;
 	struct in6_addr *daddr, *saddr;
 	__be16 dport, sport;
-	int thoff;
-	u8 tproto;
+	int thoff, tproto;
 	const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
 
 	tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
@@ -305,7 +304,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
 		sk = NULL;
 	}
 
-	pr_debug("proto %hhu %pI6:%hu -> %pI6:%hu "
+	pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
 		 "(orig %pI6:%hu) sock %p\n",
 		 tproto, saddr, ntohs(sport),
 		 daddr, ntohs(dport),