author     Linus Torvalds <torvalds@linux-foundation.org>   2011-12-21 21:29:26 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-12-21 21:29:26 -0500
commit     ecefc36b41ac0fe92d76273a23faf27b2da13411 (patch)
tree       d006141ac8aec23f551b33e405e8759bffa39d73
parent     d5ed5e48f4a6333cde05c5235b88a8a94e72afe8 (diff)
parent     c0ed1c14a72ca9ebacd51fb94a8aca488b0d361e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
net: Add a flow_cache_flush_deferred function
ipv4: reintroduce route cache garbage collector
net: have ipconfig not wait if no dev is available
sctp: Do not account for sizeof(struct sk_buff) in estimated rwnd
asix: new device id
davinci-cpdma: fix locking issue in cpdma_chan_stop
sctp: fix incorrect overflow check on autoclose
r8169: fix Config2 MSIEnable bit setting.
llc: llc_cmsg_rcv was getting called after sk_eat_skb.
net: bpf_jit: fix an off-one bug in x86_64 cond jump target
iwlwifi: update SCD BC table for all SCD queues
Revert "Bluetooth: Revert: Fix L2CAP connection establishment"
Bluetooth: Clear RFCOMM session timer when disconnecting last channel
Bluetooth: Prevent uninitialized data access in L2CAP configuration
iwlwifi: allow to switch to HT40 if not associated
iwlwifi: tx_sync only on PAN context
mwifiex: avoid double list_del in command cancel path
ath9k: fix max phy rate at rate control init
nfc: signedness bug in __nci_request()
iwlwifi: do not set the sequence control bit is not needed
27 files changed, 220 insertions, 47 deletions
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index bfab3fa10edc..7b65f752c5f8 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 					break;
 				}
 				if (filter[i].jt != 0) {
-					if (filter[i].jf)
-						t_offset += is_near(f_offset) ? 2 : 6;
+					if (filter[i].jf && f_offset)
+						t_offset += is_near(f_offset) ? 2 : 5;
 					EMIT_COND_JMP(t_op, t_offset);
 					if (filter[i].jf)
 						EMIT_JMP(f_offset);
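Why the jump-size constant changes from 6 to 5, and why the new "&& f_offset" test: the adjustment accounts for the unconditional jump that EMIT_JMP(f_offset) emits between the conditional jump and its fall-through target. On x86-64 a short jump (EB rel8) occupies 2 bytes and a near jump (E9 rel32) occupies 5; 6 bytes would be the length of a conditional near jump (0F 8x rel32), which is not what is emitted here, and when f_offset is 0 no jump is emitted at all, so no adjustment is due. A minimal sketch of that size accounting (an illustrative helper, not the kernel's macro; it assumes is_near() tests whether the displacement fits in a signed byte, as it does in this file):

	/* Illustrative only: bytes taken by the jump that EMIT_JMP(f_offset) emits. */
	static int f_jump_size(int f_offset)
	{
		if (f_offset == 0)
			return 0;			/* EMIT_JMP emits nothing */
		return is_near(f_offset) ? 2 : 5;	/* EB rel8 vs. E9 rel32 */
	}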
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 67bf07819992..c8f47f17186f 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -477,7 +477,6 @@ enum rtl_register_content {
 	/* Config1 register p.24 */
 	LEDS1 = (1 << 7),
 	LEDS0 = (1 << 6),
-	MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
 	Speed_down = (1 << 4),
 	MEMMAP = (1 << 3),
 	IOMAP = (1 << 2),
@@ -485,6 +484,7 @@ enum rtl_register_content {
 	PMEnable = (1 << 0), /* Power Management Enable */
 
 	/* Config2 register p. 25 */
+	MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
 	PCI_Clock_66MHz = 0x01,
 	PCI_Clock_33MHz = 0x00,
 
@@ -3426,22 +3426,24 @@ static const struct rtl_cfg_info {
 };
 
 /* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
 			    const struct rtl_cfg_info *cfg)
 {
+	void __iomem *ioaddr = tp->mmio_addr;
 	unsigned msi = 0;
 	u8 cfg2;
 
 	cfg2 = RTL_R8(Config2) & ~MSIEnable;
 	if (cfg->features & RTL_FEATURE_MSI) {
-		if (pci_enable_msi(pdev)) {
-			dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+		if (pci_enable_msi(tp->pci_dev)) {
+			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
 		} else {
 			cfg2 |= MSIEnable;
 			msi = RTL_FEATURE_MSI;
 		}
 	}
-	RTL_W8(Config2, cfg2);
+	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+		RTL_W8(Config2, cfg2);
 	return msi;
 }
 
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		tp->features |= RTL_FEATURE_WOL;
 	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
 		tp->features |= RTL_FEATURE_WOL;
-	tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+	tp->features |= rtl_try_msi(tp, cfg);
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 
 	if (rtl_tbi_enabled(tp)) {
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index dca9d3369cdd..c97d2f590855 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
 	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
 
 	/* handle completed packets */
+	spin_unlock_irqrestore(&chan->lock, flags);
 	do {
 		ret = __cpdma_chan_process(chan);
 		if (ret < 0)
 			break;
 	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+	spin_lock_irqsave(&chan->lock, flags);
 
 	/* remaining packets haven't been tx/rx'ed, clean them up */
 	while (chan->head) {
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index e6fed4d4cb77..e95f0e60a9bc 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1655,6 +1655,10 @@ static const struct usb_device_id products [] = {
 	// ASIX 88772a
 	USB_DEVICE(0x0db0, 0xa877),
 	.driver_info = (unsigned long) &ax88772_info,
+}, {
+	// Asus USB Ethernet Adapter
+	USB_DEVICE (0x0b95, 0x7e2b),
+	.driver_info = (unsigned long) &ax88772_info,
 },
 	{ },		// END
 };
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 888abc2be3a5..528d5f3e868c 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1271,7 +1271,9 @@ static void ath_rc_init(struct ath_softc *sc,
 
 	ath_rc_priv->max_valid_rate = k;
 	ath_rc_sort_validrates(rate_table, ath_rc_priv);
-	ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+	ath_rc_priv->rate_max_phy = (k > 4) ?
+					ath_rc_priv->valid_rate_index[k-4] :
+					ath_rc_priv->valid_rate_index[k-1];
 	ath_rc_priv->rate_table = rate_table;
 
 	ath_dbg(common, ATH_DBG_CONFIG,
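The ath9k change is a bounds fix: valid_rate_index[] holds k valid entries after sorting, and the old code unconditionally used entry k-4 for rate_max_phy. With fewer than five valid rates (k <= 4) that index is zero or negative — with k = 2, for example, it would read valid_rate_index[-2], outside the array — so the code now falls back to the last valid entry, valid_rate_index[k-1], in that case.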
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index a7a6def40d05..5c7c17c7166a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -606,8 +606,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
 		if (ctx->ht.enabled) {
 			/* if HT40 is used, it should not change
 			 * after associated except channel switch */
-			if (iwl_is_associated_ctx(ctx) &&
-			    !ctx->ht.is_40mhz)
+			if (!ctx->ht.is_40mhz ||
+			    !iwl_is_associated_ctx(ctx))
 				iwlagn_config_ht40(conf, ctx);
 		} else
 			ctx->ht.is_40mhz = false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 35a6b71f358c..df1540ca6102 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
 		tx_cmd->tid_tspec = qc[0] & 0xf;
 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
 	} else {
-		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+		else
+			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
 	}
 
 	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index bacc06c95e7a..e0e9a3dfbc00 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2850,6 +2850,9 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
 	int ret;
 	u8 sta_id;
 
+	if (ctx->ctxid != IWL_RXON_CTX_PAN)
+		return 0;
+
 	IWL_DEBUG_MAC80211(priv, "enter\n");
 	mutex_lock(&priv->shrd->mutex);
 
@@ -2898,6 +2901,9 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
 	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
 	struct iwl_rxon_context *ctx = vif_priv->ctx;
 
+	if (ctx->ctxid != IWL_RXON_CTX_PAN)
+		return;
+
 	IWL_DEBUG_MAC80211(priv, "enter\n");
 	mutex_lock(&priv->shrd->mutex);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index ce918980e977..5f17ab8e76ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -1197,9 +1197,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
 	/* Set up entry for this TFD in Tx byte-count array */
-	if (is_agg)
-		iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
-						  le16_to_cpu(tx_cmd->len));
+	iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
 	dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
 				   DMA_BIDIRECTIONAL);
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index ac278156d390..6e0a3eaecf70 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 {
 	struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
 	unsigned long cmd_flags;
-	unsigned long cmd_pending_q_flags;
 	unsigned long scan_pending_q_flags;
 	uint16_t cancel_scan_cmd = false;
 
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 		cmd_node = adapter->curr_cmd;
 		cmd_node->wait_q_enabled = false;
 		cmd_node->cmd_flag |= CMD_F_CANCELED;
-		spin_lock_irqsave(&adapter->cmd_pending_q_lock,
-				  cmd_pending_q_flags);
-		list_del(&cmd_node->list);
-		spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
-				       cmd_pending_q_flags);
 		mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+		mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+		adapter->curr_cmd = NULL;
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
 	}
 
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
 	}
 	adapter->cmd_wait_q.status = -1;
-	mwifiex_complete_cmd(adapter, adapter->curr_cmd);
 }
 
 /*
diff --git a/include/net/flow.h b/include/net/flow.h
index a09447749e2d..57f15a7f1cdd 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -207,6 +207,7 @@ extern struct flow_cache_object *flow_cache_lookup(
 		u8 dir, flow_resolve_t resolver, void *ctx);
 
 extern void flow_cache_flush(void);
+extern void flow_cache_flush_deferred(void);
 extern atomic_t flow_cache_genid;
 
 #endif
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index e90e7a9935dd..a15432da27c3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -241,6 +241,9 @@ extern struct sctp_globals {
 	 * bits is an indicator of when to send and window update SACK.
 	 */
 	int rwnd_update_shift;
+
+	/* Threshold for autoclose timeout, in seconds. */
+	unsigned long max_autoclose;
 } sctp_globals;
 
 #define sctp_rto_initial (sctp_globals.rto_initial)
@@ -281,6 +284,7 @@ extern struct sctp_globals {
 #define sctp_auth_enable (sctp_globals.auth_enable)
 #define sctp_checksum_disable (sctp_globals.checksum_disable)
 #define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose (sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index e0af7237cd92..c1c597e3e198 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -673,7 +673,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 		goto encrypt;
 
 auth:
-	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
 		return 0;
 
 	if (!hci_conn_auth(conn, sec_level, auth_type))
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 5ea94a1eecf2..17b5b1cd9657 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2152,7 +2152,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
 	void *ptr = req->data;
 	int type, olen;
 	unsigned long val;
-	struct l2cap_conf_rfc rfc;
+	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
 
 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
 
@@ -2271,6 +2271,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
 		}
 	}
 
+	/* Use sane default values in case a misbehaving remote device
+	 * did not send an RFC option.
+	 */
+	rfc.mode = chan->mode;
+	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+	rfc.max_pdu_size = cpu_to_le16(chan->imtu);
+
+	BT_ERR("Expected RFC option was not found, using defaults");
+
 done:
 	switch (rfc.mode) {
 	case L2CAP_MODE_ERTM:
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 4e32e18211f9..2d28dfe98389 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1146,6 +1146,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
 		if (list_empty(&s->dlcs)) {
 			s->state = BT_DISCONN;
 			rfcomm_send_disc(s, 0);
+			rfcomm_session_clear_timer(s);
 		}
 
 		break;
diff --git a/net/core/flow.c b/net/core/flow.c
index 8ae42de9c79e..e318c7e98042 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
 	put_online_cpus();
 }
 
+static void flow_cache_flush_task(struct work_struct *work)
+{
+	flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+	schedule_work(&flow_cache_flush_work);
+}
+
 static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
 	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
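A note on why the deferred variant exists: flow_cache_flush() can sleep — the put_online_cpus() in the context above pairs with a get_online_cpus(), and the flush waits for every CPU to complete it — so it must not be called from atomic context. flow_cache_flush_deferred() simply hands the same flush to the shared workqueue; the xfrm_policy.c hunk at the end of this merge adds its caller, the per-afinfo garbage collector, which may run in atomic context.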
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 0da2afc97f32..99ec116bef14 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -253,6 +253,10 @@ static int __init ic_open_devs(void)
 		}
 	}
 
+	/* no point in waiting if we could not bring up at least one device */
+	if (!ic_first_dev)
+		goto have_carrier;
+
 	/* wait for a carrier on at least one device */
 	start = jiffies;
 	while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 46af62363b8c..252c512e8a81 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -120,6 +120,7 @@
 
 static int ip_rt_max_size;
 static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_redirect_number __read_mostly = 9;
 static int ip_rt_redirect_load __read_mostly = HZ / 50;
@@ -133,6 +134,9 @@ static int ip_rt_min_advmss __read_mostly = 256;
 static int rt_chain_length_max __read_mostly = 20;
 static int redirect_genid;
 
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
+
 /*
  * Interface to generic destination cache.
  */
@@ -830,6 +834,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
 	return ONE;
 }
 
+static void rt_check_expire(void)
+{
+	static unsigned int rover;
+	unsigned int i = rover, goal;
+	struct rtable *rth;
+	struct rtable __rcu **rthp;
+	unsigned long samples = 0;
+	unsigned long sum = 0, sum2 = 0;
+	unsigned long delta;
+	u64 mult;
+
+	delta = jiffies - expires_ljiffies;
+	expires_ljiffies = jiffies;
+	mult = ((u64)delta) << rt_hash_log;
+	if (ip_rt_gc_timeout > 1)
+		do_div(mult, ip_rt_gc_timeout);
+	goal = (unsigned int)mult;
+	if (goal > rt_hash_mask)
+		goal = rt_hash_mask + 1;
+	for (; goal > 0; goal--) {
+		unsigned long tmo = ip_rt_gc_timeout;
+		unsigned long length;
+
+		i = (i + 1) & rt_hash_mask;
+		rthp = &rt_hash_table[i].chain;
+
+		if (need_resched())
+			cond_resched();
+
+		samples++;
+
+		if (rcu_dereference_raw(*rthp) == NULL)
+			continue;
+		length = 0;
+		spin_lock_bh(rt_hash_lock_addr(i));
+		while ((rth = rcu_dereference_protected(*rthp,
+					lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+			prefetch(rth->dst.rt_next);
+			if (rt_is_expired(rth)) {
+				*rthp = rth->dst.rt_next;
+				rt_free(rth);
+				continue;
+			}
+			if (rth->dst.expires) {
+				/* Entry is expired even if it is in use */
+				if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+					tmo >>= 1;
+					rthp = &rth->dst.rt_next;
+					/*
+					 * We only count entries on
+					 * a chain with equal hash inputs once
+					 * so that entries for different QOS
+					 * levels, and other non-hash input
+					 * attributes don't unfairly skew
+					 * the length computation
+					 */
+					length += has_noalias(rt_hash_table[i].chain, rth);
+					continue;
+				}
+			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+				goto nofree;
+
+			/* Cleanup aged off entries. */
+			*rthp = rth->dst.rt_next;
+			rt_free(rth);
+		}
+		spin_unlock_bh(rt_hash_lock_addr(i));
+		sum += length;
+		sum2 += length*length;
+	}
+	if (samples) {
+		unsigned long avg = sum / samples;
+		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+		rt_chain_length_max = max_t(unsigned long,
+					    ip_rt_gc_elasticity,
+					    (avg + 4*sd) >> FRACT_BITS);
+	}
+	rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+	rt_check_expire();
+	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -3179,6 +3274,13 @@ static ctl_table ipv4_route_table[] = {
 		.proc_handler = proc_dointvec_jiffies,
 	},
 	{
+		.procname = "gc_interval",
+		.data = &ip_rt_gc_interval,
+		.maxlen = sizeof(int),
+		.mode = 0644,
+		.proc_handler = proc_dointvec_jiffies,
+	},
+	{
 		.procname = "redirect_load",
 		.data = &ip_rt_redirect_load,
 		.maxlen = sizeof(int),
@@ -3388,6 +3490,11 @@ int __init ip_rt_init(void)
 	devinet_init();
 	ip_fib_init();
 
+	INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+	expires_ljiffies = jiffies;
+	schedule_delayed_work(&expires_work,
+		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
 	if (ip_rt_proc_init())
 		printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index dfd3a648a551..a18e6c3d36e3 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 		copied += used;
 		len -= used;
 
+		/* For non stream protcols we get one packet per recvmsg call */
+		if (sk->sk_type != SOCK_STREAM)
+			goto copy_uaddr;
+
 		if (!(flags & MSG_PEEK)) {
 			sk_eat_skb(sk, skb, 0);
 			*seq = 0;
 		}
 
-		/* For non stream protcols we get one packet per recvmsg call */
-		if (sk->sk_type != SOCK_STREAM)
-			goto copy_uaddr;
-
 		/* Partial read */
 		if (used + offset < skb->len)
 			continue;
@@ -857,6 +857,12 @@ copy_uaddr:
 	}
 	if (llc_sk(sk)->cmsg_flags)
 		llc_cmsg_rcv(msg, skb);
+
+	if (!(flags & MSG_PEEK)) {
+		sk_eat_skb(sk, skb, 0);
+		*seq = 0;
+	}
+
 	goto out;
 }
 
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 3925c6578767..ea66034499ce 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -69,7 +69,7 @@ static int __nci_request(struct nci_dev *ndev,
 	__u32 timeout)
 {
 	int rc = 0;
-	unsigned long completion_rc;
+	long completion_rc;
 
 	ndev->req_status = NCI_REQ_PEND;
 
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 152b5b3c3fff..acd2edbc073e 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
 	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-		(unsigned long)sp->autoclose * HZ;
+		min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
 
 	/* Initializes the timers */
 	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
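The min_t() clamp matters because the autoclose value is a count of seconds that is multiplied by HZ when the autoclose timeout is armed. On a 32-bit build with HZ = 1000, for example, autoclose = 4,294,968 seconds gives 4,294,968 * 1000 = 4,294,968,000 jiffies, which exceeds ULONG_MAX (4,294,967,295) and wraps to roughly 0.7 seconds. Bounding the value by sctp_max_autoclose — initialised to INT_MAX / HZ in the protocol.c hunk below and further limited by the new sysctl range — keeps the product representable, which is also why the per-socket clamp in sctp_setsockopt_autoclose() is removed in the socket.c hunk below.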
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 08b3cead6503..817174eb5f41 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
 	/* Keep track of how many bytes are in flight to the receiver. */
 	asoc->outqueue.outstanding_bytes += datasize;
 
-	/* Update our view of the receiver's rwnd. Include sk_buff overhead
-	 * while updating peer.rwnd so that it reduces the chances of a
-	 * receiver running out of receive buffer space even when receive
-	 * window is still open. This can happen when a sender is sending
-	 * sending small messages.
-	 */
-	datasize += sizeof(struct sk_buff);
+	/* Update our view of the receiver's rwnd. */
 	if (datasize < rwnd)
 		rwnd -= datasize;
 	else
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 14c2b06028ff..cfeb1d4a1ee6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 					chunk->transport->flight_size -=
 							sctp_data_size(chunk);
 				q->outstanding_bytes -= sctp_data_size(chunk);
-				q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-						       sizeof(struct sk_buff));
+				q->asoc->peer.rwnd += sctp_data_size(chunk);
 			}
 			continue;
 		}
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
 			 * (Section 7.2.4)), add the data size of those
 			 * chunks to the rwnd.
 			 */
-			q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-					       sizeof(struct sk_buff));
+			q->asoc->peer.rwnd += sctp_data_size(chunk);
 			q->outstanding_bytes -= sctp_data_size(chunk);
 			if (chunk->transport)
 				transport->flight_size -= sctp_data_size(chunk);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 61b9fca5a173..6f6ad8686833 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
 	sctp_max_instreams = SCTP_DEFAULT_INSTREAMS;
 	sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS;
 
+	/* Initialize maximum autoclose timeout. */
+	sctp_max_autoclose = INT_MAX / HZ;
+
 	/* Initialize handle used for association ids. */
 	idr_init(&sctp_assocs_id);
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 13bf5fcdbff1..54a7cd2fdd7a 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2200,8 +2200,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
 		return -EINVAL;
 	if (copy_from_user(&sp->autoclose, optval, optlen))
 		return -EFAULT;
-	/* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
-	sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
 
 	return 0;
 }
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 6b3952961b85..60ffbd067ff7 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+	(MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+	? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
 
 extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
 		.extra1 = &one,
 		.extra2 = &rwnd_scale_max,
 	},
+	{
+		.procname = "max_autoclose",
+		.data = &sctp_max_autoclose,
+		.maxlen = sizeof(unsigned long),
+		.mode = 0644,
+		.proc_handler = &proc_doulongvec_minmax,
+		.extra1 = &max_autoclose_min,
+		.extra2 = &max_autoclose_max,
+	},
 
 	{ /* sentinel */ }
 };
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2118d6446630..9049a5caeb25 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
 {
 	struct dst_entry *head, *next;
 
-	flow_cache_flush();
-
 	spin_lock_bh(&xfrm_policy_sk_bundle_lock);
 	head = xfrm_policy_sk_bundles;
 	xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
 	}
 }
 
+static void xfrm_garbage_collect(struct net *net)
+{
+	flow_cache_flush();
+	__xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+	flow_cache_flush_deferred();
+	__xfrm_garbage_collect(net);
+}
+
 static void xfrm_init_pmtu(struct dst_entry *dst)
 {
 	do {
@@ -2422,7 +2432,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
 		if (likely(dst_ops->neigh_lookup == NULL))
 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
 		if (likely(afinfo->garbage_collect == NULL))
-			afinfo->garbage_collect = __xfrm_garbage_collect;
+			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
 		xfrm_policy_afinfo[afinfo->family] = afinfo;
 	}
 	write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2516,7 +2526,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
 
 	switch (event) {
 	case NETDEV_DOWN:
-		__xfrm_garbage_collect(dev_net(dev));
+		xfrm_garbage_collect(dev_net(dev));
 	}
 	return NOTIFY_DONE;
 }