 44 files changed, 332 insertions(+), 214 deletions(-)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index af7c40ac1455..e1a8f4e19983 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -581,7 +581,11 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
 	struct xgene_enet_desc_ring *ring;
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct device *dev = ndev_to_dev(ndev);
-	u32 size;
+	int size;
+
+	size = xgene_enet_get_ring_size(dev, cfgsize);
+	if (size < 0)
+		return NULL;
 
 	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
 			    GFP_KERNEL);
@@ -593,7 +597,6 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
 	ring->cfgsize = cfgsize;
 	ring->id = ring_id;
 
-	size = xgene_enet_get_ring_size(dev, cfgsize);
 	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
 					      GFP_KERNEL);
 	if (!ring->desc_addr) {
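Note: the point of this hunk is that xgene_enet_get_ring_size() can return a negative errno, which the old unsigned `u32 size` would silently wrap into a huge positive length; moving the call ahead of the allocations also means nothing is allocated for an invalid size. A minimal sketch of the pattern, with hypothetical names (alloc_ring, get_ring_size):

    static void *alloc_ring(struct device *dev, int cfgsize)
    {
            int size;       /* signed on purpose: may carry -EINVAL */

            size = get_ring_size(dev, cfgsize);
            if (size < 0)   /* validate before allocating anything */
                    return NULL;

            return devm_kzalloc(dev, size, GFP_KERNEL);
    }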
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a3dd5dc64f4c..4296b3d26f02 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14093,8 +14093,9 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
 
 	spin_lock_bh(&tp->lock);
 	if (!tp->hw_stats) {
+		*stats = tp->net_stats_prev;
 		spin_unlock_bh(&tp->lock);
-		return &tp->net_stats_prev;
+		return stats;
 	}
 
 	tg3_get_nstats(tp, stats);
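Note: this fixes an unlock-then-return-pointer hazard. Returning &tp->net_stats_prev after dropping tp->lock hands the caller a structure a concurrent writer may update mid-read; the fix snapshots it into the caller-supplied buffer while the lock is still held. The idiom, condensed:

    static struct rtnl_link_stats64 *get_stats64(struct tg3 *tp,
                                                 struct rtnl_link_stats64 *stats)
    {
            spin_lock_bh(&tp->lock);
            *stats = tp->net_stats_prev;    /* copy while protected */
            spin_unlock_bh(&tp->lock);
            return stats;                   /* caller-owned, stable copy */
    }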
diff --git a/drivers/net/ethernet/ibm/ehea/Makefile b/drivers/net/ethernet/ibm/ehea/Makefile
index 775d9969b5c2..cd473e295242 100644
--- a/drivers/net/ethernet/ibm/ehea/Makefile
+++ b/drivers/net/ethernet/ibm/ehea/Makefile
@@ -1,6 +1,6 @@
 #
 # Makefile for the eHEA ethernet device driver for IBM eServer System p
 #
-ehea-y = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o ehea_phyp.o
+ehea-y = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o
 obj-$(CONFIG_EHEA) += ehea.o
 
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 58856032298d..06edfca1a35e 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -47,7 +47,7 @@ static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
  * e1000_mng_enable_host_if - Checks host interface is enabled
  * @hw: pointer to the HW structure
  *
- * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
  *
  * This function checks whether the HOST IF is enabled for command operation
  * and also checks whether the previous command is completed.  It busy waits
@@ -78,7 +78,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
 	}
 
 	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
-		e_dbg("Previous command timeout failed .\n");
+		e_dbg("Previous command timeout failed.\n");
 		return -E1000_ERR_HOST_INTERFACE_COMMAND;
 	}
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 6938fc1ad877..5d01db1d789b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -33,6 +33,7 @@
 #include <scsi/fc/fc_fcoe.h>
 #include <scsi/libfc.h>
 #include <scsi/libfcoe.h>
+#include <uapi/linux/dcbnl.h>
 
 #include "i40e.h"
 #include "i40e_fcoe.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 51bc03072ed3..871474f6fe62 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4415,13 +4415,13 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 
 	switch (vsi->back->hw.phy.link_info.link_speed) {
 	case I40E_LINK_SPEED_40GB:
-		strncpy(speed, "40 Gbps", SPEED_SIZE);
+		strlcpy(speed, "40 Gbps", SPEED_SIZE);
 		break;
 	case I40E_LINK_SPEED_10GB:
-		strncpy(speed, "10 Gbps", SPEED_SIZE);
+		strlcpy(speed, "10 Gbps", SPEED_SIZE);
 		break;
 	case I40E_LINK_SPEED_1GB:
-		strncpy(speed, "1000 Mbps", SPEED_SIZE);
+		strlcpy(speed, "1000 Mbps", SPEED_SIZE);
 		break;
 	default:
 		break;
@@ -4429,16 +4429,16 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 
 	switch (vsi->back->hw.fc.current_mode) {
 	case I40E_FC_FULL:
-		strncpy(fc, "RX/TX", FC_SIZE);
+		strlcpy(fc, "RX/TX", FC_SIZE);
 		break;
 	case I40E_FC_TX_PAUSE:
-		strncpy(fc, "TX", FC_SIZE);
+		strlcpy(fc, "TX", FC_SIZE);
 		break;
 	case I40E_FC_RX_PAUSE:
-		strncpy(fc, "RX", FC_SIZE);
+		strlcpy(fc, "RX", FC_SIZE);
 		break;
 	default:
-		strncpy(fc, "None", FC_SIZE);
+		strlcpy(fc, "None", FC_SIZE);
 		break;
 	}
 
@@ -5839,7 +5839,7 @@ static void i40e_send_version(struct i40e_pf *pf)
 	dv.minor_version = DRV_VERSION_MINOR;
 	dv.build_version = DRV_VERSION_BUILD;
 	dv.subbuild_version = 0;
-	strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
 	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
 }
 
@@ -6293,7 +6293,7 @@ static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
 
 	if (alloc_qvectors) {
 		/* allocate memory for q_vector pointers */
-		size = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
 		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
 		if (!vsi->q_vectors) {
 			ret = -ENOMEM;
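Note: the strncpy()-to-strlcpy() conversions above all fix the same hazard: strncpy() does not NUL-terminate when the source is at least as long as the destination. A two-line illustration (an 8-byte buffer, assumed for the example):

    char buf[8];

    strncpy(buf, "40 Gbps!", sizeof(buf));  /* fills all 8 bytes, no '\0' */
    strlcpy(buf, "40 Gbps!", sizeof(buf));  /* buf == "40 Gbps", terminated */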
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 97bda3dffd49..25c4f9a3011f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -251,9 +251,9 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
  *
  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
  **/
-i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
-			      u32 offset, u16 words, void *data,
-			      bool last_command)
+static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+				     u32 offset, u16 words, void *data,
+				     bool last_command)
 {
 	i40e_status ret_code = I40E_ERR_NVM;
 
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 69c26f04d8ce..679db026f4be 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -873,6 +873,10 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
 		return -ENOMEM;
 	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
 				   DMA_BIDIRECTIONAL);
+	if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) {
+		__free_page(dmatest_page);
+		return -ENOMEM;
+	}
 
 	/* Run a small DMA test.
 	 * The magic multipliers to the length tell the firmware
@@ -1294,6 +1298,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
 			int bytes, int watchdog)
 {
 	struct page *page;
+	dma_addr_t bus;
 	int idx;
 #if MYRI10GE_ALLOC_SIZE > 4096
 	int end_offset;
@@ -1318,11 +1323,21 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
 				rx->watchdog_needed = 1;
 			return;
 		}
+
+		bus = pci_map_page(mgp->pdev, page, 0,
+				   MYRI10GE_ALLOC_SIZE,
+				   PCI_DMA_FROMDEVICE);
+		if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
+			__free_pages(page, MYRI10GE_ALLOC_ORDER);
+			if (rx->fill_cnt - rx->cnt < 16)
+				rx->watchdog_needed = 1;
+			return;
+		}
+
 		rx->page = page;
 		rx->page_offset = 0;
-		rx->bus = pci_map_page(mgp->pdev, page, 0,
-				       MYRI10GE_ALLOC_SIZE,
-				       PCI_DMA_FROMDEVICE);
+		rx->bus = bus;
+
 	}
 	rx->info[idx].page = rx->page;
 	rx->info[idx].page_offset = rx->page_offset;
@@ -2764,6 +2779,35 @@ myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
 	mb();
 }
 
+static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
+				  struct myri10ge_tx_buf *tx, int idx)
+{
+	unsigned int len;
+	int last_idx;
+
+	/* Free any DMA resources we've alloced and clear out the skb slot */
+	last_idx = (idx + 1) & tx->mask;
+	idx = tx->req & tx->mask;
+	do {
+		len = dma_unmap_len(&tx->info[idx], len);
+		if (len) {
+			if (tx->info[idx].skb != NULL)
+				pci_unmap_single(mgp->pdev,
+						 dma_unmap_addr(&tx->info[idx],
+								bus), len,
+						 PCI_DMA_TODEVICE);
+			else
+				pci_unmap_page(mgp->pdev,
+					       dma_unmap_addr(&tx->info[idx],
+							      bus), len,
+					       PCI_DMA_TODEVICE);
+			dma_unmap_len_set(&tx->info[idx], len, 0);
+			tx->info[idx].skb = NULL;
+		}
+		idx = (idx + 1) & tx->mask;
+	} while (idx != last_idx);
+}
+
 /*
  * Transmit a packet. We need to split the packet so that a single
  * segment does not cross myri10ge->tx_boundary, so this makes segment
@@ -2787,7 +2831,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
 	u32 low;
 	__be32 high_swapped;
 	unsigned int len;
-	int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
+	int idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
 	u16 pseudo_hdr_offset, cksum_offset, queue;
 	int cum_len, seglen, boundary, rdma_count;
 	u8 flags, odd_flag;
@@ -2884,9 +2928,12 @@ again:
 
 	/* map the skb for DMA */
 	len = skb_headlen(skb);
+	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (unlikely(pci_dma_mapping_error(mgp->pdev, bus)))
+		goto drop;
+
 	idx = tx->req & tx->mask;
 	tx->info[idx].skb = skb;
-	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
 	dma_unmap_addr_set(&tx->info[idx], bus, bus);
 	dma_unmap_len_set(&tx->info[idx], len, len);
 
@@ -2985,12 +3032,16 @@ again:
 			break;
 
 		/* map next fragment for DMA */
-		idx = (count + tx->req) & tx->mask;
 		frag = &skb_shinfo(skb)->frags[frag_idx];
 		frag_idx++;
 		len = skb_frag_size(frag);
 		bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
 				       DMA_TO_DEVICE);
+		if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
+			myri10ge_unmap_tx_dma(mgp, tx, idx);
+			goto drop;
+		}
+		idx = (count + tx->req) & tx->mask;
 		dma_unmap_addr_set(&tx->info[idx], bus, bus);
 		dma_unmap_len_set(&tx->info[idx], len, len);
 	}
@@ -3021,31 +3072,8 @@ again:
 	return NETDEV_TX_OK;
 
 abort_linearize:
-	/* Free any DMA resources we've alloced and clear out the skb
-	 * slot so as to not trip up assertions, and to avoid a
-	 * double-free if linearizing fails */
+	myri10ge_unmap_tx_dma(mgp, tx, idx);
 
-	last_idx = (idx + 1) & tx->mask;
-	idx = tx->req & tx->mask;
-	tx->info[idx].skb = NULL;
-	do {
-		len = dma_unmap_len(&tx->info[idx], len);
-		if (len) {
-			if (tx->info[idx].skb != NULL)
-				pci_unmap_single(mgp->pdev,
-						 dma_unmap_addr(&tx->info[idx],
-								bus), len,
-						 PCI_DMA_TODEVICE);
-			else
-				pci_unmap_page(mgp->pdev,
-					       dma_unmap_addr(&tx->info[idx],
-							      bus), len,
-					       PCI_DMA_TODEVICE);
-			dma_unmap_len_set(&tx->info[idx], len, 0);
-			tx->info[idx].skb = NULL;
-		}
-		idx = (idx + 1) & tx->mask;
-	} while (idx != last_idx);
 	if (skb_is_gso(skb)) {
 		netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
 		goto drop;
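Note: all of the myri10ge hunks enforce one rule: a DMA mapping can fail, so the returned handle must be checked with pci_dma_mapping_error() before it is handed to the hardware, and on failure the buffer is released and any partial mappings unwound (the new myri10ge_unmap_tx_dma() helper lets the fragment path and abort_linearize share one unwinder). A minimal sketch of the check, with a hypothetical helper name:

    static int map_head(struct pci_dev *pdev, struct sk_buff *skb,
                        dma_addr_t *bus)
    {
            *bus = pci_map_single(pdev, skb->data, skb_headlen(skb),
                                  PCI_DMA_TODEVICE);
            if (unlikely(pci_dma_mapping_error(pdev, *bus))) {
                    dev_kfree_skb_any(skb);  /* never post a bad address */
                    return -ENOMEM;
            }
            return 0;
    }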
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index d813bfb1a847..23c89ab5a6ad 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -32,6 +32,11 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
+/* Heuristic for the number of times to exponentially backoff and
+ * retry sending an LDC trigger when EAGAIN is encountered
+ */
+#define	VNET_MAX_RETRIES	10
+
 /* Ordered from largest major to lowest */
 static struct vio_version vnet_versions[] = {
 	{ .major = 1, .minor = 0 },
@@ -260,6 +265,7 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
 		.state			= vio_dring_state,
 	};
 	int err, delay;
+	int retries = 0;
 
 	hdr.seq = dr->snd_nxt;
 	delay = 1;
@@ -272,6 +278,13 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
 			udelay(delay);
 			if ((delay <<= 1) > 128)
 				delay = 128;
+			if (retries++ > VNET_MAX_RETRIES) {
+				pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
+					port->raddr[0], port->raddr[1],
+					port->raddr[2], port->raddr[3],
+					port->raddr[4], port->raddr[5]);
+				err = -ECONNRESET;
+			}
 	} while (err == -EAGAIN);
 
 	return err;
@@ -475,8 +488,9 @@ static int handle_mcast(struct vnet_port *port, void *msgbuf)
 	return 0;
 }
 
-static void maybe_tx_wakeup(struct vnet *vp)
+static void maybe_tx_wakeup(unsigned long param)
 {
+	struct vnet *vp = (struct vnet *)param;
 	struct net_device *dev = vp->dev;
 
 	netif_tx_lock(dev);
@@ -573,8 +587,13 @@ static void vnet_event(void *arg, int event)
 			break;
 		}
 		spin_unlock(&vio->lock);
+	/* Kick off a tasklet to wake the queue. We cannot call
+	 * maybe_tx_wakeup directly here because we could deadlock on
+	 * netif_tx_lock() with dev_watchdog()
+	 */
 	if (unlikely(tx_wakeup && err != -ECONNRESET))
-		maybe_tx_wakeup(port->vp);
+		tasklet_schedule(&port->vp->vnet_tx_wakeup);
+
 	local_irq_restore(flags);
 }
 
@@ -593,6 +612,7 @@ static int __vnet_tx_trigger(struct vnet_port *port)
 		.end_idx		= (u32) -1,
 	};
 	int err, delay;
+	int retries = 0;
 
 	hdr.seq = dr->snd_nxt;
 	delay = 1;
@@ -605,6 +625,8 @@ static int __vnet_tx_trigger(struct vnet_port *port)
 			udelay(delay);
 			if ((delay <<= 1) > 128)
 				delay = 128;
+			if (retries++ > VNET_MAX_RETRIES)
+				break;
 	} while (err == -EAGAIN);
 
 	return err;
@@ -691,7 +713,15 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		memset(tx_buf+VNET_PACKET_SKIP+skb->len, 0, len - skb->len);
 	}
 
-	d->hdr.ack = VIO_ACK_ENABLE;
+	/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
+	 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
+	 * the protocol itself does not require it as long as the peer
+	 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
+	 *
+	 * An ACK for every packet in the ring is expensive as the
+	 * sending of LDC messages is slow and affects performance.
+	 */
+	d->hdr.ack = VIO_ACK_DISABLE;
 	d->size = len;
 	d->ncookies = port->tx_bufs[dr->prod].ncookies;
 	for (i = 0; i < d->ncookies; i++)
@@ -1046,6 +1076,7 @@ static struct vnet *vnet_new(const u64 *local_mac)
 	vp = netdev_priv(dev);
 
 	spin_lock_init(&vp->lock);
+	tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp);
 	vp->dev = dev;
 
 	INIT_LIST_HEAD(&vp->port_list);
@@ -1105,6 +1136,7 @@ static void vnet_cleanup(void)
 		vp = list_first_entry(&vnet_list, struct vnet, list);
 		list_del(&vp->list);
 		dev = vp->dev;
+		tasklet_kill(&vp->vnet_tx_wakeup);
 		/* vio_unregister_driver() should have cleaned up port_list */
 		BUG_ON(!list_empty(&vp->port_list));
 		unregister_netdev(dev);
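Note: the wakeup is deferred to a tasklet because maybe_tx_wakeup() takes netif_tx_lock(), which can deadlock against dev_watchdog() when called directly from the LDC event path. The tasklet lifecycle, reduced to its three calls (fragments of the driver above, not a complete example):

    /* probe time: bind the handler to its context */
    tasklet_init(&vp->vnet_tx_wakeup, maybe_tx_wakeup, (unsigned long)vp);

    /* event path: defer instead of taking netif_tx_lock() in IRQ context */
    tasklet_schedule(&vp->vnet_tx_wakeup);

    /* teardown: wait out any still-running instance before freeing vp */
    tasklet_kill(&vp->vnet_tx_wakeup);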
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
index d347a5bf24b0..de5c2c64996f 100644
--- a/drivers/net/ethernet/sun/sunvnet.h
+++ b/drivers/net/ethernet/sun/sunvnet.h
@@ -1,6 +1,8 @@
 #ifndef _SUNVNET_H
 #define _SUNVNET_H
 
+#include <linux/interrupt.h>
+
 #define DESC_NCOOKIES(entry_size)	\
 	((entry_size) - sizeof(struct vio_net_desc))
 
@@ -78,6 +80,8 @@ struct vnet {
 
 	struct list_head	list;
 	u64			local_mac;
+
+	struct tasklet_struct	vnet_tx_wakeup;
 };
 
 #endif /* _SUNVNET_H */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 36f4459520c3..fda5891835d4 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1170,7 +1170,6 @@ static struct platform_driver temac_of_driver = {
 	.probe = temac_of_probe,
 	.remove = temac_of_remove,
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "xilinx_temac",
 		.of_match_table = temac_of_match,
 	},
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 30e8608ff050..c8fd94133ecd 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1645,7 +1645,6 @@ static struct platform_driver axienet_of_driver = {
 	.probe = axienet_of_probe,
 	.remove = axienet_of_remove,
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "xilinx_axienet",
 		.of_match_table = axienet_of_match,
 	},
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 782bb9373cd8..28dbbdc393eb 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1245,7 +1245,6 @@ MODULE_DEVICE_TABLE(of, xemaclite_of_match);
 static struct platform_driver xemaclite_of_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
-		.owner = THIS_MODULE,
 		.of_match_table = xemaclite_of_match,
 	},
 	.probe = xemaclite_of_probe,
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 768dfe9a9315..6d3e2093bf7f 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1755,17 +1755,4 @@ static struct pci_driver donauboe_pci_driver = {
   .resume	= toshoboe_wakeup
 };
 
-static int __init
-donauboe_init (void)
-{
-  return pci_register_driver(&donauboe_pci_driver);
-}
-
-static void __exit
-donauboe_cleanup (void)
-{
-  pci_unregister_driver(&donauboe_pci_driver);
-}
-
-module_init(donauboe_init);
-module_exit(donauboe_cleanup);
+module_pci_driver(donauboe_pci_driver);
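Note: module_pci_driver() removes exactly this init/exit boilerplate. For reference, the macro expands to roughly the code it replaces (the generated symbol names may differ from this sketch):

    static int __init donauboe_pci_driver_init(void)
    {
            return pci_register_driver(&donauboe_pci_driver);
    }
    module_init(donauboe_pci_driver_init);

    static void __exit donauboe_pci_driver_exit(void)
    {
            pci_unregister_driver(&donauboe_pci_driver);
    }
    module_exit(donauboe_pci_driver_exit);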
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index ef8a5c20236a..60e4ca01ccbb 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -45,10 +45,9 @@ struct macvlan_port {
 	struct sk_buff_head	bc_queue;
 	struct work_struct	bc_work;
 	bool 			passthru;
+	int			count;
 };
 
-#define MACVLAN_PORT_IS_EMPTY(port)    list_empty(&port->vlans)
-
 struct macvlan_skb_cb {
 	const struct macvlan_dev *src;
 };
@@ -667,7 +666,8 @@ static void macvlan_uninit(struct net_device *dev)
 
 	free_percpu(vlan->pcpu_stats);
 
-	if (MACVLAN_PORT_IS_EMPTY(port))
+	port->count -= 1;
+	if (!port->count)
 		macvlan_port_destroy(port->dev);
 }
 
@@ -1020,12 +1020,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 		vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
 
 	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
-		if (!MACVLAN_PORT_IS_EMPTY(port))
+		if (port->count)
 			return -EINVAL;
 		port->passthru = true;
 		eth_hw_addr_inherit(dev, lowerdev);
 	}
 
+	port->count += 1;
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto destroy_port;
@@ -1043,7 +1044,8 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 unregister_netdev:
 	unregister_netdevice(dev);
 destroy_port:
-	if (MACVLAN_PORT_IS_EMPTY(port))
+	port->count -= 1;
+	if (!port->count)
 		macvlan_port_destroy(lowerdev);
 
 	return err;
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 8596aba34f96..237d0cda1bcb 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -256,6 +256,7 @@ struct ar9170 {
 	atomic_t rx_work_urbs;
 	atomic_t rx_pool_urbs;
 	kernel_ulong_t features;
+	bool usb_ep_cmd_is_bulk;
 
 	/* firmware settings */
 	struct completion fw_load_wait;
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index f35c7f30f9a6..c9f93310c0d6 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -621,9 +621,16 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
 		goto err_free;
 	}
 
-	usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
-		AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
-		carl9170_usb_cmd_complete, ar, 1);
+	if (ar->usb_ep_cmd_is_bulk)
+		usb_fill_bulk_urb(urb, ar->udev,
+				  usb_sndbulkpipe(ar->udev, AR9170_USB_EP_CMD),
+				  cmd, cmd->hdr.len + 4,
+				  carl9170_usb_cmd_complete, ar);
+	else
+		usb_fill_int_urb(urb, ar->udev,
+				 usb_sndintpipe(ar->udev, AR9170_USB_EP_CMD),
+				 cmd, cmd->hdr.len + 4,
+				 carl9170_usb_cmd_complete, ar, 1);
 
 	if (free_buf)
 		urb->transfer_flags |= URB_FREE_BUFFER;
@@ -1032,9 +1039,10 @@ static void carl9170_usb_firmware_step2(const struct firmware *fw,
 static int carl9170_usb_probe(struct usb_interface *intf,
 			      const struct usb_device_id *id)
 {
+	struct usb_endpoint_descriptor *ep;
 	struct ar9170 *ar;
 	struct usb_device *udev;
-	int err;
+	int i, err;
 
 	err = usb_reset_device(interface_to_usbdev(intf));
 	if (err)
@@ -1050,6 +1058,21 @@ static int carl9170_usb_probe(struct usb_interface *intf,
 	ar->intf = intf;
 	ar->features = id->driver_info;
 
+	/* We need to remember the type of endpoint 4 because it differs
+	 * between high- and full-speed configuration. The high-speed
+	 * configuration specifies it as interrupt and the full-speed
+	 * configuration as bulk endpoint. This information is required
+	 * later when sending urbs to that endpoint.
+	 */
+	for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; ++i) {
+		ep = &intf->cur_altsetting->endpoint[i].desc;
+
+		if (usb_endpoint_num(ep) == AR9170_USB_EP_CMD &&
+		    usb_endpoint_dir_out(ep) &&
+		    usb_endpoint_type(ep) == USB_ENDPOINT_XFER_BULK)
+			ar->usb_ep_cmd_is_bulk = true;
+	}
+
 	usb_set_intfdata(intf, ar);
 	SET_IEEE80211_DEV(ar->hw, &intf->dev);
 
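Note: the probe hunk discovers the command endpoint's transfer type at runtime because the device reports it as interrupt in the high-speed configuration but bulk at full speed, and the URB submission path must match the descriptor. A standalone sketch of the detection (helper name hypothetical):

    static bool ep_is_bulk_out(struct usb_interface *intf, u8 ep_num)
    {
            struct usb_endpoint_descriptor *ep;
            int i;

            for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; ++i) {
                    ep = &intf->cur_altsetting->endpoint[i].desc;
                    if (usb_endpoint_num(ep) == ep_num &&
                        usb_endpoint_dir_out(ep) &&
                        usb_endpoint_type(ep) == USB_ENDPOINT_XFER_BULK)
                            return true;    /* bulk OUT endpoint found */
            }
            return false;
    }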
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 535c7eb01b3a..8f8b9373de95 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -1318,6 +1318,8 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
 	msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings;
 	msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings *
 		sizeof(*msgbuf->flowring_dma_handle), GFP_ATOMIC);
+	if (!msgbuf->flowring_dma_handle)
+		goto fail;
 
 	msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
 	msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;
@@ -1362,6 +1364,7 @@ fail:
 		kfree(msgbuf->flow_map);
 		kfree(msgbuf->txstatus_done_map);
 		brcmf_msgbuf_release_pktids(msgbuf);
+		kfree(msgbuf->flowring_dma_handle);
 		if (msgbuf->ioctbuf)
 			dma_free_coherent(drvr->bus_if->dev,
 					  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
@@ -1391,6 +1394,7 @@ void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
 			  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
 			  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
 	brcmf_msgbuf_release_pktids(msgbuf);
+	kfree(msgbuf->flowring_dma_handle);
 	kfree(msgbuf);
 	drvr->proto->pd = NULL;
 }
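Note: this is the standard goto-fail allocation idiom: check every kzalloc() and let a single error label free everything allocated so far, relying on kfree(NULL) being a no-op. Condensed, with a hypothetical two-field context struct:

    static int msgbuf_alloc(struct msgbuf_ctx *ctx, u32 nrings)
    {
            ctx->flow_map = kzalloc(nrings, GFP_ATOMIC);
            if (!ctx->flow_map)
                    goto fail;
            ctx->dma_handle = kzalloc(nrings * sizeof(*ctx->dma_handle),
                                      GFP_ATOMIC);
            if (!ctx->dma_handle)
                    goto fail;
            return 0;

    fail:
            /* kfree(NULL) is a no-op, so one label covers every exit */
            kfree(ctx->dma_handle);
            kfree(ctx->flow_map);
            return -ENOMEM;
    }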
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index bc972c0ba5f8..e5101b287e4e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -591,12 +591,13 @@ static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
 	}
 	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
 		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
-	if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK)
+	if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
 		brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
-	if (waitqueue_active(&devinfo->mbdata_resp_wait)) {
-		devinfo->mbdata_completed = true;
-		wake_up(&devinfo->mbdata_resp_wait);
+		if (waitqueue_active(&devinfo->mbdata_resp_wait)) {
+			devinfo->mbdata_completed = true;
+			wake_up(&devinfo->mbdata_resp_wait);
+		}
 	}
 }
 
 
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index c5aa404069f3..389656bd1a74 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -9853,6 +9853,7 @@ static int ipw_wx_get_wireless_mode(struct net_device *dev,
 		strncpy(extra, "unknown", MAX_WX_STRING);
 		break;
 	}
+	extra[MAX_WX_STRING - 1] = '\0';
 
 	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 0d6a8b768a68..7c8796584c25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -396,7 +396,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	else
 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
-	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+	/* TODO: enable that only for firmwares that don't crash */
+	/* hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; */
 	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
 	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
 	/* we create the 802.11 header and zero length SSID IE. */
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index ef3026f46a37..d4eb8d2e9cb7 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -165,6 +165,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	u16 dealloc_ring[MAX_PENDING_REQS];
 	struct task_struct *dealloc_task;
 	wait_queue_head_t dealloc_wq;
+	atomic_t inflight_packets;
 
 	/* Use kthread for guest RX */
 	struct task_struct *task;
@@ -329,4 +330,8 @@ extern unsigned int xenvif_max_queues;
 extern struct dentry *xen_netback_dbg_root;
 #endif
 
+void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
+				 struct sk_buff *skb);
+void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index bfd10cb9c8de..e29e15dca86e 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,23 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
 
+/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
+ * increasing the inflight counter. We need to increase the inflight
+ * counter because core driver calls into xenvif_zerocopy_callback
+ * which calls xenvif_skb_zerocopy_complete.
+ */
+void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
+				 struct sk_buff *skb)
+{
+	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+	atomic_inc(&queue->inflight_packets);
+}
+
+void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
+{
+	atomic_dec(&queue->inflight_packets);
+}
+
 static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 {
 	struct net_device *dev = queue->vif->dev;
@@ -524,9 +541,6 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 
 	init_timer(&queue->rx_stalled);
 
-	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
-		       XENVIF_NAPI_WEIGHT);
-
 	return 0;
 }
 
@@ -560,6 +574,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 
 	init_waitqueue_head(&queue->wq);
 	init_waitqueue_head(&queue->dealloc_wq);
+	atomic_set(&queue->inflight_packets, 0);
 
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
@@ -614,6 +629,9 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 	wake_up_process(queue->task);
 	wake_up_process(queue->dealloc_task);
 
+	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+		       XENVIF_NAPI_WEIGHT);
+
 	return 0;
 
 err_rx_unbind:
@@ -642,25 +660,6 @@ void xenvif_carrier_off(struct xenvif *vif)
 	rtnl_unlock();
 }
 
-static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
-				      unsigned int worst_case_skb_lifetime)
-{
-	int i, unmap_timeout = 0;
-
-	for (i = 0; i < MAX_PENDING_REQS; ++i) {
-		if (queue->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
-			unmap_timeout++;
-			schedule_timeout(msecs_to_jiffies(1000));
-			if (unmap_timeout > worst_case_skb_lifetime &&
-			    net_ratelimit())
-				netdev_err(queue->vif->dev,
-					   "Page still granted! Index: %x\n",
-					   i);
-			i = -1;
-		}
-	}
-}
-
 void xenvif_disconnect(struct xenvif *vif)
 {
 	struct xenvif_queue *queue = NULL;
@@ -672,6 +671,8 @@ void xenvif_disconnect(struct xenvif *vif)
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
 
+		netif_napi_del(&queue->napi);
+
 		if (queue->task) {
 			del_timer_sync(&queue->rx_stalled);
 			kthread_stop(queue->task);
@@ -704,7 +705,6 @@ void xenvif_disconnect(struct xenvif *vif)
 void xenvif_deinit_queue(struct xenvif_queue *queue)
 {
 	free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
-	netif_napi_del(&queue->napi);
 }
 
 void xenvif_free(struct xenvif *vif)
@@ -712,21 +712,11 @@ void xenvif_free(struct xenvif *vif)
 	struct xenvif_queue *queue = NULL;
 	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
-	/* Here we want to avoid timeout messages if an skb can be legitimately
-	 * stuck somewhere else. Realistically this could be an another vif's
-	 * internal or QDisc queue. That another vif also has this
-	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
-	 * Although if that other guest wakes up just before its timeout happens
-	 * and takes only one skb from QDisc, it can hold onto other skbs for a
-	 * longer period.
-	 */
-	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);
 
 	unregister_netdev(vif->dev);
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
-		xenvif_wait_unmap_timeout(queue, worst_case_skb_lifetime);
 		xenvif_deinit_queue(queue);
 	}
 
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4734472aa620..08f65996534c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1525,10 +1525,12 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
 	/* remove traces of mapped pages and frag_list */
 	skb_frag_list_init(skb);
 	uarg = skb_shinfo(skb)->destructor_arg;
+	/* increase inflight counter to offset decrement in callback */
+	atomic_inc(&queue->inflight_packets);
 	uarg->callback(uarg, true);
 	skb_shinfo(skb)->destructor_arg = NULL;
 
-	skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+	xenvif_skb_zerocopy_prepare(queue, nskb);
 	kfree_skb(nskb);
 
 	return 0;
@@ -1589,7 +1591,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 			if (net_ratelimit())
 				netdev_err(queue->vif->dev,
 					   "Not enough memory to consolidate frag_list!\n");
-			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+			xenvif_skb_zerocopy_prepare(queue, skb);
 			kfree_skb(skb);
 			continue;
 		}
@@ -1609,7 +1611,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 				   "Can't setup checksum in net_tx_action\n");
 			/* We have to set this flag to trigger the callback */
 			if (skb_shinfo(skb)->destructor_arg)
-				skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+				xenvif_skb_zerocopy_prepare(queue, skb);
 			kfree_skb(skb);
 			continue;
 		}
@@ -1641,7 +1643,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
 		 */
 		if (skb_shinfo(skb)->destructor_arg) {
-			skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+			xenvif_skb_zerocopy_prepare(queue, skb);
 			queue->stats.tx_zerocopy_sent++;
 		}
 
@@ -1681,6 +1683,7 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
 		queue->stats.tx_zerocopy_success++;
 	else
 		queue->stats.tx_zerocopy_fail++;
+	xenvif_skb_zerocopy_complete(queue);
 }
 
 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
@@ -2058,15 +2061,24 @@ int xenvif_kthread_guest_rx(void *data)
 	return 0;
 }
 
+static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
+{
+	/* Dealloc thread must remain running until all inflight
+	 * packets complete.
+	 */
+	return kthread_should_stop() &&
+		!atomic_read(&queue->inflight_packets);
+}
+
 int xenvif_dealloc_kthread(void *data)
 {
 	struct xenvif_queue *queue = data;
 
-	while (!kthread_should_stop()) {
+	for (;;) {
 		wait_event_interruptible(queue->dealloc_wq,
 					 tx_dealloc_work_todo(queue) ||
-					 kthread_should_stop());
-		if (kthread_should_stop())
+					 xenvif_dealloc_kthread_should_stop(queue));
+		if (xenvif_dealloc_kthread_should_stop(queue))
 			break;
 
 		xenvif_tx_dealloc_action(queue);
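Note: the xen-netback series pairs every SKBTX_DEV_ZEROCOPY flag with an atomic inflight count so the dealloc kthread cannot exit while a zerocopy callback is still outstanding; the loop becomes for (;;) precisely because kthread_should_stop() alone is no longer a sufficient exit condition. The pairing, reduced to its essentials:

    static void zerocopy_prepare(struct xenvif_queue *queue,
                                 struct sk_buff *skb)
    {
            skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
            atomic_inc(&queue->inflight_packets);   /* callback now pending */
    }

    static void zerocopy_complete(struct xenvif_queue *queue)
    {
            atomic_dec(&queue->inflight_packets);   /* callback has run */
    }

The thread may stop only when both conditions hold: it was asked to stop and the counter has drained to zero.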
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 580517d857bf..9c47b897b6d2 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -116,6 +116,7 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v)
 }
 
 #define XENVIF_KICK_STR "kick"
+#define BUFFER_SIZE 32
 
 static ssize_t
 xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
@@ -124,22 +125,24 @@ xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
 	struct xenvif_queue *queue =
 		((struct seq_file *)filp->private_data)->private;
 	int len;
-	char write[sizeof(XENVIF_KICK_STR)];
+	char write[BUFFER_SIZE];
 
 	/* don't allow partial writes and check the length */
 	if (*ppos != 0)
 		return 0;
-	if (count < sizeof(XENVIF_KICK_STR) - 1)
+	if (count >= sizeof(write))
 		return -ENOSPC;
 
 	len = simple_write_to_buffer(write,
-				     sizeof(write),
+				     sizeof(write) - 1,
 				     ppos,
 				     buf,
 				     count);
 	if (len < 0)
 		return len;
 
+	write[len] = '\0';
+
 	if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
 		xenvif_interrupt(0, (void *)queue);
 	else {
@@ -171,10 +174,9 @@ static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
 	.write = xenvif_write_io_ring,
 };
 
-static void xenvif_debugfs_addif(struct xenvif_queue *queue)
+static void xenvif_debugfs_addif(struct xenvif *vif)
 {
 	struct dentry *pfile;
-	struct xenvif *vif = queue->vif;
 	int i;
 
 	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
@@ -733,10 +735,11 @@ static void connect(struct backend_info *be)
 			be->vif->num_queues = queue_index;
 			goto err;
 		}
+	}
+
 #ifdef CONFIG_DEBUG_FS
-		xenvif_debugfs_addif(queue);
+	xenvif_debugfs_addif(be->vif);
 #endif /* CONFIG_DEBUG_FS */
-	}
 
 	/* Initialisation completed, tell core driver the number of
 	 * active queues.
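Note: the debugfs write handler now follows the usual pattern for user-supplied strings: reserve one byte for the terminator, reject input that cannot fit, and terminate explicitly before any str*() call touches the buffer. In outline:

    static ssize_t dbg_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *ppos)
    {
            char kbuf[32];
            ssize_t len;

            if (count >= sizeof(kbuf))      /* leave room for '\0' */
                    return -ENOSPC;
            len = simple_write_to_buffer(kbuf, sizeof(kbuf) - 1, ppos,
                                         buf, count);
            if (len < 0)
                    return len;
            kbuf[len] = '\0';               /* now safe for strncmp() etc. */
            return len;
    }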
diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig
index 6bbc36fbd6ec..e4603985dce3 100644
--- a/drivers/scsi/cxgbi/cxgb3i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig
@@ -1,6 +1,6 @@
 config SCSI_CXGB3_ISCSI
 	tristate "Chelsio T3 iSCSI support"
-	depends on PCI && INET
+	depends on PCI && INET && (IPV6 || IPV6=n)
 	select NETDEVICES
 	select ETHERNET
 	select NET_VENDOR_CHELSIO
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index 16b2c7d26617..8c4e423037b6 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -1,6 +1,6 @@
 config SCSI_CXGB4_ISCSI
 	tristate "Chelsio T4 iSCSI support"
-	depends on PCI && INET
+	depends on PCI && INET && (IPV6 || IPV6=n)
 	select NETDEVICES
 	select ETHERNET
 	select NET_VENDOR_CHELSIO
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 9cda293c867d..36826c0166c5 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/rculist.h> | 21 | #include <linux/rculist.h> |
22 | 22 | ||
23 | struct rhash_head { | 23 | struct rhash_head { |
24 | struct rhash_head *next; | 24 | struct rhash_head __rcu *next; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | #define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL) | 27 | #define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL) |
@@ -97,7 +97,7 @@ u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); | |||
97 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t); | 97 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t); |
98 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); | 98 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); |
99 | void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, | 99 | void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, |
100 | struct rhash_head **pprev, gfp_t flags); | 100 | struct rhash_head __rcu **pprev, gfp_t flags); |
101 | 101 | ||
102 | bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); | 102 | bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); |
103 | bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); | 103 | bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); |
@@ -117,18 +117,12 @@ void rhashtable_destroy(const struct rhashtable *ht); | |||
117 | #define rht_dereference_rcu(p, ht) \ | 117 | #define rht_dereference_rcu(p, ht) \ |
118 | rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht)) | 118 | rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht)) |
119 | 119 | ||
120 | /* Internal, use rht_obj() instead */ | ||
121 | #define rht_entry(ptr, type, member) container_of(ptr, type, member) | 120 | #define rht_entry(ptr, type, member) container_of(ptr, type, member) |
122 | #define rht_entry_safe(ptr, type, member) \ | 121 | #define rht_entry_safe(ptr, type, member) \ |
123 | ({ \ | 122 | ({ \ |
124 | typeof(ptr) __ptr = (ptr); \ | 123 | typeof(ptr) __ptr = (ptr); \ |
125 | __ptr ? rht_entry(__ptr, type, member) : NULL; \ | 124 | __ptr ? rht_entry(__ptr, type, member) : NULL; \ |
126 | }) | 125 | }) |
127 | #define rht_entry_safe_rcu(ptr, type, member) \ | ||
128 | ({ \ | ||
129 | typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \ | ||
130 | __ptr ? container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member) : NULL; \ | ||
131 | }) | ||
132 | 126 | ||
133 | #define rht_next_entry_safe(pos, ht, member) \ | 127 | #define rht_next_entry_safe(pos, ht, member) \ |
134 | ({ \ | 128 | ({ \ |
@@ -205,9 +199,10 @@ void rhashtable_destroy(const struct rhashtable *ht); | |||
205 | * traversal is guarded by rcu_read_lock(). | 199 | * traversal is guarded by rcu_read_lock(). |
206 | */ | 200 | */ |
207 | #define rht_for_each_entry_rcu(pos, head, member) \ | 201 | #define rht_for_each_entry_rcu(pos, head, member) \ |
208 | for (pos = rht_entry_safe_rcu(head, typeof(*(pos)), member); \ | 202 | for (pos = rht_entry_safe(rcu_dereference_raw(head), \ |
203 | typeof(*(pos)), member); \ | ||
209 | pos; \ | 204 | pos; \ |
210 | pos = rht_entry_safe_rcu((pos)->member.next, \ | 205 | pos = rht_entry_safe(rcu_dereference_raw((pos)->member.next), \ |
211 | typeof(*(pos)), member)) | 206 | typeof(*(pos)), member)) |
212 | 207 | ||
213 | #endif /* _LINUX_RHASHTABLE_H */ | 208 | #endif /* _LINUX_RHASHTABLE_H */ |
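Annotating `next` and `pprev` as `__rcu` lets sparse verify that these pointers are only loaded through an rcu_dereference() variant. That in turn makes the force-cast inside the removed rht_entry_safe_rcu() unnecessary: the RCU dereference now happens explicitly at the access site, and the plain rht_entry_safe() just does the container_of(). A minimal sketch of the resulting access pattern, assuming the caller holds rcu_read_lock() and `node` is the rhash_head member of a hypothetical struct my_obj:

    struct rhash_head *he;
    struct my_obj *obj;

    he = rcu_dereference_raw(head);         /* __rcu stripped here, sparse-clean */
    obj = he ? container_of(he, struct my_obj, node) : NULL;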
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 7a4313887568..5fbe6568c3cf 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h | |||
@@ -62,6 +62,7 @@ struct inet_connection_sock_af_ops { | |||
62 | void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); | 62 | void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); |
63 | int (*bind_conflict)(const struct sock *sk, | 63 | int (*bind_conflict)(const struct sock *sk, |
64 | const struct inet_bind_bucket *tb, bool relax); | 64 | const struct inet_bind_bucket *tb, bool relax); |
65 | void (*mtu_reduced)(struct sock *sk); | ||
65 | }; | 66 | }; |
66 | 67 | ||
67 | /** inet_connection_sock - INET connection oriented sock | 68 | /** inet_connection_sock - INET connection oriented sock |
diff --git a/include/net/sock.h b/include/net/sock.h index 38805fa02e48..7f2ab72f321a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -987,7 +987,6 @@ struct proto { | |||
987 | struct sk_buff *skb); | 987 | struct sk_buff *skb); |
988 | 988 | ||
989 | void (*release_cb)(struct sock *sk); | 989 | void (*release_cb)(struct sock *sk); |
990 | void (*mtu_reduced)(struct sock *sk); | ||
991 | 990 | ||
992 | /* Keeping track of sk's, looking them up, and port selection methods. */ | 991 | /* Keeping track of sk's, looking them up, and port selection methods. */ |
993 | void (*hash)(struct sock *sk); | 992 | void (*hash)(struct sock *sk); |
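Taken together with the tcp_ipv4.c, tcp_ipv6.c, and tcp_output.c hunks below, this moves the mtu_reduced callback from struct proto (one per protocol) to struct inet_connection_sock_af_ops (one per address family actually in use). The case this fixes is the v4-mapped IPv6 socket, whose icsk_af_ops is switched to ipv6_mapped: it now gets tcp_v4_mtu_reduced instead of the IPv6 handler. The deferred-dispatch site becomes, as the tcp_release_cb() hunk shows:

    if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
            /* resolves per address family; tcp_v4_mtu_reduced for
             * v4-mapped sockets via ipv6_mapped */
            inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
            __sock_put(sk);
    }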
diff --git a/include/net/tcp.h b/include/net/tcp.h index dafa1cbc149b..590e01a476ac 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -417,7 +417,7 @@ void tcp_update_metrics(struct sock *sk); | |||
417 | void tcp_init_metrics(struct sock *sk); | 417 | void tcp_init_metrics(struct sock *sk); |
418 | void tcp_metrics_init(void); | 418 | void tcp_metrics_init(void); |
419 | bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, | 419 | bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, |
420 | bool paws_check); | 420 | bool paws_check, bool timestamps); |
421 | bool tcp_remember_stamp(struct sock *sk); | 421 | bool tcp_remember_stamp(struct sock *sk); |
422 | bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw); | 422 | bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw); |
423 | void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst); | 423 | void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst); |
@@ -448,6 +448,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); | |||
448 | */ | 448 | */ |
449 | 449 | ||
450 | void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); | 450 | void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); |
451 | void tcp_v4_mtu_reduced(struct sock *sk); | ||
451 | int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); | 452 | int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); |
452 | struct sock *tcp_create_openreq_child(struct sock *sk, | 453 | struct sock *tcp_create_openreq_child(struct sock *sk, |
453 | struct request_sock *req, | 454 | struct request_sock *req, |
@@ -705,8 +706,10 @@ struct tcp_skb_cb { | |||
705 | #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ | 706 | #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ |
706 | #define TCPCB_LOST 0x04 /* SKB is lost */ | 707 | #define TCPCB_LOST 0x04 /* SKB is lost */ |
707 | #define TCPCB_TAGBITS 0x07 /* All tag bits */ | 708 | #define TCPCB_TAGBITS 0x07 /* All tag bits */ |
709 | #define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp) */ | ||
708 | #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ | 710 | #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ |
709 | #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS) | 711 | #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \ |
712 | TCPCB_REPAIRED) | ||
710 | 713 | ||
711 | __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ | 714 | __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ |
712 | /* 1 byte hole */ | 715 | /* 1 byte hole */ |
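TCPCB_REPAIRED marks skbs queued while the socket is in repair mode; folding it into the TCPCB_RETRANS mask keeps such segments out of RTT sampling, which matters because their skb_mstamp is deliberately never set (see the net/ipv4/tcp.c hunk below). Roughly, the ACK-processing path already skips the whole mask when taking samples:

    /* Sketch of the consumer in tcp_clean_rtx_queue(); not the full code. */
    if (sacked & TCPCB_RETRANS) {
            flag |= FLAG_RETRANS_DATA_ACKED;        /* no RTT sample taken */
    } else {
            last_ackt = skb->skb_mstamp;            /* would be zero for repaired skbs */
    }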
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index e6940cf16628..a2c78810ebc1 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -38,16 +38,10 @@ int lockdep_rht_mutex_is_held(const struct rhashtable *ht) | |||
38 | EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); | 38 | EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); |
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | /** | 41 | static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) |
42 | * rht_obj - cast hash head to outer object | ||
43 | * @ht: hash table | ||
44 | * @he: hashed node | ||
45 | */ | ||
46 | void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) | ||
47 | { | 42 | { |
48 | return (void *) he - ht->p.head_offset; | 43 | return (void *) he - ht->p.head_offset; |
49 | } | 44 | } |
50 | EXPORT_SYMBOL_GPL(rht_obj); | ||
51 | 45 | ||
52 | static u32 __hashfn(const struct rhashtable *ht, const void *key, | 46 | static u32 __hashfn(const struct rhashtable *ht, const void *key, |
53 | u32 len, u32 hsize) | 47 | u32 len, u32 hsize) |
@@ -386,7 +380,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert); | |||
386 | * deletion when combined with walking or lookup. | 380 | * deletion when combined with walking or lookup. |
387 | */ | 381 | */ |
388 | void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, | 382 | void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, |
389 | struct rhash_head **pprev, gfp_t flags) | 383 | struct rhash_head __rcu **pprev, gfp_t flags) |
390 | { | 384 | { |
391 | struct bucket_table *tbl = rht_dereference(ht->tbl, ht); | 385 | struct bucket_table *tbl = rht_dereference(ht->tbl, ht); |
392 | 386 | ||
diff --git a/net/atm/lec.c b/net/atm/lec.c index 4c5b8ba0f84f..e4853b50cf40 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -833,7 +833,6 @@ static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl, | |||
833 | loff_t *l) | 833 | loff_t *l) |
834 | { | 834 | { |
835 | struct hlist_node *e = state->node; | 835 | struct hlist_node *e = state->node; |
836 | struct lec_arp_table *tmp; | ||
837 | 836 | ||
838 | if (!e) | 837 | if (!e) |
839 | e = tbl->first; | 838 | e = tbl->first; |
@@ -842,9 +841,7 @@ static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl, | |||
842 | --*l; | 841 | --*l; |
843 | } | 842 | } |
844 | 843 | ||
845 | tmp = container_of(e, struct lec_arp_table, next); | 844 | for (; e; e = e->next) { |
846 | |||
847 | hlist_for_each_entry_from(tmp, next) { | ||
848 | if (--*l < 0) | 845 | if (--*l < 0) |
849 | break; | 846 | break; |
850 | } | 847 | } |
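The removed code built a struct lec_arp_table cursor with container_of() and walked it via hlist_for_each_entry_from(), including a container_of() that was unsafe when `e` was NULL. Since the loop body only counts nodes and never touches the payload, a plain hlist_node walk suffices and the `tmp` variable goes away:

    /* What the walk reduces to: count remaining nodes, no payload access. */
    for (; e; e = e->next) {
            if (--*l < 0)
                    break;
    }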
diff --git a/net/atm/svc.c b/net/atm/svc.c index d8e5d0c2ebbc..1ba23f5018e7 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c | |||
@@ -50,12 +50,12 @@ static void svc_disconnect(struct atm_vcc *vcc) | |||
50 | 50 | ||
51 | pr_debug("%p\n", vcc); | 51 | pr_debug("%p\n", vcc); |
52 | if (test_bit(ATM_VF_REGIS, &vcc->flags)) { | 52 | if (test_bit(ATM_VF_REGIS, &vcc->flags)) { |
53 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); | ||
54 | sigd_enq(vcc, as_close, NULL, NULL, NULL); | 53 | sigd_enq(vcc, as_close, NULL, NULL, NULL); |
55 | while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { | 54 | for (;;) { |
55 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); | ||
56 | if (test_bit(ATM_VF_RELEASED, &vcc->flags) || !sigd) | ||
57 | break; | ||
56 | schedule(); | 58 | schedule(); |
57 | prepare_to_wait(sk_sleep(sk), &wait, | ||
58 | TASK_UNINTERRUPTIBLE); | ||
59 | } | 59 | } |
60 | finish_wait(sk_sleep(sk), &wait); | 60 | finish_wait(sk_sleep(sk), &wait); |
61 | } | 61 | } |
@@ -126,11 +126,12 @@ static int svc_bind(struct socket *sock, struct sockaddr *sockaddr, | |||
126 | } | 126 | } |
127 | vcc->local = *addr; | 127 | vcc->local = *addr; |
128 | set_bit(ATM_VF_WAITING, &vcc->flags); | 128 | set_bit(ATM_VF_WAITING, &vcc->flags); |
129 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); | ||
130 | sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local); | 129 | sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local); |
131 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { | 130 | for (;;) { |
132 | schedule(); | ||
133 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); | 131 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); |
132 | if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd) | ||
133 | break; | ||
134 | schedule(); | ||
134 | } | 135 | } |
135 | finish_wait(sk_sleep(sk), &wait); | 136 | finish_wait(sk_sleep(sk), &wait); |
136 | clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */ | 137 | clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */ |
@@ -202,15 +203,14 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr, | |||
202 | } | 203 | } |
203 | vcc->remote = *addr; | 204 | vcc->remote = *addr; |
204 | set_bit(ATM_VF_WAITING, &vcc->flags); | 205 | set_bit(ATM_VF_WAITING, &vcc->flags); |
205 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | ||
206 | sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote); | 206 | sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote); |
207 | if (flags & O_NONBLOCK) { | 207 | if (flags & O_NONBLOCK) { |
208 | finish_wait(sk_sleep(sk), &wait); | ||
209 | sock->state = SS_CONNECTING; | 208 | sock->state = SS_CONNECTING; |
210 | error = -EINPROGRESS; | 209 | error = -EINPROGRESS; |
211 | goto out; | 210 | goto out; |
212 | } | 211 | } |
213 | error = 0; | 212 | error = 0; |
213 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | ||
214 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { | 214 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { |
215 | schedule(); | 215 | schedule(); |
216 | if (!signal_pending(current)) { | 216 | if (!signal_pending(current)) { |
@@ -297,11 +297,12 @@ static int svc_listen(struct socket *sock, int backlog) | |||
297 | goto out; | 297 | goto out; |
298 | } | 298 | } |
299 | set_bit(ATM_VF_WAITING, &vcc->flags); | 299 | set_bit(ATM_VF_WAITING, &vcc->flags); |
300 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); | ||
301 | sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local); | 300 | sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local); |
302 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { | 301 | for (;;) { |
303 | schedule(); | ||
304 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); | 302 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); |
303 | if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd) | ||
304 | break; | ||
305 | schedule(); | ||
305 | } | 306 | } |
306 | finish_wait(sk_sleep(sk), &wait); | 307 | finish_wait(sk_sleep(sk), &wait); |
307 | if (!sigd) { | 308 | if (!sigd) { |
@@ -387,15 +388,15 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags) | |||
387 | } | 388 | } |
388 | /* wait should be short, so we ignore the non-blocking flag */ | 389 | /* wait should be short, so we ignore the non-blocking flag */ |
389 | set_bit(ATM_VF_WAITING, &new_vcc->flags); | 390 | set_bit(ATM_VF_WAITING, &new_vcc->flags); |
390 | prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait, | ||
391 | TASK_UNINTERRUPTIBLE); | ||
392 | sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL); | 391 | sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL); |
393 | while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) { | 392 | for (;;) { |
393 | prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait, | ||
394 | TASK_UNINTERRUPTIBLE); | ||
395 | if (!test_bit(ATM_VF_WAITING, &new_vcc->flags) || !sigd) | ||
396 | break; | ||
394 | release_sock(sk); | 397 | release_sock(sk); |
395 | schedule(); | 398 | schedule(); |
396 | lock_sock(sk); | 399 | lock_sock(sk); |
397 | prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait, | ||
398 | TASK_UNINTERRUPTIBLE); | ||
399 | } | 400 | } |
400 | finish_wait(sk_sleep(sk_atm(new_vcc)), &wait); | 401 | finish_wait(sk_sleep(sk_atm(new_vcc)), &wait); |
401 | if (!sigd) { | 402 | if (!sigd) { |
@@ -433,12 +434,14 @@ int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos) | |||
433 | DEFINE_WAIT(wait); | 434 | DEFINE_WAIT(wait); |
434 | 435 | ||
435 | set_bit(ATM_VF_WAITING, &vcc->flags); | 436 | set_bit(ATM_VF_WAITING, &vcc->flags); |
436 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); | ||
437 | sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0); | 437 | sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0); |
438 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && | 438 | for (;;) { |
439 | !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { | ||
440 | schedule(); | ||
441 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); | 439 | prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); |
440 | if (!test_bit(ATM_VF_WAITING, &vcc->flags) || | ||
441 | test_bit(ATM_VF_RELEASED, &vcc->flags) || !sigd) { | ||
442 | break; | ||
443 | } | ||
444 | schedule(); | ||
442 | } | 445 | } |
443 | finish_wait(sk_sleep(sk), &wait); | 446 | finish_wait(sk_sleep(sk), &wait); |
444 | if (!sigd) | 447 | if (!sigd) |
@@ -529,18 +532,18 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, | |||
529 | 532 | ||
530 | lock_sock(sk); | 533 | lock_sock(sk); |
531 | set_bit(ATM_VF_WAITING, &vcc->flags); | 534 | set_bit(ATM_VF_WAITING, &vcc->flags); |
532 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | ||
533 | sigd_enq(vcc, as_addparty, NULL, NULL, | 535 | sigd_enq(vcc, as_addparty, NULL, NULL, |
534 | (struct sockaddr_atmsvc *) sockaddr); | 536 | (struct sockaddr_atmsvc *) sockaddr); |
535 | if (flags & O_NONBLOCK) { | 537 | if (flags & O_NONBLOCK) { |
536 | finish_wait(sk_sleep(sk), &wait); | ||
537 | error = -EINPROGRESS; | 538 | error = -EINPROGRESS; |
538 | goto out; | 539 | goto out; |
539 | } | 540 | } |
540 | pr_debug("added wait queue\n"); | 541 | pr_debug("added wait queue\n"); |
541 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { | 542 | for (;;) { |
542 | schedule(); | ||
543 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 543 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
544 | if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd) | ||
545 | break; | ||
546 | schedule(); | ||
544 | } | 547 | } |
545 | finish_wait(sk_sleep(sk), &wait); | 548 | finish_wait(sk_sleep(sk), &wait); |
546 | error = xchg(&sk->sk_err_soft, 0); | 549 | error = xchg(&sk->sk_err_soft, 0); |
@@ -558,11 +561,12 @@ static int svc_dropparty(struct socket *sock, int ep_ref) | |||
558 | 561 | ||
559 | lock_sock(sk); | 562 | lock_sock(sk); |
560 | set_bit(ATM_VF_WAITING, &vcc->flags); | 563 | set_bit(ATM_VF_WAITING, &vcc->flags); |
561 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | ||
562 | sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref); | 564 | sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref); |
563 | while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { | 565 | for (;;) { |
564 | schedule(); | ||
565 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 566 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
567 | if (!test_bit(ATM_VF_WAITING, &vcc->flags) || !sigd) | ||
568 | break; | ||
569 | schedule(); | ||
566 | } | 570 | } |
567 | finish_wait(sk_sleep(sk), &wait); | 571 | finish_wait(sk_sleep(sk), &wait); |
568 | if (!sigd) { | 572 | if (!sigd) { |
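All six svc.c hunks apply the same fix to the open-coded wait loops. The old code called prepare_to_wait(), which sets the task state to TASK_(UN)INTERRUPTIBLE, and then called sigd_enq()/sigd_enq2(); those enqueue functions can sleep (they allocate the signalling message), and calling a blocking function with the task state already set is invalid. The fix enqueues first and only then enters the wait loop, re-arming the wait entry before each condition test so no wakeup is lost between the test and schedule(). The canonical shape the hunks converge on, with `cond` standing in for the per-call wakeup condition:

    DEFINE_WAIT(wait);

    trigger_event();                /* e.g. sigd_enq(vcc, ...) -- may sleep */
    for (;;) {
            prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
            if (cond)               /* tested only after (re)arming the wait */
                    break;
            schedule();
    }
    finish_wait(sk_sleep(sk), &wait);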
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 181b70ebd964..541f26a67ba2 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1188,13 +1188,6 @@ new_segment: | |||
1188 | goto wait_for_memory; | 1188 | goto wait_for_memory; |
1189 | 1189 | ||
1190 | /* | 1190 | /* |
1191 | * All packets are restored as if they have | ||
1192 | * already been sent. | ||
1193 | */ | ||
1194 | if (tp->repair) | ||
1195 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | ||
1196 | |||
1197 | /* | ||
1198 | * Check whether we can use HW checksum. | 1191 | * Check whether we can use HW checksum. |
1199 | */ | 1192 | */ |
1200 | if (sk->sk_route_caps & NETIF_F_ALL_CSUM) | 1193 | if (sk->sk_route_caps & NETIF_F_ALL_CSUM) |
@@ -1203,6 +1196,13 @@ new_segment: | |||
1203 | skb_entail(sk, skb); | 1196 | skb_entail(sk, skb); |
1204 | copy = size_goal; | 1197 | copy = size_goal; |
1205 | max = size_goal; | 1198 | max = size_goal; |
1199 | |||
1200 | /* All packets are restored as if they have | ||
1201 | * already been sent. skb_mstamp isn't set to | ||
1202 | * avoid wrong rtt estimation. | ||
1203 | */ | ||
1204 | if (tp->repair) | ||
1205 | TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; | ||
1206 | } | 1206 | } |
1207 | 1207 | ||
1208 | /* Try to append data to the end of skb. */ | 1208 | /* Try to append data to the end of skb. */ |
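In repair mode, skbs are queued as if they had already been sent. Previously they were stamped with ->when at queueing time; now they are tagged TCPCB_REPAIRED and left without skb_mstamp, so they cannot feed bogus samples into RTT estimation, while the tcp_write_xmit() hunk further down sets ->when just before the skipped transmission so the retransmit timer still gets a base. The two halves side by side:

    /* queueing side (this hunk): tag, don't timestamp */
    if (tp->repair)
            TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;

    /* transmit side (tcp_write_xmit hunk below): timer base only */
    if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
            TCP_SKB_CB(skb)->when = tcp_time_stamp;
            goto repair;            /* skip network transmission */
    }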
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a3d47af01906..a906e0200ff2 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2687,7 +2687,6 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) | |||
2687 | */ | 2687 | */ |
2688 | static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) | 2688 | static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) |
2689 | { | 2689 | { |
2690 | struct inet_connection_sock *icsk = inet_csk(sk); | ||
2691 | struct tcp_sock *tp = tcp_sk(sk); | 2690 | struct tcp_sock *tp = tcp_sk(sk); |
2692 | bool recovered = !before(tp->snd_una, tp->high_seq); | 2691 | bool recovered = !before(tp->snd_una, tp->high_seq); |
2693 | 2692 | ||
@@ -2713,12 +2712,9 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack) | |||
2713 | 2712 | ||
2714 | if (recovered) { | 2713 | if (recovered) { |
2715 | /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */ | 2714 | /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */ |
2716 | icsk->icsk_retransmits = 0; | ||
2717 | tcp_try_undo_recovery(sk); | 2715 | tcp_try_undo_recovery(sk); |
2718 | return; | 2716 | return; |
2719 | } | 2717 | } |
2720 | if (flag & FLAG_DATA_ACKED) | ||
2721 | icsk->icsk_retransmits = 0; | ||
2722 | if (tcp_is_reno(tp)) { | 2718 | if (tcp_is_reno(tp)) { |
2723 | /* A Reno DUPACK means new data in F-RTO step 2.b above are | 2719 | /* A Reno DUPACK means new data in F-RTO step 2.b above are |
2724 | * delivered. Lower inflight to clock out (re)tranmissions. | 2720 | * delivered. Lower inflight to clock out (re)tranmissions. |
2724 | * delivered. Lower inflight to clock out (re)transmissions. | 2720 |
@@ -3050,10 +3046,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
3050 | first_ackt.v64 = 0; | 3046 | first_ackt.v64 = 0; |
3051 | 3047 | ||
3052 | while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { | 3048 | while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { |
3049 | struct skb_shared_info *shinfo = skb_shinfo(skb); | ||
3053 | struct tcp_skb_cb *scb = TCP_SKB_CB(skb); | 3050 | struct tcp_skb_cb *scb = TCP_SKB_CB(skb); |
3054 | u8 sacked = scb->sacked; | 3051 | u8 sacked = scb->sacked; |
3055 | u32 acked_pcount; | 3052 | u32 acked_pcount; |
3056 | 3053 | ||
3054 | if (unlikely(shinfo->tx_flags & SKBTX_ACK_TSTAMP) && | ||
3055 | between(shinfo->tskey, prior_snd_una, tp->snd_una - 1)) | ||
3056 | __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); | ||
3057 | |||
3057 | /* Determine how many packets and what bytes were acked, tso and else */ | 3058 | /* Determine how many packets and what bytes were acked, tso and else */ |
3058 | if (after(scb->end_seq, tp->snd_una)) { | 3059 | if (after(scb->end_seq, tp->snd_una)) { |
3059 | if (tcp_skb_pcount(skb) == 1 || | 3060 | if (tcp_skb_pcount(skb) == 1 || |
@@ -3107,11 +3108,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, | |||
3107 | tp->retrans_stamp = 0; | 3108 | tp->retrans_stamp = 0; |
3108 | } | 3109 | } |
3109 | 3110 | ||
3110 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_ACK_TSTAMP) && | ||
3111 | between(skb_shinfo(skb)->tskey, prior_snd_una, | ||
3112 | tp->snd_una + 1)) | ||
3113 | __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); | ||
3114 | |||
3115 | if (!fully_acked) | 3111 | if (!fully_acked) |
3116 | break; | 3112 | break; |
3117 | 3113 | ||
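Two corrections to the ACK timestamp hook in tcp_clean_rtx_queue(). It is hoisted to the top of the per-skb loop, where skb_shinfo(skb) is loaded once, and the window is tightened from [prior_snd_una, snd_una + 1] to [prior_snd_una, snd_una - 1]: snd_una is the first byte not yet acknowledged, so the old upper bound could fire for bytes this ACK did not actually cover. between() is inclusive on both ends:

    /* acked tskeys lie in [prior_snd_una, tp->snd_una - 1] */
    if (unlikely(shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
        between(shinfo->tskey, prior_snd_una, tp->snd_una - 1))
            __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);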
@@ -3405,8 +3401,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3405 | icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) | 3401 | icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) |
3406 | tcp_rearm_rto(sk); | 3402 | tcp_rearm_rto(sk); |
3407 | 3403 | ||
3408 | if (after(ack, prior_snd_una)) | 3404 | if (after(ack, prior_snd_una)) { |
3409 | flag |= FLAG_SND_UNA_ADVANCED; | 3405 | flag |= FLAG_SND_UNA_ADVANCED; |
3406 | icsk->icsk_retransmits = 0; | ||
3407 | } | ||
3410 | 3408 | ||
3411 | prior_fackets = tp->fackets_out; | 3409 | prior_fackets = tp->fackets_out; |
3412 | 3410 | ||
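Clearing icsk_retransmits in tcp_ack() whenever snd_una advances replaces the two per-path resets removed from tcp_process_loss() above: any ACK of new data proves forward progress, so the retransmit backoff counter can be reset in one place regardless of congestion state. The hunk, with the reasoning as a comment:

    if (after(ack, prior_snd_una)) {
            flag |= FLAG_SND_UNA_ADVANCED;
            /* new data acked => peer is alive and receiving; clear the
             * exponential-backoff counter for every recovery state */
            icsk->icsk_retransmits = 0;
    }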
@@ -5979,12 +5977,14 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, | |||
5979 | * timewait bucket, so that all the necessary checks | 5977 | * timewait bucket, so that all the necessary checks |
5980 | * are made in the function processing timewait state. | 5978 | * are made in the function processing timewait state. |
5981 | */ | 5979 | */ |
5982 | if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) { | 5980 | if (tcp_death_row.sysctl_tw_recycle) { |
5983 | bool strict; | 5981 | bool strict; |
5984 | 5982 | ||
5985 | dst = af_ops->route_req(sk, &fl, req, &strict); | 5983 | dst = af_ops->route_req(sk, &fl, req, &strict); |
5984 | |||
5986 | if (dst && strict && | 5985 | if (dst && strict && |
5987 | !tcp_peer_is_proven(req, dst, true)) { | 5986 | !tcp_peer_is_proven(req, dst, true, |
5987 | tmp_opt.saw_tstamp)) { | ||
5988 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); | 5988 | NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); |
5989 | goto drop_and_release; | 5989 | goto drop_and_release; |
5990 | } | 5990 | } |
@@ -5993,7 +5993,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, | |||
5993 | else if (!sysctl_tcp_syncookies && | 5993 | else if (!sysctl_tcp_syncookies && |
5994 | (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < | 5994 | (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < |
5995 | (sysctl_max_syn_backlog >> 2)) && | 5995 | (sysctl_max_syn_backlog >> 2)) && |
5996 | !tcp_peer_is_proven(req, dst, false)) { | 5996 | !tcp_peer_is_proven(req, dst, false, |
5997 | tmp_opt.saw_tstamp)) { | ||
5997 | /* Without syncookies last quarter of | 5998 | /* Without syncookies last quarter of |
5998 | * backlog is filled with destinations, | 5999 | * backlog is filled with destinations, |
5999 | * proven to be alive. | 6000 | * proven to be alive. |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index dceff5fe8e66..cd17f009aede 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -271,7 +271,7 @@ EXPORT_SYMBOL(tcp_v4_connect); | |||
271 | * It can be called through tcp_release_cb() if socket was owned by user | 271 | * It can be called through tcp_release_cb() if socket was owned by user |
272 | * at the time tcp_v4_err() was called to handle ICMP message. | 272 | * at the time tcp_v4_err() was called to handle ICMP message. |
273 | */ | 273 | */ |
274 | static void tcp_v4_mtu_reduced(struct sock *sk) | 274 | void tcp_v4_mtu_reduced(struct sock *sk) |
275 | { | 275 | { |
276 | struct dst_entry *dst; | 276 | struct dst_entry *dst; |
277 | struct inet_sock *inet = inet_sk(sk); | 277 | struct inet_sock *inet = inet_sk(sk); |
@@ -302,6 +302,7 @@ static void tcp_v4_mtu_reduced(struct sock *sk) | |||
302 | tcp_simple_retransmit(sk); | 302 | tcp_simple_retransmit(sk); |
303 | } /* else let the usual retransmit timer handle it */ | 303 | } /* else let the usual retransmit timer handle it */ |
304 | } | 304 | } |
305 | EXPORT_SYMBOL(tcp_v4_mtu_reduced); | ||
305 | 306 | ||
306 | static void do_redirect(struct sk_buff *skb, struct sock *sk) | 307 | static void do_redirect(struct sk_buff *skb, struct sock *sk) |
307 | { | 308 | { |
@@ -1787,6 +1788,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = { | |||
1787 | .compat_setsockopt = compat_ip_setsockopt, | 1788 | .compat_setsockopt = compat_ip_setsockopt, |
1788 | .compat_getsockopt = compat_ip_getsockopt, | 1789 | .compat_getsockopt = compat_ip_getsockopt, |
1789 | #endif | 1790 | #endif |
1791 | .mtu_reduced = tcp_v4_mtu_reduced, | ||
1790 | }; | 1792 | }; |
1791 | EXPORT_SYMBOL(ipv4_specific); | 1793 | EXPORT_SYMBOL(ipv4_specific); |
1792 | 1794 | ||
@@ -2406,7 +2408,6 @@ struct proto tcp_prot = { | |||
2406 | .sendpage = tcp_sendpage, | 2408 | .sendpage = tcp_sendpage, |
2407 | .backlog_rcv = tcp_v4_do_rcv, | 2409 | .backlog_rcv = tcp_v4_do_rcv, |
2408 | .release_cb = tcp_release_cb, | 2410 | .release_cb = tcp_release_cb, |
2409 | .mtu_reduced = tcp_v4_mtu_reduced, | ||
2410 | .hash = inet_hash, | 2411 | .hash = inet_hash, |
2411 | .unhash = inet_unhash, | 2412 | .unhash = inet_unhash, |
2412 | .get_port = inet_csk_get_port, | 2413 | .get_port = inet_csk_get_port, |
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 0d54e59b9ea8..ed9c9a91851c 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c | |||
@@ -576,7 +576,8 @@ reset: | |||
576 | tp->snd_cwnd_stamp = tcp_time_stamp; | 576 | tp->snd_cwnd_stamp = tcp_time_stamp; |
577 | } | 577 | } |
578 | 578 | ||
579 | bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check) | 579 | bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, |
580 | bool paws_check, bool timestamps) | ||
580 | { | 581 | { |
581 | struct tcp_metrics_block *tm; | 582 | struct tcp_metrics_block *tm; |
582 | bool ret; | 583 | bool ret; |
@@ -589,7 +590,8 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool pa | |||
589 | if (paws_check) { | 590 | if (paws_check) { |
590 | if (tm && | 591 | if (tm && |
591 | (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL && | 592 | (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL && |
592 | (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW) | 593 | ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW || |
594 | !timestamps)) | ||
593 | ret = false; | 595 | ret = false; |
594 | else | 596 | else |
595 | ret = true; | 597 | ret = true; |
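With tcp_tw_recycle enabled, the check against cached per-peer timestamps used to be skipped entirely when the incoming SYN carried no timestamp option (the tcp_input.c hunk above drops the tmp_opt.saw_tstamp guard). Now the route is always looked up and tcp_peer_is_proven() is told whether timestamps were present: if the peer has a recent cached stamp but the SYN carries no timestamp to validate against it (!timestamps), the connection is rejected instead of slipping past the PAWS defence. The resulting decision, restated from the hunk above:

    /* paws_check branch after this change */
    if (tm && (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
        ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW ||
         !timestamps))
            ret = false;            /* recent cached stamp, stale or unverifiable */
    else
            ret = true;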
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8fcfc91964ec..5a7c41fbc6d3 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -800,7 +800,7 @@ void tcp_release_cb(struct sock *sk) | |||
800 | __sock_put(sk); | 800 | __sock_put(sk); |
801 | } | 801 | } |
802 | if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { | 802 | if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { |
803 | sk->sk_prot->mtu_reduced(sk); | 803 | inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); |
804 | __sock_put(sk); | 804 | __sock_put(sk); |
805 | } | 805 | } |
806 | } | 806 | } |
@@ -1069,6 +1069,21 @@ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int de | |||
1069 | tcp_verify_left_out(tp); | 1069 | tcp_verify_left_out(tp); |
1070 | } | 1070 | } |
1071 | 1071 | ||
1072 | static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2) | ||
1073 | { | ||
1074 | struct skb_shared_info *shinfo = skb_shinfo(skb); | ||
1075 | |||
1076 | if (unlikely(shinfo->tx_flags & SKBTX_ANY_TSTAMP) && | ||
1077 | !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { | ||
1078 | struct skb_shared_info *shinfo2 = skb_shinfo(skb2); | ||
1079 | u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; | ||
1080 | |||
1081 | shinfo->tx_flags &= ~tsflags; | ||
1082 | shinfo2->tx_flags |= tsflags; | ||
1083 | swap(shinfo->tskey, shinfo2->tskey); | ||
1084 | } | ||
1085 | } | ||
1086 | |||
1072 | /* Function to create two new TCP segments. Shrinks the given segment | 1087 | /* Function to create two new TCP segments. Shrinks the given segment |
1073 | * to the specified size and appends a new segment with the rest of the | 1088 | * to the specified size and appends a new segment with the rest of the |
1074 | * packet to the list. This won't be called frequently, I hope. | 1089 | * packet to the list. This won't be called frequently, I hope. |
@@ -1136,6 +1151,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, | |||
1136 | */ | 1151 | */ |
1137 | TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; | 1152 | TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; |
1138 | buff->tstamp = skb->tstamp; | 1153 | buff->tstamp = skb->tstamp; |
1154 | tcp_fragment_tstamp(skb, buff); | ||
1139 | 1155 | ||
1140 | old_factor = tcp_skb_pcount(skb); | 1156 | old_factor = tcp_skb_pcount(skb); |
1141 | 1157 | ||
@@ -1652,6 +1668,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, | |||
1652 | 1668 | ||
1653 | buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; | 1669 | buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; |
1654 | skb_split(skb, buff, len); | 1670 | skb_split(skb, buff, len); |
1671 | tcp_fragment_tstamp(skb, buff); | ||
1655 | 1672 | ||
1656 | /* Fix up tso_factor for both original and new SKB. */ | 1673 | /* Fix up tso_factor for both original and new SKB. */ |
1657 | tcp_set_skb_tso_segs(sk, skb, mss_now); | 1674 | tcp_set_skb_tso_segs(sk, skb, mss_now); |
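When TCP splits an skb (tcp_fragment() on the retransmit path, tso_fragment() on the transmit path), a pending tx-timestamp request must follow the byte it refers to. The new tcp_fragment_tstamp() helper moves the SKBTX_ANY_TSTAMP flags and swaps tskey onto the second fragment whenever tskey is not before that fragment's starting sequence. Usage sketch:

    skb_split(skb, buff, len);        /* buff takes bytes [len, end) of skb */
    tcp_fragment_tstamp(skb, buff);   /* re-homes tskey/tx_flags if the
                                       * timestamped byte now lives in buff */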
@@ -1917,8 +1934,11 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
1917 | tso_segs = tcp_init_tso_segs(sk, skb, mss_now); | 1934 | tso_segs = tcp_init_tso_segs(sk, skb, mss_now); |
1918 | BUG_ON(!tso_segs); | 1935 | BUG_ON(!tso_segs); |
1919 | 1936 | ||
1920 | if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) | 1937 | if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { |
1938 | /* "when" is used as a start point for the retransmit timer */ | ||
1939 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | ||
1921 | goto repair; /* Skip network transmission */ | 1940 | goto repair; /* Skip network transmission */ |
1941 | } | ||
1922 | 1942 | ||
1923 | cwnd_quota = tcp_cwnd_test(tp, skb); | 1943 | cwnd_quota = tcp_cwnd_test(tp, skb); |
1924 | if (!cwnd_quota) { | 1944 | if (!cwnd_quota) { |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 2e9ba035fb5f..6163f851dc01 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -101,19 +101,19 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net, | |||
101 | for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) { | 101 | for_each_ip_tunnel_rcu(t, sitn->tunnels_r_l[h0 ^ h1]) { |
102 | if (local == t->parms.iph.saddr && | 102 | if (local == t->parms.iph.saddr && |
103 | remote == t->parms.iph.daddr && | 103 | remote == t->parms.iph.daddr && |
104 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && | 104 | (!dev || !t->parms.link || dev->ifindex == t->parms.link) && |
105 | (t->dev->flags & IFF_UP)) | 105 | (t->dev->flags & IFF_UP)) |
106 | return t; | 106 | return t; |
107 | } | 107 | } |
108 | for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) { | 108 | for_each_ip_tunnel_rcu(t, sitn->tunnels_r[h0]) { |
109 | if (remote == t->parms.iph.daddr && | 109 | if (remote == t->parms.iph.daddr && |
110 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && | 110 | (!dev || !t->parms.link || dev->ifindex == t->parms.link) && |
111 | (t->dev->flags & IFF_UP)) | 111 | (t->dev->flags & IFF_UP)) |
112 | return t; | 112 | return t; |
113 | } | 113 | } |
114 | for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) { | 114 | for_each_ip_tunnel_rcu(t, sitn->tunnels_l[h1]) { |
115 | if (local == t->parms.iph.saddr && | 115 | if (local == t->parms.iph.saddr && |
116 | (!dev || !t->parms.link || dev->iflink == t->parms.link) && | 116 | (!dev || !t->parms.link || dev->ifindex == t->parms.link) && |
117 | (t->dev->flags & IFF_UP)) | 117 | (t->dev->flags & IFF_UP)) |
118 | return t; | 118 | return t; |
119 | } | 119 | } |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index f2ce95502392..29964c3d363c 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1595,6 +1595,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = { | |||
1595 | .compat_setsockopt = compat_ipv6_setsockopt, | 1595 | .compat_setsockopt = compat_ipv6_setsockopt, |
1596 | .compat_getsockopt = compat_ipv6_getsockopt, | 1596 | .compat_getsockopt = compat_ipv6_getsockopt, |
1597 | #endif | 1597 | #endif |
1598 | .mtu_reduced = tcp_v6_mtu_reduced, | ||
1598 | }; | 1599 | }; |
1599 | 1600 | ||
1600 | #ifdef CONFIG_TCP_MD5SIG | 1601 | #ifdef CONFIG_TCP_MD5SIG |
@@ -1625,6 +1626,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = { | |||
1625 | .compat_setsockopt = compat_ipv6_setsockopt, | 1626 | .compat_setsockopt = compat_ipv6_setsockopt, |
1626 | .compat_getsockopt = compat_ipv6_getsockopt, | 1627 | .compat_getsockopt = compat_ipv6_getsockopt, |
1627 | #endif | 1628 | #endif |
1629 | .mtu_reduced = tcp_v4_mtu_reduced, | ||
1628 | }; | 1630 | }; |
1629 | 1631 | ||
1630 | #ifdef CONFIG_TCP_MD5SIG | 1632 | #ifdef CONFIG_TCP_MD5SIG |
@@ -1864,7 +1866,6 @@ struct proto tcpv6_prot = { | |||
1864 | .sendpage = tcp_sendpage, | 1866 | .sendpage = tcp_sendpage, |
1865 | .backlog_rcv = tcp_v6_do_rcv, | 1867 | .backlog_rcv = tcp_v6_do_rcv, |
1866 | .release_cb = tcp_release_cb, | 1868 | .release_cb = tcp_release_cb, |
1867 | .mtu_reduced = tcp_v6_mtu_reduced, | ||
1868 | .hash = tcp_v6_hash, | 1869 | .hash = tcp_v6_hash, |
1869 | .unhash = inet_unhash, | 1870 | .unhash = inet_unhash, |
1870 | .get_port = inet_csk_get_port, | 1871 | .get_port = inet_csk_get_port, |
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 9ea0c933b9ff..a37998c6273d 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c | |||
@@ -622,7 +622,7 @@ void irlap_send_rd_frame(struct irlap_cb *self) | |||
622 | frame = (struct rd_frame *)skb_put(tx_skb, 2); | 622 | frame = (struct rd_frame *)skb_put(tx_skb, 2); |
623 | 623 | ||
624 | frame->caddr = self->caddr; | 624 | frame->caddr = self->caddr; |
625 | frame->caddr = RD_RSP | PF_BIT; | 625 | frame->control = RD_RSP | PF_BIT; |
626 | 626 | ||
627 | irlap_queue_xmit(self, tx_skb); | 627 | irlap_queue_xmit(self, tx_skb); |
628 | } | 628 | } |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2e152e5f2186..c416725d28c4 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -2921,6 +2921,7 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos) | |||
2921 | } | 2921 | } |
2922 | 2922 | ||
2923 | static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) | 2923 | static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) |
2924 | __acquires(RCU) | ||
2924 | { | 2925 | { |
2925 | rcu_read_lock(); | 2926 | rcu_read_lock(); |
2926 | return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; | 2927 | return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; |
@@ -2970,6 +2971,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2970 | } | 2971 | } |
2971 | 2972 | ||
2972 | static void netlink_seq_stop(struct seq_file *seq, void *v) | 2973 | static void netlink_seq_stop(struct seq_file *seq, void *v) |
2974 | __releases(RCU) | ||
2973 | { | 2975 | { |
2974 | rcu_read_unlock(); | 2976 | rcu_read_unlock(); |
2975 | } | 2977 | } |
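__acquires() and __releases() are sparse context annotations with no effect on generated code: they declare that netlink_seq_start() enters and netlink_seq_stop() leaves an RCU read-side critical section, so `sparse -Wcontext` no longer flags the seq_file callbacks for context imbalance. The general pattern for any acquire/release pair split across functions (foo_first() is a hypothetical cursor helper):

    static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
            __acquires(RCU)
    {
            rcu_read_lock();
            return foo_first(pos);
    }

    static void foo_seq_stop(struct seq_file *seq, void *v)
            __releases(RCU)
    {
            rcu_read_unlock();
    }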
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 702fb21bfe15..6d8f2ec481d9 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
@@ -137,8 +137,10 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, | |||
137 | vport->ops = ops; | 137 | vport->ops = ops; |
138 | INIT_HLIST_NODE(&vport->dp_hash_node); | 138 | INIT_HLIST_NODE(&vport->dp_hash_node); |
139 | 139 | ||
140 | if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) | 140 | if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) { |
141 | kfree(vport); | ||
141 | return ERR_PTR(-EINVAL); | 142 | return ERR_PTR(-EINVAL); |
143 | } | ||
142 | 144 | ||
143 | vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); | 145 | vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
144 | if (!vport->percpu_stats) { | 146 | if (!vport->percpu_stats) { |
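The early return in ovs_vport_alloc() leaked the just-allocated vport when setting the upcall portids failed; the fix frees it before returning, mirroring the cleanup already done on the percpu_stats failure path just below this hunk. The repaired shape, with the allocation sketched from context:

    vport = kzalloc(sizeof(*vport) + priv_size, GFP_KERNEL);  /* assumed shape */
    if (!vport)
            return ERR_PTR(-ENOMEM);
    /* ... */
    if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
            kfree(vport);                   /* unwind before the ERR_PTR return */
            return ERR_PTR(-EINVAL);
    }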