author:    David S. Miller <davem@davemloft.net>	2019-09-15 08:17:27 -0400
committer: David S. Miller <davem@davemloft.net>	2019-09-15 08:17:27 -0400
commit:    aa2eaa8c272a3211dec07ce9c6c863a7e355c10e (patch)
tree:      8454a23d36b2ff36133c276ee0ba80eabc00850e /drivers/net
parent:    a3d3c74da49c65fc63a937fa559186b0e16adca3 (diff)
parent:    1609d7604b847a9820e63393d1a3b6cac7286d40 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Minor overlapping changes in the btusb and ixgbe drivers.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
24 files changed, 227 insertions, 127 deletions
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 58c6231aaa00..87dece0e745d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -98,7 +98,7 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
 	  .reset_level = HNAE3_GLOBAL_RESET },
 	{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
 	  .reset_level = HNAE3_GLOBAL_RESET },
-	{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
+	{ .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
 	  .reset_level = HNAE3_GLOBAL_RESET },
 	{ .int_msk = BIT(3), .msg = "tx_buf_overflow",
 	  .reset_level = HNAE3_GLOBAL_RESET },
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f83f97ffe8b..2e5172f61564 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1984,8 +1984,11 @@ static void __ibmvnic_reset(struct work_struct *work)
 	rwi = get_next_rwi(adapter);
 	while (rwi) {
 		if (adapter->state == VNIC_REMOVING ||
-		    adapter->state == VNIC_REMOVED)
-			goto out;
+		    adapter->state == VNIC_REMOVED) {
+			kfree(rwi);
+			rc = EBUSY;
+			break;
+		}
 
 		if (adapter->force_reset_recovery) {
 			adapter->force_reset_recovery = false;
@@ -2011,7 +2014,7 @@ static void __ibmvnic_reset(struct work_struct *work)
 		netdev_dbg(adapter->netdev, "Reset failed\n");
 		free_all_rwi(adapter);
 	}
-out:
+
 	adapter->resetting = false;
 	if (we_lock_rtnl)
 		rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index dc034f4e8cf6..1ce2397306b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -36,6 +36,7 @@
 #include <net/vxlan.h>
 #include <net/mpls.h>
 #include <net/xdp_sock.h>
+#include <net/xfrm.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -2623,7 +2624,7 @@ adjust_by_size:
 		/* 16K ints/sec to 9.2K ints/sec */
 		avg_wire_size *= 15;
 		avg_wire_size += 11452;
-	} else if (avg_wire_size <= 1980) {
+	} else if (avg_wire_size < 1968) {
 		/* 9.2K ints/sec to 8K ints/sec */
 		avg_wire_size *= 5;
 		avg_wire_size += 22420;
@@ -2656,6 +2657,8 @@ adjust_by_size:
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
 	case IXGBE_LINK_SPEED_1GB_FULL:
 	case IXGBE_LINK_SPEED_10_FULL:
+		if (avg_wire_size > 8064)
+			avg_wire_size = 8064;
 		itr += DIV_ROUND_UP(avg_wire_size,
 				    IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
 		       IXGBE_ITR_ADAPTIVE_MIN_INC;
@@ -8698,7 +8701,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 #endif /* IXGBE_FCOE */
 
 #ifdef CONFIG_IXGBE_IPSEC
-	if (secpath_exists(skb) &&
+	if (xfrm_offload(skb) &&
 	    !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
 		goto out_drop;
 #endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index ad802a8909e0..a37dcd140f63 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -642,19 +642,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
 bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 			    struct ixgbe_ring *tx_ring, int napi_budget)
 {
+	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
 	unsigned int total_packets = 0, total_bytes = 0;
-	u32 i = tx_ring->next_to_clean, xsk_frames = 0;
-	unsigned int budget = q_vector->tx.work_limit;
 	struct xdp_umem *umem = tx_ring->xsk_umem;
 	union ixgbe_adv_tx_desc *tx_desc;
 	struct ixgbe_tx_buffer *tx_bi;
-	bool xmit_done;
+	u32 xsk_frames = 0;
 
-	tx_bi = &tx_ring->tx_buffer_info[i];
-	tx_desc = IXGBE_TX_DESC(tx_ring, i);
-	i -= tx_ring->count;
+	tx_bi = &tx_ring->tx_buffer_info[ntc];
+	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
 
-	do {
+	while (ntc != ntu) {
 		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
 			break;
 
@@ -670,22 +668,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 
 		tx_bi++;
 		tx_desc++;
-		i++;
-		if (unlikely(!i)) {
-			i -= tx_ring->count;
+		ntc++;
+		if (unlikely(ntc == tx_ring->count)) {
+			ntc = 0;
 			tx_bi = tx_ring->tx_buffer_info;
 			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
 		}
 
 		/* issue prefetch for next Tx descriptor */
 		prefetch(tx_desc);
+	}
 
-		/* update budget accounting */
-		budget--;
-	} while (likely(budget));
-
-	i += tx_ring->count;
-	tx_ring->next_to_clean = i;
+	tx_ring->next_to_clean = ntc;
 
 	u64_stats_update_begin(&tx_ring->syncp);
 	tx_ring->stats.bytes += total_bytes;
@@ -704,9 +698,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 		xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
 	}
 
-	xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
-
-	return budget > 0 && xmit_done;
+	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
 }
 
 int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
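The ixgbe AF_XDP cleanup above stops counting down a separate budget and instead walks the ring from next_to_clean to next_to_use, so an entry can never be cleaned twice. Below is a small standalone model of that ring walk; the toy ring and all names are illustrative, not the driver's API.

```c
/* Standalone model of the ntc/ntu ring walk: consume completed entries from
 * next_to_clean up to (but not including) next_to_use, wrapping at the ring
 * size and stopping at the first entry the "hardware" has not finished.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct ring {
	bool done[RING_SIZE];           /* models the descriptor "DD" bit */
	unsigned int next_to_clean;     /* first entry not yet cleaned    */
	unsigned int next_to_use;       /* first entry not yet submitted  */
};

static unsigned int ring_clean(struct ring *r)
{
	unsigned int ntc = r->next_to_clean, ntu = r->next_to_use;
	unsigned int cleaned = 0;

	while (ntc != ntu) {
		if (!r->done[ntc])      /* not completed yet: stop here */
			break;
		r->done[ntc] = false;
		cleaned++;
		if (++ntc == RING_SIZE) /* wrap exactly once per lap */
			ntc = 0;
	}
	r->next_to_clean = ntc;
	return cleaned;
}

int main(void)
{
	struct ring r = { .next_to_clean = 6, .next_to_use = 2 };

	r.done[6] = r.done[7] = r.done[0] = true;  /* three completed entries */
	printf("cleaned %u\n", ring_clean(&r));    /* prints "cleaned 3"      */
	return 0;
}
```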
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 75e93ce2ed99..076f2da36f27 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -30,6 +30,7 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/atomic.h>
+#include <net/xfrm.h>
 
 #include "ixgbevf.h"
 
@@ -4167,7 +4168,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
 	first->protocol = vlan_get_protocol(skb);
 
 #ifdef CONFIG_IXGBEVF_IPSEC
-	if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+	if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
 		goto out_drop;
 #endif
 	tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index ef3f3d06ff1e..fce9b3a24347 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2240,7 +2240,7 @@ static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
 	for (i = 1; i <= dev->caps.num_ports; i++) {
 		if (mlx4_dev_port(dev, i, &port_cap)) {
 			mlx4_err(dev,
-				 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n");
+				 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
 		} else if ((dev->caps.dmfs_high_steer_mode !=
 			    MLX4_STEERING_DMFS_A0_DEFAULT) &&
 			   (port_cap.dmfs_optimized_state ==
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index d0a01e8f000a..b339125b2f09 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -232,9 +232,9 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
 
 	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
 	if (!laddr) {
-		printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
-		dev_kfree_skb(skb);
-		return NETDEV_TX_BUSY;
+		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
 	}
 
 	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
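The sonic change above follows the usual ndo_start_xmit convention: a packet that cannot be mapped for DMA is dropped (freed, then NETDEV_TX_OK) rather than returned with NETDEV_TX_BUSY, since TX_BUSY asks the core to resubmit the very skb that was just freed. A hedged, kernel-context sketch of that convention is below; "sketch_priv" and the descriptor step are placeholders, not the sonic driver's names.

```c
/* Sketch of the error-path convention, kernel context assumed. */
struct sketch_priv {
	struct device *dmadev;
};

static netdev_tx_t sketch_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sketch_priv *priv = netdev_priv(dev);
	dma_addr_t laddr;

	laddr = dma_map_single(priv->dmadev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dmadev, laddr)) {
		dev_kfree_skb_any(skb);		/* safe in any context */
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;		/* drop; do not request a retry */
	}

	/* ... fill a Tx descriptor with laddr and kick the hardware ... */
	return NETDEV_TX_OK;
}
```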
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index d5bbe3d6048b..05981b54eaab 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -260,9 +260,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 
 	type = cmsg_hdr->type;
 	switch (type) {
-	case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
-		nfp_flower_cmsg_portreify_rx(app, skb);
-		break;
 	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
 		nfp_flower_cmsg_portmod_rx(app, skb);
 		break;
@@ -328,8 +325,7 @@ nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
 	struct nfp_flower_priv *priv = app->priv;
 	struct sk_buff_head *skb_head;
 
-	if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
-	    type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
+	if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
 		skb_head = &priv->cmsg_skbs_high;
 	else
 		skb_head = &priv->cmsg_skbs_low;
@@ -368,6 +364,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
 	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
 		/* Acks from the NFP that the route is added - ignore. */
 		dev_consume_skb_any(skb);
+	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
+		/* Handle REIFY acks outside wq to prevent RTNL conflict. */
+		nfp_flower_cmsg_portreify_rx(app, skb);
+		dev_consume_skb_any(skb);
 	} else {
 		nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
 	}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index ecca794c55e2..05d2b478c99b 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -713,6 +713,21 @@ struct nv_skb_map {
 	struct nv_skb_map *next_tx_ctx;
 };
 
+struct nv_txrx_stats {
+	u64 stat_rx_packets;
+	u64 stat_rx_bytes; /* not always available in HW */
+	u64 stat_rx_missed_errors;
+	u64 stat_rx_dropped;
+	u64 stat_tx_packets; /* not always available in HW */
+	u64 stat_tx_bytes;
+	u64 stat_tx_dropped;
+};
+
+#define nv_txrx_stats_inc(member) \
+		__this_cpu_inc(np->txrx_stats->member)
+#define nv_txrx_stats_add(member, count) \
+		__this_cpu_add(np->txrx_stats->member, (count))
+
 /*
  * SMP locking:
  * All hardware access under netdev_priv(dev)->lock, except the performance
@@ -797,10 +812,7 @@ struct fe_priv {
 
 	/* RX software stats */
 	struct u64_stats_sync swstats_rx_syncp;
-	u64 stat_rx_packets;
-	u64 stat_rx_bytes; /* not always available in HW */
-	u64 stat_rx_missed_errors;
-	u64 stat_rx_dropped;
+	struct nv_txrx_stats __percpu *txrx_stats;
 
 	/* media detection workaround.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -826,9 +838,6 @@
 
 	/* TX software stats */
 	struct u64_stats_sync swstats_tx_syncp;
-	u64 stat_tx_packets; /* not always available in HW */
-	u64 stat_tx_bytes;
-	u64 stat_tx_dropped;
 
 	/* msi/msi-x fields */
 	u32 msi_flags;
@@ -1721,6 +1730,39 @@ static void nv_update_stats(struct net_device *dev)
 	}
 }
 
+static void nv_get_stats(int cpu, struct fe_priv *np,
+			 struct rtnl_link_stats64 *storage)
+{
+	struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
+	unsigned int syncp_start;
+	u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
+	u64 tx_packets, tx_bytes, tx_dropped;
+
+	do {
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
+		rx_packets = src->stat_rx_packets;
+		rx_bytes = src->stat_rx_bytes;
+		rx_dropped = src->stat_rx_dropped;
+		rx_missed_errors = src->stat_rx_missed_errors;
+	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
+
+	storage->rx_packets += rx_packets;
+	storage->rx_bytes += rx_bytes;
+	storage->rx_dropped += rx_dropped;
+	storage->rx_missed_errors += rx_missed_errors;
+
+	do {
+		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
+		tx_packets = src->stat_tx_packets;
+		tx_bytes = src->stat_tx_bytes;
+		tx_dropped = src->stat_tx_dropped;
+	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+
+	storage->tx_packets += tx_packets;
+	storage->tx_bytes += tx_bytes;
+	storage->tx_dropped += tx_dropped;
+}
+
 /*
  * nv_get_stats64: dev->ndo_get_stats64 function
  * Get latest stats value from the nic.
@@ -1733,7 +1775,7 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
 	__releases(&netdev_priv(dev)->hwstats_lock)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	unsigned int syncp_start;
+	int cpu;
 
 	/*
 	 * Note: because HW stats are not always available and for
@@ -1746,20 +1788,8 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
 	 */
 
 	/* software stats */
-	do {
-		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
-		storage->rx_packets = np->stat_rx_packets;
-		storage->rx_bytes = np->stat_rx_bytes;
-		storage->rx_dropped = np->stat_rx_dropped;
-		storage->rx_missed_errors = np->stat_rx_missed_errors;
-	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
-
-	do {
-		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
-		storage->tx_packets = np->stat_tx_packets;
-		storage->tx_bytes = np->stat_tx_bytes;
-		storage->tx_dropped = np->stat_tx_dropped;
-	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
+	for_each_online_cpu(cpu)
+		nv_get_stats(cpu, np, storage);
 
 	/* If the nic supports hw counters then retrieve latest values */
 	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -1827,7 +1857,7 @@ static int nv_alloc_rx(struct net_device *dev)
 		} else {
 packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
-			np->stat_rx_dropped++;
+			nv_txrx_stats_inc(stat_rx_dropped);
 			u64_stats_update_end(&np->swstats_rx_syncp);
 			return 1;
 		}
@@ -1869,7 +1899,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 		} else {
 packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
-			np->stat_rx_dropped++;
+			nv_txrx_stats_inc(stat_rx_dropped);
 			u64_stats_update_end(&np->swstats_rx_syncp);
 			return 1;
 		}
@@ -2013,7 +2043,7 @@ static void nv_drain_tx(struct net_device *dev)
 		}
 		if (nv_release_txskb(np, &np->tx_skb[i])) {
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 		np->tx_skb[i].dma = 0;
@@ -2227,7 +2257,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			/* on DMA mapping error - drop the packet */
 			dev_kfree_skb_any(skb);
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 			return NETDEV_TX_OK;
 		}
@@ -2273,7 +2303,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_dropped++;
+				nv_txrx_stats_inc(stat_tx_dropped);
 				u64_stats_update_end(&np->swstats_tx_syncp);
 				return NETDEV_TX_OK;
 			}
@@ -2384,7 +2414,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			/* on DMA mapping error - drop the packet */
 			dev_kfree_skb_any(skb);
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_dropped++;
+			nv_txrx_stats_inc(stat_tx_dropped);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 			return NETDEV_TX_OK;
 		}
@@ -2431,7 +2461,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
 				u64_stats_update_begin(&np->swstats_tx_syncp);
-				np->stat_tx_dropped++;
+				nv_txrx_stats_inc(stat_tx_dropped);
 				u64_stats_update_end(&np->swstats_tx_syncp);
 				return NETDEV_TX_OK;
 			}
@@ -2560,9 +2590,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
 			    && !(flags & NV_TX_RETRYCOUNT_MASK))
 				nv_legacybackoff_reseed(dev);
 		} else {
+			unsigned int len;
+
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_packets++;
-			np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+			nv_txrx_stats_inc(stat_tx_packets);
+			len = np->get_tx_ctx->skb->len;
+			nv_txrx_stats_add(stat_tx_bytes, len);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 		bytes_compl += np->get_tx_ctx->skb->len;
@@ -2577,9 +2610,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
 			    && !(flags & NV_TX2_RETRYCOUNT_MASK))
 				nv_legacybackoff_reseed(dev);
 		} else {
+			unsigned int len;
+
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_packets++;
-			np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+			nv_txrx_stats_inc(stat_tx_packets);
+			len = np->get_tx_ctx->skb->len;
+			nv_txrx_stats_add(stat_tx_bytes, len);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 		bytes_compl += np->get_tx_ctx->skb->len;
@@ -2627,9 +2663,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 					nv_legacybackoff_reseed(dev);
 			}
 		} else {
+			unsigned int len;
+
 			u64_stats_update_begin(&np->swstats_tx_syncp);
-			np->stat_tx_packets++;
-			np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+			nv_txrx_stats_inc(stat_tx_packets);
+			len = np->get_tx_ctx->skb->len;
+			nv_txrx_stats_add(stat_tx_bytes, len);
 			u64_stats_update_end(&np->swstats_tx_syncp);
 		}
 
@@ -2806,6 +2845,15 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 	}
 }
 
+static void rx_missing_handler(u32 flags, struct fe_priv *np)
+{
+	if (flags & NV_RX_MISSEDFRAME) {
+		u64_stats_update_begin(&np->swstats_rx_syncp);
+		nv_txrx_stats_inc(stat_rx_missed_errors);
+		u64_stats_update_end(&np->swstats_rx_syncp);
+	}
+}
+
 static int nv_rx_process(struct net_device *dev, int limit)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2848,11 +2896,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
 			}
 			/* the rest are hard errors */
 			else {
-				if (flags & NV_RX_MISSEDFRAME) {
-					u64_stats_update_begin(&np->swstats_rx_syncp);
-					np->stat_rx_missed_errors++;
-					u64_stats_update_end(&np->swstats_rx_syncp);
-				}
+				rx_missing_handler(flags, np);
 				dev_kfree_skb(skb);
 				goto next_pkt;
 			}
@@ -2896,8 +2940,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		skb->protocol = eth_type_trans(skb, dev);
 		napi_gro_receive(&np->napi, skb);
 		u64_stats_update_begin(&np->swstats_rx_syncp);
-		np->stat_rx_packets++;
-		np->stat_rx_bytes += len;
+		nv_txrx_stats_inc(stat_rx_packets);
+		nv_txrx_stats_add(stat_rx_bytes, len);
 		u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
@@ -2982,8 +3026,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 			}
 			napi_gro_receive(&np->napi, skb);
 			u64_stats_update_begin(&np->swstats_rx_syncp);
-			np->stat_rx_packets++;
-			np->stat_rx_bytes += len;
+			nv_txrx_stats_inc(stat_rx_packets);
+			nv_txrx_stats_add(stat_rx_bytes, len);
 			u64_stats_update_end(&np->swstats_rx_syncp);
 		} else {
 			dev_kfree_skb(skb);
@@ -5651,6 +5695,12 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 	SET_NETDEV_DEV(dev, &pci_dev->dev);
 	u64_stats_init(&np->swstats_rx_syncp);
 	u64_stats_init(&np->swstats_tx_syncp);
+	np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
+	if (!np->txrx_stats) {
+		pr_err("np->txrx_stats, alloc memory error.\n");
+		err = -ENOMEM;
+		goto out_alloc_percpu;
+	}
 
 	timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
 	timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
@@ -6060,6 +6110,8 @@ out_relreg:
 out_disable:
 	pci_disable_device(pci_dev);
 out_free:
+	free_percpu(np->txrx_stats);
+out_alloc_percpu:
 	free_netdev(dev);
 out:
 	return err;
@@ -6105,6 +6157,9 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev)
 static void nv_remove(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
+	struct fe_priv *np = netdev_priv(dev);
+
+	free_percpu(np->txrx_stats);
 
 	unregister_netdev(dev);
 
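The forcedeth hunks above convert the driver's single software counters into per-CPU counters. Condensed into one place, the pattern looks roughly like the sketch below; it is assembled only from calls that already appear in the diff, kernel context is assumed, and the reader-side seqcount fetch/retry loop is omitted for brevity, so treat it as a summary rather than a buildable excerpt.

```c
/* Per-CPU counter pattern, condensed from the forcedeth change above. */
struct nv_txrx_stats_sketch {
	u64 stat_rx_packets;
	u64 stat_rx_bytes;
};

struct fe_priv_sketch {
	struct nv_txrx_stats_sketch __percpu *txrx_stats;
	struct u64_stats_sync swstats_rx_syncp;
};

static int sketch_probe(struct fe_priv_sketch *np)
{
	/* probe: one instance of the counters per CPU */
	np->txrx_stats = alloc_percpu(struct nv_txrx_stats_sketch);
	return np->txrx_stats ? 0 : -ENOMEM;
}

static void sketch_count_rx(struct fe_priv_sketch *np, unsigned int len)
{
	/* hot path: update this CPU's instance, no shared cacheline bouncing */
	u64_stats_update_begin(&np->swstats_rx_syncp);
	__this_cpu_inc(np->txrx_stats->stat_rx_packets);
	__this_cpu_add(np->txrx_stats->stat_rx_bytes, len);
	u64_stats_update_end(&np->swstats_rx_syncp);
}

static void sketch_fold_stats(struct fe_priv_sketch *np,
			      struct rtnl_link_stats64 *storage)
{
	int cpu;

	/* ndo_get_stats64 path: sum every CPU's instance */
	for_each_online_cpu(cpu) {
		struct nv_txrx_stats_sketch *src =
			per_cpu_ptr(np->txrx_stats, cpu);

		storage->rx_packets += src->stat_rx_packets;
		storage->rx_bytes += src->stat_rx_bytes;
	}
}
```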
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4083019c547a..f97a4096f8fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -873,7 +873,12 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
 	int ret;
 	u32 reg, val;
 
-	regmap_field_read(gmac->regmap_field, &val);
+	ret = regmap_field_read(gmac->regmap_field, &val);
+	if (ret) {
+		dev_err(priv->device, "Fail to read from regmap field.\n");
+		return ret;
+	}
+
 	reg = gmac->variant->default_syscon_value;
 	if (reg != val)
 		dev_warn(priv->device,
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 331c16d30d5d..23281aeeb222 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -344,10 +344,10 @@ static void sp_bump(struct sixpack *sp, char cmd)
 
 	sp->dev->stats.rx_bytes += count;
 
-	if ((skb = dev_alloc_skb(count)) == NULL)
+	if ((skb = dev_alloc_skb(count + 1)) == NULL)
 		goto out_mem;
 
-	ptr = skb_put(skb, count);
+	ptr = skb_put(skb, count + 1);
 	*ptr++ = cmd; /* KISS command */
 
 	memcpy(ptr, sp->cooked_buf + 1, count);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index a45c5de96ab1..a5a57ca94c1a 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -376,8 +376,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state)
  *  Local device    Link partner
  *  Pause AsymDir   Pause AsymDir   Result
  *    1     X         1     X       TX+RX
- *    0     1         1     1       RX
- *    1     1         0     1       TX
+ *    0     1         1     1       TX
+ *    1     1         0     1       RX
  */
 static void phylink_resolve_flow(struct phylink *pl,
 				 struct phylink_link_state *state)
@@ -398,7 +398,7 @@ static void phylink_resolve_flow(struct phylink *pl,
 			new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
 		else if (pause & MLO_PAUSE_ASYM)
 			new_pause = state->pause & MLO_PAUSE_SYM ?
-				MLO_PAUSE_RX : MLO_PAUSE_TX;
+				MLO_PAUSE_TX : MLO_PAUSE_RX;
 	} else {
 		new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
 	}
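The phylink fix above swaps the TX/RX outcomes so the resolution matches the 802.3 pause table: with only AsymDir advertised locally, a partner that advertises Pause+AsymDir yields transmit-only pause, and the mirrored case yields receive-only pause. A standalone restatement of that truth table is below; the flag names model the advertised bits and are not phylink's API.

```c
/* Standalone restatement of the (corrected) pause-resolution table. */
#include <stdio.h>

#define PAUSE_SYM  0x1	/* "Pause" bit   */
#define PAUSE_ASYM 0x2	/* "AsymDir" bit */
#define TX 0x1
#define RX 0x2

static int resolve_pause(int local, int partner)
{
	if ((local & PAUSE_SYM) && (partner & PAUSE_SYM))
		return TX | RX;			/* 1 X / 1 X -> TX+RX */
	if ((local & PAUSE_ASYM) && (partner & PAUSE_ASYM)) {
		if (partner & PAUSE_SYM)
			return TX;		/* 0 1 / 1 1 -> TX */
		if (local & PAUSE_SYM)
			return RX;		/* 1 1 / 0 1 -> RX */
	}
	return 0;				/* no pause in either direction */
}

int main(void)
{
	printf("%d\n", resolve_pause(PAUSE_ASYM, PAUSE_SYM | PAUSE_ASYM)); /* 1 = TX */
	printf("%d\n", resolve_pause(PAUSE_SYM | PAUSE_ASYM, PAUSE_ASYM)); /* 2 = RX */
	return 0;
}
```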
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index db16d7a13e00..aab0be40d443 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -787,7 +787,8 @@ static void tun_detach_all(struct net_device *dev)
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file,
-		      bool skip_filter, bool napi, bool napi_frags)
+		      bool skip_filter, bool napi, bool napi_frags,
+		      bool publish_tun)
 {
 	struct tun_file *tfile = file->private_data;
 	struct net_device *dev = tun->dev;
@@ -870,7 +871,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
 	 * initialized tfile; otherwise we risk using half-initialized
 	 * object.
 	 */
-	rcu_assign_pointer(tfile->tun, tun);
+	if (publish_tun)
+		rcu_assign_pointer(tfile->tun, tun);
 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
 	tun->numqueues++;
 	tun_set_real_num_queues(tun);
@@ -2730,7 +2732,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
 				 ifr->ifr_flags & IFF_NAPI,
-				 ifr->ifr_flags & IFF_NAPI_FRAGS);
+				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
 		if (err < 0)
 			return err;
 
@@ -2829,13 +2831,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 		INIT_LIST_HEAD(&tun->disabled);
 		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
-				 ifr->ifr_flags & IFF_NAPI_FRAGS);
+				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
 		if (err < 0)
 			goto err_free_flow;
 
 		err = register_netdevice(tun->dev);
 		if (err < 0)
 			goto err_detach;
+		/* free_netdev() won't check refcnt, to aovid race
+		 * with dev_put() we need publish tun after registration.
+		 */
+		rcu_assign_pointer(tfile->tun, tun);
 	}
 
 	netif_carrier_on(tun->dev);
@@ -2978,7 +2984,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 		if (ret < 0)
 			goto unlock;
 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
-				 tun->flags & IFF_NAPI_FRAGS);
+				 tun->flags & IFF_NAPI_FRAGS, true);
 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
 		tun = rtnl_dereference(tfile->tun);
 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8458e88c18e9..32f53de5b1fe 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -206,7 +206,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
 		goto bad_desc;
 	}
 skip:
-	if (rndis && header.usb_cdc_acm_descriptor &&
+	/* Communcation class functions with bmCapabilities are not
+	 * RNDIS. But some Wireless class RNDIS functions use
+	 * bmCapabilities for their own purpose. The failsafe is
+	 * therefore applied only to Communication class RNDIS
+	 * functions. The rndis test is redundant, but a cheap
+	 * optimization.
+	 */
+	if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
+	    header.usb_cdc_acm_descriptor &&
 	    header.usb_cdc_acm_descriptor->bmCapabilities) {
 		dev_dbg(&intf->dev,
 			"ACM capabilities %02x, not really RNDIS?\n",
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4f3de0ac8b0b..ba98e0971b84 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 		}
 	}
 
-	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
+	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
 		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
 			schedule_delayed_work(&vi->refill, 0);
 	}
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index d74349628db2..0e6a51525d91 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1115,7 +1115,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
     sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
     LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
 
-    lmc_trace(dev, "lmc_runnin_reset_out");
+    lmc_trace(dev, "lmc_running_reset_out");
 }
 
 
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index 6642bcb27761..8efb493ceec2 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -127,6 +127,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
 			"%d\n", result);
 	result = 0;
 error_cmd:
+	kfree(cmd);
 	kfree_skb(ack_skb);
 error_msg_to_dev:
 error_alloc:
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index e5ca1f2685b6..e29c47744ef5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1114,18 +1114,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* same thing for QuZ... */
 	if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
-		if (cfg == &iwl_ax101_cfg_qu_hr)
-			cfg = &iwl_ax101_cfg_quz_hr;
-		else if (cfg == &iwl_ax201_cfg_qu_hr)
-			cfg = &iwl_ax201_cfg_quz_hr;
-		else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
-			cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
-		else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
-			cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
-		else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
-			cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
-		else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
-			cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+		if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
+			iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
+		else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
+			iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
+		else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+			iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
+		else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+			iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+		else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+			iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
+		else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+			iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
 	}
 
 #endif
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 653d347a9a19..580387f9f12a 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
 	}
 
 	vs_ie = (struct ieee_types_header *)vendor_ie;
+	if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
+		IEEE_MAX_IE_SIZE)
+		return -EINVAL;
 	memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
 	       vs_ie, vs_ie->len + 2);
 	le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 18f7d9bf30b2..0939a8c8f3ab 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
 
 	rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
 	if (rate_ie) {
+		if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
+			return;
 		memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
 		rate_len = rate_ie->len;
 	}
@@ -272,8 +274,11 @@
 	rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
 					   params->beacon.tail,
 					   params->beacon.tail_len);
-	if (rate_ie)
+	if (rate_ie) {
+		if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
+			return;
 		memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
+	}
 
 	return;
 }
@@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
 					    params->beacon.tail_len);
 	if (vendor_ie) {
 		wmm_ie = vendor_ie;
+		if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
+			return;
 		memcpy(&bss_cfg->wmm_info, wmm_ie +
 		       sizeof(struct ieee_types_header), *(wmm_ie + 1));
 		priv->wmm_enabled = 1;
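The mwifiex changes above all add the same guard: an information element's self-declared length is validated against the destination buffer before memcpy, since the IE comes from an untrusted beacon/tail blob. A standalone model of that check is below; the TLV layout and names are illustrative, not the mwifiex structures.

```c
/* Standalone model of the bound check: reject a length-prefixed element
 * that would not fit in the fixed destination buffer instead of copying it.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RATES_MAX 14

struct bss_cfg {
	uint8_t rates[RATES_MAX];
	size_t rate_len;
};

/* ie points at a TLV: ie[0] = id, ie[1] = len, ie[2..] = payload */
static int append_rates(struct bss_cfg *cfg, const uint8_t *ie)
{
	uint8_t len = ie[1];

	if (len > RATES_MAX - cfg->rate_len)	/* would overflow: bail out */
		return -EINVAL;
	memcpy(cfg->rates + cfg->rate_len, ie + 2, len);
	cfg->rate_len += len;
	return 0;
}

int main(void)
{
	struct bss_cfg cfg = { .rate_len = 0 };
	uint8_t supp[]  = { 0x01, 8, 1, 2, 3, 4, 5, 6, 7, 8 };
	uint8_t bogus[] = { 0x32, 200 };	/* lies about its length */

	printf("%d\n", append_rates(&cfg, supp));	/* 0   */
	printf("%d\n", append_rates(&cfg, bogus));	/* -22 */
	return 0;
}
```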
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 40c0d536e20d..9d4426f6905f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -59,6 +59,11 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
 		dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
 	}
 
+	if (is_mt7630(dev)) {
+		dev->mt76.cap.has_5ghz = false;
+		dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
+	}
+
 	if (!mt76x02_field_valid(nic_conf1 & 0xff))
 		nic_conf1 &= 0xff00;
 
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index f84a7df296ea..7705e55aa3d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -51,6 +51,19 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
 	mt76x0e_stop_hw(dev);
 }
 
+static int
+mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		struct ieee80211_key_conf *key)
+{
+	struct mt76x02_dev *dev = hw->priv;
+
+	if (is_mt7630(dev))
+		return -EOPNOTSUPP;
+
+	return mt76x02_set_key(hw, cmd, vif, sta, key);
+}
+
 static void
 mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	      u32 queues, bool drop)
@@ -67,7 +80,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
 	.configure_filter = mt76x02_configure_filter,
 	.bss_info_changed = mt76x02_bss_info_changed,
 	.sta_state = mt76_sta_state,
-	.set_key = mt76x02_set_key,
+	.set_key = mt76x0e_set_key,
 	.conf_tx = mt76x02_conf_tx,
 	.sw_scan_start = mt76_sw_scan,
 	.sw_scan_complete = mt76x02_sw_scan_complete,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index ecbe78b8027b..f1cdcd61c54a 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1654,13 +1654,18 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
 
 	offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
 
-	rt2800_register_multiread(rt2x00dev, offset,
-				  &iveiv_entry, sizeof(iveiv_entry));
-	if ((crypto->cipher == CIPHER_TKIP) ||
-	    (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
-	    (crypto->cipher == CIPHER_AES))
-		iveiv_entry.iv[3] |= 0x20;
-	iveiv_entry.iv[3] |= key->keyidx << 6;
+	if (crypto->cmd == SET_KEY) {
+		rt2800_register_multiread(rt2x00dev, offset,
+					  &iveiv_entry, sizeof(iveiv_entry));
+		if ((crypto->cipher == CIPHER_TKIP) ||
+		    (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
+		    (crypto->cipher == CIPHER_AES))
+			iveiv_entry.iv[3] |= 0x20;
+		iveiv_entry.iv[3] |= key->keyidx << 6;
+	} else {
+		memset(&iveiv_entry, 0, sizeof(iveiv_entry));
+	}
+
 	rt2800_register_multiwrite(rt2x00dev, offset,
 				  &iveiv_entry, sizeof(iveiv_entry));
 }
@@ -4237,24 +4242,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
 	switch (rt2x00dev->default_ant.rx_chain_num) {
 	case 3:
 		/* Turn on tertiary LNAs */
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN,
-				   rf->channel > 14);
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN,
-				   rf->channel <= 14);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
 		/* fall-through */
 	case 2:
 		/* Turn on secondary LNAs */
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN,
-				   rf->channel > 14);
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN,
-				   rf->channel <= 14);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
 		/* fall-through */
 	case 1:
 		/* Turn on primary LNAs */
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN,
-				   rf->channel > 14);
-		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN,
-				   rf->channel <= 14);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
+		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
 		break;
 	}
 
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index f5048d4b8cb6..760eaffeebd6 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -645,7 +645,6 @@ fail_rx:
 	kfree(rsi_dev->tx_buffer);
 
 fail_eps:
-	kfree(rsi_dev);
 
 	return status;
 }