Diffstat (limited to 'drivers/net/vmxnet3/vmxnet3_ethtool.c')
 drivers/net/vmxnet3/vmxnet3_ethtool.c | 274
 1 file changed, 160 insertions(+), 114 deletions(-)

diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 8e17fc8a7fe7..81254be85b92 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -45,6 +45,7 @@ static int
 vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 {
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+        unsigned long flags;
 
         if (adapter->rxcsum != val) {
                 adapter->rxcsum = val;
@@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
                                 adapter->shared->devRead.misc.uptFeatures &=
                                 ~UPT1_F_RXCSUM;
 
+                        spin_lock_irqsave(&adapter->cmd_lock, flags);
                         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                                VMXNET3_CMD_UPDATE_FEATURE);
+                        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
                 }
         }
         return 0;
@@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 static const struct vmxnet3_stat_desc
 vmxnet3_tq_dev_stats[] = {
         /* description, offset */
-        { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
-        { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
-        { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
-        { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
-        { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
-        { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
-        { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
-        { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
-        { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
-        { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
+        { "Tx Queue#", 0 },
+        { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
+        { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
+        { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
+        { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
+        { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
+        { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
+        { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
+        { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
+        { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
+        { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
 };
 
 /* per tq stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_tq_driver_stats[] = {
         /* description, offset */
-        {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
+        {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
                                   drop_total) },
         { " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
                                   drop_too_many_frags) },
         { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
                                   drop_oversized_hdr) },
         { " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
                                   drop_hdr_inspect_err) },
         { " tso", offsetof(struct vmxnet3_tq_driver_stats,
                                   drop_tso) },
-        { "ring full", offsetof(struct vmxnet3_tq_driver_stats,
+        { " ring full", offsetof(struct vmxnet3_tq_driver_stats,
                                   tx_ring_full) },
-        { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
+        { " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
                                   linearized) },
-        { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
+        { " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
                                   copy_skb_header) },
-        { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
+        { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
                                   oversized_hdr) },
 };
 
 /* per rq stats maintained by the device */
 static const struct vmxnet3_stat_desc
 vmxnet3_rq_dev_stats[] = {
-        { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
-        { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
-        { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
-        { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
-        { "mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
-        { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
-        { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
-        { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
-        { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
-        { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
+        { "Rx Queue#", 0 },
+        { " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
+        { " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
+        { " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
+        { " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
+        { " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
+        { " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
+        { " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
+        { " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
+        { " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
+        { " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
 };
 
 /* per rq stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_rq_driver_stats[] = {
         /* description, offset */
-        { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
+        { " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
                                   drop_total) },
         { " err", offsetof(struct vmxnet3_rq_driver_stats,
                                   drop_err) },
         { " fcs", offsetof(struct vmxnet3_rq_driver_stats,
                                   drop_fcs) },
-        { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
+        { " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
                                   rx_buf_alloc_failure) },
 };
 
 /* gloabl stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_global_stats[] = {
         /* description, offset */
         { "tx timeout count", offsetof(struct vmxnet3_adapter,
                                   tx_timeout_count) }
 };
 
@@ -151,12 +156,15 @@ vmxnet3_get_stats(struct net_device *netdev)
         struct UPT1_TxStats *devTxStats;
         struct UPT1_RxStats *devRxStats;
         struct net_device_stats *net_stats = &netdev->stats;
+        unsigned long flags;
         int i;
 
         adapter = netdev_priv(netdev);
 
         /* Collect the dev stats into the shared area */
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
         memset(net_stats, 0, sizeof(*net_stats));
         for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -193,12 +201,15 @@ vmxnet3_get_stats(struct net_device *netdev)
 static int
 vmxnet3_get_sset_count(struct net_device *netdev, int sset)
 {
+        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         switch (sset) {
         case ETH_SS_STATS:
-                return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
-                        ARRAY_SIZE(vmxnet3_tq_driver_stats) +
-                        ARRAY_SIZE(vmxnet3_rq_dev_stats) +
-                        ARRAY_SIZE(vmxnet3_rq_driver_stats) +
+                return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
+                        ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
+                        adapter->num_tx_queues +
+                        (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
+                        ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
+                        adapter->num_rx_queues +
                         ARRAY_SIZE(vmxnet3_global_stats);
         default:
                 return -EOPNOTSUPP;
@@ -206,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset)
 }
 
 
+/* Should be multiple of 4 */
+#define NUM_TX_REGS 8
+#define NUM_RX_REGS 12
+
 static int
 vmxnet3_get_regs_len(struct net_device *netdev)
 {
-        return 20 * sizeof(u32);
+        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+        return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
+                adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
 }
 
 
@@ -240,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 static void
 vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
 {
+        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         if (stringset == ETH_SS_STATS) {
-                int i;
-
-                for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
-                        memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
-                               ETH_GSTRING_LEN);
-                        buf += ETH_GSTRING_LEN;
-                }
-                for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
-                        memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
-                               ETH_GSTRING_LEN);
-                        buf += ETH_GSTRING_LEN;
-                }
-                for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
-                        memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
-                               ETH_GSTRING_LEN);
-                        buf += ETH_GSTRING_LEN;
-                }
-                for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
-                        memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
-                               ETH_GSTRING_LEN);
-                        buf += ETH_GSTRING_LEN;
-                }
+                int i, j;
+                for (j = 0; j < adapter->num_tx_queues; j++) {
+                        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
+                                memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
+                                       ETH_GSTRING_LEN);
+                                buf += ETH_GSTRING_LEN;
+                        }
+                        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
+                             i++) {
+                                memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
+                                       ETH_GSTRING_LEN);
+                                buf += ETH_GSTRING_LEN;
+                        }
+                }
+
+                for (j = 0; j < adapter->num_rx_queues; j++) {
+                        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
+                                memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
+                                       ETH_GSTRING_LEN);
+                                buf += ETH_GSTRING_LEN;
+                        }
+                        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
+                             i++) {
+                                memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
+                                       ETH_GSTRING_LEN);
+                                buf += ETH_GSTRING_LEN;
+                        }
+                }
+
                 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
                         memcpy(buf, vmxnet3_global_stats[i].desc,
                                ETH_GSTRING_LEN);
@@ -277,6 +302,7 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
         u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
+        unsigned long flags;
 
         if (data & ~ETH_FLAG_LRO)
                 return -EOPNOTSUPP;
@@ -292,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
                 else
                         adapter->shared->devRead.misc.uptFeatures &=
                                 ~UPT1_F_LRO;
+                spin_lock_irqsave(&adapter->cmd_lock, flags);
                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                        VMXNET3_CMD_UPDATE_FEATURE);
+                spin_unlock_irqrestore(&adapter->cmd_lock, flags);
         }
         return 0;
 }
@@ -303,30 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
                           struct ethtool_stats *stats, u64 *buf)
 {
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+        unsigned long flags;
         u8 *base;
         int i;
         int j = 0;
 
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
         /* this does assume each counter is 64-bit wide */
-        /* TODO change this for multiple queues */
-
-        base = (u8 *)&adapter->tqd_start[j].stats;
-        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
-                *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
-
-        base = (u8 *)&adapter->tx_queue[j].stats;
-        for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
-                *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
-
-        base = (u8 *)&adapter->rqd_start[j].stats;
-        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
-                *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
+        for (j = 0; j < adapter->num_tx_queues; j++) {
+                base = (u8 *)&adapter->tqd_start[j].stats;
+                *buf++ = (u64)j;
+                for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+                        *buf++ = *(u64 *)(base +
+                                          vmxnet3_tq_dev_stats[i].offset);
+
+                base = (u8 *)&adapter->tx_queue[j].stats;
+                for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+                        *buf++ = *(u64 *)(base +
+                                          vmxnet3_tq_driver_stats[i].offset);
+        }
 
-        base = (u8 *)&adapter->rx_queue[j].stats;
-        for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
-                *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
+        for (j = 0; j < adapter->num_tx_queues; j++) {
+                base = (u8 *)&adapter->rqd_start[j].stats;
+                *buf++ = (u64) j;
+                for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+                        *buf++ = *(u64 *)(base +
+                                          vmxnet3_rq_dev_stats[i].offset);
+
+                base = (u8 *)&adapter->rx_queue[j].stats;
+                for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+                        *buf++ = *(u64 *)(base +
+                                          vmxnet3_rq_driver_stats[i].offset);
+        }
 
         base = (u8 *)adapter;
         for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
@@ -339,7 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
 {
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         u32 *buf = p;
-        int i = 0;
+        int i = 0, j = 0;
 
         memset(p, 0, vmxnet3_get_regs_len(netdev));
 
@@ -348,31 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
         /* Update vmxnet3_get_regs_len if we want to dump more registers */
 
         /* make each ring use multiple of 16 bytes */
-        /* TODO change this for multiple queues */
-        buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
-        buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
-        buf[2] = adapter->tx_queue[i].tx_ring.gen;
-        buf[3] = 0;
+        for (i = 0; i < adapter->num_tx_queues; i++) {
+                buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
+                buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
+                buf[j++] = adapter->tx_queue[i].tx_ring.gen;
+                buf[j++] = 0;
 
-        buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
-        buf[5] = adapter->tx_queue[i].comp_ring.gen;
-        buf[6] = adapter->tx_queue[i].stopped;
-        buf[7] = 0;
-
-        buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
-        buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
-        buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
-        buf[11] = 0;
-
-        buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
-        buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
-        buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
-        buf[15] = 0;
-
-        buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
-        buf[17] = adapter->rx_queue[i].comp_ring.gen;
-        buf[18] = 0;
-        buf[19] = 0;
+                buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
+                buf[j++] = adapter->tx_queue[i].comp_ring.gen;
+                buf[j++] = adapter->tx_queue[i].stopped;
+                buf[j++] = 0;
+        }
+
+        for (i = 0; i < adapter->num_rx_queues; i++) {
+                buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
+                buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
+                buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
+                buf[j++] = 0;
+
+                buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
+                buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
+                buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
+                buf[j++] = 0;
+
+                buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
+                buf[j++] = adapter->rx_queue[i].comp_ring.gen;
+                buf[j++] = 0;
+                buf[j++] = 0;
+        }
+
 }
 
 
@@ -574,6 +617,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
                       const struct ethtool_rxfh_indir *p)
 {
         unsigned int i;
+        unsigned long flags;
         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
         struct UPT1_RSSConf *rssConf = adapter->rss_conf;
 
@@ -592,8 +636,10 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
         for (i = 0; i < rssConf->indTableSize; i++)
                 rssConf->indTable[i] = p->ring_index[i];
 
+        spin_lock_irqsave(&adapter->cmd_lock, flags);
         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                VMXNET3_CMD_UPDATE_RSSIDT);
+        spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
         return 0;
 
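
Not part of the patch above: a minimal userspace sketch of how the reworked ethtool callbacks (vmxnet3_get_sset_count(), vmxnet3_get_strings() and vmxnet3_get_ethtool_stats()) are consumed through the standard SIOCETHTOOL ioctls, which is essentially what "ethtool -S <iface>" does. The per-queue "Tx Queue#"/"Rx Queue#" entries added above simply show up as additional name/value pairs in this dump. The default interface name and the minimal error handling are assumptions made for brevity.

/* Illustrative only -- not part of the kernel patch above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
        const char *ifname = argc > 1 ? argv[1] : "eth0";       /* assumed name */
        struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
        struct ethtool_gstrings *strings;
        struct ethtool_stats *stats;
        struct ifreq ifr;
        unsigned int i, n;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

        /* n_stats is what the driver's get_sset_count(ETH_SS_STATS) returns */
        ifr.ifr_data = (void *)&drvinfo;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;
        n = drvinfo.n_stats;

        /* stat names, as built by get_strings() */
        strings = calloc(1, sizeof(*strings) + n * ETH_GSTRING_LEN);
        strings->cmd = ETHTOOL_GSTRINGS;
        strings->string_set = ETH_SS_STATS;
        strings->len = n;
        ifr.ifr_data = (void *)strings;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        /* 64-bit counters, in the same order, from get_ethtool_stats() */
        stats = calloc(1, sizeof(*stats) + n * sizeof(__u64));
        stats->cmd = ETHTOOL_GSTATS;
        stats->n_stats = n;
        ifr.ifr_data = (void *)stats;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                return 1;

        for (i = 0; i < n; i++)
                printf("%.*s: %llu\n", ETH_GSTRING_LEN,
                       (char *)&strings->data[i * ETH_GSTRING_LEN],
                       (unsigned long long)stats->data[i]);

        free(strings);
        free(stats);
        close(fd);
        return 0;
}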
