author     John Fastabend <john.r.fastabend@intel.com>   2012-01-27 22:32:17 -0500
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>    2012-02-09 04:27:47 -0500
commit     9cc00b51a3cbb3b933065a55eaa9bfc306411b7a
tree       5dc0dec3837ed23e0f2f0d070ebbf98d4243cdc0 /drivers
parent     5facb8e0c4dc1c8c47b71fd7f376defe16185733
ixgbe: ethtool: stats user buffer overrun
If the number of tx/rx queues changes, the ethtool ioctl
ETHTOOL_GSTATS may overrun the userspace buffer. This
occurs because the general practice in user space for
querying stats is to issue an ETHTOOL_GSSET cmd to learn
the buffer size needed, allocate the buffer, then call
ETHTOOL_GSTRINGS and ETHTOOL_GSTATS. If real_num_queues
or the flow control attributes change after ETHTOOL_GSSET
but before the ETHTOOL_GSTRINGS/ETHTOOL_GSTATS calls, a
user space buffer overrun occurs.
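
To make that sequence concrete, the sketch below is illustrative only (it is
not part of this patch or of the ethtool sources; the helper name fetch_stats
is made up, and error handling is trimmed). It shows the usual two-step query
over the SIOCETHTOOL ioctl, sizing the buffer with ETHTOOL_GSSET_INFO and then
fetching values with ETHTOOL_GSTATS:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Illustrative only: size the buffer, then fetch the stats.  If the
 * driver reports a larger count at step 2 than it advertised at step 1,
 * the kernel's copy into stats->data runs past the allocation. */
static struct ethtool_stats *fetch_stats(int fd, const char *ifname)
{
	struct ifreq ifr;
	struct ethtool_sset_info *sset;
	struct ethtool_stats *stats;
	uint32_t n_stats;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* Step 1: ask how many stats ETHTOOL_GSTATS will return. */
	sset = calloc(1, sizeof(*sset) + sizeof(uint32_t));
	sset->cmd = ETHTOOL_GSSET_INFO;
	sset->sset_mask = 1ULL << ETH_SS_STATS;
	ifr.ifr_data = (void *)sset;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		free(sset);
		return NULL;
	}
	n_stats = sset->data[0];
	free(sset);

	/* Step 2: allocate exactly n_stats u64 slots and fetch them.
	 * Anything the driver adds in between (more queues, DCB turned
	 * on) spills past the end of this buffer. */
	stats = calloc(1, sizeof(*stats) + n_stats * sizeof(uint64_t));
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n_stats;
	ifr.ifr_data = (void *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		free(stats);
		return NULL;
	}
	return stats;
}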
To fix the overrun, always return the max buffer size
needed from get_sset_count(), then return all strings
and stats from get_strings()/get_ethtool_stats().
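
For context, here is a rough reconstruction of the driver's get_sset_count()
hook (it is not touched by this patch, so treat the details as approximate).
Once the macros in the diff below no longer depend on the runtime queue count
or the DCB flag, the count this hook reports is a fixed worst case, so a
buffer sized from it cannot be overrun by the later
ETHTOOL_GSTRINGS/ETHTOOL_GSTATS calls:

/* Approximate reconstruction of the existing hook, not part of this diff:
 * ETH_SS_STATS reports a worst-case count because IXGBE_STATS_LEN is built
 * from fixed-size macros.  Slots for absent rings are simply zero-filled
 * by get_ethtool_stats(). */
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}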
This _will_ change the output from the ioctl() call,
which could in theory break applications and script
parsing. I believe these changes should not break
existing tools, because the only differences are that
more {tx|rx}_queue entries may appear and the
{tx|rx}_pb_* stats are always returned.
Existing scripts already need to handle a changing
number of queues, because this happens today depending
on the system and the enabled features. The {tx|rx}_pb_*
stats are at the end of the output, so scripts should be
able to handle them today regardless.
Finally, get_ethtool_stats and get_strings are free-form
outputs; tools parsing them should be defensive anyway.
In the end, these updates are better than having a tool
segfault because of a buffer overrun.
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers')
 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 88
 1 file changed, 51 insertions(+), 37 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 1f31a04d3c91..a62975480e37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -120,19 +120,23 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
 #endif /* IXGBE_FCOE */
 };
 
-#define IXGBE_QUEUE_STATS_LEN \
-	((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
-	((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
+ * we set the num_rx_queues to evaluate to num_tx_queues. This is
+ * used because we do not have a good way to get the max number of
+ * rx queues with CONFIG_RPS disabled.
+ */
+#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
+
+#define IXGBE_QUEUE_STATS_LEN ( \
+	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
 	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
 #define IXGBE_PB_STATS_LEN ( \
-		(((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
-		 IXGBE_FLAG_DCB_ENABLED) ? \
-		 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
-		  sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
-		  sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
-		  sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
-		 / sizeof(u64) : 0)
+		(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
+		 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
+		 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
+		 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
+		/ sizeof(u64))
 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
 			 IXGBE_PB_STATS_LEN + \
 			 IXGBE_QUEUE_STATS_LEN)
@@ -1078,8 +1082,15 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
-	for (j = 0; j < adapter->num_tx_queues; j++) {
+	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
 		ring = adapter->tx_ring[j];
+		if (!ring) {
+			data[i] = 0;
+			data[i+1] = 0;
+			i += 2;
+			continue;
+		}
+
 		do {
 			start = u64_stats_fetch_begin_bh(&ring->syncp);
 			data[i] = ring->stats.packets;
@@ -1087,8 +1098,15 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 		i += 2;
 	}
-	for (j = 0; j < adapter->num_rx_queues; j++) {
+	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
 		ring = adapter->rx_ring[j];
+		if (!ring) {
+			data[i] = 0;
+			data[i+1] = 0;
+			i += 2;
+			continue;
+		}
+
 		do {
 			start = u64_stats_fetch_begin_bh(&ring->syncp);
 			data[i] = ring->stats.packets;
@@ -1096,22 +1114,20 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
 		i += 2;
 	}
-	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-		for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
-			data[i++] = adapter->stats.pxontxc[j];
-			data[i++] = adapter->stats.pxofftxc[j];
-		}
-		for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
-			data[i++] = adapter->stats.pxonrxc[j];
-			data[i++] = adapter->stats.pxoffrxc[j];
-		}
+
+	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
+		data[i++] = adapter->stats.pxontxc[j];
+		data[i++] = adapter->stats.pxofftxc[j];
+	}
+	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
+		data[i++] = adapter->stats.pxonrxc[j];
+		data[i++] = adapter->stats.pxoffrxc[j];
 	}
 }
 
 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 			      u8 *data)
 {
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	char *p = (char *)data;
 	int i;
 
@@ -1126,31 +1142,29 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
-		for (i = 0; i < adapter->num_tx_queues; i++) {
+		for (i = 0; i < netdev->num_tx_queues; i++) {
 			sprintf(p, "tx_queue_%u_packets", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "tx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
 		}
-		for (i = 0; i < adapter->num_rx_queues; i++) {
+		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
 			sprintf(p, "rx_queue_%u_packets", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
 		}
-		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-			for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
-				sprintf(p, "tx_pb_%u_pxon", i);
-				p += ETH_GSTRING_LEN;
-				sprintf(p, "tx_pb_%u_pxoff", i);
-				p += ETH_GSTRING_LEN;
-			}
-			for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
-				sprintf(p, "rx_pb_%u_pxon", i);
-				p += ETH_GSTRING_LEN;
-				sprintf(p, "rx_pb_%u_pxoff", i);
-				p += ETH_GSTRING_LEN;
-			}
+		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
+			sprintf(p, "tx_pb_%u_pxon", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_pb_%u_pxoff", i);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
+			sprintf(p, "rx_pb_%u_pxon", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_pb_%u_pxoff", i);
+			p += ETH_GSTRING_LEN;
 		}
 		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
 		break;