diff options
| author | David S. Miller <davem@davemloft.net> | 2017-10-10 16:18:34 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2017-10-10 16:18:34 -0400 |
| commit | befc7a3324b45b7478b29861f2a77f8b2bc17922 (patch) | |
| tree | 97ba442e53e8eb00ea1527077fbd67c3179c5a32 | |
| parent | 3668bb8da197f90c2e9cb71e89da6fa629ff8450 (diff) | |
| parent | 5f0ca2fb71e28df146f590eebfe32b41171b737f (diff) | |
Merge branch 'nfp-fix-ethtool-stats-and-page-allocation'
Jakub Kicinski says:
====================
nfp: fix ethtool stats and page allocation
Two fixes for net. The first one makes sure we handle gathering of stats on
32-bit machines correctly (ouch). The second fix solves a potential
NULL-deref if we fail to allocate a page with XDP running.
I used Fixes: tags pointing to where the bug was introduced, but for
patch 1 the bug has been in the driver forever, and the fix won't backport
cleanly beyond commit 325945ede6d4 ("nfp: split software and hardware
vNIC statistics") which is in net.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
| -rw-r--r-- | drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 20 | ||||
| -rw-r--r-- | drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 8 |
2 files changed, 19 insertions, 9 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 1c0187f0af51..e118b5f23996 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
| @@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) | |||
| 1180 | { | 1180 | { |
| 1181 | void *frag; | 1181 | void *frag; |
| 1182 | 1182 | ||
| 1183 | if (!dp->xdp_prog) | 1183 | if (!dp->xdp_prog) { |
| 1184 | frag = netdev_alloc_frag(dp->fl_bufsz); | 1184 | frag = netdev_alloc_frag(dp->fl_bufsz); |
| 1185 | else | 1185 | } else { |
| 1186 | frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD)); | 1186 | struct page *page; |
| 1187 | |||
| 1188 | page = alloc_page(GFP_KERNEL | __GFP_COLD); | ||
| 1189 | frag = page ? page_address(page) : NULL; | ||
| 1190 | } | ||
| 1187 | if (!frag) { | 1191 | if (!frag) { |
| 1188 | nn_dp_warn(dp, "Failed to alloc receive page frag\n"); | 1192 | nn_dp_warn(dp, "Failed to alloc receive page frag\n"); |
| 1189 | return NULL; | 1193 | return NULL; |
| @@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) | |||
| 1203 | { | 1207 | { |
| 1204 | void *frag; | 1208 | void *frag; |
| 1205 | 1209 | ||
| 1206 | if (!dp->xdp_prog) | 1210 | if (!dp->xdp_prog) { |
| 1207 | frag = napi_alloc_frag(dp->fl_bufsz); | 1211 | frag = napi_alloc_frag(dp->fl_bufsz); |
| 1208 | else | 1212 | } else { |
| 1209 | frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD)); | 1213 | struct page *page; |
| 1214 | |||
| 1215 | page = alloc_page(GFP_ATOMIC | __GFP_COLD); | ||
| 1216 | frag = page ? page_address(page) : NULL; | ||
| 1217 | } | ||
| 1210 | if (!frag) { | 1218 | if (!frag) { |
| 1211 | nn_dp_warn(dp, "Failed to alloc receive page frag\n"); | 1219 | nn_dp_warn(dp, "Failed to alloc receive page frag\n"); |
| 1212 | return NULL; | 1220 | return NULL; |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 07969f06df10..dc016dfec64d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | |||
| @@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) | |||
| 464 | 464 | ||
| 465 | do { | 465 | do { |
| 466 | start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); | 466 | start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); |
| 467 | *data++ = nn->r_vecs[i].rx_pkts; | 467 | data[0] = nn->r_vecs[i].rx_pkts; |
| 468 | tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; | 468 | tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; |
| 469 | tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; | 469 | tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; |
| 470 | tmp[2] = nn->r_vecs[i].hw_csum_rx_error; | 470 | tmp[2] = nn->r_vecs[i].hw_csum_rx_error; |
| @@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) | |||
| 472 | 472 | ||
| 473 | do { | 473 | do { |
| 474 | start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); | 474 | start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); |
| 475 | *data++ = nn->r_vecs[i].tx_pkts; | 475 | data[1] = nn->r_vecs[i].tx_pkts; |
| 476 | *data++ = nn->r_vecs[i].tx_busy; | 476 | data[2] = nn->r_vecs[i].tx_busy; |
| 477 | tmp[3] = nn->r_vecs[i].hw_csum_tx; | 477 | tmp[3] = nn->r_vecs[i].hw_csum_tx; |
| 478 | tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; | 478 | tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; |
| 479 | tmp[5] = nn->r_vecs[i].tx_gather; | 479 | tmp[5] = nn->r_vecs[i].tx_gather; |
| 480 | tmp[6] = nn->r_vecs[i].tx_lso; | 480 | tmp[6] = nn->r_vecs[i].tx_lso; |
| 481 | } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); | 481 | } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); |
| 482 | 482 | ||
| 483 | data += 3; | ||
| 484 | |||
| 483 | for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) | 485 | for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) |
| 484 | gathered_stats[j] += tmp[j]; | 486 | gathered_stats[j] += tmp[j]; |
| 485 | } | 487 | } |
