author		Neil Horman <nhorman@tuxdriver.com>	2013-04-01 00:31:58 -0400
committer	David S. Miller <davem@davemloft.net>	2013-04-02 14:32:24 -0400
commit		f7f22874267bddcf2f2017d5045fdce390aee8c8 (patch)
tree		f2c2a369d2516ef7c19bea2958f166a46dd0952a /drivers/net/ethernet/nvidia
parent		287ecefb4bbea962b5646ef496c7a32a86d30b60 (diff)
forcedeth: Do a dma_mapping_error check after skb_frag_dma_map
This backtrace was recently reported on a 3.9 kernel:
Actual results: from syslog /var/log/messages:
kernel: [17539.340285] ------------[ cut here ]------------
kernel: [17539.341012] WARNING: at lib/dma-debug.c:937 check_unmap+0x493/0x960()
kernel: [17539.341012] Hardware name: MS-7125
kernel: [17539.341012] forcedeth 0000:00:0a.0: DMA-API: device driver failed to
check map error[device address=0x0000000013c88000] [size=544 bytes] [mapped as
page]
kernel: [17539.341012] Modules linked in: fuse ebtable_nat ipt_MASQUERADE
nf_conntrack_netbios_ns nf_conntrack_broadcast ip6table_nat nf_nat_ipv6
ip6table_mangle ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 iptable_nat
nf_nat_ipv4 nf_nat iptable_mangle nf_conntrack_ipv4 nf_defrag_ipv4 xt_conntrack
nf_conntrack bnep bluetooth rfkill ebtable_filter ebtables ip6table_filter
ip6_tables snd_hda_codec_hdmi snd_cmipci snd_mpu401_uart snd_hda_intel
snd_intel8x0 snd_opl3_lib snd_ac97_codec gameport snd_hda_codec snd_rawmidi
ac97_bus snd_hwdep snd_seq snd_seq_device snd_pcm snd_page_alloc snd_timer snd
k8temp soundcore serio_raw i2c_nforce2 forcedeth ata_generic pata_acpi nouveau
video mxm_wmi wmi i2c_algo_bit drm_kms_helper ttm drm i2c_core sata_sil pata_amd
sata_nv uinput
kernel: [17539.341012] Pid: 17340, comm: sshd Not tainted
3.9.0-0.rc4.git0.1.fc19.i686.PAE #1
kernel: [17539.341012] Call Trace:
kernel: [17539.341012] [<c045573c>] warn_slowpath_common+0x6c/0xa0
kernel: [17539.341012] [<c0701953>] ? check_unmap+0x493/0x960
kernel: [17539.341012] [<c0701953>] ? check_unmap+0x493/0x960
kernel: [17539.341012] [<c04557a3>] warn_slowpath_fmt+0x33/0x40
kernel: [17539.341012] [<c0701953>] check_unmap+0x493/0x960
kernel: [17539.341012] [<c049238f>] ? sched_clock_cpu+0xdf/0x150
kernel: [17539.341012] [<c0701e87>] debug_dma_unmap_page+0x67/0x70
kernel: [17539.341012] [<f7eae8f2>] nv_unmap_txskb.isra.32+0x92/0x100
It's pretty plainly the result of an skb fragment getting unmapped without
its initial mapping operation ever having been checked for errors. This patch
corrects that.
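
For context, the rule the patch enforces is the standard DMA-API pattern:
every streaming mapping must be checked with dma_mapping_error() before the
address is handed to hardware, and a failed mapping must drop the packet
rather than pass a bad address along. A minimal sketch of that pattern
(names follow the driver, but this is illustrative, not the committed hunk):

	np->put_tx_ctx->dma = skb_frag_dma_map(&np->pci_dev->dev, frag,
					       offset, bcnt, DMA_TO_DEVICE);
	if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
		/* unwind any fragments already mapped, then drop the skb */
		kfree_skb(skb);
		return NETDEV_TX_OK;	/* skb consumed; do not requeue */
	}
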
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: "David S. Miller" <davem@davemloft.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/nvidia')
-rw-r--r--	drivers/net/ethernet/nvidia/forcedeth.c	41
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index b62262cfe4d9..5ae124719790 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2200,6 +2200,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ring_desc *start_tx;
 	struct ring_desc *prev_tx;
 	struct nv_skb_map *prev_tx_ctx;
+	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
 	unsigned long flags;
 
 	/* add fragments to entries count */
@@ -2261,12 +2262,31 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		do {
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
+			if (!start_tx_ctx)
+				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
+
 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 							&np->pci_dev->dev,
 							frag, offset,
 							bcnt,
 							DMA_TO_DEVICE);
+			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+
+				/* Unwind the mapped fragments */
+				do {
+					nv_unmap_txskb(np, start_tx_ctx);
+					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
+						tmp_tx_ctx = np->first_tx_ctx;
+				} while (tmp_tx_ctx != np->put_tx_ctx);
+				kfree_skb(skb);
+				np->put_tx_ctx = start_tx_ctx;
+				u64_stats_update_begin(&np->swstats_tx_syncp);
+				np->stat_tx_dropped++;
+				u64_stats_update_end(&np->swstats_tx_syncp);
+				return NETDEV_TX_OK;
+			}
+
 			np->put_tx_ctx->dma_len = bcnt;
 			np->put_tx_ctx->dma_single = 0;
 			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2327,7 +2347,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 	struct ring_desc_ex *start_tx;
 	struct ring_desc_ex *prev_tx;
 	struct nv_skb_map *prev_tx_ctx;
-	struct nv_skb_map *start_tx_ctx;
+	struct nv_skb_map *start_tx_ctx = NULL;
+	struct nv_skb_map *tmp_tx_ctx = NULL;
 	unsigned long flags;
 
 	/* add fragments to entries count */
@@ -2392,11 +2413,29 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
+			if (!start_tx_ctx)
+				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 							&np->pci_dev->dev,
 							frag, offset,
 							bcnt,
 							DMA_TO_DEVICE);
+
+			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+
+				/* Unwind the mapped fragments */
+				do {
+					nv_unmap_txskb(np, start_tx_ctx);
+					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
+						tmp_tx_ctx = np->first_tx_ctx;
+				} while (tmp_tx_ctx != np->put_tx_ctx);
+				kfree_skb(skb);
+				np->put_tx_ctx = start_tx_ctx;
+				u64_stats_update_begin(&np->swstats_tx_syncp);
+				np->stat_tx_dropped++;
+				u64_stats_update_end(&np->swstats_tx_syncp);
+				return NETDEV_TX_OK;
+			}
 			np->put_tx_ctx->dma_len = bcnt;
 			np->put_tx_ctx->dma_single = 0;
 			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
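
The subtle part of the unwind above is the wrap-around walk: the tx context
ring must be traversed from the first slot mapped for this skb up to (but not
including) the slot that failed, wrapping from last_tx_ctx back to
first_tx_ctx. The following standalone userspace sketch demonstrates only
that traversal, with hypothetical simplified types (struct entry and the
first/last/start/put pointers stand in for the driver's nv_skb_map
bookkeeping); it is illustrative and is not the driver's code:

	#include <stdio.h>

	#define RING_SIZE 8

	struct entry {
		int mapped;	/* stands in for a held DMA mapping */
	};

	int main(void)
	{
		struct entry ring[RING_SIZE] = { { 0 } };
		struct entry *first = &ring[0];			/* first_tx_ctx */
		struct entry *last = &ring[RING_SIZE - 1];	/* last_tx_ctx */

		/* Suppose slots 5,6,7,0,1 were mapped before slot 2 failed. */
		struct entry *start = &ring[5];			/* start_tx_ctx */
		struct entry *put = &ring[2];			/* put_tx_ctx */
		struct entry *e;
		int i;

		for (e = start; e != put; e = (e == last) ? first : e + 1)
			e->mapped = 1;

		/*
		 * Unwind: release every slot from start up to the failed
		 * one, wrapping past the end of the ring exactly as
		 * tmp_tx_ctx wraps from last_tx_ctx to first_tx_ctx in
		 * the patch.
		 */
		e = start;
		do {
			e->mapped = 0;	/* the driver calls nv_unmap_txskb() here */
			if (e++ == last)
				e = first;
		} while (e != put);

		for (i = 0; i < RING_SIZE; i++)
			printf("slot %d: %s\n", i, ring[i].mapped ? "LEAKED" : "clean");
		return 0;
	}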