author     Govindarajulu Varadarajan <_govind@gmx.com>    2014-12-24 05:29:37 -0500
committer  David S. Miller <davem@davemloft.net>          2014-12-31 13:08:45 -0500
commit     065df159ec588630a360a689e4e3043c6622a6e9 (patch)
tree       86aefb07acfcf6b7266fe1c26af0891cc51cdfa9
parent     5e32066d0085af3c53834323c90b7c69a8f788f6 (diff)
enic: check dma_mapping_error
This patch checks for pci_dma_mapping_error() after DMA-mapping the data.
If the DMA mapping fails, we remove the previously queued frags and return
NETDEV_TX_OK.
Reported-by: Jan Stancek <jstancek@redhat.com>
Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
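
For context, the pattern applied at each mapping site is: map the buffer, check the mapping with pci_dma_mapping_error() before posting any descriptor that uses it, and on failure unwind whatever was already queued for the skb, free it and return NETDEV_TX_OK so the stack does not requeue a packet that can never be sent. Below is a minimal, illustrative sketch of that pattern; my_dev, my_queue_desc() and my_unwind_queued_descs() are hypothetical placeholders rather than enic code, and the real implementation is in the hunks that follow.

/* Illustrative sketch only -- not part of the patch. my_dev,
 * my_queue_desc() and my_unwind_queued_descs() are placeholders
 * for the driver's real private struct and queueing helpers.
 */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct my_dev *md = netdev_priv(netdev);
	dma_addr_t dma_addr;

	dma_addr = pci_map_single(md->pdev, skb->data, skb_headlen(skb),
				  PCI_DMA_TODEVICE);
	if (unlikely(pci_dma_mapping_error(md->pdev, dma_addr))) {
		/* Mapping failed: undo any descriptors already queued for
		 * this skb, drop it and report NETDEV_TX_OK so the stack
		 * does not retry a transmission that cannot complete.
		 */
		my_unwind_queued_descs(md);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	my_queue_desc(md, skb, dma_addr);
	return NETDEV_TX_OK;
}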
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h       |  12
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c  | 152
2 files changed, 106 insertions(+), 58 deletions(-)
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 25c4d88853d8..b2ea35a3a881 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -242,6 +242,18 @@ static inline unsigned int enic_msix_notify_intr(struct enic *enic)
 	return enic->rq_count + enic->wq_count + 1;
 }
 
+static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
+{
+	if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
+		net_warn_ratelimited("%s: PCI dma mapping failed!\n",
+				     enic->netdev->name);
+
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 void enic_reset_addr_lists(struct enic *enic);
 int enic_sriov_enabled(struct enic *enic);
 int enic_is_valid_vf(struct enic *enic, int vf);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 705f334ebb85..142c9b5509ae 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -351,80 +351,94 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static inline void enic_queue_wq_skb_cont(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	unsigned int len_left, int loopback)
+static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
+				  struct sk_buff *skb, unsigned int len_left,
+				  int loopback)
 {
 	const skb_frag_t *frag;
+	dma_addr_t dma_addr;
 
 	/* Queue additional data fragments */
 	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
 		len_left -= skb_frag_size(frag);
-		enic_queue_wq_desc_cont(wq, skb,
-			skb_frag_dma_map(&enic->pdev->dev,
-				frag, 0, skb_frag_size(frag),
-				DMA_TO_DEVICE),
-			skb_frag_size(frag),
-			(len_left == 0),	/* EOP? */
-			loopback);
+		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
+					    skb_frag_size(frag),
+					    DMA_TO_DEVICE);
+		if (unlikely(enic_dma_map_check(enic, dma_addr)))
+			return -ENOMEM;
+		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
+					(len_left == 0),	/* EOP? */
+					loopback);
 	}
+
+	return 0;
 }
 
-static inline void enic_queue_wq_skb_vlan(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
+				  struct sk_buff *skb, int vlan_tag_insert,
+				  unsigned int vlan_tag, int loopback)
 {
 	unsigned int head_len = skb_headlen(skb);
 	unsigned int len_left = skb->len - head_len;
 	int eop = (len_left == 0);
+	dma_addr_t dma_addr;
+	int err = 0;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+				  PCI_DMA_TODEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr)))
+		return -ENOMEM;
 
 	/* Queue the main skb fragment. The fragments are no larger
 	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
 	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
 	 * per fragment is queued.
 	 */
-	enic_queue_wq_desc(wq, skb,
-		pci_map_single(enic->pdev, skb->data,
-			head_len, PCI_DMA_TODEVICE),
-		head_len,
-		vlan_tag_insert, vlan_tag,
-		eop, loopback);
+	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
+			   vlan_tag, eop, loopback);
 
 	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+	return err;
 }
 
-static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
+				     struct sk_buff *skb, int vlan_tag_insert,
+				     unsigned int vlan_tag, int loopback)
 {
 	unsigned int head_len = skb_headlen(skb);
 	unsigned int len_left = skb->len - head_len;
 	unsigned int hdr_len = skb_checksum_start_offset(skb);
 	unsigned int csum_offset = hdr_len + skb->csum_offset;
 	int eop = (len_left == 0);
+	dma_addr_t dma_addr;
+	int err = 0;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+				  PCI_DMA_TODEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr)))
+		return -ENOMEM;
 
 	/* Queue the main skb fragment. The fragments are no larger
 	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
 	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
 	 * per fragment is queued.
 	 */
-	enic_queue_wq_desc_csum_l4(wq, skb,
-		pci_map_single(enic->pdev, skb->data,
-			head_len, PCI_DMA_TODEVICE),
-		head_len,
-		csum_offset,
-		hdr_len,
-		vlan_tag_insert, vlan_tag,
-		eop, loopback);
+	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
+				   hdr_len, vlan_tag_insert, vlan_tag, eop,
+				   loopback);
 
 	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+	return err;
 }
 
-static inline void enic_queue_wq_skb_tso(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
+				 struct sk_buff *skb, unsigned int mss,
+				 int vlan_tag_insert, unsigned int vlan_tag,
+				 int loopback)
 {
 	unsigned int frag_len_left = skb_headlen(skb);
 	unsigned int len_left = skb->len - frag_len_left;
@@ -454,20 +468,19 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 	 */
 	while (frag_len_left) {
 		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
-		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
-				len, PCI_DMA_TODEVICE);
-		enic_queue_wq_desc_tso(wq, skb,
-			dma_addr,
-			len,
-			mss, hdr_len,
-			vlan_tag_insert, vlan_tag,
-			eop && (len == frag_len_left), loopback);
+		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
+					  PCI_DMA_TODEVICE);
+		if (unlikely(enic_dma_map_check(enic, dma_addr)))
+			return -ENOMEM;
+		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
+				       vlan_tag_insert, vlan_tag,
+				       eop && (len == frag_len_left), loopback);
 		frag_len_left -= len;
 		offset += len;
 	}
 
 	if (eop)
-		return;
+		return 0;
 
 	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
 	 * for additional data fragments
@@ -483,16 +496,18 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
 						    offset, len,
 						    DMA_TO_DEVICE);
-			enic_queue_wq_desc_cont(wq, skb,
-				dma_addr,
-				len,
-				(len_left == 0) &&
-				(len == frag_len_left),		/* EOP? */
-				loopback);
+			if (unlikely(enic_dma_map_check(enic, dma_addr)))
+				return -ENOMEM;
+			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
+						(len_left == 0) &&
+						 (len == frag_len_left),/*EOP*/
+						loopback);
 			frag_len_left -= len;
 			offset += len;
 		}
 	}
+
+	return 0;
 }
 
 static inline void enic_queue_wq_skb(struct enic *enic,
@@ -502,6 +517,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
 	unsigned int vlan_tag = 0;
 	int vlan_tag_insert = 0;
 	int loopback = 0;
+	int err;
 
 	if (vlan_tx_tag_present(skb)) {
 		/* VLAN tag from trunking driver */
@@ -513,14 +529,30 @@ static inline void enic_queue_wq_skb(struct enic *enic,
 	}
 
 	if (mss)
-		enic_queue_wq_skb_tso(enic, wq, skb, mss,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
+					    vlan_tag_insert, vlan_tag,
+					    loopback);
 	else if (skb->ip_summed == CHECKSUM_PARTIAL)
-		enic_queue_wq_skb_csum_l4(enic, wq, skb,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
+						vlan_tag, loopback);
 	else
-		enic_queue_wq_skb_vlan(enic, wq, skb,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
+					     vlan_tag, loopback);
+	if (unlikely(err)) {
+		struct vnic_wq_buf *buf;
+
+		buf = wq->to_use->prev;
+		/* while not EOP of previous pkt && queue not empty.
+		 * For all non EOP bufs, os_buf is NULL.
+		 */
+		while (!buf->os_buf && (buf->next != wq->to_clean)) {
+			enic_free_wq_buf(wq, buf);
+			wq->ring.desc_avail++;
+			buf = buf->prev;
+		}
+		wq->to_use = buf->next;
+		dev_kfree_skb(skb);
+	}
 }
 
 /* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -950,8 +982,12 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 	if (!skb)
 		return -ENOMEM;
 
-	dma_addr = pci_map_single(enic->pdev, skb->data,
-		len, PCI_DMA_FROMDEVICE);
+	dma_addr = pci_map_single(enic->pdev, skb->data, len,
+				  PCI_DMA_FROMDEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}
 
 	enic_queue_rq_desc(rq, skb, os_buf_index,
 		dma_addr, len);