| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-16 18:07:25 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-05-16 18:07:25 -0400 |
| commit | 896821657479905b95d5193595b81679155ce199 (patch) | |
| tree | e8462a545d28e175d4e3475469d02764a1f1b269 | |
| parent | e2a978ec72cffe4739f5ef7618961d6a6209fa66 (diff) | |
| parent | 8b19d450ad188d402a183ff4a4d40f31c3916fbf (diff) | |
Merge tag 'ntb-bugfixes-3.10' of git://github.com/jonmason/ntb
Pull NTB update from Jon Mason:
"NTB bug fixes to address Smatch/Coverity errors, link toggling bugs,
and a few corner cases in the driver."
This pull request came in during the merge window, but without any
signage etc. So I'm taking it late, because it wasn't _originally_
late.
* tag 'ntb-bugfixes-3.10' of git://github.com/jonmason/ntb:
NTB: Multiple NTB client fix
ntb_netdev: remove from list on exit
NTB: memcpy lockup workaround
NTB: Correctly handle receive buffers of the minimal size
NTB: reset tx_index on link toggle
NTB: Link toggle memory leak
NTB: Handle 64bit BAR sizes
NTB: fix pointer math issues
ntb: off by one sanity checks
NTB: variable dereferenced before check
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/net/ntb_netdev.c | 2 |
| -rw-r--r-- | drivers/ntb/ntb_hw.c | 10 |
| -rw-r--r-- | drivers/ntb/ntb_transport.c | 175 |

3 files changed, 121 insertions, 66 deletions
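The headline change in drivers/ntb/ntb_transport.c is the scratchpad protocol bump from NTB_TRANSPORT_VERSION 2 to 3: memory-window sizes can now exceed 32 bits, so each size is published as a HIGH and a LOW scratchpad word and reassembled by the peer, as the ntb_transport.c hunks below show. The following is a minimal, self-contained sketch of that split; the spad[] array and helper names are stand-ins for illustration, not the driver's scratchpad API:

```c
/*
 * Standalone sketch of the 64-bit size exchange introduced by the
 * "Handle 64bit BAR sizes" change below: each memory-window size is
 * split into two 32-bit scratchpad words (HIGH, then LOW) and
 * reassembled on the peer.  spad[] stands in for the real hardware
 * scratchpad registers; names here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint32_t spad[4];	/* pretend scratchpad registers */

static void publish_mw_size(int mw, uint64_t size)
{
	spad[mw * 2]     = (uint32_t)(size >> 32);	/* MW*_SZ_HIGH */
	spad[mw * 2 + 1] = (uint32_t)size;		/* MW*_SZ_LOW  */
}

static uint64_t read_mw_size(int mw)
{
	uint64_t val64 = (uint64_t)spad[mw * 2] << 32;	/* HIGH word */

	val64 |= spad[mw * 2 + 1];			/* LOW word */
	return val64;
}

int main(void)
{
	uint64_t bar_sz = 8ULL * 1024 * 1024 * 1024;	/* an 8 GiB BAR */

	publish_mw_size(0, bar_sz);
	assert(read_mw_size(0) == bar_sz);
	printf("MW0 size = %llu\n", (unsigned long long)read_mw_size(0));
	return 0;
}
```

The same split is what forces the loop over NTB_NUM_MW windows and the new out1: cleanup path in ntb_transport_link_work() further down.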
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index ed947dd76fbd..f3cdf64997d6 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -375,6 +375,8 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
 	if (dev == NULL)
 		return;
 
+	list_del(&dev->list);
+
 	ndev = dev->ndev;
 
 	unregister_netdev(ndev);
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index f802e7c92356..2dacd19e1b8a 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -345,7 +345,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
  */
 void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
 {
-	if (mw > NTB_NUM_MW)
+	if (mw >= NTB_NUM_MW)
 		return NULL;
 
 	return ndev->mw[mw].vbase;
@@ -362,7 +362,7 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
  */
 resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
 {
-	if (mw > NTB_NUM_MW)
+	if (mw >= NTB_NUM_MW)
 		return 0;
 
 	return ndev->mw[mw].bar_sz;
@@ -380,7 +380,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
  */
 void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
 {
-	if (mw > NTB_NUM_MW)
+	if (mw >= NTB_NUM_MW)
 		return;
 
 	dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
@@ -1027,8 +1027,8 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		ndev->mw[i].vbase =
 		    ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
 			       ndev->mw[i].bar_sz);
-		dev_info(&pdev->dev, "MW %d size %d\n", i,
-			 (u32) pci_resource_len(pdev, MW_TO_BAR(i)));
+		dev_info(&pdev->dev, "MW %d size %llu\n", i,
+			 pci_resource_len(pdev, MW_TO_BAR(i)));
 		if (!ndev->mw[i].vbase) {
 			dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
 				 MW_TO_BAR(i));
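The three bounds-check hunks above are the "off by one sanity checks" fix: ndev->mw[] holds NTB_NUM_MW entries, so valid indices run from 0 to NTB_NUM_MW - 1, and the old `mw > NTB_NUM_MW` guard still let the out-of-range index mw == NTB_NUM_MW through. A standalone sketch of the corrected guard, using a stand-in array rather than the driver's structures:

```c
/*
 * Illustration of the ">" vs ">=" bounds check fixed above.  An array
 * of N elements has valid indices 0..N-1, so the guard must reject
 * idx == N as well.  NUM_MW and mw_vbase[] are stand-ins, not the
 * driver's data structures.
 */
#include <stdio.h>

#define NUM_MW 2

static void *mw_vbase[NUM_MW];

static void *get_mw_vbase(unsigned int mw)
{
	if (mw >= NUM_MW)	/* was "mw > NUM_MW", which admitted mw == NUM_MW */
		return NULL;

	return mw_vbase[mw];
}

int main(void)
{
	/* mw == NUM_MW used to read one element past the end of the array */
	printf("mw=2 -> %p\n", get_mw_vbase(2));	/* now safely NULL */
	return 0;
}
```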
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index e0bdfd7f9930..f8d7081ee301 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -58,7 +58,7 @@
 #include <linux/ntb.h>
 #include "ntb_hw.h"
 
-#define NTB_TRANSPORT_VERSION	2
+#define NTB_TRANSPORT_VERSION	3
 
 static unsigned int transport_mtu = 0x401E;
 module_param(transport_mtu, uint, 0644);
@@ -173,10 +173,13 @@ struct ntb_payload_header {
 
 enum {
 	VERSION = 0,
-	MW0_SZ,
-	MW1_SZ,
-	NUM_QPS,
 	QP_LINKS,
+	NUM_QPS,
+	NUM_MWS,
+	MW0_SZ_HIGH,
+	MW0_SZ_LOW,
+	MW1_SZ_HIGH,
+	MW1_SZ_LOW,
 	MAX_SPAD,
 };
 
@@ -297,7 +300,7 @@ int ntb_register_client_dev(char *device_name)
 {
 	struct ntb_transport_client_dev *client_dev;
 	struct ntb_transport *nt;
-	int rc;
+	int rc, i = 0;
 
 	if (list_empty(&ntb_transport_list))
 		return -ENODEV;
@@ -315,7 +318,7 @@ int ntb_register_client_dev(char *device_name)
 		dev = &client_dev->dev;
 
 		/* setup and register client devices */
-		dev_set_name(dev, "%s", device_name);
+		dev_set_name(dev, "%s%d", device_name, i);
 		dev->bus = &ntb_bus_type;
 		dev->release = ntb_client_release;
 		dev->parent = &ntb_query_pdev(nt->ndev)->dev;
@@ -327,6 +330,7 @@ int ntb_register_client_dev(char *device_name)
 		}
 
 		list_add_tail(&client_dev->entry, &nt->client_devs);
+		i++;
 	}
 
 	return 0;
@@ -486,12 +490,13 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
 						      (qp_num / NTB_NUM_MW * rx_size);
 	rx_size -= sizeof(struct ntb_rx_info);
 
-	qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info);
-	qp->rx_max_frame = min(transport_mtu, rx_size);
+	qp->rx_buff = qp->remote_rx_info + 1;
+	/* Due to housekeeping, there must be atleast 2 buffs */
+	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
 	qp->rx_max_entry = rx_size / qp->rx_max_frame;
 	qp->rx_index = 0;
 
-	qp->remote_rx_info->entry = qp->rx_max_entry;
+	qp->remote_rx_info->entry = qp->rx_max_entry - 1;
 
 	/* setup the hdr offsets with 0's */
 	for (i = 0; i < qp->rx_max_entry; i++) {
@@ -502,6 +507,19 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
 
 	qp->rx_pkts = 0;
 	qp->tx_pkts = 0;
+	qp->tx_index = 0;
+}
+
+static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
+{
+	struct ntb_transport_mw *mw = &nt->mw[num_mw];
+	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+	if (!mw->virt_addr)
+		return;
+
+	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
+	mw->virt_addr = NULL;
 }
 
 static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
@@ -509,12 +527,20 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
 	struct ntb_transport_mw *mw = &nt->mw[num_mw];
 	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
 
+	/* No need to re-setup */
+	if (mw->size == ALIGN(size, 4096))
+		return 0;
+
+	if (mw->size != 0)
+		ntb_free_mw(nt, num_mw);
+
 	/* Alloc memory for receiving data. Must be 4k aligned */
 	mw->size = ALIGN(size, 4096);
 
 	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
 					   GFP_KERNEL);
 	if (!mw->virt_addr) {
+		mw->size = 0;
 		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
 			(int) mw->size);
 		return -ENOMEM;
@@ -604,25 +630,31 @@ static void ntb_transport_link_work(struct work_struct *work)
 	u32 val;
 	int rc, i;
 
-	/* send the local info */
-	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
-	if (rc) {
-		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-			0, VERSION);
-		goto out;
-	}
+	/* send the local info, in the opposite order of the way we read it */
+	for (i = 0; i < NTB_NUM_MW; i++) {
+		rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
+					   ntb_get_mw_size(ndev, i) >> 32);
+		if (rc) {
+			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
+				(u32)(ntb_get_mw_size(ndev, i) >> 32),
+				MW0_SZ_HIGH + (i * 2));
+			goto out;
+		}
 
-	rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
-	if (rc) {
-		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-			(u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
-		goto out;
+		rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
+					   (u32) ntb_get_mw_size(ndev, i));
+		if (rc) {
+			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
+				(u32) ntb_get_mw_size(ndev, i),
+				MW0_SZ_LOW + (i * 2));
+			goto out;
+		}
 	}
 
-	rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
+	rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW);
 	if (rc) {
 		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-			(u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
+			NTB_NUM_MW, NUM_MWS);
 		goto out;
 	}
 
@@ -633,16 +665,10 @@ static void ntb_transport_link_work(struct work_struct *work)
 		goto out;
 	}
 
-	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
-	if (rc) {
-		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
-		goto out;
-	}
-
-	rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
+	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
 	if (rc) {
 		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-			val, QP_LINKS);
+			NTB_TRANSPORT_VERSION, VERSION);
 		goto out;
 	}
 
@@ -667,33 +693,43 @@ static void ntb_transport_link_work(struct work_struct *work)
 		goto out;
 	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
 
-	rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
+	rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
 	if (rc) {
-		dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
+		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
 		goto out;
 	}
 
-	if (!val)
+	if (val != NTB_NUM_MW)
 		goto out;
-	dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
+	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
 
-	rc = ntb_set_mw(nt, 0, val);
-	if (rc)
-		goto out;
+	for (i = 0; i < NTB_NUM_MW; i++) {
+		u64 val64;
 
-	rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
-	if (rc) {
-		dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
-		goto out;
-	}
+		rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
+		if (rc) {
+			dev_err(&pdev->dev, "Error reading remote spad %d\n",
+				MW0_SZ_HIGH + (i * 2));
+			goto out1;
+		}
 
-	if (!val)
-		goto out;
-	dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
+		val64 = (u64) val << 32;
 
-	rc = ntb_set_mw(nt, 1, val);
-	if (rc)
-		goto out;
+		rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
+		if (rc) {
+			dev_err(&pdev->dev, "Error reading remote spad %d\n",
+				MW0_SZ_LOW + (i * 2));
+			goto out1;
+		}
+
+		val64 |= val;
+
+		dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
+
+		rc = ntb_set_mw(nt, i, val64);
+		if (rc)
+			goto out1;
+	}
 
 	nt->transport_link = NTB_LINK_UP;
 
@@ -708,6 +744,9 @@ static void ntb_transport_link_work(struct work_struct *work)
 
 	return;
 
+out1:
+	for (i = 0; i < NTB_NUM_MW; i++)
+		ntb_free_mw(nt, i);
 out:
 	if (ntb_hw_link_status(ndev))
 		schedule_delayed_work(&nt->link_work,
@@ -780,10 +819,10 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
 					     (qp_num / NTB_NUM_MW * tx_size);
 	tx_size -= sizeof(struct ntb_rx_info);
 
-	qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info);
-	qp->tx_max_frame = min(transport_mtu, tx_size);
+	qp->tx_mw = qp->rx_info + 1;
+	/* Due to housekeeping, there must be atleast 2 buffs */
+	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
 	qp->tx_max_entry = tx_size / qp->tx_max_frame;
-	qp->tx_index = 0;
 
 	if (nt->debugfs_dir) {
 		char debugfs_name[4];
@@ -897,10 +936,7 @@ void ntb_transport_free(void *transport)
 	pdev = ntb_query_pdev(nt->ndev);
 
 	for (i = 0; i < NTB_NUM_MW; i++)
-		if (nt->mw[i].virt_addr)
-			dma_free_coherent(&pdev->dev, nt->mw[i].size,
-					  nt->mw[i].virt_addr,
-					  nt->mw[i].dma_addr);
+		ntb_free_mw(nt, i);
 
 	kfree(nt->qps);
 	ntb_unregister_transport(nt->ndev);
@@ -999,11 +1035,16 @@ out:
 static void ntb_transport_rx(unsigned long data)
 {
 	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
-	int rc;
+	int rc, i;
 
-	do {
+	/* Limit the number of packets processed in a single interrupt to
+	 * provide fairness to others
+	 */
+	for (i = 0; i < qp->rx_max_entry; i++) {
 		rc = ntb_process_rxc(qp);
-	} while (!rc);
+		if (rc)
+			break;
+	}
 }
 
 static void ntb_transport_rxc_db(void *data, int db_num)
@@ -1210,12 +1251,14 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
-	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+	struct pci_dev *pdev;
 	struct ntb_queue_entry *entry;
 
 	if (!qp)
 		return;
 
+	pdev = ntb_query_pdev(qp->ndev);
+
 	cancel_delayed_work_sync(&qp->link_work);
 
 	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
@@ -1371,12 +1414,13 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
  */
 void ntb_transport_link_down(struct ntb_transport_qp *qp)
 {
-	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+	struct pci_dev *pdev;
 	int rc, val;
 
 	if (!qp)
 		return;
 
+	pdev = ntb_query_pdev(qp->ndev);
 	qp->client_ready = NTB_LINK_DOWN;
 
 	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
@@ -1408,6 +1452,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_down);
  */
 bool ntb_transport_link_query(struct ntb_transport_qp *qp)
 {
+	if (!qp)
+		return false;
+
 	return qp->qp_link == NTB_LINK_UP;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_link_query);
@@ -1422,6 +1469,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_query);
  */
 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
 {
+	if (!qp)
+		return 0;
+
 	return qp->qp_num;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
@@ -1436,6 +1486,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
  */
 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
 {
+	if (!qp)
+		return 0;
+
 	return qp->tx_max_frame - sizeof(struct ntb_payload_header);
 }
 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
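One more pattern worth noting from the ntb_transport.c hunks above: ntb_transport_rx() drops its unbounded do/while in favor of a loop capped at qp->rx_max_entry iterations, so a busy queue cannot spin in the receive path indefinitely. A small self-contained sketch of that bounded-work idea, with placeholder types instead of the driver's queue pair:

```c
/*
 * Sketch of the bounded-work pattern adopted in ntb_transport_rx():
 * process at most a fixed budget of entries per invocation and stop
 * early when processing fails or the queue runs dry.  The queue type
 * and process_one() are placeholders, not the driver's structures.
 */
#include <stdio.h>

struct rx_queue {
	int pending;		/* entries waiting to be consumed */
	int max_entry;		/* per-call budget, like qp->rx_max_entry */
};

static int process_one(struct rx_queue *q)
{
	if (q->pending == 0)
		return -1;	/* nothing left; mirrors ntb_process_rxc() failing */

	q->pending--;
	return 0;
}

static void rx_poll(struct rx_queue *q)
{
	int i;

	/* Limit the work done in a single call to provide fairness */
	for (i = 0; i < q->max_entry; i++)
		if (process_one(q))
			break;
}

int main(void)
{
	struct rx_queue q = { .pending = 10, .max_entry = 4 };

	rx_poll(&q);
	printf("pending after one poll: %d\n", q.pending);	/* prints 6 */
	return 0;
}
```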
