diff options
| author | David S. Miller <davem@davemloft.net> | 2017-05-03 11:33:06 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2017-05-03 11:33:06 -0400 |
| commit | f411af6822182f84834c4881b825dd40534e7fe8 (patch) | |
| tree | c7bd90ce3fc9b3f31b3d6fa208f9efd3d715360e | |
| parent | 4d89ac2dd559b343dad30a294fb11e0237d697d8 (diff) | |
| parent | 7c3e7de3f3a94fa34731f302e2f6606c9adc0f38 (diff) | |
Merge branch 'ibmvnic-Updated-reset-handler-andcode-fixes'
Nathan Fontenot says:
====================
ibmvnic: Updated reset handler and code fixes
This set of patches contains multiple code fixes and a new reset handler
for the ibmvnic driver. In order to implement the new reset handler
for the ibmvnic driver resource initialization needed to be moved to
its own routine, a state variable is introduced to replace the
various is_* flags in the driver, and a new routine to handle the
assorted reasons the driver can be reset.
v4 updates:
Patch 3/11: Corrected trailing whitespace
Patch 7/11: Corrected trailing whitespace
v3 updates:
Patch 10/11: Correct patch subject line to be a description of the patch.
v2 updates:
Patch 11/11: Use __netif_subqueue_stopped() instead of
netif_subqueue_stopped() to avoid possible use of an uninitialized
skb variable.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
| -rw-r--r-- | drivers/net/ethernet/ibm/ibmvnic.c | 561 | ||||
| -rw-r--r-- | drivers/net/ethernet/ibm/ibmvnic.h | 31 |
2 files changed, 388 insertions, 204 deletions
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 4fcd2f0378ba..4f2d329dba99 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -194,7 +194,8 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, | |||
| 194 | if (!ltb->buff) | 194 | if (!ltb->buff) |
| 195 | return; | 195 | return; |
| 196 | 196 | ||
| 197 | if (!adapter->failover) | 197 | if (adapter->reset_reason != VNIC_RESET_FAILOVER && |
| 198 | adapter->reset_reason != VNIC_RESET_MOBILITY) | ||
| 198 | send_request_unmap(adapter, ltb->map_id); | 199 | send_request_unmap(adapter, ltb->map_id); |
| 199 | dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); | 200 | dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); |
| 200 | } | 201 | } |
| @@ -292,9 +293,6 @@ static void replenish_pools(struct ibmvnic_adapter *adapter) | |||
| 292 | { | 293 | { |
| 293 | int i; | 294 | int i; |
| 294 | 295 | ||
| 295 | if (adapter->migrated) | ||
| 296 | return; | ||
| 297 | |||
| 298 | adapter->replenish_task_cycles++; | 296 | adapter->replenish_task_cycles++; |
| 299 | for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); | 297 | for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); |
| 300 | i++) { | 298 | i++) { |
| @@ -350,7 +348,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) | |||
| 350 | free_long_term_buff(adapter, &rx_pool->long_term_buff); | 348 | free_long_term_buff(adapter, &rx_pool->long_term_buff); |
| 351 | 349 | ||
| 352 | if (!rx_pool->rx_buff) | 350 | if (!rx_pool->rx_buff) |
| 353 | continue; | 351 | continue; |
| 354 | 352 | ||
| 355 | for (j = 0; j < rx_pool->size; j++) { | 353 | for (j = 0; j < rx_pool->size; j++) { |
| 356 | if (rx_pool->rx_buff[j].skb) { | 354 | if (rx_pool->rx_buff[j].skb) { |
| @@ -554,11 +552,20 @@ static int ibmvnic_login(struct net_device *netdev) | |||
| 554 | 552 | ||
| 555 | static void release_resources(struct ibmvnic_adapter *adapter) | 553 | static void release_resources(struct ibmvnic_adapter *adapter) |
| 556 | { | 554 | { |
| 555 | int i; | ||
| 556 | |||
| 557 | release_tx_pools(adapter); | 557 | release_tx_pools(adapter); |
| 558 | release_rx_pools(adapter); | 558 | release_rx_pools(adapter); |
| 559 | 559 | ||
| 560 | release_stats_token(adapter); | 560 | release_stats_token(adapter); |
| 561 | release_error_buffers(adapter); | 561 | release_error_buffers(adapter); |
| 562 | |||
| 563 | if (adapter->napi) { | ||
| 564 | for (i = 0; i < adapter->req_rx_queues; i++) { | ||
| 565 | if (&adapter->napi[i]) | ||
| 566 | netif_napi_del(&adapter->napi[i]); | ||
| 567 | } | ||
| 568 | } | ||
| 562 | } | 569 | } |
| 563 | 570 | ||
| 564 | static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) | 571 | static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) |
| @@ -569,11 +576,6 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) | |||
| 569 | bool resend; | 576 | bool resend; |
| 570 | int rc; | 577 | int rc; |
| 571 | 578 | ||
| 572 | if (adapter->logical_link_state == link_state) { | ||
| 573 | netdev_dbg(netdev, "Link state already %d\n", link_state); | ||
| 574 | return 0; | ||
| 575 | } | ||
| 576 | |||
| 577 | netdev_err(netdev, "setting link state %d\n", link_state); | 579 | netdev_err(netdev, "setting link state %d\n", link_state); |
| 578 | memset(&crq, 0, sizeof(crq)); | 580 | memset(&crq, 0, sizeof(crq)); |
| 579 | crq.logical_link_state.first = IBMVNIC_CRQ_CMD; | 581 | crq.logical_link_state.first = IBMVNIC_CRQ_CMD; |
| @@ -624,22 +626,10 @@ static int set_real_num_queues(struct net_device *netdev) | |||
| 624 | return rc; | 626 | return rc; |
| 625 | } | 627 | } |
| 626 | 628 | ||
| 627 | static int ibmvnic_open(struct net_device *netdev) | 629 | static int init_resources(struct ibmvnic_adapter *adapter) |
| 628 | { | 630 | { |
| 629 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | 631 | struct net_device *netdev = adapter->netdev; |
| 630 | struct device *dev = &adapter->vdev->dev; | 632 | int i, rc; |
| 631 | int rc = 0; | ||
| 632 | int i; | ||
| 633 | |||
| 634 | if (adapter->is_closed) { | ||
| 635 | rc = ibmvnic_init(adapter); | ||
| 636 | if (rc) | ||
| 637 | return rc; | ||
| 638 | } | ||
| 639 | |||
| 640 | rc = ibmvnic_login(netdev); | ||
| 641 | if (rc) | ||
| 642 | return rc; | ||
| 643 | 633 | ||
| 644 | rc = set_real_num_queues(netdev); | 634 | rc = set_real_num_queues(netdev); |
| 645 | if (rc) | 635 | if (rc) |
| @@ -647,7 +637,7 @@ static int ibmvnic_open(struct net_device *netdev) | |||
| 647 | 637 | ||
| 648 | rc = init_sub_crq_irqs(adapter); | 638 | rc = init_sub_crq_irqs(adapter); |
| 649 | if (rc) { | 639 | if (rc) { |
| 650 | dev_err(dev, "failed to initialize sub crq irqs\n"); | 640 | netdev_err(netdev, "failed to initialize sub crq irqs\n"); |
| 651 | return -1; | 641 | return -1; |
| 652 | } | 642 | } |
| 653 | 643 | ||
| @@ -659,90 +649,184 @@ static int ibmvnic_open(struct net_device *netdev) | |||
| 659 | adapter->napi = kcalloc(adapter->req_rx_queues, | 649 | adapter->napi = kcalloc(adapter->req_rx_queues, |
| 660 | sizeof(struct napi_struct), GFP_KERNEL); | 650 | sizeof(struct napi_struct), GFP_KERNEL); |
| 661 | if (!adapter->napi) | 651 | if (!adapter->napi) |
| 662 | goto ibmvnic_open_fail; | 652 | return -ENOMEM; |
| 653 | |||
| 663 | for (i = 0; i < adapter->req_rx_queues; i++) { | 654 | for (i = 0; i < adapter->req_rx_queues; i++) { |
| 664 | netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll, | 655 | netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll, |
| 665 | NAPI_POLL_WEIGHT); | 656 | NAPI_POLL_WEIGHT); |
| 666 | napi_enable(&adapter->napi[i]); | ||
| 667 | } | 657 | } |
| 668 | 658 | ||
| 669 | send_map_query(adapter); | 659 | send_map_query(adapter); |
| 670 | 660 | ||
| 671 | rc = init_rx_pools(netdev); | 661 | rc = init_rx_pools(netdev); |
| 672 | if (rc) | 662 | if (rc) |
| 673 | goto ibmvnic_open_fail; | 663 | return rc; |
| 674 | 664 | ||
| 675 | rc = init_tx_pools(netdev); | 665 | rc = init_tx_pools(netdev); |
| 676 | if (rc) | 666 | return rc; |
| 677 | goto ibmvnic_open_fail; | 667 | } |
| 668 | |||
| 669 | static int __ibmvnic_open(struct net_device *netdev) | ||
| 670 | { | ||
| 671 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | ||
| 672 | enum vnic_state prev_state = adapter->state; | ||
| 673 | int i, rc; | ||
| 678 | 674 | ||
| 675 | adapter->state = VNIC_OPENING; | ||
| 679 | replenish_pools(adapter); | 676 | replenish_pools(adapter); |
| 680 | 677 | ||
| 678 | for (i = 0; i < adapter->req_rx_queues; i++) | ||
| 679 | napi_enable(&adapter->napi[i]); | ||
| 680 | |||
| 681 | /* We're ready to receive frames, enable the sub-crq interrupts and | 681 | /* We're ready to receive frames, enable the sub-crq interrupts and |
| 682 | * set the logical link state to up | 682 | * set the logical link state to up |
| 683 | */ | 683 | */ |
| 684 | for (i = 0; i < adapter->req_rx_queues; i++) | 684 | for (i = 0; i < adapter->req_rx_queues; i++) { |
| 685 | enable_scrq_irq(adapter, adapter->rx_scrq[i]); | 685 | if (prev_state == VNIC_CLOSED) |
| 686 | enable_irq(adapter->rx_scrq[i]->irq); | ||
| 687 | else | ||
| 688 | enable_scrq_irq(adapter, adapter->rx_scrq[i]); | ||
| 689 | } | ||
| 686 | 690 | ||
| 687 | for (i = 0; i < adapter->req_tx_queues; i++) | 691 | for (i = 0; i < adapter->req_tx_queues; i++) { |
| 688 | enable_scrq_irq(adapter, adapter->tx_scrq[i]); | 692 | if (prev_state == VNIC_CLOSED) |
| 693 | enable_irq(adapter->tx_scrq[i]->irq); | ||
| 694 | else | ||
| 695 | enable_scrq_irq(adapter, adapter->tx_scrq[i]); | ||
| 696 | } | ||
| 689 | 697 | ||
| 690 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); | 698 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); |
| 691 | if (rc) | 699 | if (rc) { |
| 692 | goto ibmvnic_open_fail; | 700 | for (i = 0; i < adapter->req_rx_queues; i++) |
| 701 | napi_disable(&adapter->napi[i]); | ||
| 702 | release_resources(adapter); | ||
| 703 | return rc; | ||
| 704 | } | ||
| 693 | 705 | ||
| 694 | netif_tx_start_all_queues(netdev); | 706 | netif_tx_start_all_queues(netdev); |
| 695 | adapter->is_closed = false; | ||
| 696 | 707 | ||
| 697 | return 0; | 708 | if (prev_state == VNIC_CLOSED) { |
| 709 | for (i = 0; i < adapter->req_rx_queues; i++) | ||
| 710 | napi_schedule(&adapter->napi[i]); | ||
| 711 | } | ||
| 698 | 712 | ||
| 699 | ibmvnic_open_fail: | 713 | adapter->state = VNIC_OPEN; |
| 700 | for (i = 0; i < adapter->req_rx_queues; i++) | 714 | return rc; |
| 701 | napi_disable(&adapter->napi[i]); | ||
| 702 | release_resources(adapter); | ||
| 703 | return -ENOMEM; | ||
| 704 | } | 715 | } |
| 705 | 716 | ||
| 706 | static void disable_sub_crqs(struct ibmvnic_adapter *adapter) | 717 | static int ibmvnic_open(struct net_device *netdev) |
| 707 | { | 718 | { |
| 708 | int i; | 719 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 720 | int rc; | ||
| 709 | 721 | ||
| 710 | if (adapter->tx_scrq) { | 722 | mutex_lock(&adapter->reset_lock); |
| 711 | for (i = 0; i < adapter->req_tx_queues; i++) | 723 | |
| 712 | if (adapter->tx_scrq[i]) | 724 | if (adapter->state != VNIC_CLOSED) { |
| 713 | disable_irq(adapter->tx_scrq[i]->irq); | 725 | rc = ibmvnic_login(netdev); |
| 726 | if (rc) { | ||
| 727 | mutex_unlock(&adapter->reset_lock); | ||
| 728 | return rc; | ||
| 729 | } | ||
| 730 | |||
| 731 | rc = init_resources(adapter); | ||
| 732 | if (rc) { | ||
| 733 | netdev_err(netdev, "failed to initialize resources\n"); | ||
| 734 | release_resources(adapter); | ||
| 735 | mutex_unlock(&adapter->reset_lock); | ||
| 736 | return rc; | ||
| 737 | } | ||
| 714 | } | 738 | } |
| 715 | 739 | ||
| 716 | if (adapter->rx_scrq) { | 740 | rc = __ibmvnic_open(netdev); |
| 717 | for (i = 0; i < adapter->req_rx_queues; i++) | 741 | mutex_unlock(&adapter->reset_lock); |
| 718 | if (adapter->rx_scrq[i]) | 742 | |
| 719 | disable_irq(adapter->rx_scrq[i]->irq); | 743 | return rc; |
| 744 | } | ||
| 745 | |||
| 746 | static void clean_tx_pools(struct ibmvnic_adapter *adapter) | ||
| 747 | { | ||
| 748 | struct ibmvnic_tx_pool *tx_pool; | ||
| 749 | u64 tx_entries; | ||
| 750 | int tx_scrqs; | ||
| 751 | int i, j; | ||
| 752 | |||
| 753 | if (!adapter->tx_pool) | ||
| 754 | return; | ||
| 755 | |||
| 756 | tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); | ||
| 757 | tx_entries = adapter->req_tx_entries_per_subcrq; | ||
| 758 | |||
| 759 | /* Free any remaining skbs in the tx buffer pools */ | ||
| 760 | for (i = 0; i < tx_scrqs; i++) { | ||
| 761 | tx_pool = &adapter->tx_pool[i]; | ||
| 762 | if (!tx_pool) | ||
| 763 | continue; | ||
| 764 | |||
| 765 | for (j = 0; j < tx_entries; j++) { | ||
| 766 | if (tx_pool->tx_buff[j].skb) { | ||
| 767 | dev_kfree_skb_any(tx_pool->tx_buff[j].skb); | ||
| 768 | tx_pool->tx_buff[j].skb = NULL; | ||
| 769 | } | ||
| 770 | } | ||
| 720 | } | 771 | } |
| 721 | } | 772 | } |
| 722 | 773 | ||
| 723 | static int ibmvnic_close(struct net_device *netdev) | 774 | static int __ibmvnic_close(struct net_device *netdev) |
| 724 | { | 775 | { |
| 725 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | 776 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 726 | int rc = 0; | 777 | int rc = 0; |
| 727 | int i; | 778 | int i; |
| 728 | 779 | ||
| 729 | adapter->closing = true; | 780 | adapter->state = VNIC_CLOSING; |
| 730 | disable_sub_crqs(adapter); | 781 | netif_tx_stop_all_queues(netdev); |
| 731 | 782 | ||
| 732 | if (adapter->napi) { | 783 | if (adapter->napi) { |
| 733 | for (i = 0; i < adapter->req_rx_queues; i++) | 784 | for (i = 0; i < adapter->req_rx_queues; i++) |
| 734 | napi_disable(&adapter->napi[i]); | 785 | napi_disable(&adapter->napi[i]); |
| 735 | } | 786 | } |
| 736 | 787 | ||
| 737 | if (!adapter->failover) | 788 | clean_tx_pools(adapter); |
| 738 | netif_tx_stop_all_queues(netdev); | 789 | |
| 790 | if (adapter->tx_scrq) { | ||
| 791 | for (i = 0; i < adapter->req_tx_queues; i++) | ||
| 792 | if (adapter->tx_scrq[i]->irq) | ||
| 793 | disable_irq(adapter->tx_scrq[i]->irq); | ||
| 794 | } | ||
| 739 | 795 | ||
| 740 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); | 796 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); |
| 797 | if (rc) | ||
| 798 | return rc; | ||
| 741 | 799 | ||
| 742 | release_resources(adapter); | 800 | if (adapter->rx_scrq) { |
| 801 | for (i = 0; i < adapter->req_rx_queues; i++) { | ||
| 802 | int retries = 10; | ||
| 803 | |||
| 804 | while (pending_scrq(adapter, adapter->rx_scrq[i])) { | ||
| 805 | retries--; | ||
| 806 | mdelay(100); | ||
| 807 | |||
| 808 | if (retries == 0) | ||
| 809 | break; | ||
| 810 | } | ||
| 811 | |||
| 812 | if (adapter->rx_scrq[i]->irq) | ||
| 813 | disable_irq(adapter->rx_scrq[i]->irq); | ||
| 814 | } | ||
| 815 | } | ||
| 816 | |||
| 817 | adapter->state = VNIC_CLOSED; | ||
| 818 | return rc; | ||
| 819 | } | ||
| 820 | |||
| 821 | static int ibmvnic_close(struct net_device *netdev) | ||
| 822 | { | ||
| 823 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | ||
| 824 | int rc; | ||
| 825 | |||
| 826 | mutex_lock(&adapter->reset_lock); | ||
| 827 | rc = __ibmvnic_close(netdev); | ||
| 828 | mutex_unlock(&adapter->reset_lock); | ||
| 743 | 829 | ||
| 744 | adapter->is_closed = true; | ||
| 745 | adapter->closing = false; | ||
| 746 | return rc; | 830 | return rc; |
| 747 | } | 831 | } |
| 748 | 832 | ||
| @@ -901,13 +985,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
| 901 | int index = 0; | 985 | int index = 0; |
| 902 | int ret = 0; | 986 | int ret = 0; |
| 903 | 987 | ||
| 904 | tx_pool = &adapter->tx_pool[queue_num]; | 988 | if (adapter->resetting) { |
| 905 | tx_scrq = adapter->tx_scrq[queue_num]; | ||
| 906 | txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); | ||
| 907 | handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + | ||
| 908 | be32_to_cpu(adapter->login_rsp_buf-> | ||
| 909 | off_txsubm_subcrqs)); | ||
| 910 | if (adapter->migrated) { | ||
| 911 | if (!netif_subqueue_stopped(netdev, skb)) | 989 | if (!netif_subqueue_stopped(netdev, skb)) |
| 912 | netif_stop_subqueue(netdev, queue_num); | 990 | netif_stop_subqueue(netdev, queue_num); |
| 913 | dev_kfree_skb_any(skb); | 991 | dev_kfree_skb_any(skb); |
| @@ -918,6 +996,12 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
| 918 | goto out; | 996 | goto out; |
| 919 | } | 997 | } |
| 920 | 998 | ||
| 999 | tx_pool = &adapter->tx_pool[queue_num]; | ||
| 1000 | tx_scrq = adapter->tx_scrq[queue_num]; | ||
| 1001 | txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); | ||
| 1002 | handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + | ||
| 1003 | be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); | ||
| 1004 | |||
| 921 | index = tx_pool->free_map[tx_pool->consumer_index]; | 1005 | index = tx_pool->free_map[tx_pool->consumer_index]; |
| 922 | offset = index * adapter->req_mtu; | 1006 | offset = index * adapter->req_mtu; |
| 923 | dst = tx_pool->long_term_buff.buff + offset; | 1007 | dst = tx_pool->long_term_buff.buff + offset; |
| @@ -1099,18 +1183,185 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p) | |||
| 1099 | return 0; | 1183 | return 0; |
| 1100 | } | 1184 | } |
| 1101 | 1185 | ||
| 1102 | static void ibmvnic_tx_timeout(struct net_device *dev) | 1186 | /** |
| 1187 | * do_reset returns zero if we are able to keep processing reset events, or | ||
| 1188 | * non-zero if we hit a fatal error and must halt. | ||
| 1189 | */ | ||
| 1190 | static int do_reset(struct ibmvnic_adapter *adapter, | ||
| 1191 | struct ibmvnic_rwi *rwi, u32 reset_state) | ||
| 1103 | { | 1192 | { |
| 1104 | struct ibmvnic_adapter *adapter = netdev_priv(dev); | 1193 | struct net_device *netdev = adapter->netdev; |
| 1105 | int rc; | 1194 | int i, rc; |
| 1195 | |||
| 1196 | netif_carrier_off(netdev); | ||
| 1197 | adapter->reset_reason = rwi->reset_reason; | ||
| 1198 | |||
| 1199 | if (rwi->reset_reason == VNIC_RESET_MOBILITY) { | ||
| 1200 | rc = ibmvnic_reenable_crq_queue(adapter); | ||
| 1201 | if (rc) | ||
| 1202 | return 0; | ||
| 1203 | } | ||
| 1106 | 1204 | ||
| 1107 | /* Adapter timed out, resetting it */ | 1205 | rc = __ibmvnic_close(netdev); |
| 1206 | if (rc) | ||
| 1207 | return rc; | ||
| 1208 | |||
| 1209 | /* remove the closed state so when we call open it appears | ||
| 1210 | * we are coming from the probed state. | ||
| 1211 | */ | ||
| 1212 | adapter->state = VNIC_PROBED; | ||
| 1213 | |||
| 1214 | release_resources(adapter); | ||
| 1108 | release_sub_crqs(adapter); | 1215 | release_sub_crqs(adapter); |
| 1109 | rc = ibmvnic_reset_crq(adapter); | 1216 | release_crq_queue(adapter); |
| 1217 | |||
| 1218 | rc = ibmvnic_init(adapter); | ||
| 1110 | if (rc) | 1219 | if (rc) |
| 1111 | dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n"); | 1220 | return 0; |
| 1112 | else | 1221 | |
| 1113 | ibmvnic_send_crq_init(adapter); | 1222 | /* If the adapter was in PROBE state prior to the reset, exit here. */ |
| 1223 | if (reset_state == VNIC_PROBED) | ||
| 1224 | return 0; | ||
| 1225 | |||
| 1226 | rc = ibmvnic_login(netdev); | ||
| 1227 | if (rc) { | ||
| 1228 | adapter->state = VNIC_PROBED; | ||
| 1229 | return 0; | ||
| 1230 | } | ||
| 1231 | |||
| 1232 | rtnl_lock(); | ||
| 1233 | rc = init_resources(adapter); | ||
| 1234 | rtnl_unlock(); | ||
| 1235 | if (rc) | ||
| 1236 | return rc; | ||
| 1237 | |||
| 1238 | if (reset_state == VNIC_CLOSED) | ||
| 1239 | return 0; | ||
| 1240 | |||
| 1241 | rc = __ibmvnic_open(netdev); | ||
| 1242 | if (rc) { | ||
| 1243 | if (list_empty(&adapter->rwi_list)) | ||
| 1244 | adapter->state = VNIC_CLOSED; | ||
| 1245 | else | ||
| 1246 | adapter->state = reset_state; | ||
| 1247 | |||
| 1248 | return 0; | ||
| 1249 | } | ||
| 1250 | |||
| 1251 | netif_carrier_on(netdev); | ||
| 1252 | |||
| 1253 | /* kick napi */ | ||
| 1254 | for (i = 0; i < adapter->req_rx_queues; i++) | ||
| 1255 | napi_schedule(&adapter->napi[i]); | ||
| 1256 | |||
| 1257 | return 0; | ||
| 1258 | } | ||
| 1259 | |||
| 1260 | static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) | ||
| 1261 | { | ||
| 1262 | struct ibmvnic_rwi *rwi; | ||
| 1263 | |||
| 1264 | mutex_lock(&adapter->rwi_lock); | ||
| 1265 | |||
| 1266 | if (!list_empty(&adapter->rwi_list)) { | ||
| 1267 | rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, | ||
| 1268 | list); | ||
| 1269 | list_del(&rwi->list); | ||
| 1270 | } else { | ||
| 1271 | rwi = NULL; | ||
| 1272 | } | ||
| 1273 | |||
| 1274 | mutex_unlock(&adapter->rwi_lock); | ||
| 1275 | return rwi; | ||
| 1276 | } | ||
| 1277 | |||
| 1278 | static void free_all_rwi(struct ibmvnic_adapter *adapter) | ||
| 1279 | { | ||
| 1280 | struct ibmvnic_rwi *rwi; | ||
| 1281 | |||
| 1282 | rwi = get_next_rwi(adapter); | ||
| 1283 | while (rwi) { | ||
| 1284 | kfree(rwi); | ||
| 1285 | rwi = get_next_rwi(adapter); | ||
| 1286 | } | ||
| 1287 | } | ||
| 1288 | |||
| 1289 | static void __ibmvnic_reset(struct work_struct *work) | ||
| 1290 | { | ||
| 1291 | struct ibmvnic_rwi *rwi; | ||
| 1292 | struct ibmvnic_adapter *adapter; | ||
| 1293 | struct net_device *netdev; | ||
| 1294 | u32 reset_state; | ||
| 1295 | int rc; | ||
| 1296 | |||
| 1297 | adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); | ||
| 1298 | netdev = adapter->netdev; | ||
| 1299 | |||
| 1300 | mutex_lock(&adapter->reset_lock); | ||
| 1301 | adapter->resetting = true; | ||
| 1302 | reset_state = adapter->state; | ||
| 1303 | |||
| 1304 | rwi = get_next_rwi(adapter); | ||
| 1305 | while (rwi) { | ||
| 1306 | rc = do_reset(adapter, rwi, reset_state); | ||
| 1307 | kfree(rwi); | ||
| 1308 | if (rc) | ||
| 1309 | break; | ||
| 1310 | |||
| 1311 | rwi = get_next_rwi(adapter); | ||
| 1312 | } | ||
| 1313 | |||
| 1314 | if (rc) { | ||
| 1315 | free_all_rwi(adapter); | ||
| 1316 | return; | ||
| 1317 | } | ||
| 1318 | |||
| 1319 | adapter->resetting = false; | ||
| 1320 | mutex_unlock(&adapter->reset_lock); | ||
| 1321 | } | ||
| 1322 | |||
| 1323 | static void ibmvnic_reset(struct ibmvnic_adapter *adapter, | ||
| 1324 | enum ibmvnic_reset_reason reason) | ||
| 1325 | { | ||
| 1326 | struct ibmvnic_rwi *rwi, *tmp; | ||
| 1327 | struct net_device *netdev = adapter->netdev; | ||
| 1328 | struct list_head *entry; | ||
| 1329 | |||
| 1330 | if (adapter->state == VNIC_REMOVING || | ||
| 1331 | adapter->state == VNIC_REMOVED) { | ||
| 1332 | netdev_dbg(netdev, "Adapter removing, skipping reset\n"); | ||
| 1333 | return; | ||
| 1334 | } | ||
| 1335 | |||
| 1336 | mutex_lock(&adapter->rwi_lock); | ||
| 1337 | |||
| 1338 | list_for_each(entry, &adapter->rwi_list) { | ||
| 1339 | tmp = list_entry(entry, struct ibmvnic_rwi, list); | ||
| 1340 | if (tmp->reset_reason == reason) { | ||
| 1341 | netdev_err(netdev, "Matching reset found, skipping\n"); | ||
| 1342 | mutex_unlock(&adapter->rwi_lock); | ||
| 1343 | return; | ||
| 1344 | } | ||
| 1345 | } | ||
| 1346 | |||
| 1347 | rwi = kzalloc(sizeof(*rwi), GFP_KERNEL); | ||
| 1348 | if (!rwi) { | ||
| 1349 | mutex_unlock(&adapter->rwi_lock); | ||
| 1350 | ibmvnic_close(netdev); | ||
| 1351 | return; | ||
| 1352 | } | ||
| 1353 | |||
| 1354 | rwi->reset_reason = reason; | ||
| 1355 | list_add_tail(&rwi->list, &adapter->rwi_list); | ||
| 1356 | mutex_unlock(&adapter->rwi_lock); | ||
| 1357 | schedule_work(&adapter->ibmvnic_reset); | ||
| 1358 | } | ||
| 1359 | |||
| 1360 | static void ibmvnic_tx_timeout(struct net_device *dev) | ||
| 1361 | { | ||
| 1362 | struct ibmvnic_adapter *adapter = netdev_priv(dev); | ||
| 1363 | |||
| 1364 | ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT); | ||
| 1114 | } | 1365 | } |
| 1115 | 1366 | ||
| 1116 | static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, | 1367 | static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, |
| @@ -1153,7 +1404,7 @@ restart_poll: | |||
| 1153 | /* free the entry */ | 1404 | /* free the entry */ |
| 1154 | next->rx_comp.first = 0; | 1405 | next->rx_comp.first = 0; |
| 1155 | remove_buff_from_pool(adapter, rx_buff); | 1406 | remove_buff_from_pool(adapter, rx_buff); |
| 1156 | break; | 1407 | continue; |
| 1157 | } | 1408 | } |
| 1158 | 1409 | ||
| 1159 | length = be32_to_cpu(next->rx_comp.len); | 1410 | length = be32_to_cpu(next->rx_comp.len); |
| @@ -1177,6 +1428,7 @@ restart_poll: | |||
| 1177 | 1428 | ||
| 1178 | skb_put(skb, length); | 1429 | skb_put(skb, length); |
| 1179 | skb->protocol = eth_type_trans(skb, netdev); | 1430 | skb->protocol = eth_type_trans(skb, netdev); |
| 1431 | skb_record_rx_queue(skb, scrq_num); | ||
| 1180 | 1432 | ||
| 1181 | if (flags & IBMVNIC_IP_CHKSUM_GOOD && | 1433 | if (flags & IBMVNIC_IP_CHKSUM_GOOD && |
| 1182 | flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { | 1434 | flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) { |
| @@ -1557,19 +1809,8 @@ restart_loop: | |||
| 1557 | } | 1809 | } |
| 1558 | 1810 | ||
| 1559 | if (txbuff->last_frag) { | 1811 | if (txbuff->last_frag) { |
| 1560 | if (atomic_sub_return(next->tx_comp.num_comps, | ||
| 1561 | &scrq->used) <= | ||
| 1562 | (adapter->req_tx_entries_per_subcrq / 2) && | ||
| 1563 | netif_subqueue_stopped(adapter->netdev, | ||
| 1564 | txbuff->skb)) { | ||
| 1565 | netif_wake_subqueue(adapter->netdev, | ||
| 1566 | scrq->pool_index); | ||
| 1567 | netdev_dbg(adapter->netdev, | ||
| 1568 | "Started queue %d\n", | ||
| 1569 | scrq->pool_index); | ||
| 1570 | } | ||
| 1571 | |||
| 1572 | dev_kfree_skb_any(txbuff->skb); | 1812 | dev_kfree_skb_any(txbuff->skb); |
| 1813 | txbuff->skb = NULL; | ||
| 1573 | } | 1814 | } |
| 1574 | 1815 | ||
| 1575 | adapter->tx_pool[pool].free_map[adapter->tx_pool[pool]. | 1816 | adapter->tx_pool[pool].free_map[adapter->tx_pool[pool]. |
| @@ -1580,6 +1821,15 @@ restart_loop: | |||
| 1580 | } | 1821 | } |
| 1581 | /* remove tx_comp scrq*/ | 1822 | /* remove tx_comp scrq*/ |
| 1582 | next->tx_comp.first = 0; | 1823 | next->tx_comp.first = 0; |
| 1824 | |||
| 1825 | if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <= | ||
| 1826 | (adapter->req_tx_entries_per_subcrq / 2) && | ||
| 1827 | __netif_subqueue_stopped(adapter->netdev, | ||
| 1828 | scrq->pool_index)) { | ||
| 1829 | netif_wake_subqueue(adapter->netdev, scrq->pool_index); | ||
| 1830 | netdev_info(adapter->netdev, "Started queue %d\n", | ||
| 1831 | scrq->pool_index); | ||
| 1832 | } | ||
| 1583 | } | 1833 | } |
| 1584 | 1834 | ||
| 1585 | enable_scrq_irq(adapter, scrq); | 1835 | enable_scrq_irq(adapter, scrq); |
| @@ -1853,7 +2103,8 @@ static int pending_scrq(struct ibmvnic_adapter *adapter, | |||
| 1853 | { | 2103 | { |
| 1854 | union sub_crq *entry = &scrq->msgs[scrq->cur]; | 2104 | union sub_crq *entry = &scrq->msgs[scrq->cur]; |
| 1855 | 2105 | ||
| 1856 | if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing) | 2106 | if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || |
| 2107 | adapter->state == VNIC_CLOSING) | ||
| 1857 | return 1; | 2108 | return 1; |
| 1858 | else | 2109 | else |
| 1859 | return 0; | 2110 | return 0; |
| @@ -1991,18 +2242,6 @@ static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter) | |||
| 1991 | return ibmvnic_send_crq(adapter, &crq); | 2242 | return ibmvnic_send_crq(adapter, &crq); |
| 1992 | } | 2243 | } |
| 1993 | 2244 | ||
| 1994 | static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter) | ||
| 1995 | { | ||
| 1996 | union ibmvnic_crq crq; | ||
| 1997 | |||
| 1998 | memset(&crq, 0, sizeof(crq)); | ||
| 1999 | crq.generic.first = IBMVNIC_CRQ_INIT_CMD; | ||
| 2000 | crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE; | ||
| 2001 | netdev_dbg(adapter->netdev, "Sending CRQ init complete\n"); | ||
| 2002 | |||
| 2003 | return ibmvnic_send_crq(adapter, &crq); | ||
| 2004 | } | ||
| 2005 | |||
| 2006 | static int send_version_xchg(struct ibmvnic_adapter *adapter) | 2245 | static int send_version_xchg(struct ibmvnic_adapter *adapter) |
| 2007 | { | 2246 | { |
| 2008 | union ibmvnic_crq crq; | 2247 | union ibmvnic_crq crq; |
| @@ -2500,6 +2739,9 @@ static void handle_error_indication(union ibmvnic_crq *crq, | |||
| 2500 | 2739 | ||
| 2501 | if (be32_to_cpu(crq->error_indication.error_id)) | 2740 | if (be32_to_cpu(crq->error_indication.error_id)) |
| 2502 | request_error_information(adapter, crq); | 2741 | request_error_information(adapter, crq); |
| 2742 | |||
| 2743 | if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) | ||
| 2744 | ibmvnic_reset(adapter, VNIC_RESET_FATAL); | ||
| 2503 | } | 2745 | } |
| 2504 | 2746 | ||
| 2505 | static void handle_change_mac_rsp(union ibmvnic_crq *crq, | 2747 | static void handle_change_mac_rsp(union ibmvnic_crq *crq, |
| @@ -2888,26 +3130,6 @@ out: | |||
| 2888 | } | 3130 | } |
| 2889 | } | 3131 | } |
| 2890 | 3132 | ||
| 2891 | static void ibmvnic_xport_event(struct work_struct *work) | ||
| 2892 | { | ||
| 2893 | struct ibmvnic_adapter *adapter = container_of(work, | ||
| 2894 | struct ibmvnic_adapter, | ||
| 2895 | ibmvnic_xport); | ||
| 2896 | struct device *dev = &adapter->vdev->dev; | ||
| 2897 | long rc; | ||
| 2898 | |||
| 2899 | release_sub_crqs(adapter); | ||
| 2900 | if (adapter->migrated) { | ||
| 2901 | rc = ibmvnic_reenable_crq_queue(adapter); | ||
| 2902 | if (rc) | ||
| 2903 | dev_err(dev, "Error after enable rc=%ld\n", rc); | ||
| 2904 | adapter->migrated = false; | ||
| 2905 | rc = ibmvnic_send_crq_init(adapter); | ||
| 2906 | if (rc) | ||
| 2907 | dev_err(dev, "Error sending init rc=%ld\n", rc); | ||
| 2908 | } | ||
| 2909 | } | ||
| 2910 | |||
| 2911 | static void ibmvnic_handle_crq(union ibmvnic_crq *crq, | 3133 | static void ibmvnic_handle_crq(union ibmvnic_crq *crq, |
| 2912 | struct ibmvnic_adapter *adapter) | 3134 | struct ibmvnic_adapter *adapter) |
| 2913 | { | 3135 | { |
| @@ -2925,12 +3147,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, | |||
| 2925 | switch (gen_crq->cmd) { | 3147 | switch (gen_crq->cmd) { |
| 2926 | case IBMVNIC_CRQ_INIT: | 3148 | case IBMVNIC_CRQ_INIT: |
| 2927 | dev_info(dev, "Partner initialized\n"); | 3149 | dev_info(dev, "Partner initialized\n"); |
| 2928 | /* Send back a response */ | ||
| 2929 | rc = ibmvnic_send_crq_init_complete(adapter); | ||
| 2930 | if (!rc) | ||
| 2931 | schedule_work(&adapter->vnic_crq_init); | ||
| 2932 | else | ||
| 2933 | dev_err(dev, "Can't send initrsp rc=%ld\n", rc); | ||
| 2934 | break; | 3150 | break; |
| 2935 | case IBMVNIC_CRQ_INIT_COMPLETE: | 3151 | case IBMVNIC_CRQ_INIT_COMPLETE: |
| 2936 | dev_info(dev, "Partner initialization complete\n"); | 3152 | dev_info(dev, "Partner initialization complete\n"); |
| @@ -2941,19 +3157,18 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, | |||
| 2941 | } | 3157 | } |
| 2942 | return; | 3158 | return; |
| 2943 | case IBMVNIC_CRQ_XPORT_EVENT: | 3159 | case IBMVNIC_CRQ_XPORT_EVENT: |
| 3160 | netif_carrier_off(netdev); | ||
| 2944 | if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { | 3161 | if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { |
| 2945 | dev_info(dev, "Re-enabling adapter\n"); | 3162 | dev_info(dev, "Migrated, re-enabling adapter\n"); |
| 2946 | adapter->migrated = true; | 3163 | ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); |
| 2947 | schedule_work(&adapter->ibmvnic_xport); | ||
| 2948 | } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { | 3164 | } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { |
| 2949 | dev_info(dev, "Backing device failover detected\n"); | 3165 | dev_info(dev, "Backing device failover detected\n"); |
| 2950 | netif_carrier_off(netdev); | 3166 | ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); |
| 2951 | adapter->failover = true; | ||
| 2952 | } else { | 3167 | } else { |
| 2953 | /* The adapter lost the connection */ | 3168 | /* The adapter lost the connection */ |
| 2954 | dev_err(dev, "Virtual Adapter failed (rc=%d)\n", | 3169 | dev_err(dev, "Virtual Adapter failed (rc=%d)\n", |
| 2955 | gen_crq->cmd); | 3170 | gen_crq->cmd); |
| 2956 | schedule_work(&adapter->ibmvnic_xport); | 3171 | ibmvnic_reset(adapter, VNIC_RESET_FATAL); |
| 2957 | } | 3172 | } |
| 2958 | return; | 3173 | return; |
| 2959 | case IBMVNIC_CRQ_CMD_RSP: | 3174 | case IBMVNIC_CRQ_CMD_RSP: |
| @@ -3234,64 +3449,6 @@ map_failed: | |||
| 3234 | return retrc; | 3449 | return retrc; |
| 3235 | } | 3450 | } |
| 3236 | 3451 | ||
| 3237 | static void handle_crq_init_rsp(struct work_struct *work) | ||
| 3238 | { | ||
| 3239 | struct ibmvnic_adapter *adapter = container_of(work, | ||
| 3240 | struct ibmvnic_adapter, | ||
| 3241 | vnic_crq_init); | ||
| 3242 | struct device *dev = &adapter->vdev->dev; | ||
| 3243 | struct net_device *netdev = adapter->netdev; | ||
| 3244 | unsigned long timeout = msecs_to_jiffies(30000); | ||
| 3245 | bool restart = false; | ||
| 3246 | int rc; | ||
| 3247 | |||
| 3248 | if (adapter->failover) { | ||
| 3249 | release_sub_crqs(adapter); | ||
| 3250 | if (netif_running(netdev)) { | ||
| 3251 | netif_tx_disable(netdev); | ||
| 3252 | ibmvnic_close(netdev); | ||
| 3253 | restart = true; | ||
| 3254 | } | ||
| 3255 | } | ||
| 3256 | |||
| 3257 | reinit_completion(&adapter->init_done); | ||
| 3258 | send_version_xchg(adapter); | ||
| 3259 | if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { | ||
| 3260 | dev_err(dev, "Passive init timeout\n"); | ||
| 3261 | goto task_failed; | ||
| 3262 | } | ||
| 3263 | |||
| 3264 | netdev->mtu = adapter->req_mtu - ETH_HLEN; | ||
| 3265 | |||
| 3266 | if (adapter->failover) { | ||
| 3267 | adapter->failover = false; | ||
| 3268 | if (restart) { | ||
| 3269 | rc = ibmvnic_open(netdev); | ||
| 3270 | if (rc) | ||
| 3271 | goto restart_failed; | ||
| 3272 | } | ||
| 3273 | netif_carrier_on(netdev); | ||
| 3274 | return; | ||
| 3275 | } | ||
| 3276 | |||
| 3277 | rc = register_netdev(netdev); | ||
| 3278 | if (rc) { | ||
| 3279 | dev_err(dev, | ||
| 3280 | "failed to register netdev rc=%d\n", rc); | ||
| 3281 | goto register_failed; | ||
| 3282 | } | ||
| 3283 | dev_info(dev, "ibmvnic registered\n"); | ||
| 3284 | |||
| 3285 | return; | ||
| 3286 | |||
| 3287 | restart_failed: | ||
| 3288 | dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc); | ||
| 3289 | register_failed: | ||
| 3290 | release_sub_crqs(adapter); | ||
| 3291 | task_failed: | ||
| 3292 | dev_err(dev, "Passive initialization was not successful\n"); | ||
| 3293 | } | ||
| 3294 | |||
| 3295 | static int ibmvnic_init(struct ibmvnic_adapter *adapter) | 3452 | static int ibmvnic_init(struct ibmvnic_adapter *adapter) |
| 3296 | { | 3453 | { |
| 3297 | struct device *dev = &adapter->vdev->dev; | 3454 | struct device *dev = &adapter->vdev->dev; |
| @@ -3346,10 +3503,10 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 3346 | return -ENOMEM; | 3503 | return -ENOMEM; |
| 3347 | 3504 | ||
| 3348 | adapter = netdev_priv(netdev); | 3505 | adapter = netdev_priv(netdev); |
| 3506 | adapter->state = VNIC_PROBING; | ||
| 3349 | dev_set_drvdata(&dev->dev, netdev); | 3507 | dev_set_drvdata(&dev->dev, netdev); |
| 3350 | adapter->vdev = dev; | 3508 | adapter->vdev = dev; |
| 3351 | adapter->netdev = netdev; | 3509 | adapter->netdev = netdev; |
| 3352 | adapter->failover = false; | ||
| 3353 | 3510 | ||
| 3354 | ether_addr_copy(adapter->mac_addr, mac_addr_p); | 3511 | ether_addr_copy(adapter->mac_addr, mac_addr_p); |
| 3355 | ether_addr_copy(netdev->dev_addr, adapter->mac_addr); | 3512 | ether_addr_copy(netdev->dev_addr, adapter->mac_addr); |
| @@ -3358,14 +3515,17 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 3358 | netdev->ethtool_ops = &ibmvnic_ethtool_ops; | 3515 | netdev->ethtool_ops = &ibmvnic_ethtool_ops; |
| 3359 | SET_NETDEV_DEV(netdev, &dev->dev); | 3516 | SET_NETDEV_DEV(netdev, &dev->dev); |
| 3360 | 3517 | ||
| 3361 | INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp); | ||
| 3362 | INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event); | ||
| 3363 | |||
| 3364 | spin_lock_init(&adapter->stats_lock); | 3518 | spin_lock_init(&adapter->stats_lock); |
| 3365 | 3519 | ||
| 3366 | INIT_LIST_HEAD(&adapter->errors); | 3520 | INIT_LIST_HEAD(&adapter->errors); |
| 3367 | spin_lock_init(&adapter->error_list_lock); | 3521 | spin_lock_init(&adapter->error_list_lock); |
| 3368 | 3522 | ||
| 3523 | INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); | ||
| 3524 | INIT_LIST_HEAD(&adapter->rwi_list); | ||
| 3525 | mutex_init(&adapter->reset_lock); | ||
| 3526 | mutex_init(&adapter->rwi_lock); | ||
| 3527 | adapter->resetting = false; | ||
| 3528 | |||
| 3369 | rc = ibmvnic_init(adapter); | 3529 | rc = ibmvnic_init(adapter); |
| 3370 | if (rc) { | 3530 | if (rc) { |
| 3371 | free_netdev(netdev); | 3531 | free_netdev(netdev); |
| @@ -3373,7 +3533,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 3373 | } | 3533 | } |
| 3374 | 3534 | ||
| 3375 | netdev->mtu = adapter->req_mtu - ETH_HLEN; | 3535 | netdev->mtu = adapter->req_mtu - ETH_HLEN; |
| 3376 | adapter->is_closed = false; | ||
| 3377 | 3536 | ||
| 3378 | rc = register_netdev(netdev); | 3537 | rc = register_netdev(netdev); |
| 3379 | if (rc) { | 3538 | if (rc) { |
| @@ -3383,6 +3542,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 3383 | } | 3542 | } |
| 3384 | dev_info(&dev->dev, "ibmvnic registered\n"); | 3543 | dev_info(&dev->dev, "ibmvnic registered\n"); |
| 3385 | 3544 | ||
| 3545 | adapter->state = VNIC_PROBED; | ||
| 3386 | return 0; | 3546 | return 0; |
| 3387 | } | 3547 | } |
| 3388 | 3548 | ||
| @@ -3391,12 +3551,17 @@ static int ibmvnic_remove(struct vio_dev *dev) | |||
| 3391 | struct net_device *netdev = dev_get_drvdata(&dev->dev); | 3551 | struct net_device *netdev = dev_get_drvdata(&dev->dev); |
| 3392 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | 3552 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 3393 | 3553 | ||
| 3554 | adapter->state = VNIC_REMOVING; | ||
| 3394 | unregister_netdev(netdev); | 3555 | unregister_netdev(netdev); |
| 3556 | mutex_lock(&adapter->reset_lock); | ||
| 3395 | 3557 | ||
| 3396 | release_resources(adapter); | 3558 | release_resources(adapter); |
| 3397 | release_sub_crqs(adapter); | 3559 | release_sub_crqs(adapter); |
| 3398 | release_crq_queue(adapter); | 3560 | release_crq_queue(adapter); |
| 3399 | 3561 | ||
| 3562 | adapter->state = VNIC_REMOVED; | ||
| 3563 | |||
| 3564 | mutex_unlock(&adapter->reset_lock); | ||
| 3400 | free_netdev(netdev); | 3565 | free_netdev(netdev); |
| 3401 | dev_set_drvdata(&dev->dev, NULL); | 3566 | dev_set_drvdata(&dev->dev, NULL); |
| 3402 | 3567 | ||
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index a69979f6f19d..4702b48cfa44 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h | |||
| @@ -913,6 +913,25 @@ struct ibmvnic_error_buff { | |||
| 913 | __be32 error_id; | 913 | __be32 error_id; |
| 914 | }; | 914 | }; |
| 915 | 915 | ||
| 916 | enum vnic_state {VNIC_PROBING = 1, | ||
| 917 | VNIC_PROBED, | ||
| 918 | VNIC_OPENING, | ||
| 919 | VNIC_OPEN, | ||
| 920 | VNIC_CLOSING, | ||
| 921 | VNIC_CLOSED, | ||
| 922 | VNIC_REMOVING, | ||
| 923 | VNIC_REMOVED}; | ||
| 924 | |||
| 925 | enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1, | ||
| 926 | VNIC_RESET_MOBILITY, | ||
| 927 | VNIC_RESET_FATAL, | ||
| 928 | VNIC_RESET_TIMEOUT}; | ||
| 929 | |||
| 930 | struct ibmvnic_rwi { | ||
| 931 | enum ibmvnic_reset_reason reset_reason; | ||
| 932 | struct list_head list; | ||
| 933 | }; | ||
| 934 | |||
| 916 | struct ibmvnic_adapter { | 935 | struct ibmvnic_adapter { |
| 917 | struct vio_dev *vdev; | 936 | struct vio_dev *vdev; |
| 918 | struct net_device *netdev; | 937 | struct net_device *netdev; |
| @@ -922,7 +941,6 @@ struct ibmvnic_adapter { | |||
| 922 | dma_addr_t ip_offload_tok; | 941 | dma_addr_t ip_offload_tok; |
| 923 | struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl; | 942 | struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl; |
| 924 | dma_addr_t ip_offload_ctrl_tok; | 943 | dma_addr_t ip_offload_ctrl_tok; |
| 925 | bool migrated; | ||
| 926 | u32 msg_enable; | 944 | u32 msg_enable; |
| 927 | 945 | ||
| 928 | /* Statistics */ | 946 | /* Statistics */ |
| @@ -962,7 +980,6 @@ struct ibmvnic_adapter { | |||
| 962 | u64 promisc; | 980 | u64 promisc; |
| 963 | 981 | ||
| 964 | struct ibmvnic_tx_pool *tx_pool; | 982 | struct ibmvnic_tx_pool *tx_pool; |
| 965 | bool closing; | ||
| 966 | struct completion init_done; | 983 | struct completion init_done; |
| 967 | int init_done_rc; | 984 | int init_done_rc; |
| 968 | 985 | ||
| @@ -1007,9 +1024,11 @@ struct ibmvnic_adapter { | |||
| 1007 | __be64 tx_rx_desc_req; | 1024 | __be64 tx_rx_desc_req; |
| 1008 | u8 map_id; | 1025 | u8 map_id; |
| 1009 | 1026 | ||
| 1010 | struct work_struct vnic_crq_init; | ||
| 1011 | struct work_struct ibmvnic_xport; | ||
| 1012 | struct tasklet_struct tasklet; | 1027 | struct tasklet_struct tasklet; |
| 1013 | bool failover; | 1028 | enum vnic_state state; |
| 1014 | bool is_closed; | 1029 | enum ibmvnic_reset_reason reset_reason; |
| 1030 | struct mutex reset_lock, rwi_lock; | ||
| 1031 | struct list_head rwi_list; | ||
| 1032 | struct work_struct ibmvnic_reset; | ||
| 1033 | bool resetting; | ||
| 1015 | }; | 1034 | }; |
