author		David S. Miller <davem@davemloft.net>	2014-06-04 02:32:12 -0400
committer	David S. Miller <davem@davemloft.net>	2014-06-04 02:32:12 -0400
commit		c99f7abf0e69987e4add567e155e042cb1f2a20b (patch)
tree		d23898dc30ed25c1dae9bb6325041027d412397a /drivers/net/ethernet/broadcom
parent		92ff71b8fe9cd9c673615fc6f3870af7376d7c84 (diff)
parent		d8b0426af5b67973585712c9af36b86f6ea97815 (diff)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	include/net/inetpeer.h
	net/ipv6/output_core.c

Changes in net were fixing bugs in code removed in net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c		12
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c	4
-rw-r--r--	drivers/net/ethernet/broadcom/cnic.c			29
3 files changed, 30 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index d18441ebe944..23da47925fa3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -906,6 +906,18 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		bd_prod = RX_BD(bd_prod);
 		bd_cons = RX_BD(bd_cons);
 
+		/* A rmb() is required to ensure that the CQE is not read
+		 * before it is written by the adapter DMA. PCI ordering
+		 * rules will make sure the other fields are written before
+		 * the marker at the end of struct eth_fast_path_rx_cqe
+		 * but without rmb() a weakly ordered processor can process
+		 * stale data. Without the barrier TPA state-machine might
+		 * enter inconsistent state and kernel stack might be
+		 * provided with incorrect packet description - these lead
+		 * to various kernel crashed.
+		 */
+		rmb();
+
 		cqe_fp_flags = cqe_fp->type_error_flags;
 		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 
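The comment introduced in this hunk describes a general rule for consuming completion entries that a device writes via DMA: check the validity marker first, then issue a read barrier before trusting the remaining fields, otherwise a weakly ordered CPU may load stale data. The sketch below only illustrates that barrier placement; the names (demo_cqe, DEMO_CQE_VALID, demo_poll_one) are invented and are not the bnx2x structures.

#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/printk.h>
#include <linux/types.h>
#include <asm/barrier.h>	/* rmb() */

/* Hypothetical completion entry: the device writes data0/data1 first and
 * sets DEMO_CQE_VALID in 'flags' last, after all other fields are in memory.
 */
struct demo_cqe {
	u32 data0;
	u32 data1;
	u8 flags;
};

#define DEMO_CQE_VALID	0x1

/* Consume one DMA-written entry.  Returns true if a valid entry was seen. */
static bool demo_poll_one(struct demo_cqe *cqe)
{
	if (!(READ_ONCE(cqe->flags) & DEMO_CQE_VALID))
		return false;

	/* Order the marker read before the payload reads; without this a
	 * weakly ordered CPU could observe data0/data1 values that predate
	 * the marker and hand stale data up the stack.
	 */
	rmb();

	pr_info("demo: cqe %u/%u\n", cqe->data0, cqe->data1);
	return true;
}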
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ff2bdd80f0aa..cf14218697e4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13283,8 +13283,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 	netdev_reset_tc(bp->dev);
 
 	del_timer_sync(&bp->timer);
-	cancel_delayed_work(&bp->sp_task);
-	cancel_delayed_work(&bp->period_task);
+	cancel_delayed_work_sync(&bp->sp_task);
+	cancel_delayed_work_sync(&bp->period_task);
 
 	spin_lock_bh(&bp->stats_lock);
 	bp->stats_state = STATS_STATE_DISABLED;
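The change above swaps cancel_delayed_work() for cancel_delayed_work_sync(). The former only removes a pending work item and returns immediately, while the _sync variant also waits for an instance that is already executing, which is what a teardown path needs before releasing resources the handler touches. A minimal sketch of that teardown pattern, with hypothetical names (demo_ctx, demo_maint_fn, demo_teardown) rather than the bnx2x structures:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical driver context with one periodic maintenance task. */
struct demo_ctx {
	struct delayed_work maint_work;
};

static void demo_maint_fn(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(to_delayed_work(work),
					    struct demo_ctx, maint_work);

	pr_info("demo: maintenance tick for %p\n", ctx);
}

/* Teardown: cancel_delayed_work_sync() removes a pending item *and* waits
 * for demo_maint_fn() if it is already running on another CPU, so freeing
 * ctx afterwards is safe.  Plain cancel_delayed_work() returns immediately
 * and the handler could still be executing.  (Must not be called from the
 * work item itself or while holding a lock the handler takes.)
 */
static void demo_teardown(struct demo_ctx *ctx)
{
	cancel_delayed_work_sync(&ctx->maint_work);
	kfree(ctx);
}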
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 09f3fefcbf9c..a4b25bc7113a 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -608,6 +608,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 		pr_err("%s: Bad type %d\n", __func__, ulp_type);
 		return -EINVAL;
 	}
+
+	if (ulp_type == CNIC_ULP_ISCSI)
+		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
 	mutex_lock(&cnic_lock);
 	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
 		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
@@ -620,9 +624,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 	}
 	mutex_unlock(&cnic_lock);
 
-	if (ulp_type == CNIC_ULP_ISCSI)
-		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
-	else if (ulp_type == CNIC_ULP_FCOE)
+	if (ulp_type == CNIC_ULP_FCOE)
 		dev->fcoe_cap = NULL;
 
 	synchronize_rcu();
@@ -1039,21 +1041,17 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 	struct cnic_local *cp = dev->cnic_priv;
 	struct cnic_uio_dev *udev;
 
-	read_lock(&cnic_dev_lock);
 	list_for_each_entry(udev, &cnic_udev_list, list) {
 		if (udev->pdev == dev->pcidev) {
 			udev->dev = dev;
 			if (__cnic_alloc_uio_rings(udev, pages)) {
 				udev->dev = NULL;
-				read_unlock(&cnic_dev_lock);
 				return -ENOMEM;
 			}
 			cp->udev = udev;
-			read_unlock(&cnic_dev_lock);
 			return 0;
 		}
 	}
-	read_unlock(&cnic_dev_lock);
 
 	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
 	if (!udev)
@@ -1067,9 +1065,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 	if (__cnic_alloc_uio_rings(udev, pages))
 		goto err_udev;
 
-	write_lock(&cnic_dev_lock);
 	list_add(&udev->list, &cnic_udev_list);
-	write_unlock(&cnic_dev_lock);
 
 	pci_dev_get(udev->pdev);
 
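The two hunks above drop the cnic_dev_lock read/write locking around the cnic_udev_list walk and insertion. One likely motivation, though the underlying commit message is not quoted here, is that __cnic_alloc_uio_rings() performs allocations that may sleep, which is not permitted inside a spinning read_lock()/write_lock() section. When a lock is still required, the usual alternative is to allocate outside the critical section and only publish the result under the lock, as in the hypothetical sketch below (demo_node, demo_lock and demo_add are invented names):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical registry protected by a spinlock. */
struct demo_node {
	struct list_head list;
	int id;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

/* GFP_KERNEL allocations may sleep and therefore must not happen inside a
 * spinlock/rwlock critical section: allocate first, publish under the lock.
 */
static int demo_add(int id)
{
	struct demo_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return -ENOMEM;

	node->id = id;

	spin_lock(&demo_lock);
	list_add(&node->list, &demo_list);
	spin_unlock(&demo_lock);

	return 0;
}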
@@ -5624,20 +5620,27 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
 {
 	int if_type;
 
-	rcu_read_lock();
 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
 		struct cnic_ulp_ops *ulp_ops;
 		void *ctx;
 
-		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
-		if (!ulp_ops || !ulp_ops->indicate_netevent)
+		mutex_lock(&cnic_lock);
+		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+						    lockdep_is_held(&cnic_lock));
+		if (!ulp_ops || !ulp_ops->indicate_netevent) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
 
 		ctx = cp->ulp_handle[if_type];
 
+		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+		mutex_unlock(&cnic_lock);
+
 		ulp_ops->indicate_netevent(ctx, event, vlan_id);
+
+		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
 	}
-	rcu_read_unlock();
 }
 
 /* netdev event handler */
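The cnic_rcv_netevent() hunk replaces an RCU read-side critical section with the cnic_lock mutex plus a ULP_F_CALL_PENDING flag: the ops pointer is looked up under the mutex via rcu_dereference_protected(), the call is marked pending, the mutex is dropped, and only then is indicate_netevent() invoked. Compared with holding rcu_read_lock() across the callback, this lets the callback run in a context where it may sleep, while the pending bit presumably lets the unregister path detect an in-flight call. A compressed sketch of the same pattern with invented names (demo_ops, demo_slot, demo_notify, DEMO_F_CALL_PENDING), not the cnic code itself:

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/* Hypothetical callback slot.  'ops' is published with RCU semantics but
 * always modified under demo_lock; DEMO_F_CALL_PENDING marks an invocation
 * in flight so an unregister path can wait for it to drain.
 */
struct demo_ops {
	void (*notify)(void *ctx, unsigned long event);
};

#define DEMO_F_CALL_PENDING	0

struct demo_slot {
	struct demo_ops __rcu *ops;
	void *ctx;
	unsigned long flags;
};

static DEFINE_MUTEX(demo_lock);

static void demo_notify(struct demo_slot *slot, unsigned long event)
{
	struct demo_ops *ops;

	mutex_lock(&demo_lock);
	ops = rcu_dereference_protected(slot->ops,
					lockdep_is_held(&demo_lock));
	if (!ops || !ops->notify) {
		mutex_unlock(&demo_lock);
		return;
	}

	/* Mark the call in flight, then drop the mutex so the callback can
	 * run (and possibly sleep) without holding it.
	 */
	set_bit(DEMO_F_CALL_PENDING, &slot->flags);
	mutex_unlock(&demo_lock);

	ops->notify(slot->ctx, event);

	clear_bit(DEMO_F_CALL_PENDING, &slot->flags);
}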