diff options
| author | Takashi Iwai <tiwai@suse.de> | 2015-06-22 05:32:41 -0400 |
|---|---|---|
| committer | Takashi Iwai <tiwai@suse.de> | 2015-06-22 05:32:41 -0400 |
| commit | 57fa8a1e22c5833fb2cae96af68fc39ec21cb017 (patch) | |
| tree | b0bb4e4a6e04a24119da30253add9fe9ffbc8d22 /drivers/net | |
| parent | f267f9dff8ba00a8b11f340da3634858ad50ebab (diff) | |
| parent | c99d49a8f81fb35e67b0ffa45f320a75e0b5639d (diff) | |
Merge tag 'asoc-v4.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-next
ASoC: Further updates for v4.2
There's a bunch of additional updates and fixes that came in since my
original pull request here, including DT support for rt5645 and fairly
large series of cleanups and improvements to tas2552 and rcar.
Diffstat (limited to 'drivers/net')
30 files changed, 275 insertions, 180 deletions
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index db84ddcfec84..9fd6c69a8bac 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c | |||
| @@ -423,7 +423,7 @@ static void xgbe_tx_timer(unsigned long data) | |||
| 423 | if (napi_schedule_prep(napi)) { | 423 | if (napi_schedule_prep(napi)) { |
| 424 | /* Disable Tx and Rx interrupts */ | 424 | /* Disable Tx and Rx interrupts */ |
| 425 | if (pdata->per_channel_irq) | 425 | if (pdata->per_channel_irq) |
| 426 | disable_irq(channel->dma_irq); | 426 | disable_irq_nosync(channel->dma_irq); |
| 427 | else | 427 | else |
| 428 | xgbe_disable_rx_tx_ints(pdata); | 428 | xgbe_disable_rx_tx_ints(pdata); |
| 429 | 429 | ||
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 77363d680532..a3b1c07ae0af 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c | |||
| @@ -2464,6 +2464,7 @@ err_out_powerdown: | |||
| 2464 | ssb_bus_may_powerdown(sdev->bus); | 2464 | ssb_bus_may_powerdown(sdev->bus); |
| 2465 | 2465 | ||
| 2466 | err_out_free_dev: | 2466 | err_out_free_dev: |
| 2467 | netif_napi_del(&bp->napi); | ||
| 2467 | free_netdev(dev); | 2468 | free_netdev(dev); |
| 2468 | 2469 | ||
| 2469 | out: | 2470 | out: |
| @@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev) | |||
| 2480 | b44_unregister_phy_one(bp); | 2481 | b44_unregister_phy_one(bp); |
| 2481 | ssb_device_disable(sdev, 0); | 2482 | ssb_device_disable(sdev, 0); |
| 2482 | ssb_bus_may_powerdown(sdev->bus); | 2483 | ssb_bus_may_powerdown(sdev->bus); |
| 2484 | netif_napi_del(&bp->napi); | ||
| 2483 | free_netdev(dev); | 2485 | free_netdev(dev); |
| 2484 | ssb_pcihost_set_power_state(sdev, PCI_D3hot); | 2486 | ssb_pcihost_set_power_state(sdev, PCI_D3hot); |
| 2485 | ssb_set_drvdata(sdev, NULL); | 2487 | ssb_set_drvdata(sdev, NULL); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index a3b0f7a0c61e..1f82a04ce01a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
| @@ -1774,7 +1774,7 @@ struct bnx2x { | |||
| 1774 | int stats_state; | 1774 | int stats_state; |
| 1775 | 1775 | ||
| 1776 | /* used for synchronization of concurrent threads statistics handling */ | 1776 | /* used for synchronization of concurrent threads statistics handling */ |
| 1777 | struct mutex stats_lock; | 1777 | struct semaphore stats_lock; |
| 1778 | 1778 | ||
| 1779 | /* used by dmae command loader */ | 1779 | /* used by dmae command loader */ |
| 1780 | struct dmae_command stats_dmae; | 1780 | struct dmae_command stats_dmae; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index fd52ce95127e..33501bcddc48 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -12054,7 +12054,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) | |||
| 12054 | mutex_init(&bp->port.phy_mutex); | 12054 | mutex_init(&bp->port.phy_mutex); |
| 12055 | mutex_init(&bp->fw_mb_mutex); | 12055 | mutex_init(&bp->fw_mb_mutex); |
| 12056 | mutex_init(&bp->drv_info_mutex); | 12056 | mutex_init(&bp->drv_info_mutex); |
| 12057 | mutex_init(&bp->stats_lock); | 12057 | sema_init(&bp->stats_lock, 1); |
| 12058 | bp->drv_info_mng_owner = false; | 12058 | bp->drv_info_mng_owner = false; |
| 12059 | 12059 | ||
| 12060 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); | 12060 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); |
| @@ -13690,9 +13690,10 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) | |||
| 13690 | cancel_delayed_work_sync(&bp->sp_task); | 13690 | cancel_delayed_work_sync(&bp->sp_task); |
| 13691 | cancel_delayed_work_sync(&bp->period_task); | 13691 | cancel_delayed_work_sync(&bp->period_task); |
| 13692 | 13692 | ||
| 13693 | mutex_lock(&bp->stats_lock); | 13693 | if (!down_timeout(&bp->stats_lock, HZ / 10)) { |
| 13694 | bp->stats_state = STATS_STATE_DISABLED; | 13694 | bp->stats_state = STATS_STATE_DISABLED; |
| 13695 | mutex_unlock(&bp->stats_lock); | 13695 | up(&bp->stats_lock); |
| 13696 | } | ||
| 13696 | 13697 | ||
| 13697 | bnx2x_save_statistics(bp); | 13698 | bnx2x_save_statistics(bp); |
| 13698 | 13699 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 266b055c2360..69d699f0730a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | |||
| @@ -1372,19 +1372,23 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | |||
| 1372 | * that context in case someone is in the middle of a transition. | 1372 | * that context in case someone is in the middle of a transition. |
| 1373 | * For other events, wait a bit until lock is taken. | 1373 | * For other events, wait a bit until lock is taken. |
| 1374 | */ | 1374 | */ |
| 1375 | if (!mutex_trylock(&bp->stats_lock)) { | 1375 | if (down_trylock(&bp->stats_lock)) { |
| 1376 | if (event == STATS_EVENT_UPDATE) | 1376 | if (event == STATS_EVENT_UPDATE) |
| 1377 | return; | 1377 | return; |
| 1378 | 1378 | ||
| 1379 | DP(BNX2X_MSG_STATS, | 1379 | DP(BNX2X_MSG_STATS, |
| 1380 | "Unlikely stats' lock contention [event %d]\n", event); | 1380 | "Unlikely stats' lock contention [event %d]\n", event); |
| 1381 | mutex_lock(&bp->stats_lock); | 1381 | if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) { |
| 1382 | BNX2X_ERR("Failed to take stats lock [event %d]\n", | ||
| 1383 | event); | ||
| 1384 | return; | ||
| 1385 | } | ||
| 1382 | } | 1386 | } |
| 1383 | 1387 | ||
| 1384 | bnx2x_stats_stm[state][event].action(bp); | 1388 | bnx2x_stats_stm[state][event].action(bp); |
| 1385 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1389 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
| 1386 | 1390 | ||
| 1387 | mutex_unlock(&bp->stats_lock); | 1391 | up(&bp->stats_lock); |
| 1388 | 1392 | ||
| 1389 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 1393 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
| 1390 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 1394 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
| @@ -1970,7 +1974,11 @@ int bnx2x_stats_safe_exec(struct bnx2x *bp, | |||
| 1970 | /* Wait for statistics to end [while blocking further requests], | 1974 | /* Wait for statistics to end [while blocking further requests], |
| 1971 | * then run supplied function 'safely'. | 1975 | * then run supplied function 'safely'. |
| 1972 | */ | 1976 | */ |
| 1973 | mutex_lock(&bp->stats_lock); | 1977 | rc = down_timeout(&bp->stats_lock, HZ / 10); |
| 1978 | if (unlikely(rc)) { | ||
| 1979 | BNX2X_ERR("Failed to take statistics lock for safe execution\n"); | ||
| 1980 | goto out_no_lock; | ||
| 1981 | } | ||
| 1974 | 1982 | ||
| 1975 | bnx2x_stats_comp(bp); | 1983 | bnx2x_stats_comp(bp); |
| 1976 | while (bp->stats_pending && cnt--) | 1984 | while (bp->stats_pending && cnt--) |
| @@ -1988,7 +1996,7 @@ out: | |||
| 1988 | /* No need to restart statistics - if they're enabled, the timer | 1996 | /* No need to restart statistics - if they're enabled, the timer |
| 1989 | * will restart the statistics. | 1997 | * will restart the statistics. |
| 1990 | */ | 1998 | */ |
| 1991 | mutex_unlock(&bp->stats_lock); | 1999 | up(&bp->stats_lock); |
| 1992 | 2000 | out_no_lock: | |
| 1993 | return rc; | 2001 | return rc; |
| 1994 | } | 2002 | } |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index e7651b3c6c57..420949cc55aa 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c | |||
| @@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) | |||
| 299 | phy_name = "external RGMII (no delay)"; | 299 | phy_name = "external RGMII (no delay)"; |
| 300 | else | 300 | else |
| 301 | phy_name = "external RGMII (TX delay)"; | 301 | phy_name = "external RGMII (TX delay)"; |
| 302 | reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); | ||
| 303 | reg |= RGMII_MODE_EN | id_mode_dis; | ||
| 304 | bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); | ||
| 305 | bcmgenet_sys_writel(priv, | 302 | bcmgenet_sys_writel(priv, |
| 306 | PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); | 303 | PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); |
| 307 | break; | 304 | break; |
| @@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) | |||
| 310 | return -EINVAL; | 307 | return -EINVAL; |
| 311 | } | 308 | } |
| 312 | 309 | ||
| 310 | /* This is an external PHY (xMII), so we need to enable the RGMII | ||
| 311 | * block for the interface to work | ||
| 312 | */ | ||
| 313 | if (priv->ext_phy) { | ||
| 314 | reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); | ||
| 315 | reg |= RGMII_MODE_EN | id_mode_dis; | ||
| 316 | bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); | ||
| 317 | } | ||
| 318 | |||
| 313 | if (init) | 319 | if (init) |
| 314 | dev_info(kdev, "configuring instance for %s\n", phy_name); | 320 | dev_info(kdev, "configuring instance for %s\n", phy_name); |
| 315 | 321 | ||
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 594a2ab36d31..68f3c13c9ef6 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c | |||
| @@ -2414,7 +2414,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, | |||
| 2414 | if (status == BFA_STATUS_OK) | 2414 | if (status == BFA_STATUS_OK) |
| 2415 | bfa_ioc_lpu_start(ioc); | 2415 | bfa_ioc_lpu_start(ioc); |
| 2416 | else | 2416 | else |
| 2417 | bfa_nw_iocpf_timeout(ioc); | 2417 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); |
| 2418 | 2418 | ||
| 2419 | return status; | 2419 | return status; |
| 2420 | } | 2420 | } |
| @@ -3029,7 +3029,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc) | |||
| 3029 | } | 3029 | } |
| 3030 | 3030 | ||
| 3031 | if (ioc->iocpf.poll_time >= BFA_IOC_TOV) { | 3031 | if (ioc->iocpf.poll_time >= BFA_IOC_TOV) { |
| 3032 | bfa_nw_iocpf_timeout(ioc); | 3032 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); |
| 3033 | } else { | 3033 | } else { |
| 3034 | ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; | 3034 | ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; |
| 3035 | mod_timer(&ioc->iocpf_timer, jiffies + | 3035 | mod_timer(&ioc->iocpf_timer, jiffies + |
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 37072a83f9d6..caae6cb2bc1a 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c | |||
| @@ -3701,10 +3701,6 @@ bnad_pci_probe(struct pci_dev *pdev, | |||
| 3701 | setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, | 3701 | setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, |
| 3702 | ((unsigned long)bnad)); | 3702 | ((unsigned long)bnad)); |
| 3703 | 3703 | ||
| 3704 | /* Now start the timer before calling IOC */ | ||
| 3705 | mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer, | ||
| 3706 | jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); | ||
| 3707 | |||
| 3708 | /* | 3704 | /* |
| 3709 | * Start the chip | 3705 | * Start the chip |
| 3710 | * If the call back comes with error, we bail out. | 3706 | * If the call back comes with error, we bail out. |
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c index ebf462d8082f..badea368bdc8 100644 --- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c +++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c | |||
| @@ -30,6 +30,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image, | |||
| 30 | u32 *bfi_image_size, char *fw_name) | 30 | u32 *bfi_image_size, char *fw_name) |
| 31 | { | 31 | { |
| 32 | const struct firmware *fw; | 32 | const struct firmware *fw; |
| 33 | u32 n; | ||
| 33 | 34 | ||
| 34 | if (request_firmware(&fw, fw_name, &pdev->dev)) { | 35 | if (request_firmware(&fw, fw_name, &pdev->dev)) { |
| 35 | pr_alert("Can't locate firmware %s\n", fw_name); | 36 | pr_alert("Can't locate firmware %s\n", fw_name); |
| @@ -40,6 +41,12 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image, | |||
| 40 | *bfi_image_size = fw->size/sizeof(u32); | 41 | *bfi_image_size = fw->size/sizeof(u32); |
| 41 | bfi_fw = fw; | 42 | bfi_fw = fw; |
| 42 | 43 | ||
| 44 | /* Convert loaded firmware to host order as it is stored in file | ||
| 45 | * as sequence of LE32 integers. | ||
| 46 | */ | ||
| 47 | for (n = 0; n < *bfi_image_size; n++) | ||
| 48 | le32_to_cpus(*bfi_image + n); | ||
| 49 | |||
| 43 | return *bfi_image; | 50 | return *bfi_image; |
| 44 | error: | 51 | error: |
| 45 | return NULL; | 52 | return NULL; |
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 28d9ca675a27..68d47b196dae 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c | |||
| @@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev, | |||
| 131 | { | 131 | { |
| 132 | struct enic *enic = netdev_priv(netdev); | 132 | struct enic *enic = netdev_priv(netdev); |
| 133 | struct vnic_devcmd_fw_info *fw_info; | 133 | struct vnic_devcmd_fw_info *fw_info; |
| 134 | int err; | ||
| 134 | 135 | ||
| 135 | enic_dev_fw_info(enic, &fw_info); | 136 | err = enic_dev_fw_info(enic, &fw_info); |
| 137 | /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info | ||
| 138 | * For other failures, like devcmd failure, we return previously | ||
| 139 | * recorded info. | ||
| 140 | */ | ||
| 141 | if (err == -ENOMEM) | ||
| 142 | return; | ||
| 136 | 143 | ||
| 137 | strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); | 144 | strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); |
| 138 | strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); | 145 | strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); |
| @@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev, | |||
| 181 | struct enic *enic = netdev_priv(netdev); | 188 | struct enic *enic = netdev_priv(netdev); |
| 182 | struct vnic_stats *vstats; | 189 | struct vnic_stats *vstats; |
| 183 | unsigned int i; | 190 | unsigned int i; |
| 184 | 191 | int err; | |
| 185 | enic_dev_stats_dump(enic, &vstats); | 192 | |
| 193 | err = enic_dev_stats_dump(enic, &vstats); | ||
| 194 | /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump | ||
| 195 | * For other failures, like devcmd failure, we return previously | ||
| 196 | * recorded stats. | ||
| 197 | */ | ||
| 198 | if (err == -ENOMEM) | ||
| 199 | return; | ||
| 186 | 200 | ||
| 187 | for (i = 0; i < enic_n_tx_stats; i++) | 201 | for (i = 0; i < enic_n_tx_stats; i++) |
| 188 | *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; | 202 | *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index]; |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 204bd182473b..eadae1b412c6 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
| @@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev, | |||
| 615 | { | 615 | { |
| 616 | struct enic *enic = netdev_priv(netdev); | 616 | struct enic *enic = netdev_priv(netdev); |
| 617 | struct vnic_stats *stats; | 617 | struct vnic_stats *stats; |
| 618 | int err; | ||
| 618 | 619 | ||
| 619 | enic_dev_stats_dump(enic, &stats); | 620 | err = enic_dev_stats_dump(enic, &stats); |
| 621 | /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump | ||
| 622 | * For other failures, like devcmd failure, we return previously | ||
| 623 | * recorded stats. | ||
| 624 | */ | ||
| 625 | if (err == -ENOMEM) | ||
| 626 | return net_stats; | ||
| 620 | 627 | ||
| 621 | net_stats->tx_packets = stats->tx.tx_frames_ok; | 628 | net_stats->tx_packets = stats->tx.tx_frames_ok; |
| 622 | net_stats->tx_bytes = stats->tx.tx_bytes_ok; | 629 | net_stats->tx_bytes = stats->tx.tx_bytes_ok; |
| @@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) | |||
| 1407 | */ | 1414 | */ |
| 1408 | enic_calc_int_moderation(enic, &enic->rq[rq]); | 1415 | enic_calc_int_moderation(enic, &enic->rq[rq]); |
| 1409 | 1416 | ||
| 1417 | enic_poll_unlock_napi(&enic->rq[rq]); | ||
| 1410 | if (work_done < work_to_do) { | 1418 | if (work_done < work_to_do) { |
| 1411 | 1419 | ||
| 1412 | /* Some work done, but not enough to stay in polling, | 1420 | /* Some work done, but not enough to stay in polling, |
| @@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) | |||
| 1418 | enic_set_int_moderation(enic, &enic->rq[rq]); | 1426 | enic_set_int_moderation(enic, &enic->rq[rq]); |
| 1419 | vnic_intr_unmask(&enic->intr[intr]); | 1427 | vnic_intr_unmask(&enic->intr[intr]); |
| 1420 | } | 1428 | } |
| 1421 | enic_poll_unlock_napi(&enic->rq[rq]); | ||
| 1422 | 1429 | ||
| 1423 | return work_done; | 1430 | return work_done; |
| 1424 | } | 1431 | } |
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index 36a2ed606c91..c4b2183bf352 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c | |||
| @@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq, | |||
| 188 | struct vnic_rq_buf *buf; | 188 | struct vnic_rq_buf *buf; |
| 189 | u32 fetch_index; | 189 | u32 fetch_index; |
| 190 | unsigned int count = rq->ring.desc_count; | 190 | unsigned int count = rq->ring.desc_count; |
| 191 | int i; | ||
| 191 | 192 | ||
| 192 | buf = rq->to_clean; | 193 | buf = rq->to_clean; |
| 193 | 194 | ||
| 194 | while (vnic_rq_desc_used(rq) > 0) { | 195 | for (i = 0; i < rq->ring.desc_count; i++) { |
| 195 | |||
| 196 | (*buf_clean)(rq, buf); | 196 | (*buf_clean)(rq, buf); |
| 197 | 197 | buf = buf->next; | |
| 198 | buf = rq->to_clean = buf->next; | ||
| 199 | rq->ring.desc_avail++; | ||
| 200 | } | 198 | } |
| 199 | rq->ring.desc_avail = rq->ring.desc_count - 1; | ||
| 201 | 200 | ||
| 202 | /* Use current fetch_index as the ring starting point */ | 201 | /* Use current fetch_index as the ring starting point */ |
| 203 | fetch_index = ioread32(&rq->ctrl->fetch_index); | 202 | fetch_index = ioread32(&rq->ctrl->fetch_index); |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index fb140faeafb1..c5e1d0ac75f9 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
| @@ -1720,9 +1720,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) | |||
| 1720 | total_size = buf_len; | 1720 | total_size = buf_len; |
| 1721 | 1721 | ||
| 1722 | get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; | 1722 | get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; |
| 1723 | get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, | 1723 | get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 1724 | get_fat_cmd.size, | 1724 | get_fat_cmd.size, |
| 1725 | &get_fat_cmd.dma); | 1725 | &get_fat_cmd.dma, GFP_ATOMIC); |
| 1726 | if (!get_fat_cmd.va) { | 1726 | if (!get_fat_cmd.va) { |
| 1727 | dev_err(&adapter->pdev->dev, | 1727 | dev_err(&adapter->pdev->dev, |
| 1728 | "Memory allocation failure while reading FAT data\n"); | 1728 | "Memory allocation failure while reading FAT data\n"); |
| @@ -1767,8 +1767,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) | |||
| 1767 | log_offset += buf_size; | 1767 | log_offset += buf_size; |
| 1768 | } | 1768 | } |
| 1769 | err: | 1769 | err: |
| 1770 | pci_free_consistent(adapter->pdev, get_fat_cmd.size, | 1770 | dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, |
| 1771 | get_fat_cmd.va, get_fat_cmd.dma); | 1771 | get_fat_cmd.va, get_fat_cmd.dma); |
| 1772 | spin_unlock_bh(&adapter->mcc_lock); | 1772 | spin_unlock_bh(&adapter->mcc_lock); |
| 1773 | return status; | 1773 | return status; |
| 1774 | } | 1774 | } |
| @@ -2215,12 +2215,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, | |||
| 2215 | return -EINVAL; | 2215 | return -EINVAL; |
| 2216 | 2216 | ||
| 2217 | cmd.size = sizeof(struct be_cmd_resp_port_type); | 2217 | cmd.size = sizeof(struct be_cmd_resp_port_type); |
| 2218 | cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); | 2218 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 2219 | GFP_ATOMIC); | ||
| 2219 | if (!cmd.va) { | 2220 | if (!cmd.va) { |
| 2220 | dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); | 2221 | dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); |
| 2221 | return -ENOMEM; | 2222 | return -ENOMEM; |
| 2222 | } | 2223 | } |
| 2223 | memset(cmd.va, 0, cmd.size); | ||
| 2224 | 2224 | ||
| 2225 | spin_lock_bh(&adapter->mcc_lock); | 2225 | spin_lock_bh(&adapter->mcc_lock); |
| 2226 | 2226 | ||
| @@ -2245,7 +2245,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, | |||
| 2245 | } | 2245 | } |
| 2246 | err: | 2246 | err: |
| 2247 | spin_unlock_bh(&adapter->mcc_lock); | 2247 | spin_unlock_bh(&adapter->mcc_lock); |
| 2248 | pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); | 2248 | dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); |
| 2249 | return status; | 2249 | return status; |
| 2250 | } | 2250 | } |
| 2251 | 2251 | ||
| @@ -2720,7 +2720,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) | |||
| 2720 | goto err; | 2720 | goto err; |
| 2721 | } | 2721 | } |
| 2722 | cmd.size = sizeof(struct be_cmd_req_get_phy_info); | 2722 | cmd.size = sizeof(struct be_cmd_req_get_phy_info); |
| 2723 | cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); | 2723 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 2724 | GFP_ATOMIC); | ||
| 2724 | if (!cmd.va) { | 2725 | if (!cmd.va) { |
| 2725 | dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); | 2726 | dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); |
| 2726 | status = -ENOMEM; | 2727 | status = -ENOMEM; |
| @@ -2754,7 +2755,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) | |||
| 2754 | BE_SUPPORTED_SPEED_1GBPS; | 2755 | BE_SUPPORTED_SPEED_1GBPS; |
| 2755 | } | 2756 | } |
| 2756 | } | 2757 | } |
| 2757 | pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); | 2758 | dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); |
| 2758 | err: | 2759 | err: |
| 2759 | spin_unlock_bh(&adapter->mcc_lock); | 2760 | spin_unlock_bh(&adapter->mcc_lock); |
| 2760 | return status; | 2761 | return status; |
| @@ -2805,8 +2806,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) | |||
| 2805 | 2806 | ||
| 2806 | memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); | 2807 | memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); |
| 2807 | attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); | 2808 | attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); |
| 2808 | attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, | 2809 | attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 2809 | &attribs_cmd.dma); | 2810 | attribs_cmd.size, |
| 2811 | &attribs_cmd.dma, GFP_ATOMIC); | ||
| 2810 | if (!attribs_cmd.va) { | 2812 | if (!attribs_cmd.va) { |
| 2811 | dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); | 2813 | dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); |
| 2812 | status = -ENOMEM; | 2814 | status = -ENOMEM; |
| @@ -2833,8 +2835,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) | |||
| 2833 | err: | 2835 | err: |
| 2834 | mutex_unlock(&adapter->mbox_lock); | 2836 | mutex_unlock(&adapter->mbox_lock); |
| 2835 | if (attribs_cmd.va) | 2837 | if (attribs_cmd.va) |
| 2836 | pci_free_consistent(adapter->pdev, attribs_cmd.size, | 2838 | dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size, |
| 2837 | attribs_cmd.va, attribs_cmd.dma); | 2839 | attribs_cmd.va, attribs_cmd.dma); |
| 2838 | return status; | 2840 | return status; |
| 2839 | } | 2841 | } |
| 2840 | 2842 | ||
| @@ -2972,9 +2974,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, | |||
| 2972 | 2974 | ||
| 2973 | memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); | 2975 | memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); |
| 2974 | get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); | 2976 | get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); |
| 2975 | get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, | 2977 | get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 2976 | get_mac_list_cmd.size, | 2978 | get_mac_list_cmd.size, |
| 2977 | &get_mac_list_cmd.dma); | 2979 | &get_mac_list_cmd.dma, |
| 2980 | GFP_ATOMIC); | ||
| 2978 | 2981 | ||
| 2979 | if (!get_mac_list_cmd.va) { | 2982 | if (!get_mac_list_cmd.va) { |
| 2980 | dev_err(&adapter->pdev->dev, | 2983 | dev_err(&adapter->pdev->dev, |
| @@ -3047,8 +3050,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, | |||
| 3047 | 3050 | ||
| 3048 | out: | 3051 | out: |
| 3049 | spin_unlock_bh(&adapter->mcc_lock); | 3052 | spin_unlock_bh(&adapter->mcc_lock); |
| 3050 | pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, | 3053 | dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, |
| 3051 | get_mac_list_cmd.va, get_mac_list_cmd.dma); | 3054 | get_mac_list_cmd.va, get_mac_list_cmd.dma); |
| 3052 | return status; | 3055 | return status; |
| 3053 | } | 3056 | } |
| 3054 | 3057 | ||
| @@ -3101,8 +3104,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, | |||
| 3101 | 3104 | ||
| 3102 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 3105 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
| 3103 | cmd.size = sizeof(struct be_cmd_req_set_mac_list); | 3106 | cmd.size = sizeof(struct be_cmd_req_set_mac_list); |
| 3104 | cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, | 3107 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 3105 | &cmd.dma, GFP_KERNEL); | 3108 | GFP_KERNEL); |
| 3106 | if (!cmd.va) | 3109 | if (!cmd.va) |
| 3107 | return -ENOMEM; | 3110 | return -ENOMEM; |
| 3108 | 3111 | ||
| @@ -3291,7 +3294,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) | |||
| 3291 | 3294 | ||
| 3292 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 3295 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
| 3293 | cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); | 3296 | cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); |
| 3294 | cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); | 3297 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 3298 | GFP_ATOMIC); | ||
| 3295 | if (!cmd.va) { | 3299 | if (!cmd.va) { |
| 3296 | dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); | 3300 | dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); |
| 3297 | status = -ENOMEM; | 3301 | status = -ENOMEM; |
| @@ -3326,7 +3330,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) | |||
| 3326 | err: | 3330 | err: |
| 3327 | mutex_unlock(&adapter->mbox_lock); | 3331 | mutex_unlock(&adapter->mbox_lock); |
| 3328 | if (cmd.va) | 3332 | if (cmd.va) |
| 3329 | pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); | 3333 | dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, |
| 3334 | cmd.dma); | ||
| 3330 | return status; | 3335 | return status; |
| 3331 | 3336 | ||
| 3332 | } | 3337 | } |
| @@ -3340,8 +3345,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) | |||
| 3340 | 3345 | ||
| 3341 | memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); | 3346 | memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); |
| 3342 | extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); | 3347 | extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); |
| 3343 | extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, | 3348 | extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 3344 | &extfat_cmd.dma); | 3349 | extfat_cmd.size, &extfat_cmd.dma, |
| 3350 | GFP_ATOMIC); | ||
| 3345 | if (!extfat_cmd.va) | 3351 | if (!extfat_cmd.va) |
| 3346 | return -ENOMEM; | 3352 | return -ENOMEM; |
| 3347 | 3353 | ||
| @@ -3363,8 +3369,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) | |||
| 3363 | 3369 | ||
| 3364 | status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); | 3370 | status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); |
| 3365 | err: | 3371 | err: |
| 3366 | pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, | 3372 | dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, |
| 3367 | extfat_cmd.dma); | 3373 | extfat_cmd.dma); |
| 3368 | return status; | 3374 | return status; |
| 3369 | } | 3375 | } |
| 3370 | 3376 | ||
| @@ -3377,8 +3383,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) | |||
| 3377 | 3383 | ||
| 3378 | memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); | 3384 | memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); |
| 3379 | extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); | 3385 | extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); |
| 3380 | extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, | 3386 | extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 3381 | &extfat_cmd.dma); | 3387 | extfat_cmd.size, &extfat_cmd.dma, |
| 3388 | GFP_ATOMIC); | ||
| 3382 | 3389 | ||
| 3383 | if (!extfat_cmd.va) { | 3390 | if (!extfat_cmd.va) { |
| 3384 | dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", | 3391 | dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", |
| @@ -3396,8 +3403,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) | |||
| 3396 | level = cfgs->module[0].trace_lvl[j].dbg_lvl; | 3403 | level = cfgs->module[0].trace_lvl[j].dbg_lvl; |
| 3397 | } | 3404 | } |
| 3398 | } | 3405 | } |
| 3399 | pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, | 3406 | dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, |
| 3400 | extfat_cmd.dma); | 3407 | extfat_cmd.dma); |
| 3401 | err: | 3408 | err: |
| 3402 | return level; | 3409 | return level; |
| 3403 | } | 3410 | } |
| @@ -3595,7 +3602,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) | |||
| 3595 | 3602 | ||
| 3596 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 3603 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
| 3597 | cmd.size = sizeof(struct be_cmd_resp_get_func_config); | 3604 | cmd.size = sizeof(struct be_cmd_resp_get_func_config); |
| 3598 | cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); | 3605 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 3606 | GFP_ATOMIC); | ||
| 3599 | if (!cmd.va) { | 3607 | if (!cmd.va) { |
| 3600 | dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); | 3608 | dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); |
| 3601 | status = -ENOMEM; | 3609 | status = -ENOMEM; |
| @@ -3635,7 +3643,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) | |||
| 3635 | err: | 3643 | err: |
| 3636 | mutex_unlock(&adapter->mbox_lock); | 3644 | mutex_unlock(&adapter->mbox_lock); |
| 3637 | if (cmd.va) | 3645 | if (cmd.va) |
| 3638 | pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); | 3646 | dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, |
| 3647 | cmd.dma); | ||
| 3639 | return status; | 3648 | return status; |
| 3640 | } | 3649 | } |
| 3641 | 3650 | ||
| @@ -3656,7 +3665,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, | |||
| 3656 | 3665 | ||
| 3657 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 3666 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
| 3658 | cmd.size = sizeof(struct be_cmd_resp_get_profile_config); | 3667 | cmd.size = sizeof(struct be_cmd_resp_get_profile_config); |
| 3659 | cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); | 3668 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 3669 | GFP_ATOMIC); | ||
| 3660 | if (!cmd.va) | 3670 | if (!cmd.va) |
| 3661 | return -ENOMEM; | 3671 | return -ENOMEM; |
| 3662 | 3672 | ||
| @@ -3702,7 +3712,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, | |||
| 3702 | res->vf_if_cap_flags = vf_res->cap_flags; | 3712 | res->vf_if_cap_flags = vf_res->cap_flags; |
| 3703 | err: | 3713 | err: |
| 3704 | if (cmd.va) | 3714 | if (cmd.va) |
| 3705 | pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); | 3715 | dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, |
| 3716 | cmd.dma); | ||
| 3706 | return status; | 3717 | return status; |
| 3707 | } | 3718 | } |
| 3708 | 3719 | ||
| @@ -3717,7 +3728,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, | |||
| 3717 | 3728 | ||
| 3718 | memset(&cmd, 0, sizeof(struct be_dma_mem)); | 3729 | memset(&cmd, 0, sizeof(struct be_dma_mem)); |
| 3719 | cmd.size = sizeof(struct be_cmd_req_set_profile_config); | 3730 | cmd.size = sizeof(struct be_cmd_req_set_profile_config); |
| 3720 | cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); | 3731 | cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, |
| 3732 | GFP_ATOMIC); | ||
| 3721 | if (!cmd.va) | 3733 | if (!cmd.va) |
| 3722 | return -ENOMEM; | 3734 | return -ENOMEM; |
| 3723 | 3735 | ||
| @@ -3733,7 +3745,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, | |||
| 3733 | status = be_cmd_notify_wait(adapter, &wrb); | 3745 | status = be_cmd_notify_wait(adapter, &wrb); |
| 3734 | 3746 | ||
| 3735 | if (cmd.va) | 3747 | if (cmd.va) |
| 3736 | pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); | 3748 | dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, |
| 3749 | cmd.dma); | ||
| 3737 | return status; | 3750 | return status; |
| 3738 | } | 3751 | } |
| 3739 | 3752 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index b765c24625bf..2835dee5dc39 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c | |||
| @@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, | |||
| 264 | int status = 0; | 264 | int status = 0; |
| 265 | 265 | ||
| 266 | read_cmd.size = LANCER_READ_FILE_CHUNK; | 266 | read_cmd.size = LANCER_READ_FILE_CHUNK; |
| 267 | read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, | 267 | read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, |
| 268 | &read_cmd.dma); | 268 | &read_cmd.dma, GFP_ATOMIC); |
| 269 | 269 | ||
| 270 | if (!read_cmd.va) { | 270 | if (!read_cmd.va) { |
| 271 | dev_err(&adapter->pdev->dev, | 271 | dev_err(&adapter->pdev->dev, |
| @@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, | |||
| 289 | break; | 289 | break; |
| 290 | } | 290 | } |
| 291 | } | 291 | } |
| 292 | pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, | 292 | dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va, |
| 293 | read_cmd.dma); | 293 | read_cmd.dma); |
| 294 | 294 | ||
| 295 | return status; | 295 | return status; |
| 296 | } | 296 | } |
| @@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter) | |||
| 818 | }; | 818 | }; |
| 819 | 819 | ||
| 820 | ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); | 820 | ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); |
| 821 | ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, | 821 | ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 822 | &ddrdma_cmd.dma, GFP_KERNEL); | 822 | ddrdma_cmd.size, &ddrdma_cmd.dma, |
| 823 | GFP_KERNEL); | ||
| 823 | if (!ddrdma_cmd.va) | 824 | if (!ddrdma_cmd.va) |
| 824 | return -ENOMEM; | 825 | return -ENOMEM; |
| 825 | 826 | ||
| @@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_device *netdev, | |||
| 941 | 942 | ||
| 942 | memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); | 943 | memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); |
| 943 | eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); | 944 | eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); |
| 944 | eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, | 945 | eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, |
| 945 | &eeprom_cmd.dma, GFP_KERNEL); | 946 | eeprom_cmd.size, &eeprom_cmd.dma, |
| 947 | GFP_KERNEL); | ||
| 946 | 948 | ||
| 947 | if (!eeprom_cmd.va) | 949 | if (!eeprom_cmd.va) |
| 948 | return -ENOMEM; | 950 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6f9ffb9026cd..e43cc8a73ea7 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
| @@ -4605,8 +4605,8 @@ static int lancer_fw_download(struct be_adapter *adapter, | |||
| 4605 | 4605 | ||
| 4606 | flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) | 4606 | flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) |
| 4607 | + LANCER_FW_DOWNLOAD_CHUNK; | 4607 | + LANCER_FW_DOWNLOAD_CHUNK; |
| 4608 | flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, | 4608 | flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, |
| 4609 | &flash_cmd.dma, GFP_KERNEL); | 4609 | &flash_cmd.dma, GFP_KERNEL); |
| 4610 | if (!flash_cmd.va) | 4610 | if (!flash_cmd.va) |
| 4611 | return -ENOMEM; | 4611 | return -ENOMEM; |
| 4612 | 4612 | ||
| @@ -4739,8 +4739,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) | |||
| 4739 | } | 4739 | } |
| 4740 | 4740 | ||
| 4741 | flash_cmd.size = sizeof(struct be_cmd_write_flashrom); | 4741 | flash_cmd.size = sizeof(struct be_cmd_write_flashrom); |
| 4742 | flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, | 4742 | flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, |
| 4743 | GFP_KERNEL); | 4743 | GFP_KERNEL); |
| 4744 | if (!flash_cmd.va) | 4744 | if (!flash_cmd.va) |
| 4745 | return -ENOMEM; | 4745 | return -ENOMEM; |
| 4746 | 4746 | ||
| @@ -5291,16 +5291,15 @@ static int be_drv_init(struct be_adapter *adapter) | |||
| 5291 | int status = 0; | 5291 | int status = 0; |
| 5292 | 5292 | ||
| 5293 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; | 5293 | mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; |
| 5294 | mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, | 5294 | mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, |
| 5295 | &mbox_mem_alloc->dma, | 5295 | &mbox_mem_alloc->dma, |
| 5296 | GFP_KERNEL); | 5296 | GFP_KERNEL); |
| 5297 | if (!mbox_mem_alloc->va) | 5297 | if (!mbox_mem_alloc->va) |
| 5298 | return -ENOMEM; | 5298 | return -ENOMEM; |
| 5299 | 5299 | ||
| 5300 | mbox_mem_align->size = sizeof(struct be_mcc_mailbox); | 5300 | mbox_mem_align->size = sizeof(struct be_mcc_mailbox); |
| 5301 | mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); | 5301 | mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); |
| 5302 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); | 5302 | mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); |
| 5303 | memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); | ||
| 5304 | 5303 | ||
| 5305 | rx_filter->size = sizeof(struct be_cmd_req_rx_filter); | 5304 | rx_filter->size = sizeof(struct be_cmd_req_rx_filter); |
| 5306 | rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, | 5305 | rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, |
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 33c35d3b7420..5d47307121ab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h | |||
| @@ -317,6 +317,7 @@ struct i40e_pf { | |||
| 317 | #endif | 317 | #endif |
| 318 | #define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) | 318 | #define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) |
| 319 | #define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) | 319 | #define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) |
| 320 | #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) | ||
| 320 | 321 | ||
| 321 | /* tracks features that get auto disabled by errors */ | 322 | /* tracks features that get auto disabled by errors */ |
| 322 | u64 auto_disable_flags; | 323 | u64 auto_disable_flags; |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 34170eabca7d..da0faf478af0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c | |||
| @@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp, | |||
| 1021 | goto command_write_done; | 1021 | goto command_write_done; |
| 1022 | } | 1022 | } |
| 1023 | 1023 | ||
| 1024 | /* By default we are in VEPA mode, if this is the first VF/VMDq | ||
| 1025 | * VSI to be added switch to VEB mode. | ||
| 1026 | */ | ||
| 1027 | if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { | ||
| 1028 | pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; | ||
| 1029 | i40e_do_reset_safe(pf, | ||
| 1030 | BIT_ULL(__I40E_PF_RESET_REQUESTED)); | ||
| 1031 | } | ||
| 1032 | |||
| 1024 | vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); | 1033 | vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); |
| 1025 | if (vsi) | 1034 | if (vsi) |
| 1026 | dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", | 1035 | dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a54c14491e3b..5b5bea159bd5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
| @@ -6097,6 +6097,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) | |||
| 6097 | if (ret) | 6097 | if (ret) |
| 6098 | goto end_reconstitute; | 6098 | goto end_reconstitute; |
| 6099 | 6099 | ||
| 6100 | if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) | ||
| 6101 | veb->bridge_mode = BRIDGE_MODE_VEB; | ||
| 6102 | else | ||
| 6103 | veb->bridge_mode = BRIDGE_MODE_VEPA; | ||
| 6100 | i40e_config_bridge_mode(veb); | 6104 | i40e_config_bridge_mode(veb); |
| 6101 | 6105 | ||
| 6102 | /* create the remaining VSIs attached to this VEB */ | 6106 | /* create the remaining VSIs attached to this VEB */ |
| @@ -8031,7 +8035,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, | |||
| 8031 | } else if (mode != veb->bridge_mode) { | 8035 | } else if (mode != veb->bridge_mode) { |
| 8032 | /* Existing HW bridge but different mode needs reset */ | 8036 | /* Existing HW bridge but different mode needs reset */ |
| 8033 | veb->bridge_mode = mode; | 8037 | veb->bridge_mode = mode; |
| 8034 | i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); | 8038 | /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */ |
| 8039 | if (mode == BRIDGE_MODE_VEB) | ||
| 8040 | pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; | ||
| 8041 | else | ||
| 8042 | pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; | ||
| 8043 | i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); | ||
| 8035 | break; | 8044 | break; |
| 8036 | } | 8045 | } |
| 8037 | } | 8046 | } |
| @@ -8343,11 +8352,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) | |||
| 8343 | ctxt.uplink_seid = vsi->uplink_seid; | 8352 | ctxt.uplink_seid = vsi->uplink_seid; |
| 8344 | ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; | 8353 | ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; |
| 8345 | ctxt.flags = I40E_AQ_VSI_TYPE_PF; | 8354 | ctxt.flags = I40E_AQ_VSI_TYPE_PF; |
| 8346 | if (i40e_is_vsi_uplink_mode_veb(vsi)) { | 8355 | if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && |
| 8356 | (i40e_is_vsi_uplink_mode_veb(vsi))) { | ||
| 8347 | ctxt.info.valid_sections |= | 8357 | ctxt.info.valid_sections |= |
| 8348 | cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); | 8358 | cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); |
| 8349 | ctxt.info.switch_id = | 8359 | ctxt.info.switch_id = |
| 8350 | cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); | 8360 | cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); |
| 8351 | } | 8361 | } |
| 8352 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); | 8362 | i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); |
| 8353 | break; | 8363 | break; |
| @@ -8746,6 +8756,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, | |||
| 8746 | __func__); | 8756 | __func__); |
| 8747 | return NULL; | 8757 | return NULL; |
| 8748 | } | 8758 | } |
| 8759 | /* We come up by default in VEPA mode if SRIOV is not | ||
| 8760 | * already enabled, in which case we can't force VEPA | ||
| 8761 | * mode. | ||
| 8762 | */ | ||
| 8763 | if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { | ||
| 8764 | veb->bridge_mode = BRIDGE_MODE_VEPA; | ||
| 8765 | pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; | ||
| 8766 | } | ||
| 8749 | i40e_config_bridge_mode(veb); | 8767 | i40e_config_bridge_mode(veb); |
| 8750 | } | 8768 | } |
| 8751 | for (i = 0; i < I40E_MAX_VEB && !veb; i++) { | 8769 | for (i = 0; i < I40E_MAX_VEB && !veb; i++) { |
| @@ -9856,6 +9874,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 9856 | goto err_switch_setup; | 9874 | goto err_switch_setup; |
| 9857 | } | 9875 | } |
| 9858 | 9876 | ||
| 9877 | #ifdef CONFIG_PCI_IOV | ||
| 9878 | /* prep for VF support */ | ||
| 9879 | if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && | ||
| 9880 | (pf->flags & I40E_FLAG_MSIX_ENABLED) && | ||
| 9881 | !test_bit(__I40E_BAD_EEPROM, &pf->state)) { | ||
| 9882 | if (pci_num_vf(pdev)) | ||
| 9883 | pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; | ||
| 9884 | } | ||
| 9885 | #endif | ||
| 9859 | err = i40e_setup_pf_switch(pf, false); | 9886 | err = i40e_setup_pf_switch(pf, false); |
| 9860 | if (err) { | 9887 | if (err) { |
| 9861 | dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); | 9888 | dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 4bd3a80aba82..9d95042d5a0f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
| @@ -2410,14 +2410,12 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) | |||
| 2410 | * i40e_chk_linearize - Check if there are more than 8 fragments per packet | 2410 | * i40e_chk_linearize - Check if there are more than 8 fragments per packet |
| 2411 | * @skb: send buffer | 2411 | * @skb: send buffer |
| 2412 | * @tx_flags: collected send information | 2412 | * @tx_flags: collected send information |
| 2413 | * @hdr_len: size of the packet header | ||
| 2414 | * | 2413 | * |
| 2415 | * Note: Our HW can't scatter-gather more than 8 fragments to build | 2414 | * Note: Our HW can't scatter-gather more than 8 fragments to build |
| 2416 | * a packet on the wire and so we need to figure out the cases where we | 2415 | * a packet on the wire and so we need to figure out the cases where we |
| 2417 | * need to linearize the skb. | 2416 | * need to linearize the skb. |
| 2418 | **/ | 2417 | **/ |
| 2419 | static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | 2418 | static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) |
| 2420 | const u8 hdr_len) | ||
| 2421 | { | 2419 | { |
| 2422 | struct skb_frag_struct *frag; | 2420 | struct skb_frag_struct *frag; |
| 2423 | bool linearize = false; | 2421 | bool linearize = false; |
| @@ -2429,7 +2427,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | |||
| 2429 | gso_segs = skb_shinfo(skb)->gso_segs; | 2427 | gso_segs = skb_shinfo(skb)->gso_segs; |
| 2430 | 2428 | ||
| 2431 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { | 2429 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { |
| 2432 | u16 j = 1; | 2430 | u16 j = 0; |
| 2433 | 2431 | ||
| 2434 | if (num_frags < (I40E_MAX_BUFFER_TXD)) | 2432 | if (num_frags < (I40E_MAX_BUFFER_TXD)) |
| 2435 | goto linearize_chk_done; | 2433 | goto linearize_chk_done; |
| @@ -2440,21 +2438,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | |||
| 2440 | goto linearize_chk_done; | 2438 | goto linearize_chk_done; |
| 2441 | } | 2439 | } |
| 2442 | frag = &skb_shinfo(skb)->frags[0]; | 2440 | frag = &skb_shinfo(skb)->frags[0]; |
| 2443 | size = hdr_len; | ||
| 2444 | /* we might still have more fragments per segment */ | 2441 | /* we might still have more fragments per segment */ |
| 2445 | do { | 2442 | do { |
| 2446 | size += skb_frag_size(frag); | 2443 | size += skb_frag_size(frag); |
| 2447 | frag++; j++; | 2444 | frag++; j++; |
| 2445 | if ((size >= skb_shinfo(skb)->gso_size) && | ||
| 2446 | (j < I40E_MAX_BUFFER_TXD)) { | ||
| 2447 | size = (size % skb_shinfo(skb)->gso_size); | ||
| 2448 | j = (size) ? 1 : 0; | ||
| 2449 | } | ||
| 2448 | if (j == I40E_MAX_BUFFER_TXD) { | 2450 | if (j == I40E_MAX_BUFFER_TXD) { |
| 2449 | if (size < skb_shinfo(skb)->gso_size) { | 2451 | linearize = true; |
| 2450 | linearize = true; | 2452 | break; |
| 2451 | break; | ||
| 2452 | } | ||
| 2453 | j = 1; | ||
| 2454 | size -= skb_shinfo(skb)->gso_size; | ||
| 2455 | if (size) | ||
| 2456 | j++; | ||
| 2457 | size += hdr_len; | ||
| 2458 | } | 2453 | } |
| 2459 | num_frags--; | 2454 | num_frags--; |
| 2460 | } while (num_frags); | 2455 | } while (num_frags); |
| @@ -2724,7 +2719,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
| 2724 | if (tsyn) | 2719 | if (tsyn) |
| 2725 | tx_flags |= I40E_TX_FLAGS_TSYN; | 2720 | tx_flags |= I40E_TX_FLAGS_TSYN; |
| 2726 | 2721 | ||
| 2727 | if (i40e_chk_linearize(skb, tx_flags, hdr_len)) | 2722 | if (i40e_chk_linearize(skb, tx_flags)) |
| 2728 | if (skb_linearize(skb)) | 2723 | if (skb_linearize(skb)) |
| 2729 | goto out_drop; | 2724 | goto out_drop; |
| 2730 | 2725 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 78d1c4ff565e..4e9376da0518 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | |||
| @@ -1018,11 +1018,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) | |||
| 1018 | { | 1018 | { |
| 1019 | struct i40e_pf *pf = pci_get_drvdata(pdev); | 1019 | struct i40e_pf *pf = pci_get_drvdata(pdev); |
| 1020 | 1020 | ||
| 1021 | if (num_vfs) | 1021 | if (num_vfs) { |
| 1022 | if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { | ||
| 1023 | pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; | ||
| 1024 | i40e_do_reset_safe(pf, | ||
| 1025 | BIT_ULL(__I40E_PF_RESET_REQUESTED)); | ||
| 1026 | } | ||
| 1022 | return i40e_pci_sriov_enable(pdev, num_vfs); | 1027 | return i40e_pci_sriov_enable(pdev, num_vfs); |
| 1028 | } | ||
| 1023 | 1029 | ||
| 1024 | if (!pci_vfs_assigned(pf->pdev)) { | 1030 | if (!pci_vfs_assigned(pf->pdev)) { |
| 1025 | i40e_free_vfs(pf); | 1031 | i40e_free_vfs(pf); |
| 1032 | pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; | ||
| 1033 | i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); | ||
| 1026 | } else { | 1034 | } else { |
| 1027 | dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); | 1035 | dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); |
| 1028 | return -EINVAL; | 1036 | return -EINVAL; |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index b077e02a0cc7..458fbb421090 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c | |||
| @@ -1619,14 +1619,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, | |||
| 1619 | * i40e_chk_linearize - Check if there are more than 8 fragments per packet | 1619 | * i40e_chk_linearize - Check if there are more than 8 fragments per packet |
| 1620 | * @skb: send buffer | 1620 | * @skb: send buffer |
| 1621 | * @tx_flags: collected send information | 1621 | * @tx_flags: collected send information |
| 1622 | * @hdr_len: size of the packet header | ||
| 1623 | * | 1622 | * |
| 1624 | * Note: Our HW can't scatter-gather more than 8 fragments to build | 1623 | * Note: Our HW can't scatter-gather more than 8 fragments to build |
| 1625 | * a packet on the wire and so we need to figure out the cases where we | 1624 | * a packet on the wire and so we need to figure out the cases where we |
| 1626 | * need to linearize the skb. | 1625 | * need to linearize the skb. |
| 1627 | **/ | 1626 | **/ |
| 1628 | static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | 1627 | static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) |
| 1629 | const u8 hdr_len) | ||
| 1630 | { | 1628 | { |
| 1631 | struct skb_frag_struct *frag; | 1629 | struct skb_frag_struct *frag; |
| 1632 | bool linearize = false; | 1630 | bool linearize = false; |
| @@ -1638,7 +1636,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | |||
| 1638 | gso_segs = skb_shinfo(skb)->gso_segs; | 1636 | gso_segs = skb_shinfo(skb)->gso_segs; |
| 1639 | 1637 | ||
| 1640 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { | 1638 | if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { |
| 1641 | u16 j = 1; | 1639 | u16 j = 0; |
| 1642 | 1640 | ||
| 1643 | if (num_frags < (I40E_MAX_BUFFER_TXD)) | 1641 | if (num_frags < (I40E_MAX_BUFFER_TXD)) |
| 1644 | goto linearize_chk_done; | 1642 | goto linearize_chk_done; |
| @@ -1649,21 +1647,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, | |||
| 1649 | goto linearize_chk_done; | 1647 | goto linearize_chk_done; |
| 1650 | } | 1648 | } |
| 1651 | frag = &skb_shinfo(skb)->frags[0]; | 1649 | frag = &skb_shinfo(skb)->frags[0]; |
| 1652 | size = hdr_len; | ||
| 1653 | /* we might still have more fragments per segment */ | 1650 | /* we might still have more fragments per segment */ |
| 1654 | do { | 1651 | do { |
| 1655 | size += skb_frag_size(frag); | 1652 | size += skb_frag_size(frag); |
| 1656 | frag++; j++; | 1653 | frag++; j++; |
| 1654 | if ((size >= skb_shinfo(skb)->gso_size) && | ||
| 1655 | (j < I40E_MAX_BUFFER_TXD)) { | ||
| 1656 | size = (size % skb_shinfo(skb)->gso_size); | ||
| 1657 | j = (size) ? 1 : 0; | ||
| 1658 | } | ||
| 1657 | if (j == I40E_MAX_BUFFER_TXD) { | 1659 | if (j == I40E_MAX_BUFFER_TXD) { |
| 1658 | if (size < skb_shinfo(skb)->gso_size) { | 1660 | linearize = true; |
| 1659 | linearize = true; | 1661 | break; |
| 1660 | break; | ||
| 1661 | } | ||
| 1662 | j = 1; | ||
| 1663 | size -= skb_shinfo(skb)->gso_size; | ||
| 1664 | if (size) | ||
| 1665 | j++; | ||
| 1666 | size += hdr_len; | ||
| 1667 | } | 1662 | } |
| 1668 | num_frags--; | 1663 | num_frags--; |
| 1669 | } while (num_frags); | 1664 | } while (num_frags); |
| @@ -1950,7 +1945,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
| 1950 | else if (tso) | 1945 | else if (tso) |
| 1951 | tx_flags |= I40E_TX_FLAGS_TSO; | 1946 | tx_flags |= I40E_TX_FLAGS_TSO; |
| 1952 | 1947 | ||
| 1953 | if (i40e_chk_linearize(skb, tx_flags, hdr_len)) | 1948 | if (i40e_chk_linearize(skb, tx_flags)) |
| 1954 | if (skb_linearize(skb)) | 1949 | if (skb_linearize(skb)) |
| 1955 | goto out_drop; | 1950 | goto out_drop; |
| 1956 | 1951 | ||
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index e3b9b63ad010..c3a9392cbc19 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c | |||
| @@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, | |||
| 538 | igb->perout[i].start.tv_nsec = rq->perout.start.nsec; | 538 | igb->perout[i].start.tv_nsec = rq->perout.start.nsec; |
| 539 | igb->perout[i].period.tv_sec = ts.tv_sec; | 539 | igb->perout[i].period.tv_sec = ts.tv_sec; |
| 540 | igb->perout[i].period.tv_nsec = ts.tv_nsec; | 540 | igb->perout[i].period.tv_nsec = ts.tv_nsec; |
| 541 | wr32(trgttiml, rq->perout.start.sec); | 541 | wr32(trgttimh, rq->perout.start.sec); |
| 542 | wr32(trgttimh, rq->perout.start.nsec); | 542 | wr32(trgttiml, rq->perout.start.nsec); |
| 543 | tsauxc |= tsauxc_mask; | 543 | tsauxc |= tsauxc_mask; |
| 544 | tsim |= tsim_mask; | 544 | tsim |= tsim_mask; |
| 545 | } else { | 545 | } else { |
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index c0ad95d2f63d..809ea4610a77 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
| @@ -224,12 +224,17 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx, | |||
| 224 | } | 224 | } |
| 225 | } | 225 | } |
| 226 | 226 | ||
| 227 | static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf) | 227 | static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, |
| 228 | struct efx_rx_buffer *rx_buf, | ||
| 229 | unsigned int num_bufs) | ||
| 228 | { | 230 | { |
| 229 | if (rx_buf->page) { | 231 | do { |
| 230 | put_page(rx_buf->page); | 232 | if (rx_buf->page) { |
| 231 | rx_buf->page = NULL; | 233 | put_page(rx_buf->page); |
| 232 | } | 234 | rx_buf->page = NULL; |
| 235 | } | ||
| 236 | rx_buf = efx_rx_buf_next(rx_queue, rx_buf); | ||
| 237 | } while (--num_bufs); | ||
| 233 | } | 238 | } |
| 234 | 239 | ||
| 235 | /* Attempt to recycle the page if there is an RX recycle ring; the page can | 240 | /* Attempt to recycle the page if there is an RX recycle ring; the page can |
| @@ -278,7 +283,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, | |||
| 278 | /* If this is the last buffer in a page, unmap and free it. */ | 283 | /* If this is the last buffer in a page, unmap and free it. */ |
| 279 | if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { | 284 | if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) { |
| 280 | efx_unmap_rx_buffer(rx_queue->efx, rx_buf); | 285 | efx_unmap_rx_buffer(rx_queue->efx, rx_buf); |
| 281 | efx_free_rx_buffer(rx_buf); | 286 | efx_free_rx_buffers(rx_queue, rx_buf, 1); |
| 282 | } | 287 | } |
| 283 | rx_buf->page = NULL; | 288 | rx_buf->page = NULL; |
| 284 | } | 289 | } |
| @@ -304,10 +309,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel, | |||
| 304 | 309 | ||
| 305 | efx_recycle_rx_pages(channel, rx_buf, n_frags); | 310 | efx_recycle_rx_pages(channel, rx_buf, n_frags); |
| 306 | 311 | ||
| 307 | do { | 312 | efx_free_rx_buffers(rx_queue, rx_buf, n_frags); |
| 308 | efx_free_rx_buffer(rx_buf); | ||
| 309 | rx_buf = efx_rx_buf_next(rx_queue, rx_buf); | ||
| 310 | } while (--n_frags); | ||
| 311 | } | 313 | } |
| 312 | 314 | ||
| 313 | /** | 315 | /** |
| @@ -431,11 +433,10 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, | |||
| 431 | 433 | ||
| 432 | skb = napi_get_frags(napi); | 434 | skb = napi_get_frags(napi); |
| 433 | if (unlikely(!skb)) { | 435 | if (unlikely(!skb)) { |
| 434 | while (n_frags--) { | 436 | struct efx_rx_queue *rx_queue; |
| 435 | put_page(rx_buf->page); | 437 | |
| 436 | rx_buf->page = NULL; | 438 | rx_queue = efx_channel_get_rx_queue(channel); |
| 437 | rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf); | 439 | efx_free_rx_buffers(rx_queue, rx_buf, n_frags); |
| 438 | } | ||
| 439 | return; | 440 | return; |
| 440 | } | 441 | } |
| 441 | 442 | ||
| @@ -622,7 +623,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, | |||
| 622 | 623 | ||
| 623 | skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); | 624 | skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len); |
| 624 | if (unlikely(skb == NULL)) { | 625 | if (unlikely(skb == NULL)) { |
| 625 | efx_free_rx_buffer(rx_buf); | 626 | struct efx_rx_queue *rx_queue; |
| 627 | |||
| 628 | rx_queue = efx_channel_get_rx_queue(channel); | ||
| 629 | efx_free_rx_buffers(rx_queue, rx_buf, n_frags); | ||
| 626 | return; | 630 | return; |
| 627 | } | 631 | } |
| 628 | skb_record_rx_queue(skb, channel->rx_queue.core_index); | 632 | skb_record_rx_queue(skb, channel->rx_queue.core_index); |
| @@ -661,8 +665,12 @@ void __efx_rx_packet(struct efx_channel *channel) | |||
| 661 | * loopback layer, and free the rx_buf here | 665 | * loopback layer, and free the rx_buf here |
| 662 | */ | 666 | */ |
| 663 | if (unlikely(efx->loopback_selftest)) { | 667 | if (unlikely(efx->loopback_selftest)) { |
| 668 | struct efx_rx_queue *rx_queue; | ||
| 669 | |||
| 664 | efx_loopback_rx_packet(efx, eh, rx_buf->len); | 670 | efx_loopback_rx_packet(efx, eh, rx_buf->len); |
| 665 | efx_free_rx_buffer(rx_buf); | 671 | rx_queue = efx_channel_get_rx_queue(channel); |
| 672 | efx_free_rx_buffers(rx_queue, rx_buf, | ||
| 673 | channel->rx_pkt_n_frags); | ||
| 666 | goto out; | 674 | goto out; |
| 667 | } | 675 | } |
| 668 | 676 | ||
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c index 4ec9811f49c8..65efb1468988 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c | |||
| @@ -511,11 +511,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, | |||
| 511 | msgbuf->rx_pktids, | 511 | msgbuf->rx_pktids, |
| 512 | msgbuf->ioctl_resp_pktid); | 512 | msgbuf->ioctl_resp_pktid); |
| 513 | if (msgbuf->ioctl_resp_ret_len != 0) { | 513 | if (msgbuf->ioctl_resp_ret_len != 0) { |
| 514 | if (!skb) { | 514 | if (!skb) |
| 515 | brcmf_err("Invalid packet id idx recv'd %d\n", | ||
| 516 | msgbuf->ioctl_resp_pktid); | ||
| 517 | return -EBADF; | 515 | return -EBADF; |
| 518 | } | 516 | |
| 519 | memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? | 517 | memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? |
| 520 | len : msgbuf->ioctl_resp_ret_len); | 518 | len : msgbuf->ioctl_resp_ret_len); |
| 521 | } | 519 | } |
| @@ -874,10 +872,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf) | |||
| 874 | flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; | 872 | flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; |
| 875 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, | 873 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, |
| 876 | msgbuf->tx_pktids, idx); | 874 | msgbuf->tx_pktids, idx); |
| 877 | if (!skb) { | 875 | if (!skb) |
| 878 | brcmf_err("Invalid packet id idx recv'd %d\n", idx); | ||
| 879 | return; | 876 | return; |
| 880 | } | ||
| 881 | 877 | ||
| 882 | set_bit(flowid, msgbuf->txstatus_done_map); | 878 | set_bit(flowid, msgbuf->txstatus_done_map); |
| 883 | commonring = msgbuf->flowrings[flowid]; | 879 | commonring = msgbuf->flowrings[flowid]; |
| @@ -1156,6 +1152,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) | |||
| 1156 | 1152 | ||
| 1157 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, | 1153 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, |
| 1158 | msgbuf->rx_pktids, idx); | 1154 | msgbuf->rx_pktids, idx); |
| 1155 | if (!skb) | ||
| 1156 | return; | ||
| 1159 | 1157 | ||
| 1160 | if (data_offset) | 1158 | if (data_offset) |
| 1161 | skb_pull(skb, data_offset); | 1159 | skb_pull(skb, data_offset); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 75e96db6626b..8e604a3931ca 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | |||
| @@ -471,7 +471,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, | |||
| 471 | if (cfg->device_family != IWL_DEVICE_FAMILY_8000) | 471 | if (cfg->device_family != IWL_DEVICE_FAMILY_8000) |
| 472 | return le16_to_cpup(nvm_sw + RADIO_CFG); | 472 | return le16_to_cpup(nvm_sw + RADIO_CFG); |
| 473 | 473 | ||
| 474 | return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000)); | 474 | return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000)); |
| 475 | 475 | ||
| 476 | } | 476 | } |
| 477 | 477 | ||
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 01996c9d98a7..376b84e54ad7 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /****************************************************************************** | 1 | /****************************************************************************** |
| 2 | * | 2 | * |
| 3 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. | 3 | * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. |
| 4 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH | 4 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 5 | * | 5 | * |
| 6 | * Portions of this file are derived from the ipw3945 project, as well | 6 | * Portions of this file are derived from the ipw3945 project, as well |
| 7 | * as portions of the ieee80211 subsystem header files. | 7 | * as portions of the ieee80211 subsystem header files. |
| @@ -320,7 +320,7 @@ struct iwl_trans_pcie { | |||
| 320 | 320 | ||
| 321 | /*protect hw register */ | 321 | /*protect hw register */ |
| 322 | spinlock_t reg_lock; | 322 | spinlock_t reg_lock; |
| 323 | bool cmd_in_flight; | 323 | bool cmd_hold_nic_awake; |
| 324 | bool ref_cmd_in_flight; | 324 | bool ref_cmd_in_flight; |
| 325 | 325 | ||
| 326 | /* protect ref counter */ | 326 | /* protect ref counter */ |
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index d6f6515fe663..dc179094e6a0 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
| @@ -1372,7 +1372,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent, | |||
| 1372 | 1372 | ||
| 1373 | spin_lock_irqsave(&trans_pcie->reg_lock, *flags); | 1373 | spin_lock_irqsave(&trans_pcie->reg_lock, *flags); |
| 1374 | 1374 | ||
| 1375 | if (trans_pcie->cmd_in_flight) | 1375 | if (trans_pcie->cmd_hold_nic_awake) |
| 1376 | goto out; | 1376 | goto out; |
| 1377 | 1377 | ||
| 1378 | /* this bit wakes up the NIC */ | 1378 | /* this bit wakes up the NIC */ |
| @@ -1438,7 +1438,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, | |||
| 1438 | */ | 1438 | */ |
| 1439 | __acquire(&trans_pcie->reg_lock); | 1439 | __acquire(&trans_pcie->reg_lock); |
| 1440 | 1440 | ||
| 1441 | if (trans_pcie->cmd_in_flight) | 1441 | if (trans_pcie->cmd_hold_nic_awake) |
| 1442 | goto out; | 1442 | goto out; |
| 1443 | 1443 | ||
| 1444 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, | 1444 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, |
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 06952aadfd7b..5ef8044c2ea3 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c | |||
| @@ -1039,18 +1039,14 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, | |||
| 1039 | iwl_trans_pcie_ref(trans); | 1039 | iwl_trans_pcie_ref(trans); |
| 1040 | } | 1040 | } |
| 1041 | 1041 | ||
| 1042 | if (trans_pcie->cmd_in_flight) | ||
| 1043 | return 0; | ||
| 1044 | |||
| 1045 | trans_pcie->cmd_in_flight = true; | ||
| 1046 | |||
| 1047 | /* | 1042 | /* |
| 1048 | * wake up the NIC to make sure that the firmware will see the host | 1043 | * wake up the NIC to make sure that the firmware will see the host |
| 1049 | * command - we will let the NIC sleep once all the host commands | 1044 | * command - we will let the NIC sleep once all the host commands |
| 1050 | * returned. This needs to be done only on NICs that have | 1045 | * returned. This needs to be done only on NICs that have |
| 1051 | * apmg_wake_up_wa set. | 1046 | * apmg_wake_up_wa set. |
| 1052 | */ | 1047 | */ |
| 1053 | if (trans->cfg->base_params->apmg_wake_up_wa) { | 1048 | if (trans->cfg->base_params->apmg_wake_up_wa && |
| 1049 | !trans_pcie->cmd_hold_nic_awake) { | ||
| 1054 | __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, | 1050 | __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, |
| 1055 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | 1051 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
| 1056 | if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) | 1052 | if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) |
| @@ -1064,10 +1060,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, | |||
| 1064 | if (ret < 0) { | 1060 | if (ret < 0) { |
| 1065 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, | 1061 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, |
| 1066 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | 1062 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
| 1067 | trans_pcie->cmd_in_flight = false; | ||
| 1068 | IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); | 1063 | IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); |
| 1069 | return -EIO; | 1064 | return -EIO; |
| 1070 | } | 1065 | } |
| 1066 | trans_pcie->cmd_hold_nic_awake = true; | ||
| 1071 | } | 1067 | } |
| 1072 | 1068 | ||
| 1073 | return 0; | 1069 | return 0; |
| @@ -1085,15 +1081,14 @@ static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) | |||
| 1085 | iwl_trans_pcie_unref(trans); | 1081 | iwl_trans_pcie_unref(trans); |
| 1086 | } | 1082 | } |
| 1087 | 1083 | ||
| 1088 | if (WARN_ON(!trans_pcie->cmd_in_flight)) | 1084 | if (trans->cfg->base_params->apmg_wake_up_wa) { |
| 1089 | return 0; | 1085 | if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) |
| 1090 | 1086 | return 0; | |
| 1091 | trans_pcie->cmd_in_flight = false; | ||
| 1092 | 1087 | ||
| 1093 | if (trans->cfg->base_params->apmg_wake_up_wa) | 1088 | trans_pcie->cmd_hold_nic_awake = false; |
| 1094 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, | 1089 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, |
| 1095 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | 1090 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
| 1096 | 1091 | } | |
| 1097 | return 0; | 1092 | return 0; |
| 1098 | } | 1093 | } |
| 1099 | 1094 | ||
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 4de46aa61d95..0d2594395ffb 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
| @@ -1250,7 +1250,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, | |||
| 1250 | netdev_err(queue->vif->dev, | 1250 | netdev_err(queue->vif->dev, |
| 1251 | "txreq.offset: %x, size: %u, end: %lu\n", | 1251 | "txreq.offset: %x, size: %u, end: %lu\n", |
| 1252 | txreq.offset, txreq.size, | 1252 | txreq.offset, txreq.size, |
| 1253 | (txreq.offset&~PAGE_MASK) + txreq.size); | 1253 | (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size); |
| 1254 | xenvif_fatal_tx_err(queue->vif); | 1254 | xenvif_fatal_tx_err(queue->vif); |
| 1255 | break; | 1255 | break; |
| 1256 | } | 1256 | } |
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index fee02414529e..968787abf78d 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c | |||
| @@ -34,6 +34,8 @@ struct backend_info { | |||
| 34 | enum xenbus_state frontend_state; | 34 | enum xenbus_state frontend_state; |
| 35 | struct xenbus_watch hotplug_status_watch; | 35 | struct xenbus_watch hotplug_status_watch; |
| 36 | u8 have_hotplug_status_watch:1; | 36 | u8 have_hotplug_status_watch:1; |
| 37 | |||
| 38 | const char *hotplug_script; | ||
| 37 | }; | 39 | }; |
| 38 | 40 | ||
| 39 | static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); | 41 | static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); |
| @@ -238,6 +240,7 @@ static int netback_remove(struct xenbus_device *dev) | |||
| 238 | xenvif_free(be->vif); | 240 | xenvif_free(be->vif); |
| 239 | be->vif = NULL; | 241 | be->vif = NULL; |
| 240 | } | 242 | } |
| 243 | kfree(be->hotplug_script); | ||
| 241 | kfree(be); | 244 | kfree(be); |
| 242 | dev_set_drvdata(&dev->dev, NULL); | 245 | dev_set_drvdata(&dev->dev, NULL); |
| 243 | return 0; | 246 | return 0; |
| @@ -255,6 +258,7 @@ static int netback_probe(struct xenbus_device *dev, | |||
| 255 | struct xenbus_transaction xbt; | 258 | struct xenbus_transaction xbt; |
| 256 | int err; | 259 | int err; |
| 257 | int sg; | 260 | int sg; |
| 261 | const char *script; | ||
| 258 | struct backend_info *be = kzalloc(sizeof(struct backend_info), | 262 | struct backend_info *be = kzalloc(sizeof(struct backend_info), |
| 259 | GFP_KERNEL); | 263 | GFP_KERNEL); |
| 260 | if (!be) { | 264 | if (!be) { |
| @@ -347,6 +351,15 @@ static int netback_probe(struct xenbus_device *dev, | |||
| 347 | if (err) | 351 | if (err) |
| 348 | pr_debug("Error writing multi-queue-max-queues\n"); | 352 | pr_debug("Error writing multi-queue-max-queues\n"); |
| 349 | 353 | ||
| 354 | script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL); | ||
| 355 | if (IS_ERR(script)) { | ||
| 356 | err = PTR_ERR(script); | ||
| 357 | xenbus_dev_fatal(dev, err, "reading script"); | ||
| 358 | goto fail; | ||
| 359 | } | ||
| 360 | |||
| 361 | be->hotplug_script = script; | ||
| 362 | |||
| 350 | err = xenbus_switch_state(dev, XenbusStateInitWait); | 363 | err = xenbus_switch_state(dev, XenbusStateInitWait); |
| 351 | if (err) | 364 | if (err) |
| 352 | goto fail; | 365 | goto fail; |
| @@ -379,22 +392,14 @@ static int netback_uevent(struct xenbus_device *xdev, | |||
| 379 | struct kobj_uevent_env *env) | 392 | struct kobj_uevent_env *env) |
| 380 | { | 393 | { |
| 381 | struct backend_info *be = dev_get_drvdata(&xdev->dev); | 394 | struct backend_info *be = dev_get_drvdata(&xdev->dev); |
| 382 | char *val; | ||
| 383 | 395 | ||
| 384 | val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL); | 396 | if (!be) |
| 385 | if (IS_ERR(val)) { | 397 | return 0; |
| 386 | int err = PTR_ERR(val); | 398 | |
| 387 | xenbus_dev_fatal(xdev, err, "reading script"); | 399 | if (add_uevent_var(env, "script=%s", be->hotplug_script)) |
| 388 | return err; | 400 | return -ENOMEM; |
| 389 | } else { | ||
| 390 | if (add_uevent_var(env, "script=%s", val)) { | ||
| 391 | kfree(val); | ||
| 392 | return -ENOMEM; | ||
| 393 | } | ||
| 394 | kfree(val); | ||
| 395 | } | ||
| 396 | 401 | ||
| 397 | if (!be || !be->vif) | 402 | if (!be->vif) |
| 398 | return 0; | 403 | return 0; |
| 399 | 404 | ||
| 400 | return add_uevent_var(env, "vif=%s", be->vif->dev->name); | 405 | return add_uevent_var(env, "vif=%s", be->vif->dev->name); |
