aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/nfit/core.c10
-rw-r--r--drivers/atm/zatm.c2
-rw-r--r--drivers/base/power/domain.c8
-rw-r--r--drivers/base/regmap/regmap-w1.c4
-rw-r--r--drivers/clocksource/timer-of.c12
-rw-r--r--drivers/cpufreq/intel_pstate.c21
-rw-r--r--drivers/dax/device-dax.h2
-rw-r--r--drivers/dax/device.c33
-rw-r--r--drivers/dax/pmem.c12
-rw-r--r--drivers/hid/hid-logitech-hidpp.c3
-rw-r--r--drivers/hid/hid-multitouch.c16
-rw-r--r--drivers/ide/ide-timings.c18
-rw-r--r--drivers/infiniband/core/addr.c46
-rw-r--r--drivers/infiniband/core/cma.c32
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c11
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c23
-rw-r--r--drivers/infiniband/core/verbs.c51
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c7
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c7
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c86
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c3
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c40
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c8
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c36
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c15
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c48
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c31
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c11
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c10
-rw-r--r--drivers/irqchip/irq-digicolor.c2
-rw-r--r--drivers/irqchip/irq-gic-realview.c2
-rw-r--r--drivers/irqchip/irq-mips-cpu.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c2
-rw-r--r--drivers/isdn/divert/isdn_divert.c25
-rw-r--r--drivers/isdn/hardware/avm/c4.c2
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c2
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c2
-rw-r--r--drivers/isdn/hisax/config.c2
-rw-r--r--drivers/isdn/hisax/hfc4s8s_l1.c2
-rw-r--r--drivers/isdn/hisax/hisax_fcpcipnp.c2
-rw-r--r--drivers/md/bitmap.c3
-rw-r--r--drivers/md/md.h4
-rw-r--r--drivers/md/raid5-ppl.c2
-rw-r--r--drivers/md/raid5.c4
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c1
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c22
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c21
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c70
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c7
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c299
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c58
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c10
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c9
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c49
-rw-r--r--drivers/net/phy/mdio-mux.c4
-rw-r--r--drivers/net/ppp/ppp_generic.c30
-rw-r--r--drivers/net/usb/cdc_ncm.c28
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c6
-rw-r--r--drivers/net/usb/smsc95xx.c1
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/nvdimm/core.c7
-rw-r--r--drivers/scsi/cxlflash/main.c11
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c10
-rw-r--r--drivers/scsi/isci/request.c14
-rw-r--r--drivers/scsi/libfc/fc_disc.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c2
-rw-r--r--drivers/scsi/qedi/qedi.h17
-rw-r--r--drivers/scsi/qedi/qedi_fw.c2
-rw-r--r--drivers/scsi/qedi/qedi_main.c419
-rw-r--r--drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h210
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c2
-rw-r--r--drivers/scsi/sg.c5
-rw-r--r--drivers/scsi/virtio_scsi.c1
113 files changed, 1541 insertions, 650 deletions
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b75b734ee73a..19182d091587 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3160,6 +3160,8 @@ static struct acpi_driver acpi_nfit_driver = {
3160 3160
3161static __init int nfit_init(void) 3161static __init int nfit_init(void)
3162{ 3162{
3163 int ret;
3164
3163 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); 3165 BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
3164 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); 3166 BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
3165 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); 3167 BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
@@ -3187,8 +3189,14 @@ static __init int nfit_init(void)
3187 return -ENOMEM; 3189 return -ENOMEM;
3188 3190
3189 nfit_mce_register(); 3191 nfit_mce_register();
3192 ret = acpi_bus_register_driver(&acpi_nfit_driver);
3193 if (ret) {
3194 nfit_mce_unregister();
3195 destroy_workqueue(nfit_wq);
3196 }
3197
3198 return ret;
3190 3199
3191 return acpi_bus_register_driver(&acpi_nfit_driver);
3192} 3200}
3193 3201
3194static __exit void nfit_exit(void) 3202static __exit void nfit_exit(void)
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 292dec18ffb8..07bdd51b3b9a 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1613,7 +1613,7 @@ static int zatm_init_one(struct pci_dev *pci_dev,
1613 1613
1614 ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); 1614 ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
1615 if (ret < 0) 1615 if (ret < 0)
1616 goto out_disable; 1616 goto out_release;
1617 1617
1618 zatm_dev->pci_dev = pci_dev; 1618 zatm_dev->pci_dev = pci_dev;
1619 dev->dev_data = zatm_dev; 1619 dev->dev_data = zatm_dev;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 3b8210ebb50e..60303aa28587 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1222,8 +1222,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1222 1222
1223 spin_unlock_irq(&dev->power.lock); 1223 spin_unlock_irq(&dev->power.lock);
1224 1224
1225 dev_pm_domain_set(dev, &genpd->domain);
1226
1227 return gpd_data; 1225 return gpd_data;
1228 1226
1229 err_free: 1227 err_free:
@@ -1237,8 +1235,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1237static void genpd_free_dev_data(struct device *dev, 1235static void genpd_free_dev_data(struct device *dev,
1238 struct generic_pm_domain_data *gpd_data) 1236 struct generic_pm_domain_data *gpd_data)
1239{ 1237{
1240 dev_pm_domain_set(dev, NULL);
1241
1242 spin_lock_irq(&dev->power.lock); 1238 spin_lock_irq(&dev->power.lock);
1243 1239
1244 dev->power.subsys_data->domain_data = NULL; 1240 dev->power.subsys_data->domain_data = NULL;
@@ -1275,6 +1271,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1275 if (ret) 1271 if (ret)
1276 goto out; 1272 goto out;
1277 1273
1274 dev_pm_domain_set(dev, &genpd->domain);
1275
1278 genpd->device_count++; 1276 genpd->device_count++;
1279 genpd->max_off_time_changed = true; 1277 genpd->max_off_time_changed = true;
1280 1278
@@ -1336,6 +1334,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
1336 if (genpd->detach_dev) 1334 if (genpd->detach_dev)
1337 genpd->detach_dev(genpd, dev); 1335 genpd->detach_dev(genpd, dev);
1338 1336
1337 dev_pm_domain_set(dev, NULL);
1338
1339 list_del_init(&pdd->list_node); 1339 list_del_init(&pdd->list_node);
1340 1340
1341 genpd_unlock(genpd); 1341 genpd_unlock(genpd);
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 5f04e7bf063e..e6c64b0be5b2 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Register map access API - W1 (1-Wire) support 2 * Register map access API - W1 (1-Wire) support
3 * 3 *
4 * Copyright (C) 2017 OAO Radioavionica 4 * Copyright (c) 2017 Radioavionica Corporation
5 * Author: Alex A. Mihaylov <minimumlaw@rambler.ru> 5 * Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -11,7 +11,7 @@
11 11
12#include <linux/regmap.h> 12#include <linux/regmap.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include "../../w1/w1.h" 14#include <linux/w1.h>
15 15
16#include "internal.h" 16#include "internal.h"
17 17
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index f6e7491c873c..d509b500a7b5 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -41,8 +41,16 @@ static __init int timer_irq_init(struct device_node *np,
41 struct timer_of *to = container_of(of_irq, struct timer_of, of_irq); 41 struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
42 struct clock_event_device *clkevt = &to->clkevt; 42 struct clock_event_device *clkevt = &to->clkevt;
43 43
44 of_irq->irq = of_irq->name ? of_irq_get_byname(np, of_irq->name): 44 if (of_irq->name) {
45 irq_of_parse_and_map(np, of_irq->index); 45 of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
46 if (ret < 0) {
47 pr_err("Failed to get interrupt %s for %s\n",
48 of_irq->name, np->full_name);
49 return ret;
50 }
51 } else {
52 of_irq->irq = irq_of_parse_and_map(np, of_irq->index);
53 }
46 if (!of_irq->irq) { 54 if (!of_irq->irq) {
47 pr_err("Failed to map interrupt for %s\n", np->full_name); 55 pr_err("Failed to map interrupt for %s\n", np->full_name);
48 return -EINVAL; 56 return -EINVAL;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b7fb8b7c980d..6cd503525638 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -225,6 +225,9 @@ struct global_params {
225 * @vid: Stores VID limits for this CPU 225 * @vid: Stores VID limits for this CPU
226 * @pid: Stores PID parameters for this CPU 226 * @pid: Stores PID parameters for this CPU
227 * @last_sample_time: Last Sample time 227 * @last_sample_time: Last Sample time
228 * @aperf_mperf_shift: Number of clock cycles after aperf, merf is incremented
229 * This shift is a multiplier to mperf delta to
230 * calculate CPU busy.
228 * @prev_aperf: Last APERF value read from APERF MSR 231 * @prev_aperf: Last APERF value read from APERF MSR
229 * @prev_mperf: Last MPERF value read from MPERF MSR 232 * @prev_mperf: Last MPERF value read from MPERF MSR
230 * @prev_tsc: Last timestamp counter (TSC) value 233 * @prev_tsc: Last timestamp counter (TSC) value
@@ -259,6 +262,7 @@ struct cpudata {
259 262
260 u64 last_update; 263 u64 last_update;
261 u64 last_sample_time; 264 u64 last_sample_time;
265 u64 aperf_mperf_shift;
262 u64 prev_aperf; 266 u64 prev_aperf;
263 u64 prev_mperf; 267 u64 prev_mperf;
264 u64 prev_tsc; 268 u64 prev_tsc;
@@ -321,6 +325,7 @@ struct pstate_funcs {
321 int (*get_min)(void); 325 int (*get_min)(void);
322 int (*get_turbo)(void); 326 int (*get_turbo)(void);
323 int (*get_scaling)(void); 327 int (*get_scaling)(void);
328 int (*get_aperf_mperf_shift)(void);
324 u64 (*get_val)(struct cpudata*, int pstate); 329 u64 (*get_val)(struct cpudata*, int pstate);
325 void (*get_vid)(struct cpudata *); 330 void (*get_vid)(struct cpudata *);
326 void (*update_util)(struct update_util_data *data, u64 time, 331 void (*update_util)(struct update_util_data *data, u64 time,
@@ -1486,6 +1491,11 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
1486 return val; 1491 return val;
1487} 1492}
1488 1493
1494static int knl_get_aperf_mperf_shift(void)
1495{
1496 return 10;
1497}
1498
1489static int knl_get_turbo_pstate(void) 1499static int knl_get_turbo_pstate(void)
1490{ 1500{
1491 u64 value; 1501 u64 value;
@@ -1543,6 +1553,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
1543 cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; 1553 cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
1544 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; 1554 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1545 1555
1556 if (pstate_funcs.get_aperf_mperf_shift)
1557 cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
1558
1546 if (pstate_funcs.get_vid) 1559 if (pstate_funcs.get_vid)
1547 pstate_funcs.get_vid(cpu); 1560 pstate_funcs.get_vid(cpu);
1548 1561
@@ -1616,7 +1629,8 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
1616 int32_t busy_frac, boost; 1629 int32_t busy_frac, boost;
1617 int target, avg_pstate; 1630 int target, avg_pstate;
1618 1631
1619 busy_frac = div_fp(sample->mperf, sample->tsc); 1632 busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
1633 sample->tsc);
1620 1634
1621 boost = cpu->iowait_boost; 1635 boost = cpu->iowait_boost;
1622 cpu->iowait_boost >>= 1; 1636 cpu->iowait_boost >>= 1;
@@ -1675,7 +1689,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
1675 sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns); 1689 sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
1676 perf_scaled = mul_fp(perf_scaled, sample_ratio); 1690 perf_scaled = mul_fp(perf_scaled, sample_ratio);
1677 } else { 1691 } else {
1678 sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc); 1692 sample_ratio = div_fp(100 * (cpu->sample.mperf << cpu->aperf_mperf_shift),
1693 cpu->sample.tsc);
1679 if (sample_ratio < int_tofp(1)) 1694 if (sample_ratio < int_tofp(1))
1680 perf_scaled = 0; 1695 perf_scaled = 0;
1681 } 1696 }
@@ -1807,6 +1822,7 @@ static const struct pstate_funcs knl_funcs = {
1807 .get_max_physical = core_get_max_pstate_physical, 1822 .get_max_physical = core_get_max_pstate_physical,
1808 .get_min = core_get_min_pstate, 1823 .get_min = core_get_min_pstate,
1809 .get_turbo = knl_get_turbo_pstate, 1824 .get_turbo = knl_get_turbo_pstate,
1825 .get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
1810 .get_scaling = core_get_scaling, 1826 .get_scaling = core_get_scaling,
1811 .get_val = core_get_val, 1827 .get_val = core_get_val,
1812 .update_util = intel_pstate_update_util_pid, 1828 .update_util = intel_pstate_update_util_pid,
@@ -2403,6 +2419,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
2403 pstate_funcs.get_val = funcs->get_val; 2419 pstate_funcs.get_val = funcs->get_val;
2404 pstate_funcs.get_vid = funcs->get_vid; 2420 pstate_funcs.get_vid = funcs->get_vid;
2405 pstate_funcs.update_util = funcs->update_util; 2421 pstate_funcs.update_util = funcs->update_util;
2422 pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
2406 2423
2407 intel_pstate_use_acpi_profile(); 2424 intel_pstate_use_acpi_profile();
2408} 2425}
diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h
index fdcd9769ffde..688b051750bd 100644
--- a/drivers/dax/device-dax.h
+++ b/drivers/dax/device-dax.h
@@ -21,5 +21,5 @@ struct dax_region *alloc_dax_region(struct device *parent,
21 int region_id, struct resource *res, unsigned int align, 21 int region_id, struct resource *res, unsigned int align,
22 void *addr, unsigned long flags); 22 void *addr, unsigned long flags);
23struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, 23struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
24 struct resource *res, int count); 24 int id, struct resource *res, int count);
25#endif /* __DEVICE_DAX_H__ */ 25#endif /* __DEVICE_DAX_H__ */
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 12943d19bfc4..e9f3b3e4bbf4 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -529,7 +529,8 @@ static void dev_dax_release(struct device *dev)
529 struct dax_region *dax_region = dev_dax->region; 529 struct dax_region *dax_region = dev_dax->region;
530 struct dax_device *dax_dev = dev_dax->dax_dev; 530 struct dax_device *dax_dev = dev_dax->dax_dev;
531 531
532 ida_simple_remove(&dax_region->ida, dev_dax->id); 532 if (dev_dax->id >= 0)
533 ida_simple_remove(&dax_region->ida, dev_dax->id);
533 dax_region_put(dax_region); 534 dax_region_put(dax_region);
534 put_dax(dax_dev); 535 put_dax(dax_dev);
535 kfree(dev_dax); 536 kfree(dev_dax);
@@ -559,7 +560,7 @@ static void unregister_dev_dax(void *dev)
559} 560}
560 561
561struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, 562struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
562 struct resource *res, int count) 563 int id, struct resource *res, int count)
563{ 564{
564 struct device *parent = dax_region->dev; 565 struct device *parent = dax_region->dev;
565 struct dax_device *dax_dev; 566 struct dax_device *dax_dev;
@@ -567,7 +568,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
567 struct inode *inode; 568 struct inode *inode;
568 struct device *dev; 569 struct device *dev;
569 struct cdev *cdev; 570 struct cdev *cdev;
570 int rc = 0, i; 571 int rc, i;
572
573 if (!count)
574 return ERR_PTR(-EINVAL);
571 575
572 dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL); 576 dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
573 if (!dev_dax) 577 if (!dev_dax)
@@ -587,10 +591,16 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
587 if (i < count) 591 if (i < count)
588 goto err_id; 592 goto err_id;
589 593
590 dev_dax->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL); 594 if (id < 0) {
591 if (dev_dax->id < 0) { 595 id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
592 rc = dev_dax->id; 596 dev_dax->id = id;
593 goto err_id; 597 if (id < 0) {
598 rc = id;
599 goto err_id;
600 }
601 } else {
602 /* region provider owns @id lifetime */
603 dev_dax->id = -1;
594 } 604 }
595 605
596 /* 606 /*
@@ -598,8 +608,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
598 * device outside of mmap of the resulting character device. 608 * device outside of mmap of the resulting character device.
599 */ 609 */
600 dax_dev = alloc_dax(dev_dax, NULL, NULL); 610 dax_dev = alloc_dax(dev_dax, NULL, NULL);
601 if (!dax_dev) 611 if (!dax_dev) {
612 rc = -ENOMEM;
602 goto err_dax; 613 goto err_dax;
614 }
603 615
604 /* from here on we're committed to teardown via dax_dev_release() */ 616 /* from here on we're committed to teardown via dax_dev_release() */
605 dev = &dev_dax->dev; 617 dev = &dev_dax->dev;
@@ -620,7 +632,7 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
620 dev->parent = parent; 632 dev->parent = parent;
621 dev->groups = dax_attribute_groups; 633 dev->groups = dax_attribute_groups;
622 dev->release = dev_dax_release; 634 dev->release = dev_dax_release;
623 dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id); 635 dev_set_name(dev, "dax%d.%d", dax_region->id, id);
624 636
625 rc = cdev_device_add(cdev, dev); 637 rc = cdev_device_add(cdev, dev);
626 if (rc) { 638 if (rc) {
@@ -636,7 +648,8 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
636 return dev_dax; 648 return dev_dax;
637 649
638 err_dax: 650 err_dax:
639 ida_simple_remove(&dax_region->ida, dev_dax->id); 651 if (dev_dax->id >= 0)
652 ida_simple_remove(&dax_region->ida, dev_dax->id);
640 err_id: 653 err_id:
641 kfree(dev_dax); 654 kfree(dev_dax);
642 655
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 9f2a0b4fd801..8d8c852ba8f2 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -58,13 +58,12 @@ static void dax_pmem_percpu_kill(void *data)
58 58
59static int dax_pmem_probe(struct device *dev) 59static int dax_pmem_probe(struct device *dev)
60{ 60{
61 int rc;
62 void *addr; 61 void *addr;
63 struct resource res; 62 struct resource res;
63 int rc, id, region_id;
64 struct nd_pfn_sb *pfn_sb; 64 struct nd_pfn_sb *pfn_sb;
65 struct dev_dax *dev_dax; 65 struct dev_dax *dev_dax;
66 struct dax_pmem *dax_pmem; 66 struct dax_pmem *dax_pmem;
67 struct nd_region *nd_region;
68 struct nd_namespace_io *nsio; 67 struct nd_namespace_io *nsio;
69 struct dax_region *dax_region; 68 struct dax_region *dax_region;
70 struct nd_namespace_common *ndns; 69 struct nd_namespace_common *ndns;
@@ -123,14 +122,17 @@ static int dax_pmem_probe(struct device *dev)
123 /* adjust the dax_region resource to the start of data */ 122 /* adjust the dax_region resource to the start of data */
124 res.start += le64_to_cpu(pfn_sb->dataoff); 123 res.start += le64_to_cpu(pfn_sb->dataoff);
125 124
126 nd_region = to_nd_region(dev->parent); 125 rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
127 dax_region = alloc_dax_region(dev, nd_region->id, &res, 126 if (rc != 2)
127 return -EINVAL;
128
129 dax_region = alloc_dax_region(dev, region_id, &res,
128 le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP); 130 le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
129 if (!dax_region) 131 if (!dax_region)
130 return -ENOMEM; 132 return -ENOMEM;
131 133
132 /* TODO: support for subdividing a dax region... */ 134 /* TODO: support for subdividing a dax region... */
133 dev_dax = devm_create_dev_dax(dax_region, &res, 1); 135 dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);
134 136
135 /* child dev_dax instances now own the lifetime of the dax_region */ 137 /* child dev_dax instances now own the lifetime of the dax_region */
136 dax_region_put(dax_region); 138 dax_region_put(dax_region);
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 41b39464ded8..501e16a9227d 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -2732,6 +2732,9 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
2732 hidpp_battery_props, 2732 hidpp_battery_props,
2733 sizeof(hidpp_battery_props), 2733 sizeof(hidpp_battery_props),
2734 GFP_KERNEL); 2734 GFP_KERNEL);
2735 if (!battery_props)
2736 return -ENOMEM;
2737
2735 num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2; 2738 num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2;
2736 2739
2737 if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE) 2740 if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f3e35e7a189d..aff20f4b6d97 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -620,16 +620,6 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
620 return 0; 620 return 0;
621} 621}
622 622
623static int mt_touch_input_mapped(struct hid_device *hdev, struct hid_input *hi,
624 struct hid_field *field, struct hid_usage *usage,
625 unsigned long **bit, int *max)
626{
627 if (usage->type == EV_KEY || usage->type == EV_ABS)
628 set_bit(usage->type, hi->input->evbit);
629
630 return -1;
631}
632
633static int mt_compute_slot(struct mt_device *td, struct input_dev *input) 623static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
634{ 624{
635 __s32 quirks = td->mtclass.quirks; 625 __s32 quirks = td->mtclass.quirks;
@@ -969,8 +959,10 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
969 return 0; 959 return 0;
970 960
971 if (field->application == HID_DG_TOUCHSCREEN || 961 if (field->application == HID_DG_TOUCHSCREEN ||
972 field->application == HID_DG_TOUCHPAD) 962 field->application == HID_DG_TOUCHPAD) {
973 return mt_touch_input_mapped(hdev, hi, field, usage, bit, max); 963 /* We own these mappings, tell hid-input to ignore them */
964 return -1;
965 }
974 966
975 /* let hid-core decide for the others */ 967 /* let hid-core decide for the others */
976 return 0; 968 return 0;
diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c
index 0e05f75934c9..1858e3ce3993 100644
--- a/drivers/ide/ide-timings.c
+++ b/drivers/ide/ide-timings.c
@@ -104,19 +104,19 @@ u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
104EXPORT_SYMBOL_GPL(ide_pio_cycle_time); 104EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
105 105
106#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1) 106#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
107#define EZ(v, unit) ((v) ? ENOUGH(v, unit) : 0) 107#define EZ(v, unit) ((v) ? ENOUGH((v) * 1000, unit) : 0)
108 108
109static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, 109static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
110 int T, int UT) 110 int T, int UT)
111{ 111{
112 q->setup = EZ(t->setup * 1000, T); 112 q->setup = EZ(t->setup, T);
113 q->act8b = EZ(t->act8b * 1000, T); 113 q->act8b = EZ(t->act8b, T);
114 q->rec8b = EZ(t->rec8b * 1000, T); 114 q->rec8b = EZ(t->rec8b, T);
115 q->cyc8b = EZ(t->cyc8b * 1000, T); 115 q->cyc8b = EZ(t->cyc8b, T);
116 q->active = EZ(t->active * 1000, T); 116 q->active = EZ(t->active, T);
117 q->recover = EZ(t->recover * 1000, T); 117 q->recover = EZ(t->recover, T);
118 q->cycle = EZ(t->cycle * 1000, T); 118 q->cycle = EZ(t->cycle, T);
119 q->udma = EZ(t->udma * 1000, UT); 119 q->udma = EZ(t->udma, UT);
120} 120}
121 121
122void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, 122void ide_timing_merge(struct ide_timing *a, struct ide_timing *b,
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a6cb379a4ebc..01236cef7bfb 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -268,6 +268,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
268 return ret; 268 return ret;
269 269
270 ret = rdma_copy_addr(dev_addr, dev, NULL); 270 ret = rdma_copy_addr(dev_addr, dev, NULL);
271 dev_addr->bound_dev_if = dev->ifindex;
271 if (vlan_id) 272 if (vlan_id)
272 *vlan_id = rdma_vlan_dev_vlan_id(dev); 273 *vlan_id = rdma_vlan_dev_vlan_id(dev);
273 dev_put(dev); 274 dev_put(dev);
@@ -280,6 +281,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
280 &((const struct sockaddr_in6 *)addr)->sin6_addr, 281 &((const struct sockaddr_in6 *)addr)->sin6_addr,
281 dev, 1)) { 282 dev, 1)) {
282 ret = rdma_copy_addr(dev_addr, dev, NULL); 283 ret = rdma_copy_addr(dev_addr, dev, NULL);
284 dev_addr->bound_dev_if = dev->ifindex;
283 if (vlan_id) 285 if (vlan_id)
284 *vlan_id = rdma_vlan_dev_vlan_id(dev); 286 *vlan_id = rdma_vlan_dev_vlan_id(dev);
285 break; 287 break;
@@ -405,10 +407,10 @@ static int addr4_resolve(struct sockaddr_in *src_in,
405 fl4.saddr = src_ip; 407 fl4.saddr = src_ip;
406 fl4.flowi4_oif = addr->bound_dev_if; 408 fl4.flowi4_oif = addr->bound_dev_if;
407 rt = ip_route_output_key(addr->net, &fl4); 409 rt = ip_route_output_key(addr->net, &fl4);
408 if (IS_ERR(rt)) { 410 ret = PTR_ERR_OR_ZERO(rt);
409 ret = PTR_ERR(rt); 411 if (ret)
410 goto out; 412 return ret;
411 } 413
412 src_in->sin_family = AF_INET; 414 src_in->sin_family = AF_INET;
413 src_in->sin_addr.s_addr = fl4.saddr; 415 src_in->sin_addr.s_addr = fl4.saddr;
414 416
@@ -423,8 +425,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
423 425
424 *prt = rt; 426 *prt = rt;
425 return 0; 427 return 0;
426out:
427 return ret;
428} 428}
429 429
430#if IS_ENABLED(CONFIG_IPV6) 430#if IS_ENABLED(CONFIG_IPV6)
@@ -509,6 +509,11 @@ static int addr_resolve(struct sockaddr *src_in,
509 struct dst_entry *dst; 509 struct dst_entry *dst;
510 int ret; 510 int ret;
511 511
512 if (!addr->net) {
513 pr_warn_ratelimited("%s: missing namespace\n", __func__);
514 return -EINVAL;
515 }
516
512 if (src_in->sa_family == AF_INET) { 517 if (src_in->sa_family == AF_INET) {
513 struct rtable *rt = NULL; 518 struct rtable *rt = NULL;
514 const struct sockaddr_in *dst_in4 = 519 const struct sockaddr_in *dst_in4 =
@@ -522,8 +527,12 @@ static int addr_resolve(struct sockaddr *src_in,
522 if (resolve_neigh) 527 if (resolve_neigh)
523 ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq); 528 ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
524 529
525 ndev = rt->dst.dev; 530 if (addr->bound_dev_if) {
526 dev_hold(ndev); 531 ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
532 } else {
533 ndev = rt->dst.dev;
534 dev_hold(ndev);
535 }
527 536
528 ip_rt_put(rt); 537 ip_rt_put(rt);
529 } else { 538 } else {
@@ -539,14 +548,27 @@ static int addr_resolve(struct sockaddr *src_in,
539 if (resolve_neigh) 548 if (resolve_neigh)
540 ret = addr_resolve_neigh(dst, dst_in, addr, seq); 549 ret = addr_resolve_neigh(dst, dst_in, addr, seq);
541 550
542 ndev = dst->dev; 551 if (addr->bound_dev_if) {
543 dev_hold(ndev); 552 ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
553 } else {
554 ndev = dst->dev;
555 dev_hold(ndev);
556 }
544 557
545 dst_release(dst); 558 dst_release(dst);
546 } 559 }
547 560
548 addr->bound_dev_if = ndev->ifindex; 561 if (ndev->flags & IFF_LOOPBACK) {
549 addr->net = dev_net(ndev); 562 ret = rdma_translate_ip(dst_in, addr, NULL);
563 /*
564 * Put the loopback device and get the translated
565 * device instead.
566 */
567 dev_put(ndev);
568 ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
569 } else {
570 addr->bound_dev_if = ndev->ifindex;
571 }
550 dev_put(ndev); 572 dev_put(ndev);
551 573
552 return ret; 574 return ret;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 31bb82d8ecd7..11aff923b633 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -623,22 +623,11 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
623 if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) 623 if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
624 return ret; 624 return ret;
625 625
626 if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) { 626 if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
627 ndev = dev_get_by_index(&init_net, bound_if_index); 627 ndev = dev_get_by_index(&init_net, bound_if_index);
628 if (ndev && ndev->flags & IFF_LOOPBACK) { 628 else
629 pr_info("detected loopback device\n");
630 dev_put(ndev);
631
632 if (!device->get_netdev)
633 return -EOPNOTSUPP;
634
635 ndev = device->get_netdev(device, port);
636 if (!ndev)
637 return -ENODEV;
638 }
639 } else {
640 gid_type = IB_GID_TYPE_IB; 629 gid_type = IB_GID_TYPE_IB;
641 } 630
642 631
643 ret = ib_find_cached_gid_by_port(device, gid, gid_type, port, 632 ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
644 ndev, NULL); 633 ndev, NULL);
@@ -2569,21 +2558,6 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
2569 goto err2; 2558 goto err2;
2570 } 2559 }
2571 2560
2572 if (ndev->flags & IFF_LOOPBACK) {
2573 dev_put(ndev);
2574 if (!id_priv->id.device->get_netdev) {
2575 ret = -EOPNOTSUPP;
2576 goto err2;
2577 }
2578
2579 ndev = id_priv->id.device->get_netdev(id_priv->id.device,
2580 id_priv->id.port_num);
2581 if (!ndev) {
2582 ret = -ENODEV;
2583 goto err2;
2584 }
2585 }
2586
2587 supported_gids = roce_gid_type_mask_support(id_priv->id.device, 2561 supported_gids = roce_gid_type_mask_support(id_priv->id.device,
2588 id_priv->id.port_num); 2562 id_priv->id.port_num);
2589 gid_type = cma_route_gid_type(addr->dev_addr.network, 2563 gid_type = cma_route_gid_type(addr->dev_addr.network,
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index db958d3207ef..94a9eefb3cfc 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -42,6 +42,8 @@
42#include <rdma/ib_cache.h> 42#include <rdma/ib_cache.h>
43#include <rdma/ib_addr.h> 43#include <rdma/ib_addr.h>
44 44
45static struct workqueue_struct *gid_cache_wq;
46
45enum gid_op_type { 47enum gid_op_type {
46 GID_DEL = 0, 48 GID_DEL = 0,
47 GID_ADD 49 GID_ADD
@@ -560,7 +562,7 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
560 } 562 }
561 INIT_WORK(&ndev_work->work, netdevice_event_work_handler); 563 INIT_WORK(&ndev_work->work, netdevice_event_work_handler);
562 564
563 queue_work(ib_wq, &ndev_work->work); 565 queue_work(gid_cache_wq, &ndev_work->work);
564 566
565 return NOTIFY_DONE; 567 return NOTIFY_DONE;
566} 568}
@@ -693,7 +695,7 @@ static int addr_event(struct notifier_block *this, unsigned long event,
693 dev_hold(ndev); 695 dev_hold(ndev);
694 work->gid_attr.ndev = ndev; 696 work->gid_attr.ndev = ndev;
695 697
696 queue_work(ib_wq, &work->work); 698 queue_work(gid_cache_wq, &work->work);
697 699
698 return NOTIFY_DONE; 700 return NOTIFY_DONE;
699} 701}
@@ -740,6 +742,10 @@ static struct notifier_block nb_inet6addr = {
740 742
741int __init roce_gid_mgmt_init(void) 743int __init roce_gid_mgmt_init(void)
742{ 744{
745 gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
746 if (!gid_cache_wq)
747 return -ENOMEM;
748
743 register_inetaddr_notifier(&nb_inetaddr); 749 register_inetaddr_notifier(&nb_inetaddr);
744 if (IS_ENABLED(CONFIG_IPV6)) 750 if (IS_ENABLED(CONFIG_IPV6))
745 register_inet6addr_notifier(&nb_inet6addr); 751 register_inet6addr_notifier(&nb_inet6addr);
@@ -764,4 +770,5 @@ void __exit roce_gid_mgmt_cleanup(void)
764 * ib-core is removed, all physical devices have been removed, 770 * ib-core is removed, all physical devices have been removed,
765 * so no issue with remaining hardware contexts. 771 * so no issue with remaining hardware contexts.
766 */ 772 */
773 destroy_workqueue(gid_cache_wq);
767} 774}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 8ba9bfb073d1..3f55d18a3791 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2005,28 +2005,13 @@ static int modify_qp(struct ib_uverbs_file *file,
2005 rdma_ah_set_port_num(&attr->alt_ah_attr, 2005 rdma_ah_set_port_num(&attr->alt_ah_attr,
2006 cmd->base.alt_dest.port_num); 2006 cmd->base.alt_dest.port_num);
2007 2007
2008 if (qp->real_qp == qp) { 2008 ret = ib_modify_qp_with_udata(qp, attr,
2009 if (cmd->base.attr_mask & IB_QP_AV) { 2009 modify_qp_mask(qp->qp_type,
2010 ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr); 2010 cmd->base.attr_mask),
2011 if (ret) 2011 udata);
2012 goto release_qp;
2013 }
2014 ret = ib_security_modify_qp(qp,
2015 attr,
2016 modify_qp_mask(qp->qp_type,
2017 cmd->base.attr_mask),
2018 udata);
2019 } else {
2020 ret = ib_security_modify_qp(qp,
2021 attr,
2022 modify_qp_mask(qp->qp_type,
2023 cmd->base.attr_mask),
2024 NULL);
2025 }
2026 2012
2027release_qp: 2013release_qp:
2028 uobj_put_obj_read(qp); 2014 uobj_put_obj_read(qp);
2029
2030out: 2015out:
2031 kfree(attr); 2016 kfree(attr);
2032 2017
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c973a83c898b..fb98ed67d5bc 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -452,6 +452,19 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
452} 452}
453EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr); 453EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
454 454
455/*
456 * This function creates ah from the incoming packet.
457 * Incoming packet has dgid of the receiver node on which this code is
458 * getting executed and, sgid contains the GID of the sender.
459 *
460 * When resolving mac address of destination, the arrived dgid is used
461 * as sgid and, sgid is used as dgid because sgid contains destinations
462 * GID whom to respond to.
463 *
464 * This is why when calling rdma_addr_find_l2_eth_by_grh() function, the
465 * position of arguments dgid and sgid do not match the order of the
466 * parameters.
467 */
455int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, 468int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
456 const struct ib_wc *wc, const struct ib_grh *grh, 469 const struct ib_wc *wc, const struct ib_grh *grh,
457 struct rdma_ah_attr *ah_attr) 470 struct rdma_ah_attr *ah_attr)
@@ -507,11 +520,6 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
507 } 520 }
508 521
509 resolved_dev = dev_get_by_index(&init_net, if_index); 522 resolved_dev = dev_get_by_index(&init_net, if_index);
510 if (resolved_dev->flags & IFF_LOOPBACK) {
511 dev_put(resolved_dev);
512 resolved_dev = idev;
513 dev_hold(resolved_dev);
514 }
515 rcu_read_lock(); 523 rcu_read_lock();
516 if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev, 524 if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
517 resolved_dev)) 525 resolved_dev))
@@ -887,6 +895,7 @@ static const struct {
887} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { 895} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
888 [IB_QPS_RESET] = { 896 [IB_QPS_RESET] = {
889 [IB_QPS_RESET] = { .valid = 1 }, 897 [IB_QPS_RESET] = { .valid = 1 },
898 [IB_QPS_ERR] = { .valid = 1 },
890 [IB_QPS_INIT] = { 899 [IB_QPS_INIT] = {
891 .valid = 1, 900 .valid = 1,
892 .req_param = { 901 .req_param = {
@@ -1268,20 +1277,36 @@ out:
1268} 1277}
1269EXPORT_SYMBOL(ib_resolve_eth_dmac); 1278EXPORT_SYMBOL(ib_resolve_eth_dmac);
1270 1279
1271int ib_modify_qp(struct ib_qp *qp, 1280/**
1272 struct ib_qp_attr *qp_attr, 1281 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1273 int qp_attr_mask) 1282 * @qp: The QP to modify.
1283 * @attr: On input, specifies the QP attributes to modify. On output,
1284 * the current values of selected QP attributes are returned.
1285 * @attr_mask: A bit-mask used to specify which attributes of the QP
1286 * are being modified.
1287 * @udata: pointer to user's input output buffer information
1288 * are being modified.
1289 * It returns 0 on success and returns appropriate error code on error.
1290 */
1291int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
1292 int attr_mask, struct ib_udata *udata)
1274{ 1293{
1294 int ret;
1275 1295
1276 if (qp_attr_mask & IB_QP_AV) { 1296 if (attr_mask & IB_QP_AV) {
1277 int ret; 1297 ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
1278
1279 ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
1280 if (ret) 1298 if (ret)
1281 return ret; 1299 return ret;
1282 } 1300 }
1301 return ib_security_modify_qp(qp, attr, attr_mask, udata);
1302}
1303EXPORT_SYMBOL(ib_modify_qp_with_udata);
1283 1304
1284 return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); 1305int ib_modify_qp(struct ib_qp *qp,
1306 struct ib_qp_attr *qp_attr,
1307 int qp_attr_mask)
1308{
1309 return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
1285} 1310}
1286EXPORT_SYMBOL(ib_modify_qp); 1311EXPORT_SYMBOL(ib_modify_qp);
1287 1312
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 2ba00b89df6a..94b54850ec75 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12847,7 +12847,12 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12847 /* clear from the handled mask of the general interrupt */ 12847 /* clear from the handled mask of the general interrupt */
12848 m = isrc / 64; 12848 m = isrc / 64;
12849 n = isrc % 64; 12849 n = isrc % 64;
12850 dd->gi_mask[m] &= ~((u64)1 << n); 12850 if (likely(m < CCE_NUM_INT_CSRS)) {
12851 dd->gi_mask[m] &= ~((u64)1 << n);
12852 } else {
12853 dd_dev_err(dd, "remap interrupt err\n");
12854 return;
12855 }
12851 12856
12852 /* direct the chip source to the given MSI-X interrupt */ 12857 /* direct the chip source to the given MSI-X interrupt */
12853 m = isrc / 8; 12858 m = isrc / 8;
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 650305cc0373..1a7af9f60c13 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -647,18 +647,17 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
647 qp->pid); 647 qp->pid);
648} 648}
649 649
650void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, 650void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
651 gfp_t gfp)
652{ 651{
653 struct hfi1_qp_priv *priv; 652 struct hfi1_qp_priv *priv;
654 653
655 priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node); 654 priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
656 if (!priv) 655 if (!priv)
657 return ERR_PTR(-ENOMEM); 656 return ERR_PTR(-ENOMEM);
658 657
659 priv->owner = qp; 658 priv->owner = qp;
660 659
661 priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp, 660 priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
662 rdi->dparms.node); 661 rdi->dparms.node);
663 if (!priv->s_ahg) { 662 if (!priv->s_ahg) {
664 kfree(priv); 663 kfree(priv);
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index 1eb9cd7b8c19..6fe542b6a927 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -123,8 +123,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp);
123/* 123/*
124 * Functions provided by hfi1 driver for rdmavt to use 124 * Functions provided by hfi1 driver for rdmavt to use
125 */ 125 */
126void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, 126void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
127 gfp_t gfp);
128void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp); 127void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
129unsigned free_all_qps(struct rvt_dev_info *rdi); 128unsigned free_all_qps(struct rvt_dev_info *rdi);
130void notify_qp_reset(struct rvt_qp *qp); 129void notify_qp_reset(struct rvt_qp *qp);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 37d5d29597a4..23fad6d96944 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -228,14 +228,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
228 switch (wr->opcode) { 228 switch (wr->opcode) {
229 case IB_WR_RDMA_READ: 229 case IB_WR_RDMA_READ:
230 ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ; 230 ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
231 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, 231 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
232 atomic_wr(wr)->rkey); 232 rdma_wr(wr)->rkey);
233 break; 233 break;
234 case IB_WR_RDMA_WRITE: 234 case IB_WR_RDMA_WRITE:
235 case IB_WR_RDMA_WRITE_WITH_IMM: 235 case IB_WR_RDMA_WRITE_WITH_IMM:
236 ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE; 236 ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
237 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, 237 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
238 atomic_wr(wr)->rkey); 238 rdma_wr(wr)->rkey);
239 break; 239 break;
240 case IB_WR_SEND: 240 case IB_WR_SEND:
241 case IB_WR_SEND_WITH_INV: 241 case IB_WR_SEND_WITH_INV:
@@ -661,9 +661,11 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
661 union ib_gid dgid; 661 union ib_gid dgid;
662 u64 subnet_prefix; 662 u64 subnet_prefix;
663 int attr_mask = 0; 663 int attr_mask = 0;
664 int i; 664 int i, j;
665 int ret; 665 int ret;
666 u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
666 u8 phy_port; 667 u8 phy_port;
668 u8 port = 0;
667 u8 sl; 669 u8 sl;
668 670
669 priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; 671 priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
@@ -709,11 +711,27 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
709 attr.rnr_retry = 7; 711 attr.rnr_retry = 7;
710 attr.timeout = 0x12; 712 attr.timeout = 0x12;
711 attr.path_mtu = IB_MTU_256; 713 attr.path_mtu = IB_MTU_256;
714 attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
712 rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0); 715 rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
713 rdma_ah_set_static_rate(&attr.ah_attr, 3); 716 rdma_ah_set_static_rate(&attr.ah_attr, 3);
714 717
715 subnet_prefix = cpu_to_be64(0xfe80000000000000LL); 718 subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
716 for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { 719 for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
720 phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
721 (i % HNS_ROCE_MAX_PORTS);
722 sl = i / HNS_ROCE_MAX_PORTS;
723
724 for (j = 0; j < caps->num_ports; j++) {
725 if (hr_dev->iboe.phy_port[j] == phy_port) {
726 queue_en[i] = 1;
727 port = j;
728 break;
729 }
730 }
731
732 if (!queue_en[i])
733 continue;
734
717 free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); 735 free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
718 if (IS_ERR(free_mr->mr_free_qp[i])) { 736 if (IS_ERR(free_mr->mr_free_qp[i])) {
719 dev_err(dev, "Create loop qp failed!\n"); 737 dev_err(dev, "Create loop qp failed!\n");
@@ -721,15 +739,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
721 } 739 }
722 hr_qp = free_mr->mr_free_qp[i]; 740 hr_qp = free_mr->mr_free_qp[i];
723 741
724 sl = i / caps->num_ports; 742 hr_qp->port = port;
725
726 if (caps->num_ports == HNS_ROCE_MAX_PORTS)
727 phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
728 (i % caps->num_ports);
729 else
730 phy_port = i % caps->num_ports;
731
732 hr_qp->port = phy_port + 1;
733 hr_qp->phy_port = phy_port; 743 hr_qp->phy_port = phy_port;
734 hr_qp->ibqp.qp_type = IB_QPT_RC; 744 hr_qp->ibqp.qp_type = IB_QPT_RC;
735 hr_qp->ibqp.device = &hr_dev->ib_dev; 745 hr_qp->ibqp.device = &hr_dev->ib_dev;
@@ -739,23 +749,22 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
739 hr_qp->ibqp.recv_cq = cq; 749 hr_qp->ibqp.recv_cq = cq;
740 hr_qp->ibqp.send_cq = cq; 750 hr_qp->ibqp.send_cq = cq;
741 751
742 rdma_ah_set_port_num(&attr.ah_attr, phy_port + 1); 752 rdma_ah_set_port_num(&attr.ah_attr, port + 1);
743 rdma_ah_set_sl(&attr.ah_attr, phy_port + 1); 753 rdma_ah_set_sl(&attr.ah_attr, sl);
744 attr.port_num = phy_port + 1; 754 attr.port_num = port + 1;
745 755
746 attr.dest_qp_num = hr_qp->qpn; 756 attr.dest_qp_num = hr_qp->qpn;
747 memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr), 757 memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
748 hr_dev->dev_addr[phy_port], 758 hr_dev->dev_addr[port],
749 MAC_ADDR_OCTET_NUM); 759 MAC_ADDR_OCTET_NUM);
750 760
751 memcpy(&dgid.raw, &subnet_prefix, sizeof(u64)); 761 memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
752 memcpy(&dgid.raw[8], hr_dev->dev_addr[phy_port], 3); 762 memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
753 memcpy(&dgid.raw[13], hr_dev->dev_addr[phy_port] + 3, 3); 763 memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
754 dgid.raw[11] = 0xff; 764 dgid.raw[11] = 0xff;
755 dgid.raw[12] = 0xfe; 765 dgid.raw[12] = 0xfe;
756 dgid.raw[8] ^= 2; 766 dgid.raw[8] ^= 2;
757 rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw); 767 rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
758 attr_mask |= IB_QP_PORT;
759 768
760 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask, 769 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
761 IB_QPS_RESET, IB_QPS_INIT); 770 IB_QPS_RESET, IB_QPS_INIT);
@@ -812,6 +821,9 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
812 821
813 for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { 822 for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
814 hr_qp = free_mr->mr_free_qp[i]; 823 hr_qp = free_mr->mr_free_qp[i];
824 if (!hr_qp)
825 continue;
826
815 ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp); 827 ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
816 if (ret) 828 if (ret)
817 dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n", 829 dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
@@ -963,7 +975,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
963 msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies; 975 msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
964 int i; 976 int i;
965 int ret; 977 int ret;
966 int ne; 978 int ne = 0;
967 979
968 mr_work = container_of(work, struct hns_roce_mr_free_work, work); 980 mr_work = container_of(work, struct hns_roce_mr_free_work, work);
969 hr_mr = (struct hns_roce_mr *)mr_work->mr; 981 hr_mr = (struct hns_roce_mr *)mr_work->mr;
@@ -976,6 +988,10 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
976 988
977 for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { 989 for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
978 hr_qp = free_mr->mr_free_qp[i]; 990 hr_qp = free_mr->mr_free_qp[i];
991 if (!hr_qp)
992 continue;
993 ne++;
994
979 ret = hns_roce_v1_send_lp_wqe(hr_qp); 995 ret = hns_roce_v1_send_lp_wqe(hr_qp);
980 if (ret) { 996 if (ret) {
981 dev_err(dev, 997 dev_err(dev,
@@ -985,7 +1001,6 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
985 } 1001 }
986 } 1002 }
987 1003
988 ne = HNS_ROCE_V1_RESV_QP;
989 do { 1004 do {
990 ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); 1005 ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
991 if (ret < 0) { 1006 if (ret < 0) {
@@ -995,7 +1010,8 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
995 goto free_work; 1010 goto free_work;
996 } 1011 }
997 ne -= ret; 1012 ne -= ret;
998 msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); 1013 usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
1014 (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
999 } while (ne && time_before_eq(jiffies, end)); 1015 } while (ne && time_before_eq(jiffies, end));
1000 1016
1001 if (ne != 0) 1017 if (ne != 0)
@@ -2181,7 +2197,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
2181 } 2197 }
2182 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; 2198 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2183 ++wq->tail; 2199 ++wq->tail;
2184 } else { 2200 } else {
2185 /* RQ conrespond to CQE */ 2201 /* RQ conrespond to CQE */
2186 wc->byte_len = le32_to_cpu(cqe->byte_cnt); 2202 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2187 opcode = roce_get_field(cqe->cqe_byte_4, 2203 opcode = roce_get_field(cqe->cqe_byte_4,
@@ -3533,10 +3549,12 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
3533 old_cnt = roce_get_field(old_send, 3549 old_cnt = roce_get_field(old_send,
3534 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, 3550 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
3535 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S); 3551 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
3536 if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) 3552 if (cur_cnt - old_cnt >
3553 SDB_ST_CMP_VAL) {
3537 success_flags = 1; 3554 success_flags = 1;
3538 else { 3555 } else {
3539 send_ptr = roce_get_field(old_send, 3556 send_ptr =
3557 roce_get_field(old_send,
3540 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, 3558 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
3541 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) + 3559 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
3542 roce_get_field(sdb_retry_cnt, 3560 roce_get_field(sdb_retry_cnt,
@@ -3641,6 +3659,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
3641 struct hns_roce_dev *hr_dev; 3659 struct hns_roce_dev *hr_dev;
3642 struct hns_roce_qp *hr_qp; 3660 struct hns_roce_qp *hr_qp;
3643 struct device *dev; 3661 struct device *dev;
3662 unsigned long qpn;
3644 int ret; 3663 int ret;
3645 3664
3646 qp_work_entry = container_of(work, struct hns_roce_qp_work, work); 3665 qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
@@ -3648,8 +3667,9 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
3648 dev = &hr_dev->pdev->dev; 3667 dev = &hr_dev->pdev->dev;
3649 priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; 3668 priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
3650 hr_qp = qp_work_entry->qp; 3669 hr_qp = qp_work_entry->qp;
3670 qpn = hr_qp->qpn;
3651 3671
3652 dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn); 3672 dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);
3653 3673
3654 qp_work_entry->sche_cnt++; 3674 qp_work_entry->sche_cnt++;
3655 3675
@@ -3660,7 +3680,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
3660 &qp_work_entry->db_wait_stage); 3680 &qp_work_entry->db_wait_stage);
3661 if (ret) { 3681 if (ret) {
3662 dev_err(dev, "Check QP(0x%lx) db process status failed!\n", 3682 dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
3663 hr_qp->qpn); 3683 qpn);
3664 return; 3684 return;
3665 } 3685 }
3666 3686
@@ -3674,7 +3694,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
3674 ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, 3694 ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
3675 IB_QPS_RESET); 3695 IB_QPS_RESET);
3676 if (ret) { 3696 if (ret) {
3677 dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn); 3697 dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
3678 return; 3698 return;
3679 } 3699 }
3680 3700
@@ -3683,14 +3703,14 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
3683 3703
3684 if (hr_qp->ibqp.qp_type == IB_QPT_RC) { 3704 if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
3685 /* RC QP, release QPN */ 3705 /* RC QP, release QPN */
3686 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); 3706 hns_roce_release_range_qp(hr_dev, qpn, 1);
3687 kfree(hr_qp); 3707 kfree(hr_qp);
3688 } else 3708 } else
3689 kfree(hr_to_hr_sqp(hr_qp)); 3709 kfree(hr_to_hr_sqp(hr_qp));
3690 3710
3691 kfree(qp_work_entry); 3711 kfree(qp_work_entry);
3692 3712
3693 dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn); 3713 dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
3694} 3714}
3695 3715
3696int hns_roce_v1_destroy_qp(struct ib_qp *ibqp) 3716int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c3b41f95e70a..d9777b662eba 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -125,8 +125,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
125 return -ENODEV; 125 return -ENODEV;
126 } 126 }
127 127
128 spin_lock_bh(&hr_dev->iboe.lock);
129
130 switch (event) { 128 switch (event) {
131 case NETDEV_UP: 129 case NETDEV_UP:
132 case NETDEV_CHANGE: 130 case NETDEV_CHANGE:
@@ -144,7 +142,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
144 break; 142 break;
145 } 143 }
146 144
147 spin_unlock_bh(&hr_dev->iboe.lock);
148 return 0; 145 return 0;
149} 146}
150 147
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 4f5a143fc0a7..ff931c580557 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
102 int err; 102 int err;
103 103
104 err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size, 104 err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
105 PAGE_SIZE * 2, &buf->buf, GFP_KERNEL); 105 PAGE_SIZE * 2, &buf->buf);
106 106
107 if (err) 107 if (err)
108 goto out; 108 goto out;
@@ -113,7 +113,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
113 if (err) 113 if (err)
114 goto err_buf; 114 goto err_buf;
115 115
116 err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL); 116 err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
117 if (err) 117 if (err)
118 goto err_mtt; 118 goto err_mtt;
119 119
@@ -219,7 +219,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
219 219
220 uar = &to_mucontext(context)->uar; 220 uar = &to_mucontext(context)->uar;
221 } else { 221 } else {
222 err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL); 222 err = mlx4_db_alloc(dev->dev, &cq->db, 1);
223 if (err) 223 if (err)
224 goto err_cq; 224 goto err_cq;
225 225
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 75b2f7d4cd95..d1b43cbbfea7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1155,7 +1155,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1155 * call to mlx4_ib_vma_close. 1155 * call to mlx4_ib_vma_close.
1156 */ 1156 */
1157 put_task_struct(owning_process); 1157 put_task_struct(owning_process);
1158 msleep(1); 1158 usleep_range(1000, 2000);
1159 owning_process = get_pid_task(ibcontext->tgid, 1159 owning_process = get_pid_task(ibcontext->tgid,
1160 PIDTYPE_PID); 1160 PIDTYPE_PID);
1161 if (!owning_process || 1161 if (!owning_process ||
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 3405e947dc1e..b73f89700ef9 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
1091 if (!count) 1091 if (!count)
1092 break; 1092 break;
1093 1093
1094 msleep(1); 1094 usleep_range(1000, 2000);
1095 } while (time_after(end, jiffies)); 1095 } while (time_after(end, jiffies));
1096 1096
1097 flush_workqueue(ctx->mcg_wq); 1097 flush_workqueue(ctx->mcg_wq);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index c2b9cbf4da05..9db82e67e959 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -185,7 +185,6 @@ enum mlx4_ib_qp_flags {
185 MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO, 185 MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
186 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, 186 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
187 MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP, 187 MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
188 MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
189 188
190 /* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */ 189 /* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
191 MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI, 190 MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 996e9058e515..75c0e6c5dd56 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -634,8 +634,8 @@ static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
634 634
635static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, 635static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
636 struct ib_qp_init_attr *init_attr, 636 struct ib_qp_init_attr *init_attr,
637 struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp, 637 struct ib_udata *udata, int sqpn,
638 gfp_t gfp) 638 struct mlx4_ib_qp **caller_qp)
639{ 639{
640 int qpn; 640 int qpn;
641 int err; 641 int err;
@@ -691,14 +691,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
691 if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI || 691 if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
692 (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER | 692 (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
693 MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) { 693 MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
694 sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp); 694 sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
695 if (!sqp) 695 if (!sqp)
696 return -ENOMEM; 696 return -ENOMEM;
697 qp = &sqp->qp; 697 qp = &sqp->qp;
698 qp->pri.vid = 0xFFFF; 698 qp->pri.vid = 0xFFFF;
699 qp->alt.vid = 0xFFFF; 699 qp->alt.vid = 0xFFFF;
700 } else { 700 } else {
701 qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp); 701 qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
702 if (!qp) 702 if (!qp)
703 return -ENOMEM; 703 return -ENOMEM;
704 qp->pri.vid = 0xFFFF; 704 qp->pri.vid = 0xFFFF;
@@ -780,7 +780,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
780 goto err; 780 goto err;
781 781
782 if (qp_has_rq(init_attr)) { 782 if (qp_has_rq(init_attr)) {
783 err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp); 783 err = mlx4_db_alloc(dev->dev, &qp->db, 0);
784 if (err) 784 if (err)
785 goto err; 785 goto err;
786 786
@@ -788,7 +788,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
788 } 788 }
789 789
790 if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size, 790 if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
791 &qp->buf, gfp)) { 791 &qp->buf)) {
792 memcpy(&init_attr->cap, &backup_cap, 792 memcpy(&init_attr->cap, &backup_cap,
793 sizeof(backup_cap)); 793 sizeof(backup_cap));
794 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, 794 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
@@ -797,7 +797,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
797 goto err_db; 797 goto err_db;
798 798
799 if (mlx4_buf_alloc(dev->dev, qp->buf_size, 799 if (mlx4_buf_alloc(dev->dev, qp->buf_size,
800 PAGE_SIZE * 2, &qp->buf, gfp)) { 800 PAGE_SIZE * 2, &qp->buf)) {
801 err = -ENOMEM; 801 err = -ENOMEM;
802 goto err_db; 802 goto err_db;
803 } 803 }
@@ -808,20 +808,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
808 if (err) 808 if (err)
809 goto err_buf; 809 goto err_buf;
810 810
811 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp); 811 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
812 if (err) 812 if (err)
813 goto err_mtt; 813 goto err_mtt;
814 814
815 qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64), 815 qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64),
816 gfp | __GFP_NOWARN); 816 GFP_KERNEL | __GFP_NOWARN);
817 if (!qp->sq.wrid) 817 if (!qp->sq.wrid)
818 qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64), 818 qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
819 gfp, PAGE_KERNEL); 819 GFP_KERNEL, PAGE_KERNEL);
820 qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64), 820 qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64),
821 gfp | __GFP_NOWARN); 821 GFP_KERNEL | __GFP_NOWARN);
822 if (!qp->rq.wrid) 822 if (!qp->rq.wrid)
823 qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64), 823 qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
824 gfp, PAGE_KERNEL); 824 GFP_KERNEL, PAGE_KERNEL);
825 if (!qp->sq.wrid || !qp->rq.wrid) { 825 if (!qp->sq.wrid || !qp->rq.wrid) {
826 err = -ENOMEM; 826 err = -ENOMEM;
827 goto err_wrid; 827 goto err_wrid;
@@ -859,7 +859,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
859 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) 859 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
860 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; 860 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
861 861
862 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp); 862 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
863 if (err) 863 if (err)
864 goto err_qpn; 864 goto err_qpn;
865 865
@@ -1127,10 +1127,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1127 int err; 1127 int err;
1128 int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; 1128 int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
1129 u16 xrcdn = 0; 1129 u16 xrcdn = 0;
1130 gfp_t gfp;
1131 1130
1132 gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
1133 GFP_NOIO : GFP_KERNEL;
1134 /* 1131 /*
1135 * We only support LSO, vendor flag1, and multicast loopback blocking, 1132 * We only support LSO, vendor flag1, and multicast loopback blocking,
1136 * and only for kernel UD QPs. 1133 * and only for kernel UD QPs.
@@ -1140,8 +1137,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1140 MLX4_IB_SRIOV_TUNNEL_QP | 1137 MLX4_IB_SRIOV_TUNNEL_QP |
1141 MLX4_IB_SRIOV_SQP | 1138 MLX4_IB_SRIOV_SQP |
1142 MLX4_IB_QP_NETIF | 1139 MLX4_IB_QP_NETIF |
1143 MLX4_IB_QP_CREATE_ROCE_V2_GSI | 1140 MLX4_IB_QP_CREATE_ROCE_V2_GSI))
1144 MLX4_IB_QP_CREATE_USE_GFP_NOIO))
1145 return ERR_PTR(-EINVAL); 1141 return ERR_PTR(-EINVAL);
1146 1142
1147 if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { 1143 if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
@@ -1154,7 +1150,6 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1154 return ERR_PTR(-EINVAL); 1150 return ERR_PTR(-EINVAL);
1155 1151
1156 if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | 1152 if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
1157 MLX4_IB_QP_CREATE_USE_GFP_NOIO |
1158 MLX4_IB_QP_CREATE_ROCE_V2_GSI | 1153 MLX4_IB_QP_CREATE_ROCE_V2_GSI |
1159 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) && 1154 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
1160 init_attr->qp_type != IB_QPT_UD) || 1155 init_attr->qp_type != IB_QPT_UD) ||
@@ -1179,7 +1174,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1179 case IB_QPT_RC: 1174 case IB_QPT_RC:
1180 case IB_QPT_UC: 1175 case IB_QPT_UC:
1181 case IB_QPT_RAW_PACKET: 1176 case IB_QPT_RAW_PACKET:
1182 qp = kzalloc(sizeof *qp, gfp); 1177 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1183 if (!qp) 1178 if (!qp)
1184 return ERR_PTR(-ENOMEM); 1179 return ERR_PTR(-ENOMEM);
1185 qp->pri.vid = 0xFFFF; 1180 qp->pri.vid = 0xFFFF;
@@ -1188,7 +1183,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1188 case IB_QPT_UD: 1183 case IB_QPT_UD:
1189 { 1184 {
1190 err = create_qp_common(to_mdev(pd->device), pd, init_attr, 1185 err = create_qp_common(to_mdev(pd->device), pd, init_attr,
1191 udata, 0, &qp, gfp); 1186 udata, 0, &qp);
1192 if (err) { 1187 if (err) {
1193 kfree(qp); 1188 kfree(qp);
1194 return ERR_PTR(err); 1189 return ERR_PTR(err);
@@ -1217,8 +1212,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1217 } 1212 }
1218 1213
1219 err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, 1214 err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
1220 sqpn, 1215 sqpn, &qp);
1221 &qp, gfp);
1222 if (err) 1216 if (err)
1223 return ERR_PTR(err); 1217 return ERR_PTR(err);
1224 1218
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index e32dd58937a8..0facaf5f6d23 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -135,14 +135,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
135 if (err) 135 if (err)
136 goto err_mtt; 136 goto err_mtt;
137 } else { 137 } else {
138 err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL); 138 err = mlx4_db_alloc(dev->dev, &srq->db, 0);
139 if (err) 139 if (err)
140 goto err_srq; 140 goto err_srq;
141 141
142 *srq->db.db = 0; 142 *srq->db.db = 0;
143 143
144 if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf, 144 if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
145 GFP_KERNEL)) { 145 &srq->buf)) {
146 err = -ENOMEM; 146 err = -ENOMEM;
147 goto err_db; 147 goto err_db;
148 } 148 }
@@ -167,7 +167,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
167 if (err) 167 if (err)
168 goto err_buf; 168 goto err_buf;
169 169
170 err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL); 170 err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
171 if (err) 171 if (err)
172 goto err_mtt; 172 goto err_mtt;
173 173
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 763bb5b36144..8ab2f1360a45 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -582,6 +582,15 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
582 } 582 }
583} 583}
584 584
585static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
586{
587 if (!mlx5_debugfs_root)
588 return;
589
590 debugfs_remove_recursive(dev->cache.root);
591 dev->cache.root = NULL;
592}
593
585static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) 594static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
586{ 595{
587 struct mlx5_mr_cache *cache = &dev->cache; 596 struct mlx5_mr_cache *cache = &dev->cache;
@@ -600,38 +609,34 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
600 sprintf(ent->name, "%d", ent->order); 609 sprintf(ent->name, "%d", ent->order);
601 ent->dir = debugfs_create_dir(ent->name, cache->root); 610 ent->dir = debugfs_create_dir(ent->name, cache->root);
602 if (!ent->dir) 611 if (!ent->dir)
603 return -ENOMEM; 612 goto err;
604 613
605 ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, 614 ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
606 &size_fops); 615 &size_fops);
607 if (!ent->fsize) 616 if (!ent->fsize)
608 return -ENOMEM; 617 goto err;
609 618
610 ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, 619 ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
611 &limit_fops); 620 &limit_fops);
612 if (!ent->flimit) 621 if (!ent->flimit)
613 return -ENOMEM; 622 goto err;
614 623
615 ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, 624 ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
616 &ent->cur); 625 &ent->cur);
617 if (!ent->fcur) 626 if (!ent->fcur)
618 return -ENOMEM; 627 goto err;
619 628
620 ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir, 629 ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
621 &ent->miss); 630 &ent->miss);
622 if (!ent->fmiss) 631 if (!ent->fmiss)
623 return -ENOMEM; 632 goto err;
624 } 633 }
625 634
626 return 0; 635 return 0;
627} 636err:
628 637 mlx5_mr_cache_debugfs_cleanup(dev);
629static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
630{
631 if (!mlx5_debugfs_root)
632 return;
633 638
634 debugfs_remove_recursive(dev->cache.root); 639 return -ENOMEM;
635} 640}
636 641
637static void delay_time_func(unsigned long ctx) 642static void delay_time_func(unsigned long ctx)
@@ -692,6 +697,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
692 if (err) 697 if (err)
693 mlx5_ib_warn(dev, "cache debugfs failure\n"); 698 mlx5_ib_warn(dev, "cache debugfs failure\n");
694 699
700 /*
701 * We don't want to fail driver if debugfs failed to initialize,
702 * so we are not forwarding error to the user.
703 */
704
695 return 0; 705 return 0;
696} 706}
697 707
@@ -1779,7 +1789,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1779 mr->ndescs = sg_nents; 1789 mr->ndescs = sg_nents;
1780 1790
1781 for_each_sg(sgl, sg, sg_nents, i) { 1791 for_each_sg(sgl, sg, sg_nents, i) {
1782 if (unlikely(i > mr->max_descs)) 1792 if (unlikely(i >= mr->max_descs))
1783 break; 1793 break;
1784 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset); 1794 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
1785 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); 1795 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8f9d8b4ad583..b0adf65e4bdb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -551,7 +551,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
551 if ((0x0F000100 == (pcs_control_status0 & 0x0F000100)) 551 if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
552 || (0x0F000100 == (pcs_control_status1 & 0x0F000100))) 552 || (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
553 int_cnt++; 553 int_cnt++;
554 msleep(1); 554 usleep_range(1000, 2000);
555 } 555 }
556 if (int_cnt > 1) { 556 if (int_cnt > 1) {
557 spin_lock_irqsave(&nesadapter->phy_lock, flags); 557 spin_lock_irqsave(&nesadapter->phy_lock, flags);
@@ -592,7 +592,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
592 break; 592 break;
593 } 593 }
594 } 594 }
595 msleep(1); 595 usleep_range(1000, 2000);
596 } 596 }
597 } 597 }
598 } 598 }
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 5984981e7dd4..a343e3b5d4cb 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -104,10 +104,9 @@ const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
104 104
105}; 105};
106 106
107static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map, 107static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
108 gfp_t gfp)
109{ 108{
110 unsigned long page = get_zeroed_page(gfp); 109 unsigned long page = get_zeroed_page(GFP_KERNEL);
111 110
112 /* 111 /*
113 * Free the page if someone raced with us installing it. 112 * Free the page if someone raced with us installing it.
@@ -126,7 +125,7 @@ static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
126 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI. 125 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
127 */ 126 */
128int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, 127int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
129 enum ib_qp_type type, u8 port, gfp_t gfp) 128 enum ib_qp_type type, u8 port)
130{ 129{
131 u32 i, offset, max_scan, qpn; 130 u32 i, offset, max_scan, qpn;
132 struct rvt_qpn_map *map; 131 struct rvt_qpn_map *map;
@@ -160,7 +159,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
160 max_scan = qpt->nmaps - !offset; 159 max_scan = qpt->nmaps - !offset;
161 for (i = 0;;) { 160 for (i = 0;;) {
162 if (unlikely(!map->page)) { 161 if (unlikely(!map->page)) {
163 get_map_page(qpt, map, gfp); 162 get_map_page(qpt, map);
164 if (unlikely(!map->page)) 163 if (unlikely(!map->page))
165 break; 164 break;
166 } 165 }
@@ -317,16 +316,16 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
317 return ib_mtu_enum_to_int(pmtu); 316 return ib_mtu_enum_to_int(pmtu);
318} 317}
319 318
320void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp) 319void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
321{ 320{
322 struct qib_qp_priv *priv; 321 struct qib_qp_priv *priv;
323 322
324 priv = kzalloc(sizeof(*priv), gfp); 323 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
325 if (!priv) 324 if (!priv)
326 return ERR_PTR(-ENOMEM); 325 return ERR_PTR(-ENOMEM);
327 priv->owner = qp; 326 priv->owner = qp;
328 327
329 priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp); 328 priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
330 if (!priv->s_hdr) { 329 if (!priv->s_hdr) {
331 kfree(priv); 330 kfree(priv);
332 return ERR_PTR(-ENOMEM); 331 return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index da0db5485ddc..a52fc67b40d7 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -274,11 +274,11 @@ int qib_get_counters(struct qib_pportdata *ppd,
274 * Functions provided by qib driver for rdmavt to use 274 * Functions provided by qib driver for rdmavt to use
275 */ 275 */
276unsigned qib_free_all_qps(struct rvt_dev_info *rdi); 276unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
277void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp); 277void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
278void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp); 278void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
279void qib_notify_qp_reset(struct rvt_qp *qp); 279void qib_notify_qp_reset(struct rvt_qp *qp);
280int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, 280int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
281 enum ib_qp_type type, u8 port, gfp_t gfp); 281 enum ib_qp_type type, u8 port);
282void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait); 282void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
283#ifdef CONFIG_DEBUG_FS 283#ifdef CONFIG_DEBUG_FS
284 284
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 727e81cc2c8f..459865439a0b 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -118,10 +118,9 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
118EXPORT_SYMBOL(ib_rvt_state_ops); 118EXPORT_SYMBOL(ib_rvt_state_ops);
119 119
120static void get_map_page(struct rvt_qpn_table *qpt, 120static void get_map_page(struct rvt_qpn_table *qpt,
121 struct rvt_qpn_map *map, 121 struct rvt_qpn_map *map)
122 gfp_t gfp)
123{ 122{
124 unsigned long page = get_zeroed_page(gfp); 123 unsigned long page = get_zeroed_page(GFP_KERNEL);
125 124
126 /* 125 /*
127 * Free the page if someone raced with us installing it. 126 * Free the page if someone raced with us installing it.
@@ -173,7 +172,7 @@ static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
173 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end); 172 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
174 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) { 173 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
175 if (!map->page) { 174 if (!map->page) {
176 get_map_page(qpt, map, GFP_KERNEL); 175 get_map_page(qpt, map);
177 if (!map->page) { 176 if (!map->page) {
178 ret = -ENOMEM; 177 ret = -ENOMEM;
179 break; 178 break;
@@ -342,14 +341,14 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
342 * Return: The queue pair number 341 * Return: The queue pair number
343 */ 342 */
344static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, 343static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
345 enum ib_qp_type type, u8 port_num, gfp_t gfp) 344 enum ib_qp_type type, u8 port_num)
346{ 345{
347 u32 i, offset, max_scan, qpn; 346 u32 i, offset, max_scan, qpn;
348 struct rvt_qpn_map *map; 347 struct rvt_qpn_map *map;
349 u32 ret; 348 u32 ret;
350 349
351 if (rdi->driver_f.alloc_qpn) 350 if (rdi->driver_f.alloc_qpn)
352 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp); 351 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
353 352
354 if (type == IB_QPT_SMI || type == IB_QPT_GSI) { 353 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
355 unsigned n; 354 unsigned n;
@@ -374,7 +373,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
374 max_scan = qpt->nmaps - !offset; 373 max_scan = qpt->nmaps - !offset;
375 for (i = 0;;) { 374 for (i = 0;;) {
376 if (unlikely(!map->page)) { 375 if (unlikely(!map->page)) {
377 get_map_page(qpt, map, gfp); 376 get_map_page(qpt, map);
378 if (unlikely(!map->page)) 377 if (unlikely(!map->page))
379 break; 378 break;
380 } 379 }
@@ -672,7 +671,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
672 struct ib_qp *ret = ERR_PTR(-ENOMEM); 671 struct ib_qp *ret = ERR_PTR(-ENOMEM);
673 struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device); 672 struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
674 void *priv = NULL; 673 void *priv = NULL;
675 gfp_t gfp;
676 size_t sqsize; 674 size_t sqsize;
677 675
678 if (!rdi) 676 if (!rdi)
@@ -680,18 +678,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
680 678
681 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge || 679 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
682 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr || 680 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
683 init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO)) 681 init_attr->create_flags)
684 return ERR_PTR(-EINVAL); 682 return ERR_PTR(-EINVAL);
685 683
686 /* GFP_NOIO is applicable to RC QP's only */
687
688 if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
689 init_attr->qp_type != IB_QPT_RC)
690 return ERR_PTR(-EINVAL);
691
692 gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
693 GFP_NOIO : GFP_KERNEL;
694
695 /* Check receive queue parameters if no SRQ is specified. */ 684 /* Check receive queue parameters if no SRQ is specified. */
696 if (!init_attr->srq) { 685 if (!init_attr->srq) {
697 if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge || 686 if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
@@ -719,14 +708,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
719 sz = sizeof(struct rvt_sge) * 708 sz = sizeof(struct rvt_sge) *
720 init_attr->cap.max_send_sge + 709 init_attr->cap.max_send_sge +
721 sizeof(struct rvt_swqe); 710 sizeof(struct rvt_swqe);
722 if (gfp == GFP_NOIO) 711 swq = vzalloc_node(sqsize * sz, rdi->dparms.node);
723 swq = __vmalloc(
724 sqsize * sz,
725 gfp | __GFP_ZERO, PAGE_KERNEL);
726 else
727 swq = vzalloc_node(
728 sqsize * sz,
729 rdi->dparms.node);
730 if (!swq) 712 if (!swq)
731 return ERR_PTR(-ENOMEM); 713 return ERR_PTR(-ENOMEM);
732 714
@@ -741,7 +723,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
741 } else if (init_attr->cap.max_recv_sge > 1) 723 } else if (init_attr->cap.max_recv_sge > 1)
742 sg_list_sz = sizeof(*qp->r_sg_list) * 724 sg_list_sz = sizeof(*qp->r_sg_list) *
743 (init_attr->cap.max_recv_sge - 1); 725 (init_attr->cap.max_recv_sge - 1);
744 qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node); 726 qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
727 rdi->dparms.node);
745 if (!qp) 728 if (!qp)
746 goto bail_swq; 729 goto bail_swq;
747 730
@@ -751,7 +734,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
751 kzalloc_node( 734 kzalloc_node(
752 sizeof(*qp->s_ack_queue) * 735 sizeof(*qp->s_ack_queue) *
753 rvt_max_atomic(rdi), 736 rvt_max_atomic(rdi),
754 gfp, 737 GFP_KERNEL,
755 rdi->dparms.node); 738 rdi->dparms.node);
756 if (!qp->s_ack_queue) 739 if (!qp->s_ack_queue)
757 goto bail_qp; 740 goto bail_qp;
@@ -766,7 +749,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
766 * Driver needs to set up it's private QP structure and do any 749 * Driver needs to set up it's private QP structure and do any
767 * initialization that is needed. 750 * initialization that is needed.
768 */ 751 */
769 priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp); 752 priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
770 if (IS_ERR(priv)) { 753 if (IS_ERR(priv)) {
771 ret = priv; 754 ret = priv;
772 goto bail_qp; 755 goto bail_qp;
@@ -786,11 +769,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
786 qp->r_rq.wq = vmalloc_user( 769 qp->r_rq.wq = vmalloc_user(
787 sizeof(struct rvt_rwq) + 770 sizeof(struct rvt_rwq) +
788 qp->r_rq.size * sz); 771 qp->r_rq.size * sz);
789 else if (gfp == GFP_NOIO)
790 qp->r_rq.wq = __vmalloc(
791 sizeof(struct rvt_rwq) +
792 qp->r_rq.size * sz,
793 gfp | __GFP_ZERO, PAGE_KERNEL);
794 else 772 else
795 qp->r_rq.wq = vzalloc_node( 773 qp->r_rq.wq = vzalloc_node(
796 sizeof(struct rvt_rwq) + 774 sizeof(struct rvt_rwq) +
@@ -824,7 +802,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
824 802
825 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table, 803 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
826 init_attr->qp_type, 804 init_attr->qp_type,
827 init_attr->port_num, gfp); 805 init_attr->port_num);
828 if (err < 0) { 806 if (err < 0) {
829 ret = ERR_PTR(err); 807 ret = ERR_PTR(err);
830 goto bail_rq_wq; 808 goto bail_rq_wq;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c3a140ed4df2..08f3f90d2912 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -441,6 +441,8 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
441 if (unlikely(qp->need_req_skb && 441 if (unlikely(qp->need_req_skb &&
442 skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)) 442 skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
443 rxe_run_task(&qp->req.task, 1); 443 rxe_run_task(&qp->req.task, 1);
444
445 rxe_drop_ref(qp);
444} 446}
445 447
446int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb) 448int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
@@ -473,6 +475,7 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
473 return -EAGAIN; 475 return -EAGAIN;
474 } 476 }
475 477
478 rxe_add_ref(pkt->qp);
476 atomic_inc(&pkt->qp->skb_out); 479 atomic_inc(&pkt->qp->skb_out);
477 kfree_skb(skb); 480 kfree_skb(skb);
478 481
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 073e66783f1d..07511718d98d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1240,6 +1240,8 @@ int rxe_register_device(struct rxe_dev *rxe)
1240 addrconf_addr_eui48((unsigned char *)&dev->node_guid, 1240 addrconf_addr_eui48((unsigned char *)&dev->node_guid,
1241 rxe->ndev->dev_addr); 1241 rxe->ndev->dev_addr);
1242 dev->dev.dma_ops = &dma_virt_ops; 1242 dev->dev.dma_ops = &dma_virt_ops;
1243 dma_coerce_mask_and_coherent(&dev->dev,
1244 dma_get_required_mask(dev->dev.parent));
1243 1245
1244 dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION; 1246 dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
1245 dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) 1247 dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7cbcfdac6529..f87d104837dc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -39,6 +39,7 @@
39#include <linux/vmalloc.h> 39#include <linux/vmalloc.h>
40#include <linux/moduleparam.h> 40#include <linux/moduleparam.h>
41#include <linux/sched/signal.h> 41#include <linux/sched/signal.h>
42#include <linux/sched/mm.h>
42 43
43#include "ipoib.h" 44#include "ipoib.h"
44 45
@@ -954,7 +955,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
954 break; 955 break;
955 } 956 }
956 spin_unlock_irq(&priv->lock); 957 spin_unlock_irq(&priv->lock);
957 msleep(1); 958 usleep_range(1000, 2000);
958 ipoib_drain_cq(dev); 959 ipoib_drain_cq(dev);
959 spin_lock_irq(&priv->lock); 960 spin_lock_irq(&priv->lock);
960 } 961 }
@@ -1047,9 +1048,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
1047 .sq_sig_type = IB_SIGNAL_ALL_WR, 1048 .sq_sig_type = IB_SIGNAL_ALL_WR,
1048 .qp_type = IB_QPT_RC, 1049 .qp_type = IB_QPT_RC,
1049 .qp_context = tx, 1050 .qp_context = tx,
1050 .create_flags = IB_QP_CREATE_USE_GFP_NOIO 1051 .create_flags = 0
1051 }; 1052 };
1052
1053 struct ib_qp *tx_qp; 1053 struct ib_qp *tx_qp;
1054 1054
1055 if (dev->features & NETIF_F_SG) 1055 if (dev->features & NETIF_F_SG)
@@ -1057,10 +1057,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
1057 min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1); 1057 min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
1058 1058
1059 tx_qp = ib_create_qp(priv->pd, &attr); 1059 tx_qp = ib_create_qp(priv->pd, &attr);
1060 if (PTR_ERR(tx_qp) == -EINVAL) {
1061 attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
1062 tx_qp = ib_create_qp(priv->pd, &attr);
1063 }
1064 tx->max_send_sge = attr.cap.max_send_sge; 1060 tx->max_send_sge = attr.cap.max_send_sge;
1065 return tx_qp; 1061 return tx_qp;
1066} 1062}
@@ -1131,10 +1127,11 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
1131 struct sa_path_rec *pathrec) 1127 struct sa_path_rec *pathrec)
1132{ 1128{
1133 struct ipoib_dev_priv *priv = ipoib_priv(p->dev); 1129 struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
1130 unsigned int noio_flag;
1134 int ret; 1131 int ret;
1135 1132
1136 p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring, 1133 noio_flag = memalloc_noio_save();
1137 GFP_NOIO, PAGE_KERNEL); 1134 p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
1138 if (!p->tx_ring) { 1135 if (!p->tx_ring) {
1139 ret = -ENOMEM; 1136 ret = -ENOMEM;
1140 goto err_tx; 1137 goto err_tx;
@@ -1142,9 +1139,10 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
1142 memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring); 1139 memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
1143 1140
1144 p->qp = ipoib_cm_create_tx_qp(p->dev, p); 1141 p->qp = ipoib_cm_create_tx_qp(p->dev, p);
1142 memalloc_noio_restore(noio_flag);
1145 if (IS_ERR(p->qp)) { 1143 if (IS_ERR(p->qp)) {
1146 ret = PTR_ERR(p->qp); 1144 ret = PTR_ERR(p->qp);
1147 ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret); 1145 ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
1148 goto err_qp; 1146 goto err_qp;
1149 } 1147 }
1150 1148
@@ -1206,7 +1204,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
1206 goto timeout; 1204 goto timeout;
1207 } 1205 }
1208 1206
1209 msleep(1); 1207 usleep_range(1000, 2000);
1210 } 1208 }
1211 } 1209 }
1212 1210
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index efe7402f4885..57a9655e844d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -770,7 +770,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
770 770
771 ipoib_drain_cq(dev); 771 ipoib_drain_cq(dev);
772 772
773 msleep(1); 773 usleep_range(1000, 2000);
774 } 774 }
775 775
776 ipoib_dbg(priv, "All sends and receives done.\n"); 776 ipoib_dbg(priv, "All sends and receives done.\n");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6e86eeee370e..70dacaf9044e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -233,6 +233,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
233static int ipoib_change_mtu(struct net_device *dev, int new_mtu) 233static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
234{ 234{
235 struct ipoib_dev_priv *priv = ipoib_priv(dev); 235 struct ipoib_dev_priv *priv = ipoib_priv(dev);
236 int ret = 0;
236 237
237 /* dev->mtu > 2K ==> connected mode */ 238 /* dev->mtu > 2K ==> connected mode */
238 if (ipoib_cm_admin_enabled(dev)) { 239 if (ipoib_cm_admin_enabled(dev)) {
@@ -256,9 +257,34 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
256 ipoib_dbg(priv, "MTU must be smaller than the underlying " 257 ipoib_dbg(priv, "MTU must be smaller than the underlying "
257 "link layer MTU - 4 (%u)\n", priv->mcast_mtu); 258 "link layer MTU - 4 (%u)\n", priv->mcast_mtu);
258 259
259 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); 260 new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
260 261
261 return 0; 262 if (priv->rn_ops->ndo_change_mtu) {
263 bool carrier_status = netif_carrier_ok(dev);
264
265 netif_carrier_off(dev);
266
267 /* notify lower level on the real mtu */
268 ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
269
270 if (carrier_status)
271 netif_carrier_on(dev);
272 } else {
273 dev->mtu = new_mtu;
274 }
275
276 return ret;
277}
278
279static void ipoib_get_stats(struct net_device *dev,
280 struct rtnl_link_stats64 *stats)
281{
282 struct ipoib_dev_priv *priv = ipoib_priv(dev);
283
284 if (priv->rn_ops->ndo_get_stats64)
285 priv->rn_ops->ndo_get_stats64(dev, stats);
286 else
287 netdev_stats_to_stats64(stats, &dev->stats);
262} 288}
263 289
264/* Called with an RCU read lock taken */ 290/* Called with an RCU read lock taken */
@@ -1808,6 +1834,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
1808 .ndo_get_vf_stats = ipoib_get_vf_stats, 1834 .ndo_get_vf_stats = ipoib_get_vf_stats,
1809 .ndo_set_vf_guid = ipoib_set_vf_guid, 1835 .ndo_set_vf_guid = ipoib_set_vf_guid,
1810 .ndo_set_mac_address = ipoib_set_mac, 1836 .ndo_set_mac_address = ipoib_set_mac,
1837 .ndo_get_stats64 = ipoib_get_stats,
1811}; 1838};
1812 1839
1813static const struct net_device_ops ipoib_netdev_ops_vf = { 1840static const struct net_device_ops ipoib_netdev_ops_vf = {
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5a887efb4bdf..37b33d708c2d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -83,6 +83,7 @@ static struct scsi_host_template iscsi_iser_sht;
83static struct iscsi_transport iscsi_iser_transport; 83static struct iscsi_transport iscsi_iser_transport;
84static struct scsi_transport_template *iscsi_iser_scsi_transport; 84static struct scsi_transport_template *iscsi_iser_scsi_transport;
85static struct workqueue_struct *release_wq; 85static struct workqueue_struct *release_wq;
86static DEFINE_MUTEX(unbind_iser_conn_mutex);
86struct iser_global ig; 87struct iser_global ig;
87 88
88int iser_debug_level = 0; 89int iser_debug_level = 0;
@@ -550,12 +551,14 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
550 */ 551 */
551 if (iser_conn) { 552 if (iser_conn) {
552 mutex_lock(&iser_conn->state_mutex); 553 mutex_lock(&iser_conn->state_mutex);
554 mutex_lock(&unbind_iser_conn_mutex);
553 iser_conn_terminate(iser_conn); 555 iser_conn_terminate(iser_conn);
554 iscsi_conn_stop(cls_conn, flag); 556 iscsi_conn_stop(cls_conn, flag);
555 557
556 /* unbind */ 558 /* unbind */
557 iser_conn->iscsi_conn = NULL; 559 iser_conn->iscsi_conn = NULL;
558 conn->dd_data = NULL; 560 conn->dd_data = NULL;
561 mutex_unlock(&unbind_iser_conn_mutex);
559 562
560 complete(&iser_conn->stop_completion); 563 complete(&iser_conn->stop_completion);
561 mutex_unlock(&iser_conn->state_mutex); 564 mutex_unlock(&iser_conn->state_mutex);
@@ -977,13 +980,21 @@ static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
977 struct iser_conn *iser_conn; 980 struct iser_conn *iser_conn;
978 struct ib_device *ib_dev; 981 struct ib_device *ib_dev;
979 982
983 mutex_lock(&unbind_iser_conn_mutex);
984
980 session = starget_to_session(scsi_target(sdev))->dd_data; 985 session = starget_to_session(scsi_target(sdev))->dd_data;
981 iser_conn = session->leadconn->dd_data; 986 iser_conn = session->leadconn->dd_data;
987 if (!iser_conn) {
988 mutex_unlock(&unbind_iser_conn_mutex);
989 return -ENOTCONN;
990 }
982 ib_dev = iser_conn->ib_conn.device->ib_device; 991 ib_dev = iser_conn->ib_conn.device->ib_device;
983 992
984 if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) 993 if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
985 blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K); 994 blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
986 995
996 mutex_unlock(&unbind_iser_conn_mutex);
997
987 return 0; 998 return 0;
988} 999}
989 1000
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index c538a38c91ce..26a004e97ae0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -708,8 +708,14 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
708 unsigned short sg_tablesize, sup_sg_tablesize; 708 unsigned short sg_tablesize, sup_sg_tablesize;
709 709
710 sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K); 710 sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
711 sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, 711 if (device->ib_device->attrs.device_cap_flags &
712 device->ib_device->attrs.max_fast_reg_page_list_len); 712 IB_DEVICE_MEM_MGT_EXTENSIONS)
713 sup_sg_tablesize =
714 min_t(
715 uint, ISCSI_ISER_MAX_SG_TABLESIZE,
716 device->ib_device->attrs.max_fast_reg_page_list_len);
717 else
718 sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
713 719
714 iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize); 720 iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
715} 721}
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index dad85e74c37c..3aae015469a5 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -71,7 +71,7 @@ static void __init digicolor_set_gc(void __iomem *reg_base, unsigned irq_base,
71static int __init digicolor_of_init(struct device_node *node, 71static int __init digicolor_of_init(struct device_node *node,
72 struct device_node *parent) 72 struct device_node *parent)
73{ 73{
74 static void __iomem *reg_base; 74 void __iomem *reg_base;
75 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; 75 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
76 struct regmap *ucregs; 76 struct regmap *ucregs;
77 int ret; 77 int ret;
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index 54c296401525..18d58d2b4ffe 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -43,7 +43,7 @@ static const struct of_device_id syscon_pldset_of_match[] = {
43static int __init 43static int __init
44realview_gic_of_init(struct device_node *node, struct device_node *parent) 44realview_gic_of_init(struct device_node *node, struct device_node *parent)
45{ 45{
46 static struct regmap *map; 46 struct regmap *map;
47 struct device_node *np; 47 struct device_node *np;
48 const struct of_device_id *gic_id; 48 const struct of_device_id *gic_id;
49 u32 pld1_ctrl; 49 u32 pld1_ctrl;
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 0a8ed1c05518..14461cbfab2f 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -154,7 +154,7 @@ asmlinkage void __weak plat_irq_dispatch(void)
154static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq, 154static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
155 irq_hw_number_t hw) 155 irq_hw_number_t hw)
156{ 156{
157 static struct irq_chip *chip; 157 struct irq_chip *chip;
158 158
159 if (hw < 2 && cpu_has_mipsmt) { 159 if (hw < 2 && cpu_has_mipsmt) {
160 /* Software interrupts are used for MT/CMT IPI */ 160 /* Software interrupts are used for MT/CMT IPI */
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 832ebf4062f7..6ab1d3afec02 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -950,7 +950,6 @@ static void __init __gic_init(unsigned long gic_base_addr,
950 &gic_irq_domain_ops, NULL); 950 &gic_irq_domain_ops, NULL);
951 if (!gic_irq_domain) 951 if (!gic_irq_domain)
952 panic("Failed to add GIC IRQ domain"); 952 panic("Failed to add GIC IRQ domain");
953 gic_irq_domain->name = "mips-gic-irq";
954 953
955 gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, 954 gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
956 IRQ_DOMAIN_FLAG_IPI_PER_CPU, 955 IRQ_DOMAIN_FLAG_IPI_PER_CPU,
@@ -959,7 +958,6 @@ static void __init __gic_init(unsigned long gic_base_addr,
959 if (!gic_ipi_domain) 958 if (!gic_ipi_domain)
960 panic("Failed to add GIC IPI domain"); 959 panic("Failed to add GIC IPI domain");
961 960
962 gic_ipi_domain->name = "mips-gic-ipi";
963 irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); 961 irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
964 962
965 if (node && 963 if (node &&
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 060d357f107f..6f423bc49d0d 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -485,18 +485,19 @@ static int isdn_divert_icall(isdn_ctrl *ic)
485 cs->deflect_dest[0] = '\0'; 485 cs->deflect_dest[0] = '\0';
486 retval = 4; /* only proceed */ 486 retval = 4; /* only proceed */
487 } 487 }
488 sprintf(cs->info, "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n", 488 snprintf(cs->info, sizeof(cs->info),
489 cs->akt_state, 489 "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
490 cs->divert_id, 490 cs->akt_state,
491 divert_if.drv_to_name(cs->ics.driver), 491 cs->divert_id,
492 (ic->command == ISDN_STAT_ICALLW) ? "1" : "0", 492 divert_if.drv_to_name(cs->ics.driver),
493 cs->ics.parm.setup.phone, 493 (ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
494 cs->ics.parm.setup.eazmsn, 494 cs->ics.parm.setup.phone,
495 cs->ics.parm.setup.si1, 495 cs->ics.parm.setup.eazmsn,
496 cs->ics.parm.setup.si2, 496 cs->ics.parm.setup.si1,
497 cs->ics.parm.setup.screen, 497 cs->ics.parm.setup.si2,
498 dv->rule.waittime, 498 cs->ics.parm.setup.screen,
499 cs->deflect_dest); 499 dv->rule.waittime,
500 cs->deflect_dest);
500 if ((dv->rule.action == DEFLECT_REPORT) || 501 if ((dv->rule.action == DEFLECT_REPORT) ||
501 (dv->rule.action == DEFLECT_REJECT)) { 502 (dv->rule.action == DEFLECT_REJECT)) {
502 put_info_buffer(cs->info); 503 put_info_buffer(cs->info);
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 40c7e2cf423b..034cabac699d 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -42,7 +42,7 @@ static char *revision = "$Revision: 1.1.2.2 $";
42 42
43static bool suppress_pollack; 43static bool suppress_pollack;
44 44
45static struct pci_device_id c4_pci_tbl[] = { 45static const struct pci_device_id c4_pci_tbl[] = {
46 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 }, 46 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 },
47 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 0, 0, (unsigned long)2 }, 47 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 0, 0, (unsigned long)2 },
48 { } /* Terminating entry */ 48 { } /* Terminating entry */
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 8b7ad4f1ab01..b2023e08dcd2 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -110,7 +110,7 @@ typedef struct _diva_os_thread_dpc {
110/* 110/*
111 This table should be sorted by PCI device ID 111 This table should be sorted by PCI device ID
112*/ 112*/
113static struct pci_device_id divas_pci_tbl[] = { 113static const struct pci_device_id divas_pci_tbl[] = {
114 /* Diva Server BRI-2M PCI 0xE010 */ 114 /* Diva Server BRI-2M PCI 0xE010 */
115 { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRA), 115 { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRA),
116 CARDTYPE_MAESTRA_PCI }, 116 CARDTYPE_MAESTRA_PCI },
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index e3fa1cd64470..dce6632daae1 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -1142,7 +1142,7 @@ fritz_remove_pci(struct pci_dev *pdev)
1142 pr_info("%s: drvdata already removed\n", __func__); 1142 pr_info("%s: drvdata already removed\n", __func__);
1143} 1143}
1144 1144
1145static struct pci_device_id fcpci_ids[] = { 1145static const struct pci_device_id fcpci_ids[] = {
1146 { PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID, 1146 { PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID,
1147 0, 0, (unsigned long) "Fritz!Card PCI"}, 1147 0, 0, (unsigned long) "Fritz!Card PCI"},
1148 { PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1_V2, PCI_ANY_ID, PCI_ANY_ID, 1148 { PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1_V2, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index aea0c9616ea5..3cf07b8ced1c 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -5348,7 +5348,7 @@ static const struct hm_map hfcm_map[] = {
5348 5348
5349#undef H 5349#undef H
5350#define H(x) ((unsigned long)&hfcm_map[x]) 5350#define H(x) ((unsigned long)&hfcm_map[x])
5351static struct pci_device_id hfmultipci_ids[] = { 5351static const struct pci_device_id hfmultipci_ids[] = {
5352 5352
5353 /* Cards with HFC-4S Chip */ 5353 /* Cards with HFC-4S Chip */
5354 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD, 5354 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 5dc246d71c16..d2e401a8090e 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2161,7 +2161,7 @@ static const struct _hfc_map hfc_map[] =
2161 {}, 2161 {},
2162}; 2162};
2163 2163
2164static struct pci_device_id hfc_ids[] = 2164static const struct pci_device_id hfc_ids[] =
2165{ 2165{
2166 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0), 2166 { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
2167 (unsigned long) &hfc_map[0] }, 2167 (unsigned long) &hfc_map[0] },
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index afde4edef9ae..6a6d848bd18e 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1137,7 +1137,7 @@ static void nj_remove(struct pci_dev *pdev)
1137/* We cannot select cards with PCI_SUB... IDs, since here are cards with 1137/* We cannot select cards with PCI_SUB... IDs, since here are cards with
1138 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject 1138 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject
1139 * known other cards which not work with this driver - see probe function */ 1139 * known other cards which not work with this driver - see probe function */
1140static struct pci_device_id nj_pci_ids[] = { 1140static const struct pci_device_id nj_pci_ids[] = {
1141 { PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300, 1141 { PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
1142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 1142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1143 { } 1143 { }
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 3052c836b89f..d80072fef434 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1398,7 +1398,7 @@ w6692_remove_pci(struct pci_dev *pdev)
1398 pr_notice("%s: drvdata already removed\n", __func__); 1398 pr_notice("%s: drvdata already removed\n", __func__);
1399} 1399}
1400 1400
1401static struct pci_device_id w6692_ids[] = { 1401static const struct pci_device_id w6692_ids[] = {
1402 { PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH, 1402 { PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH,
1403 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (ulong)&w6692_map[0]}, 1403 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (ulong)&w6692_map[0]},
1404 { PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692, 1404 { PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692,
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index c7d68675b028..7108bdb8742e 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1909,7 +1909,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
1909#ifdef CONFIG_PCI 1909#ifdef CONFIG_PCI
1910#include <linux/pci.h> 1910#include <linux/pci.h>
1911 1911
1912static struct pci_device_id hisax_pci_tbl[] __used = { 1912static const struct pci_device_id hisax_pci_tbl[] __used = {
1913#ifdef CONFIG_HISAX_FRITZPCI 1913#ifdef CONFIG_HISAX_FRITZPCI
1914 {PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) }, 1914 {PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) },
1915#endif 1915#endif
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 90f051ce0259..9090cc1e1f29 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -86,7 +86,7 @@ typedef struct {
86 char *device_name; 86 char *device_name;
87} hfc4s8s_param; 87} hfc4s8s_param;
88 88
89static struct pci_device_id hfc4s8s_ids[] = { 89static const struct pci_device_id hfc4s8s_ids[] = {
90 {.vendor = PCI_VENDOR_ID_CCD, 90 {.vendor = PCI_VENDOR_ID_CCD,
91 .device = PCI_DEVICE_ID_4S, 91 .device = PCI_DEVICE_ID_4S,
92 .subvendor = 0x1397, 92 .subvendor = 0x1397,
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 5a9f39ed1d5d..e4f7573ba9bf 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -52,7 +52,7 @@ module_param(debug, int, 0);
52MODULE_AUTHOR("Kai Germaschewski <kai.germaschewski@gmx.de>/Karsten Keil <kkeil@suse.de>"); 52MODULE_AUTHOR("Kai Germaschewski <kai.germaschewski@gmx.de>/Karsten Keil <kkeil@suse.de>");
53MODULE_DESCRIPTION("AVM Fritz!PCI/PnP ISDN driver"); 53MODULE_DESCRIPTION("AVM Fritz!PCI/PnP ISDN driver");
54 54
55static struct pci_device_id fcpci_ids[] = { 55static const struct pci_device_id fcpci_ids[] = {
56 { .vendor = PCI_VENDOR_ID_AVM, 56 { .vendor = PCI_VENDOR_ID_AVM,
57 .device = PCI_DEVICE_ID_AVM_A1, 57 .device = PCI_DEVICE_ID_AVM_A1,
58 .subvendor = PCI_ANY_ID, 58 .subvendor = PCI_ANY_ID,
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index f4eace5ea184..40f3cd7eab0f 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -156,7 +156,8 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
156 156
157 rdev_for_each(rdev, mddev) { 157 rdev_for_each(rdev, mddev) {
158 if (! test_bit(In_sync, &rdev->flags) 158 if (! test_bit(In_sync, &rdev->flags)
159 || test_bit(Faulty, &rdev->flags)) 159 || test_bit(Faulty, &rdev->flags)
160 || test_bit(Bitmap_sync, &rdev->flags))
160 continue; 161 continue;
161 162
162 target = offset + index * (PAGE_SIZE/512); 163 target = offset + index * (PAGE_SIZE/512);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 991f0fe2dcc6..b50eb4ac1b82 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -134,7 +134,9 @@ enum flag_bits {
134 Faulty, /* device is known to have a fault */ 134 Faulty, /* device is known to have a fault */
135 In_sync, /* device is in_sync with rest of array */ 135 In_sync, /* device is in_sync with rest of array */
136 Bitmap_sync, /* ..actually, not quite In_sync. Need a 136 Bitmap_sync, /* ..actually, not quite In_sync. Need a
137 * bitmap-based recovery to get fully in sync 137 * bitmap-based recovery to get fully in sync.
138 * The bit is only meaningful before device
139 * has been passed to pers->hot_add_disk.
138 */ 140 */
139 WriteMostly, /* Avoid reading if at all possible */ 141 WriteMostly, /* Avoid reading if at all possible */
140 AutoDetected, /* added by auto-detect */ 142 AutoDetected, /* added by auto-detect */
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 77cce3573aa8..44ad5baf3206 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1150,7 +1150,7 @@ int ppl_init_log(struct r5conf *conf)
1150 goto err; 1150 goto err;
1151 } 1151 }
1152 1152
1153 ppl_conf->bs = bioset_create(conf->raid_disks, 0, 0); 1153 ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
1154 if (!ppl_conf->bs) { 1154 if (!ppl_conf->bs) {
1155 ret = -ENOMEM; 1155 ret = -ENOMEM;
1156 goto err; 1156 goto err;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2ceb338b094b..aeeb8d6854e2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7951,12 +7951,10 @@ static void end_reshape(struct r5conf *conf)
7951{ 7951{
7952 7952
7953 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 7953 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
7954 struct md_rdev *rdev;
7955 7954
7956 spin_lock_irq(&conf->device_lock); 7955 spin_lock_irq(&conf->device_lock);
7957 conf->previous_raid_disks = conf->raid_disks; 7956 conf->previous_raid_disks = conf->raid_disks;
7958 rdev_for_each(rdev, conf->mddev) 7957 md_finish_reshape(conf->mddev);
7959 rdev->data_offset = rdev->new_data_offset;
7960 smp_wmb(); 7958 smp_wmb();
7961 conf->reshape_progress = MaxSector; 7959 conf->reshape_progress = MaxSector;
7962 conf->mddev->reshape_position = MaxSector; 7960 conf->mddev->reshape_position = MaxSector;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 14ff622190a5..181839d6fbea 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4596,7 +4596,7 @@ static int bond_check_params(struct bond_params *params)
4596 } 4596 }
4597 ad_user_port_key = valptr->value; 4597 ad_user_port_key = valptr->value;
4598 4598
4599 if (bond_mode == BOND_MODE_TLB) { 4599 if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) {
4600 bond_opt_initstr(&newval, "default"); 4600 bond_opt_initstr(&newval, "default");
4601 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), 4601 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
4602 &newval); 4602 &newval);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index e68d368e20ac..7f36d3e3c98b 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1665,6 +1665,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
1665 .dev_name = "BCM53125", 1665 .dev_name = "BCM53125",
1666 .vlans = 4096, 1666 .vlans = 4096,
1667 .enabled_ports = 0xff, 1667 .enabled_ports = 0xff,
1668 .arl_entries = 4,
1668 .cpu_port = B53_CPU_PORT, 1669 .cpu_port = B53_CPU_PORT,
1669 .vta_regs = B53_VTA_REGS, 1670 .vta_regs = B53_VTA_REGS,
1670 .duplex_reg = B53_DUPLEX_STAT_GE, 1671 .duplex_reg = B53_DUPLEX_STAT_GE,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 53b088166c28..5bcdd33101b0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3178,6 +3178,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
3178 .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, 3178 .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
3179 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3179 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3180 .port_pause_limit = mv88e6390_port_pause_limit, 3180 .port_pause_limit = mv88e6390_port_pause_limit,
3181 .port_set_cmode = mv88e6390x_port_set_cmode,
3181 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3182 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3182 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3183 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3183 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3184 .stats_snapshot = mv88e6390_g1_stats_snapshot,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d3906f6b01bd..86058a9f3417 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1785,16 +1785,18 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1785 1785
1786 xgene_enet_gpiod_get(pdata); 1786 xgene_enet_gpiod_get(pdata);
1787 1787
1788 pdata->clk = devm_clk_get(&pdev->dev, NULL); 1788 if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
1789 if (IS_ERR(pdata->clk)) { 1789 pdata->clk = devm_clk_get(&pdev->dev, NULL);
1790 /* Abort if the clock is defined but couldn't be retrived. 1790 if (IS_ERR(pdata->clk)) {
1791 * Always abort if the clock is missing on DT system as 1791 /* Abort if the clock is defined but couldn't be
1792 * the driver can't cope with this case. 1792 * retrived. Always abort if the clock is missing on
1793 */ 1793 * DT system as the driver can't cope with this case.
1794 if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node) 1794 */
1795 return PTR_ERR(pdata->clk); 1795 if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
1796 /* Firmware may have set up the clock already. */ 1796 return PTR_ERR(pdata->clk);
1797 dev_info(dev, "clocks have been setup already\n"); 1797 /* Firmware may have set up the clock already. */
1798 dev_info(dev, "clocks have been setup already\n");
1799 }
1798 } 1800 }
1799 1801
1800 if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) 1802 if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 73aca97a96bc..d937083db9a4 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -50,11 +50,14 @@ static u32 platform_bgmac_idm_read(struct bgmac *bgmac, u16 offset)
50 50
51static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value) 51static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
52{ 52{
53 return writel(value, bgmac->plat.idm_base + offset); 53 writel(value, bgmac->plat.idm_base + offset);
54} 54}
55 55
56static bool platform_bgmac_clk_enabled(struct bgmac *bgmac) 56static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
57{ 57{
58 if (!bgmac->plat.idm_base)
59 return true;
60
58 if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN) 61 if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN)
59 return false; 62 return false;
60 if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET) 63 if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
@@ -66,6 +69,9 @@ static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
66{ 69{
67 u32 val; 70 u32 val;
68 71
72 if (!bgmac->plat.idm_base)
73 return;
74
69 /* The Reset Control register only contains a single bit to show if the 75 /* The Reset Control register only contains a single bit to show if the
70 * controller is currently in reset. Do a sanity check here, just in 76 * controller is currently in reset. Do a sanity check here, just in
71 * case the bootloader happened to leave the device in reset. 77 * case the bootloader happened to leave the device in reset.
@@ -180,6 +186,7 @@ static int bgmac_probe(struct platform_device *pdev)
180 bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4; 186 bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4;
181 bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP; 187 bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP;
182 bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP; 188 bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP;
189 bgmac->feature_flags |= BGMAC_FEAT_IDM_MASK;
183 190
184 bgmac->dev = &pdev->dev; 191 bgmac->dev = &pdev->dev;
185 bgmac->dma_dev = &pdev->dev; 192 bgmac->dma_dev = &pdev->dev;
@@ -207,15 +214,13 @@ static int bgmac_probe(struct platform_device *pdev)
207 return PTR_ERR(bgmac->plat.base); 214 return PTR_ERR(bgmac->plat.base);
208 215
209 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base"); 216 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
210 if (!regs) { 217 if (regs) {
211 dev_err(&pdev->dev, "Unable to obtain idm resource\n"); 218 bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
212 return -EINVAL; 219 if (IS_ERR(bgmac->plat.idm_base))
220 return PTR_ERR(bgmac->plat.idm_base);
221 bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
213 } 222 }
214 223
215 bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
216 if (IS_ERR(bgmac->plat.idm_base))
217 return PTR_ERR(bgmac->plat.idm_base);
218
219 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base"); 224 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
220 if (regs) { 225 if (regs) {
221 bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev, 226 bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index ba4d2e145bb9..48d672b204a4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -622,9 +622,11 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
622 BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base)); 622 BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
623 BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base)); 623 BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
624 624
625 if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) { 625 if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
626 dev_err(bgmac->dev, "Core does not report 64-bit DMA\n"); 626 if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
627 return -ENOTSUPP; 627 dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
628 return -ENOTSUPP;
629 }
628 } 630 }
629 631
630 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { 632 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
@@ -855,9 +857,11 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
855static void bgmac_miiconfig(struct bgmac *bgmac) 857static void bgmac_miiconfig(struct bgmac *bgmac)
856{ 858{
857 if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) { 859 if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
858 bgmac_idm_write(bgmac, BCMA_IOCTL, 860 if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
859 bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 | 861 bgmac_idm_write(bgmac, BCMA_IOCTL,
860 BGMAC_BCMA_IOCTL_SW_CLKEN); 862 bgmac_idm_read(bgmac, BCMA_IOCTL) |
863 0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN);
864 }
861 bgmac->mac_speed = SPEED_2500; 865 bgmac->mac_speed = SPEED_2500;
862 bgmac->mac_duplex = DUPLEX_FULL; 866 bgmac->mac_duplex = DUPLEX_FULL;
863 bgmac_mac_speed(bgmac); 867 bgmac_mac_speed(bgmac);
@@ -874,11 +878,36 @@ static void bgmac_miiconfig(struct bgmac *bgmac)
874 } 878 }
875} 879}
876 880
881static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
882{
883 u32 iost;
884
885 iost = bgmac_idm_read(bgmac, BCMA_IOST);
886 if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
887 iost &= ~BGMAC_BCMA_IOST_ATTACHED;
888
889 /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
890 if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
891 u32 flags = 0;
892
893 if (iost & BGMAC_BCMA_IOST_ATTACHED) {
894 flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
895 if (!bgmac->has_robosw)
896 flags |= BGMAC_BCMA_IOCTL_SW_RESET;
897 }
898 bgmac_clk_enable(bgmac, flags);
899 }
900
901 if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
902 bgmac_idm_write(bgmac, BCMA_IOCTL,
903 bgmac_idm_read(bgmac, BCMA_IOCTL) &
904 ~BGMAC_BCMA_IOCTL_SW_RESET);
905}
906
877/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */ 907/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
878static void bgmac_chip_reset(struct bgmac *bgmac) 908static void bgmac_chip_reset(struct bgmac *bgmac)
879{ 909{
880 u32 cmdcfg_sr; 910 u32 cmdcfg_sr;
881 u32 iost;
882 int i; 911 int i;
883 912
884 if (bgmac_clk_enabled(bgmac)) { 913 if (bgmac_clk_enabled(bgmac)) {
@@ -899,20 +928,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
899 /* TODO: Clear software multicast filter list */ 928 /* TODO: Clear software multicast filter list */
900 } 929 }
901 930
902 iost = bgmac_idm_read(bgmac, BCMA_IOST); 931 if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK))
903 if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED) 932 bgmac_chip_reset_idm_config(bgmac);
904 iost &= ~BGMAC_BCMA_IOST_ATTACHED;
905
906 /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
907 if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
908 u32 flags = 0;
909 if (iost & BGMAC_BCMA_IOST_ATTACHED) {
910 flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
911 if (!bgmac->has_robosw)
912 flags |= BGMAC_BCMA_IOCTL_SW_RESET;
913 }
914 bgmac_clk_enable(bgmac, flags);
915 }
916 933
917 /* Request Misc PLL for corerev > 2 */ 934 /* Request Misc PLL for corerev > 2 */
918 if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) { 935 if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
@@ -970,11 +987,6 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
970 BGMAC_CHIPCTL_7_IF_TYPE_RGMII); 987 BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
971 } 988 }
972 989
973 if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
974 bgmac_idm_write(bgmac, BCMA_IOCTL,
975 bgmac_idm_read(bgmac, BCMA_IOCTL) &
976 ~BGMAC_BCMA_IOCTL_SW_RESET);
977
978 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset 990 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
979 * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine 991 * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
980 * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to 992 * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
@@ -1497,8 +1509,10 @@ int bgmac_enet_probe(struct bgmac *bgmac)
1497 bgmac_clk_enable(bgmac, 0); 1509 bgmac_clk_enable(bgmac, 0);
1498 1510
1499 /* This seems to be fixing IRQ by assigning OOB #6 to the core */ 1511 /* This seems to be fixing IRQ by assigning OOB #6 to the core */
1500 if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6) 1512 if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
1501 bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86); 1513 if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
1514 bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
1515 }
1502 1516
1503 bgmac_chip_reset(bgmac); 1517 bgmac_chip_reset(bgmac);
1504 1518
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index c1818766c501..443d57b10264 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -425,6 +425,7 @@
425#define BGMAC_FEAT_CC4_IF_SW_TYPE BIT(17) 425#define BGMAC_FEAT_CC4_IF_SW_TYPE BIT(17)
426#define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18) 426#define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18)
427#define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19) 427#define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19)
428#define BGMAC_FEAT_IDM_MASK BIT(20)
428 429
429struct bgmac_slot_info { 430struct bgmac_slot_info {
430 union { 431 union {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 43423744fdfa..1e33abde4a3e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2886,7 +2886,7 @@ static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
2886 2886
2887static int bnx2x_test_nvram(struct bnx2x *bp) 2887static int bnx2x_test_nvram(struct bnx2x *bp)
2888{ 2888{
2889 const struct crc_pair nvram_tbl[] = { 2889 static const struct crc_pair nvram_tbl[] = {
2890 { 0, 0x14 }, /* bootstrap */ 2890 { 0, 0x14 }, /* bootstrap */
2891 { 0x14, 0xec }, /* dir */ 2891 { 0x14, 0xec }, /* dir */
2892 { 0x100, 0x350 }, /* manuf_info */ 2892 { 0x100, 0x350 }, /* manuf_info */
@@ -2895,7 +2895,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
2895 { 0x708, 0x70 }, /* manuf_key_info */ 2895 { 0x708, 0x70 }, /* manuf_key_info */
2896 { 0, 0 } 2896 { 0, 0 }
2897 }; 2897 };
2898 const struct crc_pair nvram_tbl2[] = { 2898 static const struct crc_pair nvram_tbl2[] = {
2899 { 0x7e8, 0x350 }, /* manuf_info2 */ 2899 { 0x7e8, 0x350 }, /* manuf_info2 */
2900 { 0xb38, 0xf0 }, /* feature_info */ 2900 { 0xb38, 0xf0 }, /* feature_info */
2901 { 0, 0 } 2901 { 0, 0 }
@@ -3162,7 +3162,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
3162 if (is_multi(bp)) { 3162 if (is_multi(bp)) {
3163 for_each_eth_queue(bp, i) { 3163 for_each_eth_queue(bp, i) {
3164 memset(queue_name, 0, sizeof(queue_name)); 3164 memset(queue_name, 0, sizeof(queue_name));
3165 sprintf(queue_name, "%d", i); 3165 snprintf(queue_name, sizeof(queue_name),
3166 "%d", i);
3166 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) 3167 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
3167 snprintf(buf + (k + j)*ETH_GSTRING_LEN, 3168 snprintf(buf + (k + j)*ETH_GSTRING_LEN,
3168 ETH_GSTRING_LEN, 3169 ETH_GSTRING_LEN,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index daca1c9d254b..7b0b399aaedd 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1202,12 +1202,21 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
1202 return tx_cb_ptr; 1202 return tx_cb_ptr;
1203} 1203}
1204 1204
1205/* Simple helper to free a control block's resources */ 1205static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
1206static void bcmgenet_free_cb(struct enet_cb *cb) 1206 struct bcmgenet_tx_ring *ring)
1207{ 1207{
1208 dev_kfree_skb_any(cb->skb); 1208 struct enet_cb *tx_cb_ptr;
1209 cb->skb = NULL; 1209
1210 dma_unmap_addr_set(cb, dma_addr, 0); 1210 tx_cb_ptr = ring->cbs;
1211 tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1212
1213 /* Rewinding local write pointer */
1214 if (ring->write_ptr == ring->cb_ptr)
1215 ring->write_ptr = ring->end_ptr;
1216 else
1217 ring->write_ptr--;
1218
1219 return tx_cb_ptr;
1211} 1220}
1212 1221
1213static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring) 1222static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
@@ -1260,18 +1269,72 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
1260 INTRL2_CPU_MASK_SET); 1269 INTRL2_CPU_MASK_SET);
1261} 1270}
1262 1271
1272/* Simple helper to free a transmit control block's resources
1273 * Returns an skb when the last transmit control block associated with the
1274 * skb is freed. The skb should be freed by the caller if necessary.
1275 */
1276static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
1277 struct enet_cb *cb)
1278{
1279 struct sk_buff *skb;
1280
1281 skb = cb->skb;
1282
1283 if (skb) {
1284 cb->skb = NULL;
1285 if (cb == GENET_CB(skb)->first_cb)
1286 dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
1287 dma_unmap_len(cb, dma_len),
1288 DMA_TO_DEVICE);
1289 else
1290 dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
1291 dma_unmap_len(cb, dma_len),
1292 DMA_TO_DEVICE);
1293 dma_unmap_addr_set(cb, dma_addr, 0);
1294
1295 if (cb == GENET_CB(skb)->last_cb)
1296 return skb;
1297
1298 } else if (dma_unmap_addr(cb, dma_addr)) {
1299 dma_unmap_page(dev,
1300 dma_unmap_addr(cb, dma_addr),
1301 dma_unmap_len(cb, dma_len),
1302 DMA_TO_DEVICE);
1303 dma_unmap_addr_set(cb, dma_addr, 0);
1304 }
1305
1306 return 0;
1307}
1308
1309/* Simple helper to free a receive control block's resources */
1310static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
1311 struct enet_cb *cb)
1312{
1313 struct sk_buff *skb;
1314
1315 skb = cb->skb;
1316 cb->skb = NULL;
1317
1318 if (dma_unmap_addr(cb, dma_addr)) {
1319 dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
1320 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
1321 dma_unmap_addr_set(cb, dma_addr, 0);
1322 }
1323
1324 return skb;
1325}
1326
1263/* Unlocked version of the reclaim routine */ 1327/* Unlocked version of the reclaim routine */
1264static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, 1328static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1265 struct bcmgenet_tx_ring *ring) 1329 struct bcmgenet_tx_ring *ring)
1266{ 1330{
1267 struct bcmgenet_priv *priv = netdev_priv(dev); 1331 struct bcmgenet_priv *priv = netdev_priv(dev);
1268 struct device *kdev = &priv->pdev->dev; 1332 unsigned int txbds_processed = 0;
1269 struct enet_cb *tx_cb_ptr;
1270 unsigned int pkts_compl = 0;
1271 unsigned int bytes_compl = 0; 1333 unsigned int bytes_compl = 0;
1272 unsigned int c_index; 1334 unsigned int pkts_compl = 0;
1273 unsigned int txbds_ready; 1335 unsigned int txbds_ready;
1274 unsigned int txbds_processed = 0; 1336 unsigned int c_index;
1337 struct sk_buff *skb;
1275 1338
1276 /* Clear status before servicing to reduce spurious interrupts */ 1339 /* Clear status before servicing to reduce spurious interrupts */
1277 if (ring->index == DESC_INDEX) 1340 if (ring->index == DESC_INDEX)
@@ -1292,21 +1355,12 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1292 1355
1293 /* Reclaim transmitted buffers */ 1356 /* Reclaim transmitted buffers */
1294 while (txbds_processed < txbds_ready) { 1357 while (txbds_processed < txbds_ready) {
1295 tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; 1358 skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
1296 if (tx_cb_ptr->skb) { 1359 &priv->tx_cbs[ring->clean_ptr]);
1360 if (skb) {
1297 pkts_compl++; 1361 pkts_compl++;
1298 bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent; 1362 bytes_compl += GENET_CB(skb)->bytes_sent;
1299 dma_unmap_single(kdev, 1363 dev_kfree_skb_any(skb);
1300 dma_unmap_addr(tx_cb_ptr, dma_addr),
1301 dma_unmap_len(tx_cb_ptr, dma_len),
1302 DMA_TO_DEVICE);
1303 bcmgenet_free_cb(tx_cb_ptr);
1304 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
1305 dma_unmap_page(kdev,
1306 dma_unmap_addr(tx_cb_ptr, dma_addr),
1307 dma_unmap_len(tx_cb_ptr, dma_len),
1308 DMA_TO_DEVICE);
1309 dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
1310 } 1364 }
1311 1365
1312 txbds_processed++; 1366 txbds_processed++;
@@ -1380,95 +1434,6 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
1380 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); 1434 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
1381} 1435}
1382 1436
1383/* Transmits a single SKB (either head of a fragment or a single SKB)
1384 * caller must hold priv->lock
1385 */
1386static int bcmgenet_xmit_single(struct net_device *dev,
1387 struct sk_buff *skb,
1388 u16 dma_desc_flags,
1389 struct bcmgenet_tx_ring *ring)
1390{
1391 struct bcmgenet_priv *priv = netdev_priv(dev);
1392 struct device *kdev = &priv->pdev->dev;
1393 struct enet_cb *tx_cb_ptr;
1394 unsigned int skb_len;
1395 dma_addr_t mapping;
1396 u32 length_status;
1397 int ret;
1398
1399 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1400
1401 if (unlikely(!tx_cb_ptr))
1402 BUG();
1403
1404 tx_cb_ptr->skb = skb;
1405
1406 skb_len = skb_headlen(skb);
1407
1408 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
1409 ret = dma_mapping_error(kdev, mapping);
1410 if (ret) {
1411 priv->mib.tx_dma_failed++;
1412 netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
1413 dev_kfree_skb(skb);
1414 return ret;
1415 }
1416
1417 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1418 dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
1419 length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1420 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
1421 DMA_TX_APPEND_CRC;
1422
1423 if (skb->ip_summed == CHECKSUM_PARTIAL)
1424 length_status |= DMA_TX_DO_CSUM;
1425
1426 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
1427
1428 return 0;
1429}
1430
1431/* Transmit a SKB fragment */
1432static int bcmgenet_xmit_frag(struct net_device *dev,
1433 skb_frag_t *frag,
1434 u16 dma_desc_flags,
1435 struct bcmgenet_tx_ring *ring)
1436{
1437 struct bcmgenet_priv *priv = netdev_priv(dev);
1438 struct device *kdev = &priv->pdev->dev;
1439 struct enet_cb *tx_cb_ptr;
1440 unsigned int frag_size;
1441 dma_addr_t mapping;
1442 int ret;
1443
1444 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1445
1446 if (unlikely(!tx_cb_ptr))
1447 BUG();
1448
1449 tx_cb_ptr->skb = NULL;
1450
1451 frag_size = skb_frag_size(frag);
1452
1453 mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
1454 ret = dma_mapping_error(kdev, mapping);
1455 if (ret) {
1456 priv->mib.tx_dma_failed++;
1457 netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
1458 __func__);
1459 return ret;
1460 }
1461
1462 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1463 dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
1464
1465 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
1466 (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1467 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
1468
1469 return 0;
1470}
1471
1472/* Reallocate the SKB to put enough headroom in front of it and insert 1437/* Reallocate the SKB to put enough headroom in front of it and insert
1473 * the transmit checksum offsets in the descriptors 1438 * the transmit checksum offsets in the descriptors
1474 */ 1439 */
@@ -1535,11 +1500,16 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
1535static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) 1500static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1536{ 1501{
1537 struct bcmgenet_priv *priv = netdev_priv(dev); 1502 struct bcmgenet_priv *priv = netdev_priv(dev);
1503 struct device *kdev = &priv->pdev->dev;
1538 struct bcmgenet_tx_ring *ring = NULL; 1504 struct bcmgenet_tx_ring *ring = NULL;
1505 struct enet_cb *tx_cb_ptr;
1539 struct netdev_queue *txq; 1506 struct netdev_queue *txq;
1540 unsigned long flags = 0; 1507 unsigned long flags = 0;
1541 int nr_frags, index; 1508 int nr_frags, index;
1542 u16 dma_desc_flags; 1509 dma_addr_t mapping;
1510 unsigned int size;
1511 skb_frag_t *frag;
1512 u32 len_stat;
1543 int ret; 1513 int ret;
1544 int i; 1514 int i;
1545 1515
@@ -1592,29 +1562,53 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1592 } 1562 }
1593 } 1563 }
1594 1564
1595 dma_desc_flags = DMA_SOP; 1565 for (i = 0; i <= nr_frags; i++) {
1596 if (nr_frags == 0) 1566 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1597 dma_desc_flags |= DMA_EOP;
1598 1567
1599 /* Transmit single SKB or head of fragment list */ 1568 if (unlikely(!tx_cb_ptr))
1600 ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); 1569 BUG();
1601 if (ret) { 1570
1602 ret = NETDEV_TX_OK; 1571 if (!i) {
1603 goto out; 1572 /* Transmit single SKB or head of fragment list */
1604 } 1573 GENET_CB(skb)->first_cb = tx_cb_ptr;
1574 size = skb_headlen(skb);
1575 mapping = dma_map_single(kdev, skb->data, size,
1576 DMA_TO_DEVICE);
1577 } else {
1578 /* xmit fragment */
1579 frag = &skb_shinfo(skb)->frags[i - 1];
1580 size = skb_frag_size(frag);
1581 mapping = skb_frag_dma_map(kdev, frag, 0, size,
1582 DMA_TO_DEVICE);
1583 }
1605 1584
1606 /* xmit fragment */ 1585 ret = dma_mapping_error(kdev, mapping);
1607 for (i = 0; i < nr_frags; i++) {
1608 ret = bcmgenet_xmit_frag(dev,
1609 &skb_shinfo(skb)->frags[i],
1610 (i == nr_frags - 1) ? DMA_EOP : 0,
1611 ring);
1612 if (ret) { 1586 if (ret) {
1587 priv->mib.tx_dma_failed++;
1588 netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
1613 ret = NETDEV_TX_OK; 1589 ret = NETDEV_TX_OK;
1614 goto out; 1590 goto out_unmap_frags;
1591 }
1592 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1593 dma_unmap_len_set(tx_cb_ptr, dma_len, size);
1594
1595 tx_cb_ptr->skb = skb;
1596
1597 len_stat = (size << DMA_BUFLENGTH_SHIFT) |
1598 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
1599
1600 if (!i) {
1601 len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
1602 if (skb->ip_summed == CHECKSUM_PARTIAL)
1603 len_stat |= DMA_TX_DO_CSUM;
1615 } 1604 }
1605 if (i == nr_frags)
1606 len_stat |= DMA_EOP;
1607
1608 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
1616 } 1609 }
1617 1610
1611 GENET_CB(skb)->last_cb = tx_cb_ptr;
1618 skb_tx_timestamp(skb); 1612 skb_tx_timestamp(skb);
1619 1613
1620 /* Decrement total BD count and advance our write pointer */ 1614 /* Decrement total BD count and advance our write pointer */
@@ -1635,6 +1629,19 @@ out:
1635 spin_unlock_irqrestore(&ring->lock, flags); 1629 spin_unlock_irqrestore(&ring->lock, flags);
1636 1630
1637 return ret; 1631 return ret;
1632
1633out_unmap_frags:
1634 /* Back up for failed control block mapping */
1635 bcmgenet_put_txcb(priv, ring);
1636
1637 /* Unmap successfully mapped control blocks */
1638 while (i-- > 0) {
1639 tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
1640 bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
1641 }
1642
1643 dev_kfree_skb(skb);
1644 goto out;
1638} 1645}
1639 1646
1640static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, 1647static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
@@ -1666,14 +1673,12 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
1666 } 1673 }
1667 1674
1668 /* Grab the current Rx skb from the ring and DMA-unmap it */ 1675 /* Grab the current Rx skb from the ring and DMA-unmap it */
1669 rx_skb = cb->skb; 1676 rx_skb = bcmgenet_free_rx_cb(kdev, cb);
1670 if (likely(rx_skb))
1671 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
1672 priv->rx_buf_len, DMA_FROM_DEVICE);
1673 1677
1674 /* Put the new Rx skb on the ring */ 1678 /* Put the new Rx skb on the ring */
1675 cb->skb = skb; 1679 cb->skb = skb;
1676 dma_unmap_addr_set(cb, dma_addr, mapping); 1680 dma_unmap_addr_set(cb, dma_addr, mapping);
1681 dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
1677 dmadesc_set_addr(priv, cb->bd_addr, mapping); 1682 dmadesc_set_addr(priv, cb->bd_addr, mapping);
1678 1683
1679 /* Return the current Rx skb to caller */ 1684 /* Return the current Rx skb to caller */
@@ -1880,22 +1885,16 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
1880 1885
1881static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) 1886static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1882{ 1887{
1883 struct device *kdev = &priv->pdev->dev; 1888 struct sk_buff *skb;
1884 struct enet_cb *cb; 1889 struct enet_cb *cb;
1885 int i; 1890 int i;
1886 1891
1887 for (i = 0; i < priv->num_rx_bds; i++) { 1892 for (i = 0; i < priv->num_rx_bds; i++) {
1888 cb = &priv->rx_cbs[i]; 1893 cb = &priv->rx_cbs[i];
1889 1894
1890 if (dma_unmap_addr(cb, dma_addr)) { 1895 skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
1891 dma_unmap_single(kdev, 1896 if (skb)
1892 dma_unmap_addr(cb, dma_addr), 1897 dev_kfree_skb_any(skb);
1893 priv->rx_buf_len, DMA_FROM_DEVICE);
1894 dma_unmap_addr_set(cb, dma_addr, 0);
1895 }
1896
1897 if (cb->skb)
1898 bcmgenet_free_cb(cb);
1899 } 1898 }
1900} 1899}
1901 1900
@@ -2479,8 +2478,10 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2479 2478
2480static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) 2479static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2481{ 2480{
2482 int i;
2483 struct netdev_queue *txq; 2481 struct netdev_queue *txq;
2482 struct sk_buff *skb;
2483 struct enet_cb *cb;
2484 int i;
2484 2485
2485 bcmgenet_fini_rx_napi(priv); 2486 bcmgenet_fini_rx_napi(priv);
2486 bcmgenet_fini_tx_napi(priv); 2487 bcmgenet_fini_tx_napi(priv);
@@ -2489,10 +2490,10 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2489 bcmgenet_dma_teardown(priv); 2490 bcmgenet_dma_teardown(priv);
2490 2491
2491 for (i = 0; i < priv->num_tx_bds; i++) { 2492 for (i = 0; i < priv->num_tx_bds; i++) {
2492 if (priv->tx_cbs[i].skb != NULL) { 2493 cb = priv->tx_cbs + i;
2493 dev_kfree_skb(priv->tx_cbs[i].skb); 2494 skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
2494 priv->tx_cbs[i].skb = NULL; 2495 if (skb)
2495 } 2496 dev_kfree_skb(skb);
2496 } 2497 }
2497 2498
2498 for (i = 0; i < priv->hw_params->tx_queues; i++) { 2499 for (i = 0; i < priv->hw_params->tx_queues; i++) {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index efd07020b89f..b9344de669f8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -544,6 +544,8 @@ struct bcmgenet_hw_params {
544}; 544};
545 545
546struct bcmgenet_skb_cb { 546struct bcmgenet_skb_cb {
547 struct enet_cb *first_cb; /* First control block of SKB */
548 struct enet_cb *last_cb; /* Last control block of SKB */
547 unsigned int bytes_sent; /* bytes on the wire (no TSB) */ 549 unsigned int bytes_sent; /* bytes on the wire (no TSB) */
548}; 550};
549 551
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 28ecda3d3404..ebd353bc78ff 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -335,7 +335,7 @@ lio_ethtool_get_channels(struct net_device *dev,
335 335
336static int lio_get_eeprom_len(struct net_device *netdev) 336static int lio_get_eeprom_len(struct net_device *netdev)
337{ 337{
338 u8 buf[128]; 338 u8 buf[192];
339 struct lio *lio = GET_LIO(netdev); 339 struct lio *lio = GET_LIO(netdev);
340 struct octeon_device *oct_dev = lio->oct_dev; 340 struct octeon_device *oct_dev = lio->oct_dev;
341 struct octeon_board_info *board_info; 341 struct octeon_board_info *board_info;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index a0ca68ce3fbb..79112563a25a 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1008,7 +1008,7 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
1008{ 1008{
1009 struct device *dev = &bgx->pdev->dev; 1009 struct device *dev = &bgx->pdev->dev;
1010 struct lmac *lmac; 1010 struct lmac *lmac;
1011 char str[20]; 1011 char str[27];
1012 1012
1013 if (!bgx->is_dlm && lmacid) 1013 if (!bgx->is_dlm && lmacid)
1014 return; 1014 return;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 50517cfd9671..9f9d6cae39d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -441,7 +441,8 @@ void cxgb4_ptp_init(struct adapter *adapter)
441 441
442 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info, 442 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
443 &adapter->pdev->dev); 443 &adapter->pdev->dev);
444 if (!adapter->ptp_clock) { 444 if (IS_ERR_OR_NULL(adapter->ptp_clock)) {
445 adapter->ptp_clock = NULL;
445 dev_err(adapter->pdev_dev, 446 dev_err(adapter->pdev_dev,
446 "PTP %s Clock registration has failed\n", __func__); 447 "PTP %s Clock registration has failed\n", __func__);
447 return; 448 return;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 99987d8e437e..aa28299aef5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -174,6 +174,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
174 CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */ 174 CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
175 CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */ 175 CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */
176 CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */ 176 CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */
177 CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */
178 CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR */
177 179
178 /* T6 adapters: 180 /* T6 adapters:
179 */ 181 */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index ff864a187d5a..a37166ee577b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -776,8 +776,9 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
776 776
777 assert(handle); 777 assert(handle);
778 mac_cb = hns_get_mac_cb(handle); 778 mac_cb = hns_get_mac_cb(handle);
779 if (!mac_cb->cpld_ctrl) 779 if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
780 return; 780 return;
781
781 hns_set_led_opt(mac_cb); 782 hns_set_led_opt(mac_cb);
782} 783}
783 784
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 7a8addda726e..408b63faf9a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -53,6 +53,34 @@ static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
53 return ret; 53 return ret;
54} 54}
55 55
56static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
57 u32 link, u32 port, u32 act)
58{
59 union acpi_object *obj;
60 union acpi_object obj_args[3], argv4;
61
62 obj_args[0].integer.type = ACPI_TYPE_INTEGER;
63 obj_args[0].integer.value = link;
64 obj_args[1].integer.type = ACPI_TYPE_INTEGER;
65 obj_args[1].integer.value = port;
66 obj_args[2].integer.type = ACPI_TYPE_INTEGER;
67 obj_args[2].integer.value = act;
68
69 argv4.type = ACPI_TYPE_PACKAGE;
70 argv4.package.count = 3;
71 argv4.package.elements = obj_args;
72
73 obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
74 &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
75 if (!obj) {
76 dev_warn(mac_cb->dev, "ledctrl fail, link:%d port:%d act:%d!\n",
77 link, port, act);
78 return;
79 }
80
81 ACPI_FREE(obj);
82}
83
56static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, 84static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
57 u16 speed, int data) 85 u16 speed, int data)
58{ 86{
@@ -93,6 +121,18 @@ static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
93 } 121 }
94} 122}
95 123
124static void hns_cpld_set_led_acpi(struct hns_mac_cb *mac_cb, int link_status,
125 u16 speed, int data)
126{
127 if (!mac_cb) {
128 pr_err("cpld_led_set mac_cb is null!\n");
129 return;
130 }
131
132 hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
133 link_status, mac_cb->mac_id, data);
134}
135
96static void cpld_led_reset(struct hns_mac_cb *mac_cb) 136static void cpld_led_reset(struct hns_mac_cb *mac_cb)
97{ 137{
98 if (!mac_cb || !mac_cb->cpld_ctrl) 138 if (!mac_cb || !mac_cb->cpld_ctrl)
@@ -103,6 +143,20 @@ static void cpld_led_reset(struct hns_mac_cb *mac_cb)
103 mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; 143 mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
104} 144}
105 145
146static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
147{
148 if (!mac_cb) {
149 pr_err("cpld_led_reset mac_cb is null!\n");
150 return;
151 }
152
153 if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
154 return;
155
156 hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
157 0, mac_cb->mac_id, 0);
158}
159
106static int cpld_set_led_id(struct hns_mac_cb *mac_cb, 160static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
107 enum hnae_led_state status) 161 enum hnae_led_state status)
108{ 162{
@@ -604,8 +658,8 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
604 658
605 misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback; 659 misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback;
606 } else if (is_acpi_node(dsaf_dev->dev->fwnode)) { 660 } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
607 misc_op->cpld_set_led = hns_cpld_set_led; 661 misc_op->cpld_set_led = hns_cpld_set_led_acpi;
608 misc_op->cpld_reset_led = cpld_led_reset; 662 misc_op->cpld_reset_led = cpld_led_reset_acpi;
609 misc_op->cpld_set_led_id = cpld_set_led_id; 663 misc_op->cpld_set_led_id = cpld_set_led_id;
610 664
611 misc_op->dsaf_reset = hns_dsaf_rst_acpi; 665 misc_op->dsaf_reset = hns_dsaf_rst_acpi;
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 249a4584401a..b651c1210555 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -283,7 +283,7 @@ int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
283} 283}
284 284
285/* Should be called under a lock */ 285/* Should be called under a lock */
286static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry) 286static void __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
287{ 287{
288 struct mlx4_zone_allocator *zone_alloc = entry->allocator; 288 struct mlx4_zone_allocator *zone_alloc = entry->allocator;
289 289
@@ -315,8 +315,6 @@ static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
315 } 315 }
316 zone_alloc->mask = mask; 316 zone_alloc->mask = mask;
317 } 317 }
318
319 return 0;
320} 318}
321 319
322void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc) 320void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
@@ -457,7 +455,7 @@ struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32
457int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid) 455int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
458{ 456{
459 struct mlx4_zone_entry *zone; 457 struct mlx4_zone_entry *zone;
460 int res; 458 int res = 0;
461 459
462 spin_lock(&zones->lock); 460 spin_lock(&zones->lock);
463 461
@@ -468,7 +466,7 @@ int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
468 goto out; 466 goto out;
469 } 467 }
470 468
471 res = __mlx4_zone_remove_one_entry(zone); 469 __mlx4_zone_remove_one_entry(zone);
472 470
473out: 471out:
474 spin_unlock(&zones->lock); 472 spin_unlock(&zones->lock);
@@ -578,7 +576,7 @@ out:
578} 576}
579 577
580static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size, 578static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
581 struct mlx4_buf *buf, gfp_t gfp) 579 struct mlx4_buf *buf)
582{ 580{
583 dma_addr_t t; 581 dma_addr_t t;
584 582
@@ -587,7 +585,7 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
587 buf->page_shift = get_order(size) + PAGE_SHIFT; 585 buf->page_shift = get_order(size) + PAGE_SHIFT;
588 buf->direct.buf = 586 buf->direct.buf =
589 dma_zalloc_coherent(&dev->persist->pdev->dev, 587 dma_zalloc_coherent(&dev->persist->pdev->dev,
590 size, &t, gfp); 588 size, &t, GFP_KERNEL);
591 if (!buf->direct.buf) 589 if (!buf->direct.buf)
592 return -ENOMEM; 590 return -ENOMEM;
593 591
@@ -607,10 +605,10 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
607 * multiple pages, so we don't require too much contiguous memory. 605 * multiple pages, so we don't require too much contiguous memory.
608 */ 606 */
609int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, 607int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
610 struct mlx4_buf *buf, gfp_t gfp) 608 struct mlx4_buf *buf)
611{ 609{
612 if (size <= max_direct) { 610 if (size <= max_direct) {
613 return mlx4_buf_direct_alloc(dev, size, buf, gfp); 611 return mlx4_buf_direct_alloc(dev, size, buf);
614 } else { 612 } else {
615 dma_addr_t t; 613 dma_addr_t t;
616 int i; 614 int i;
@@ -620,14 +618,14 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
620 buf->npages = buf->nbufs; 618 buf->npages = buf->nbufs;
621 buf->page_shift = PAGE_SHIFT; 619 buf->page_shift = PAGE_SHIFT;
622 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), 620 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
623 gfp); 621 GFP_KERNEL);
624 if (!buf->page_list) 622 if (!buf->page_list)
625 return -ENOMEM; 623 return -ENOMEM;
626 624
627 for (i = 0; i < buf->nbufs; ++i) { 625 for (i = 0; i < buf->nbufs; ++i) {
628 buf->page_list[i].buf = 626 buf->page_list[i].buf =
629 dma_zalloc_coherent(&dev->persist->pdev->dev, 627 dma_zalloc_coherent(&dev->persist->pdev->dev,
630 PAGE_SIZE, &t, gfp); 628 PAGE_SIZE, &t, GFP_KERNEL);
631 if (!buf->page_list[i].buf) 629 if (!buf->page_list[i].buf)
632 goto err_free; 630 goto err_free;
633 631
@@ -663,12 +661,11 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
663} 661}
664EXPORT_SYMBOL_GPL(mlx4_buf_free); 662EXPORT_SYMBOL_GPL(mlx4_buf_free);
665 663
666static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device, 664static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
667 gfp_t gfp)
668{ 665{
669 struct mlx4_db_pgdir *pgdir; 666 struct mlx4_db_pgdir *pgdir;
670 667
671 pgdir = kzalloc(sizeof *pgdir, gfp); 668 pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
672 if (!pgdir) 669 if (!pgdir)
673 return NULL; 670 return NULL;
674 671
@@ -676,7 +673,7 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
676 pgdir->bits[0] = pgdir->order0; 673 pgdir->bits[0] = pgdir->order0;
677 pgdir->bits[1] = pgdir->order1; 674 pgdir->bits[1] = pgdir->order1;
678 pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, 675 pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
679 &pgdir->db_dma, gfp); 676 &pgdir->db_dma, GFP_KERNEL);
680 if (!pgdir->db_page) { 677 if (!pgdir->db_page) {
681 kfree(pgdir); 678 kfree(pgdir);
682 return NULL; 679 return NULL;
@@ -716,7 +713,7 @@ found:
716 return 0; 713 return 0;
717} 714}
718 715
719int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp) 716int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
720{ 717{
721 struct mlx4_priv *priv = mlx4_priv(dev); 718 struct mlx4_priv *priv = mlx4_priv(dev);
722 struct mlx4_db_pgdir *pgdir; 719 struct mlx4_db_pgdir *pgdir;
@@ -728,7 +725,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp
728 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) 725 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
729 goto out; 726 goto out;
730 727
731 pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp); 728 pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev);
732 if (!pgdir) { 729 if (!pgdir) {
733 ret = -ENOMEM; 730 ret = -ENOMEM;
734 goto out; 731 goto out;
@@ -780,13 +777,13 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
780{ 777{
781 int err; 778 int err;
782 779
783 err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL); 780 err = mlx4_db_alloc(dev, &wqres->db, 1);
784 if (err) 781 if (err)
785 return err; 782 return err;
786 783
787 *wqres->db.db = 0; 784 *wqres->db.db = 0;
788 785
789 err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL); 786 err = mlx4_buf_direct_alloc(dev, size, &wqres->buf);
790 if (err) 787 if (err)
791 goto err_db; 788 goto err_db;
792 789
@@ -795,7 +792,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
795 if (err) 792 if (err)
796 goto err_buf; 793 goto err_buf;
797 794
798 err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL); 795 err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
799 if (err) 796 if (err)
800 goto err_mtt; 797 goto err_mtt;
801 798
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index fa6d2354a0e9..c56a511b918e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -224,11 +224,11 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
224 if (*cqn == -1) 224 if (*cqn == -1)
225 return -ENOMEM; 225 return -ENOMEM;
226 226
227 err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL); 227 err = mlx4_table_get(dev, &cq_table->table, *cqn);
228 if (err) 228 if (err)
229 goto err_out; 229 goto err_out;
230 230
231 err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL); 231 err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
232 if (err) 232 if (err)
233 goto err_put; 233 goto err_put;
234 return 0; 234 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index e5fb89505a13..436f7689a032 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1042,7 +1042,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
1042 if (!context) 1042 if (!context)
1043 return -ENOMEM; 1043 return -ENOMEM;
1044 1044
1045 err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); 1045 err = mlx4_qp_alloc(mdev->dev, qpn, qp);
1046 if (err) { 1046 if (err) {
1047 en_err(priv, "Failed to allocate qp #%x\n", qpn); 1047 en_err(priv, "Failed to allocate qp #%x\n", qpn);
1048 goto out; 1048 goto out;
@@ -1086,7 +1086,7 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
1086 en_err(priv, "Failed reserving drop qpn\n"); 1086 en_err(priv, "Failed reserving drop qpn\n");
1087 return err; 1087 return err;
1088 } 1088 }
1089 err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL); 1089 err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
1090 if (err) { 1090 if (err) {
1091 en_err(priv, "Failed allocating drop qp\n"); 1091 en_err(priv, "Failed allocating drop qp\n");
1092 mlx4_qp_release_range(priv->mdev->dev, qpn, 1); 1092 mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
@@ -1158,8 +1158,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1158 } 1158 }
1159 1159
1160 /* Configure RSS indirection qp */ 1160 /* Configure RSS indirection qp */
1161 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp, 1161 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
1162 GFP_KERNEL);
1163 if (err) { 1162 if (err) {
1164 en_err(priv, "Failed to allocate RSS indirection QP\n"); 1163 en_err(priv, "Failed to allocate RSS indirection QP\n");
1165 goto rss_err; 1164 goto rss_err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4f3a9b27ce4a..73faa3d77921 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -111,7 +111,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
111 goto err_hwq_res; 111 goto err_hwq_res;
112 } 112 }
113 113
114 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp, GFP_KERNEL); 114 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp);
115 if (err) { 115 if (err) {
116 en_err(priv, "Failed allocating qp %d\n", ring->qpn); 116 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
117 goto err_reserve; 117 goto err_reserve;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index e1f9e7cebf8f..5a7816e7c7b4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -251,8 +251,7 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
251 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 251 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
252} 252}
253 253
254int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, 254int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
255 gfp_t gfp)
256{ 255{
257 u32 i = (obj & (table->num_obj - 1)) / 256 u32 i = (obj & (table->num_obj - 1)) /
258 (MLX4_TABLE_CHUNK_SIZE / table->obj_size); 257 (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
@@ -266,7 +265,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
266 } 265 }
267 266
268 table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, 267 table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
269 (table->lowmem ? gfp : GFP_HIGHUSER) | 268 (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
270 __GFP_NOWARN, table->coherent); 269 __GFP_NOWARN, table->coherent);
271 if (!table->icm[i]) { 270 if (!table->icm[i]) {
272 ret = -ENOMEM; 271 ret = -ENOMEM;
@@ -363,7 +362,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
363 u32 i; 362 u32 i;
364 363
365 for (i = start; i <= end; i += inc) { 364 for (i = start; i <= end; i += inc) {
366 err = mlx4_table_get(dev, table, i, GFP_KERNEL); 365 err = mlx4_table_get(dev, table, i);
367 if (err) 366 if (err)
368 goto fail; 367 goto fail;
369 } 368 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index 0c7364550150..dee67fa39107 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -71,8 +71,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
71 gfp_t gfp_mask, int coherent); 71 gfp_t gfp_mask, int coherent);
72void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent); 72void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
73 73
74int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, 74int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
75 gfp_t gfp);
76void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); 75void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
77int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, 76int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
78 u32 start, u32 end); 77 u32 start, u32 end);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 30616cd0140d..706d7f21ac5c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -969,7 +969,7 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
969void mlx4_cleanup_qp_table(struct mlx4_dev *dev); 969void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
970void mlx4_cleanup_srq_table(struct mlx4_dev *dev); 970void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
971void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); 971void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
972int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp); 972int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
973void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn); 973void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
974int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn); 974int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
975void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn); 975void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
@@ -977,7 +977,7 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
977void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn); 977void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
978int __mlx4_mpt_reserve(struct mlx4_dev *dev); 978int __mlx4_mpt_reserve(struct mlx4_dev *dev);
979void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index); 979void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
980int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp); 980int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
981void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index); 981void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
982u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order); 982u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
983void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order); 983void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index ce852ca22a96..24282cd017d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -479,14 +479,14 @@ static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
479 __mlx4_mpt_release(dev, index); 479 __mlx4_mpt_release(dev, index);
480} 480}
481 481
482int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) 482int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
483{ 483{
484 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 484 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
485 485
486 return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp); 486 return mlx4_table_get(dev, &mr_table->dmpt_table, index);
487} 487}
488 488
489static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) 489static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
490{ 490{
491 u64 param = 0; 491 u64 param = 0;
492 492
@@ -497,7 +497,7 @@ static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
497 MLX4_CMD_TIME_CLASS_A, 497 MLX4_CMD_TIME_CLASS_A,
498 MLX4_CMD_WRAPPED); 498 MLX4_CMD_WRAPPED);
499 } 499 }
500 return __mlx4_mpt_alloc_icm(dev, index, gfp); 500 return __mlx4_mpt_alloc_icm(dev, index);
501} 501}
502 502
503void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) 503void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
@@ -629,7 +629,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
629 struct mlx4_mpt_entry *mpt_entry; 629 struct mlx4_mpt_entry *mpt_entry;
630 int err; 630 int err;
631 631
632 err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL); 632 err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
633 if (err) 633 if (err)
634 return err; 634 return err;
635 635
@@ -787,14 +787,13 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
787EXPORT_SYMBOL_GPL(mlx4_write_mtt); 787EXPORT_SYMBOL_GPL(mlx4_write_mtt);
788 788
789int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 789int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
790 struct mlx4_buf *buf, gfp_t gfp) 790 struct mlx4_buf *buf)
791{ 791{
792 u64 *page_list; 792 u64 *page_list;
793 int err; 793 int err;
794 int i; 794 int i;
795 795
796 page_list = kmalloc(buf->npages * sizeof *page_list, 796 page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL);
797 gfp);
798 if (!page_list) 797 if (!page_list)
799 return -ENOMEM; 798 return -ENOMEM;
800 799
@@ -841,7 +840,7 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
841 struct mlx4_mpt_entry *mpt_entry; 840 struct mlx4_mpt_entry *mpt_entry;
842 int err; 841 int err;
843 842
844 err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL); 843 err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
845 if (err) 844 if (err)
846 return err; 845 return err;
847 846
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 5a310d313e94..26747212526b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -301,29 +301,29 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
301} 301}
302EXPORT_SYMBOL_GPL(mlx4_qp_release_range); 302EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
303 303
304int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) 304int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
305{ 305{
306 struct mlx4_priv *priv = mlx4_priv(dev); 306 struct mlx4_priv *priv = mlx4_priv(dev);
307 struct mlx4_qp_table *qp_table = &priv->qp_table; 307 struct mlx4_qp_table *qp_table = &priv->qp_table;
308 int err; 308 int err;
309 309
310 err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp); 310 err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
311 if (err) 311 if (err)
312 goto err_out; 312 goto err_out;
313 313
314 err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp); 314 err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
315 if (err) 315 if (err)
316 goto err_put_qp; 316 goto err_put_qp;
317 317
318 err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp); 318 err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
319 if (err) 319 if (err)
320 goto err_put_auxc; 320 goto err_put_auxc;
321 321
322 err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp); 322 err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
323 if (err) 323 if (err)
324 goto err_put_altc; 324 goto err_put_altc;
325 325
326 err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp); 326 err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
327 if (err) 327 if (err)
328 goto err_put_rdmarc; 328 goto err_put_rdmarc;
329 329
@@ -345,7 +345,7 @@ err_out:
345 return err; 345 return err;
346} 346}
347 347
348static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) 348static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
349{ 349{
350 u64 param = 0; 350 u64 param = 0;
351 351
@@ -355,7 +355,7 @@ static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
355 MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, 355 MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
356 MLX4_CMD_WRAPPED); 356 MLX4_CMD_WRAPPED);
357 } 357 }
358 return __mlx4_qp_alloc_icm(dev, qpn, gfp); 358 return __mlx4_qp_alloc_icm(dev, qpn);
359} 359}
360 360
361void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) 361void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
@@ -397,7 +397,7 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
397 return qp; 397 return qp;
398} 398}
399 399
400int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) 400int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
401{ 401{
402 struct mlx4_priv *priv = mlx4_priv(dev); 402 struct mlx4_priv *priv = mlx4_priv(dev);
403 struct mlx4_qp_table *qp_table = &priv->qp_table; 403 struct mlx4_qp_table *qp_table = &priv->qp_table;
@@ -408,7 +408,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
408 408
409 qp->qpn = qpn; 409 qp->qpn = qpn;
410 410
411 err = mlx4_qp_alloc_icm(dev, qpn, gfp); 411 err = mlx4_qp_alloc_icm(dev, qpn);
412 if (err) 412 if (err)
413 return err; 413 return err;
414 414
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 812783865205..215e21c3dc8a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1822,7 +1822,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1822 return err; 1822 return err;
1823 1823
1824 if (!fw_reserved(dev, qpn)) { 1824 if (!fw_reserved(dev, qpn)) {
1825 err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL); 1825 err = __mlx4_qp_alloc_icm(dev, qpn);
1826 if (err) { 1826 if (err) {
1827 res_abort_move(dev, slave, RES_QP, qpn); 1827 res_abort_move(dev, slave, RES_QP, qpn);
1828 return err; 1828 return err;
@@ -1909,7 +1909,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1909 if (err) 1909 if (err)
1910 return err; 1910 return err;
1911 1911
1912 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL); 1912 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1913 if (err) { 1913 if (err) {
1914 res_abort_move(dev, slave, RES_MPT, id); 1914 res_abort_move(dev, slave, RES_MPT, id);
1915 return err; 1915 return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index f44d089e2ca6..bedf52126824 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -100,11 +100,11 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
100 if (*srqn == -1) 100 if (*srqn == -1)
101 return -ENOMEM; 101 return -ENOMEM;
102 102
103 err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL); 103 err = mlx4_table_get(dev, &srq_table->table, *srqn);
104 if (err) 104 if (err)
105 goto err_out; 105 goto err_out;
106 106
107 err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL); 107 err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
108 if (err) 108 if (err)
109 goto err_put; 109 goto err_put;
110 return 0; 110 return 0;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 746d94e28470..60850bfa3d32 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -766,11 +766,13 @@ static void emac_shutdown(struct platform_device *pdev)
766 struct emac_adapter *adpt = netdev_priv(netdev); 766 struct emac_adapter *adpt = netdev_priv(netdev);
767 struct emac_sgmii *sgmii = &adpt->phy; 767 struct emac_sgmii *sgmii = &adpt->phy;
768 768
769 /* Closing the SGMII turns off its interrupts */ 769 if (netdev->flags & IFF_UP) {
770 sgmii->close(adpt); 770 /* Closing the SGMII turns off its interrupts */
771 sgmii->close(adpt);
771 772
772 /* Resetting the MAC turns off all DMA and its interrupts */ 773 /* Resetting the MAC turns off all DMA and its interrupts */
773 emac_mac_reset(adpt); 774 emac_mac_reset(adpt);
775 }
774} 776}
775 777
776static struct platform_driver emac_platform_driver = { 778static struct platform_driver emac_platform_driver = {
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index b607936e1b3e..9c0488e0f08e 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -90,17 +90,13 @@ struct ioc3_private {
90 spinlock_t ioc3_lock; 90 spinlock_t ioc3_lock;
91 struct mii_if_info mii; 91 struct mii_if_info mii;
92 92
93 struct net_device *dev;
93 struct pci_dev *pdev; 94 struct pci_dev *pdev;
94 95
95 /* Members used by autonegotiation */ 96 /* Members used by autonegotiation */
96 struct timer_list ioc3_timer; 97 struct timer_list ioc3_timer;
97}; 98};
98 99
99static inline struct net_device *priv_netdev(struct ioc3_private *dev)
100{
101 return (void *)dev - ((sizeof(struct net_device) + 31) & ~31);
102}
103
104static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 100static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
105static void ioc3_set_multicast_list(struct net_device *dev); 101static void ioc3_set_multicast_list(struct net_device *dev);
106static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); 102static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -427,7 +423,7 @@ static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
427 nic[i] = nic_read_byte(ioc3); 423 nic[i] = nic_read_byte(ioc3);
428 424
429 for (i = 2; i < 8; i++) 425 for (i = 2; i < 8; i++)
430 priv_netdev(ip)->dev_addr[i - 2] = nic[i]; 426 ip->dev->dev_addr[i - 2] = nic[i];
431} 427}
432 428
433/* 429/*
@@ -439,7 +435,7 @@ static void ioc3_get_eaddr(struct ioc3_private *ip)
439{ 435{
440 ioc3_get_eaddr_nic(ip); 436 ioc3_get_eaddr_nic(ip);
441 437
442 printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr); 438 printk("Ethernet address is %pM.\n", ip->dev->dev_addr);
443} 439}
444 440
445static void __ioc3_set_mac_address(struct net_device *dev) 441static void __ioc3_set_mac_address(struct net_device *dev)
@@ -790,13 +786,12 @@ static void ioc3_timer(unsigned long data)
790 */ 786 */
791static int ioc3_mii_init(struct ioc3_private *ip) 787static int ioc3_mii_init(struct ioc3_private *ip)
792{ 788{
793 struct net_device *dev = priv_netdev(ip);
794 int i, found = 0, res = 0; 789 int i, found = 0, res = 0;
795 int ioc3_phy_workaround = 1; 790 int ioc3_phy_workaround = 1;
796 u16 word; 791 u16 word;
797 792
798 for (i = 0; i < 32; i++) { 793 for (i = 0; i < 32; i++) {
799 word = ioc3_mdio_read(dev, i, MII_PHYSID1); 794 word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1);
800 795
801 if (word != 0xffff && word != 0x0000) { 796 if (word != 0xffff && word != 0x0000) {
802 found = 1; 797 found = 1;
@@ -1276,6 +1271,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1276 SET_NETDEV_DEV(dev, &pdev->dev); 1271 SET_NETDEV_DEV(dev, &pdev->dev);
1277 1272
1278 ip = netdev_priv(dev); 1273 ip = netdev_priv(dev);
1274 ip->dev = dev;
1279 1275
1280 dev->irq = pdev->irq; 1276 dev->irq = pdev->irq;
1281 1277
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f233bf8b4ebb..c4407e8e39a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -117,7 +117,7 @@ static void dwmac4_tx_queue_routing(struct mac_device_info *hw,
117 void __iomem *ioaddr = hw->pcsr; 117 void __iomem *ioaddr = hw->pcsr;
118 u32 value; 118 u32 value;
119 119
120 const struct stmmac_rx_routing route_possibilities[] = { 120 static const struct stmmac_rx_routing route_possibilities[] = {
121 { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT }, 121 { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
122 { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT }, 122 { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
123 { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT }, 123 { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1853f7ff6657..1763e48c84e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4120,8 +4120,15 @@ int stmmac_dvr_probe(struct device *device,
4120 if ((phyaddr >= 0) && (phyaddr <= 31)) 4120 if ((phyaddr >= 0) && (phyaddr <= 31))
4121 priv->plat->phy_addr = phyaddr; 4121 priv->plat->phy_addr = phyaddr;
4122 4122
4123 if (priv->plat->stmmac_rst) 4123 if (priv->plat->stmmac_rst) {
4124 ret = reset_control_assert(priv->plat->stmmac_rst);
4124 reset_control_deassert(priv->plat->stmmac_rst); 4125 reset_control_deassert(priv->plat->stmmac_rst);
4126 /* Some reset controllers have only reset callback instead of
4127 * assert + deassert callbacks pair.
4128 */
4129 if (ret == -ENOTSUPP)
4130 reset_control_reset(priv->plat->stmmac_rst);
4131 }
4125 4132
4126 /* Init MAC and get the capabilities */ 4133 /* Init MAC and get the capabilities */
4127 ret = stmmac_hw_init(priv); 4134 ret = stmmac_hw_init(priv);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 46cb7f8955a2..4bb04aaf9650 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9532,7 +9532,7 @@ static struct niu_parent *niu_get_parent(struct niu *np,
9532 p = niu_new_parent(np, id, ptype); 9532 p = niu_new_parent(np, id, ptype);
9533 9533
9534 if (p) { 9534 if (p) {
9535 char port_name[6]; 9535 char port_name[8];
9536 int err; 9536 int err;
9537 9537
9538 sprintf(port_name, "port%d", port); 9538 sprintf(port_name, "port%d", port);
@@ -9553,7 +9553,7 @@ static void niu_put_parent(struct niu *np)
9553{ 9553{
9554 struct niu_parent *p = np->parent; 9554 struct niu_parent *p = np->parent;
9555 u8 port = np->port; 9555 u8 port = np->port;
9556 char port_name[6]; 9556 char port_name[8];
9557 9557
9558 BUG_ON(!p || p->ports[port] != np); 9558 BUG_ON(!p || p->ports[port] != np);
9559 9559
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 711fbbbc4b1f..163d8d16bc24 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -654,6 +654,8 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
654 RET(-EFAULT); 654 RET(-EFAULT);
655 } 655 }
656 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]); 656 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
657 } else {
658 return -EOPNOTSUPP;
657 } 659 }
658 660
659 if (!capable(CAP_SYS_RAWIO)) 661 if (!capable(CAP_SYS_RAWIO))
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1850e348f555..badd0a8caeb9 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -3089,6 +3089,31 @@ static int cpsw_probe(struct platform_device *pdev)
3089 cpsw->quirk_irq = true; 3089 cpsw->quirk_irq = true;
3090 } 3090 }
3091 3091
3092 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3093
3094 ndev->netdev_ops = &cpsw_netdev_ops;
3095 ndev->ethtool_ops = &cpsw_ethtool_ops;
3096 netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
3097 netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
3098 cpsw_split_res(ndev);
3099
3100 /* register the network device */
3101 SET_NETDEV_DEV(ndev, &pdev->dev);
3102 ret = register_netdev(ndev);
3103 if (ret) {
3104 dev_err(priv->dev, "error registering net device\n");
3105 ret = -ENODEV;
3106 goto clean_ale_ret;
3107 }
3108
3109 if (cpsw->data.dual_emac) {
3110 ret = cpsw_probe_dual_emac(priv);
3111 if (ret) {
3112 cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
3113 goto clean_unregister_netdev_ret;
3114 }
3115 }
3116
3092 /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and 3117 /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
3093 * MISC IRQs which are always kept disabled with this driver so 3118 * MISC IRQs which are always kept disabled with this driver so
3094 * we will not request them. 3119 * we will not request them.
@@ -3127,33 +3152,9 @@ static int cpsw_probe(struct platform_device *pdev)
3127 goto clean_ale_ret; 3152 goto clean_ale_ret;
3128 } 3153 }
3129 3154
3130 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3131
3132 ndev->netdev_ops = &cpsw_netdev_ops;
3133 ndev->ethtool_ops = &cpsw_ethtool_ops;
3134 netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
3135 netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
3136 cpsw_split_res(ndev);
3137
3138 /* register the network device */
3139 SET_NETDEV_DEV(ndev, &pdev->dev);
3140 ret = register_netdev(ndev);
3141 if (ret) {
3142 dev_err(priv->dev, "error registering net device\n");
3143 ret = -ENODEV;
3144 goto clean_ale_ret;
3145 }
3146
3147 cpsw_notice(priv, probe, 3155 cpsw_notice(priv, probe,
3148 "initialized device (regs %pa, irq %d, pool size %d)\n", 3156 "initialized device (regs %pa, irq %d, pool size %d)\n",
3149 &ss_res->start, ndev->irq, dma_params.descs_pool_size); 3157 &ss_res->start, ndev->irq, dma_params.descs_pool_size);
3150 if (cpsw->data.dual_emac) {
3151 ret = cpsw_probe_dual_emac(priv);
3152 if (ret) {
3153 cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
3154 goto clean_unregister_netdev_ret;
3155 }
3156 }
3157 3158
3158 pm_runtime_put(&pdev->dev); 3159 pm_runtime_put(&pdev->dev);
3159 3160
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 00755b6a42cf..c608e1dfaf09 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -135,8 +135,8 @@ int mdio_mux_init(struct device *dev,
135 for_each_available_child_of_node(dev->of_node, child_bus_node) { 135 for_each_available_child_of_node(dev->of_node, child_bus_node) {
136 int v; 136 int v;
137 137
138 v = of_mdio_parse_addr(dev, child_bus_node); 138 r = of_property_read_u32(child_bus_node, "reg", &v);
139 if (v < 0) { 139 if (r) {
140 dev_err(dev, 140 dev_err(dev,
141 "Error: Failed to find reg for child %s\n", 141 "Error: Failed to find reg for child %s\n",
142 of_node_full_name(child_bus_node)); 142 of_node_full_name(child_bus_node));
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 13028833bee3..bd4303944e44 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -120,6 +120,7 @@ struct ppp {
120 int n_channels; /* how many channels are attached 54 */ 120 int n_channels; /* how many channels are attached 54 */
121 spinlock_t rlock; /* lock for receive side 58 */ 121 spinlock_t rlock; /* lock for receive side 58 */
122 spinlock_t wlock; /* lock for transmit side 5c */ 122 spinlock_t wlock; /* lock for transmit side 5c */
123 int *xmit_recursion __percpu; /* xmit recursion detect */
123 int mru; /* max receive unit 60 */ 124 int mru; /* max receive unit 60 */
124 unsigned int flags; /* control bits 64 */ 125 unsigned int flags; /* control bits 64 */
125 unsigned int xstate; /* transmit state bits 68 */ 126 unsigned int xstate; /* transmit state bits 68 */
@@ -1025,6 +1026,7 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
1025 struct ppp *ppp = netdev_priv(dev); 1026 struct ppp *ppp = netdev_priv(dev);
1026 int indx; 1027 int indx;
1027 int err; 1028 int err;
1029 int cpu;
1028 1030
1029 ppp->dev = dev; 1031 ppp->dev = dev;
1030 ppp->ppp_net = src_net; 1032 ppp->ppp_net = src_net;
@@ -1039,6 +1041,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
1039 INIT_LIST_HEAD(&ppp->channels); 1041 INIT_LIST_HEAD(&ppp->channels);
1040 spin_lock_init(&ppp->rlock); 1042 spin_lock_init(&ppp->rlock);
1041 spin_lock_init(&ppp->wlock); 1043 spin_lock_init(&ppp->wlock);
1044
1045 ppp->xmit_recursion = alloc_percpu(int);
1046 if (!ppp->xmit_recursion) {
1047 err = -ENOMEM;
1048 goto err1;
1049 }
1050 for_each_possible_cpu(cpu)
1051 (*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
1052
1042#ifdef CONFIG_PPP_MULTILINK 1053#ifdef CONFIG_PPP_MULTILINK
1043 ppp->minseq = -1; 1054 ppp->minseq = -1;
1044 skb_queue_head_init(&ppp->mrq); 1055 skb_queue_head_init(&ppp->mrq);
@@ -1050,11 +1061,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
1050 1061
1051 err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set); 1062 err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
1052 if (err < 0) 1063 if (err < 0)
1053 return err; 1064 goto err2;
1054 1065
1055 conf->file->private_data = &ppp->file; 1066 conf->file->private_data = &ppp->file;
1056 1067
1057 return 0; 1068 return 0;
1069err2:
1070 free_percpu(ppp->xmit_recursion);
1071err1:
1072 return err;
1058} 1073}
1059 1074
1060static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = { 1075static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
@@ -1400,18 +1415,16 @@ static void __ppp_xmit_process(struct ppp *ppp)
1400 ppp_xmit_unlock(ppp); 1415 ppp_xmit_unlock(ppp);
1401} 1416}
1402 1417
1403static DEFINE_PER_CPU(int, ppp_xmit_recursion);
1404
1405static void ppp_xmit_process(struct ppp *ppp) 1418static void ppp_xmit_process(struct ppp *ppp)
1406{ 1419{
1407 local_bh_disable(); 1420 local_bh_disable();
1408 1421
1409 if (unlikely(__this_cpu_read(ppp_xmit_recursion))) 1422 if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
1410 goto err; 1423 goto err;
1411 1424
1412 __this_cpu_inc(ppp_xmit_recursion); 1425 (*this_cpu_ptr(ppp->xmit_recursion))++;
1413 __ppp_xmit_process(ppp); 1426 __ppp_xmit_process(ppp);
1414 __this_cpu_dec(ppp_xmit_recursion); 1427 (*this_cpu_ptr(ppp->xmit_recursion))--;
1415 1428
1416 local_bh_enable(); 1429 local_bh_enable();
1417 1430
@@ -1905,7 +1918,7 @@ static void __ppp_channel_push(struct channel *pch)
1905 read_lock(&pch->upl); 1918 read_lock(&pch->upl);
1906 ppp = pch->ppp; 1919 ppp = pch->ppp;
1907 if (ppp) 1920 if (ppp)
1908 __ppp_xmit_process(ppp); 1921 ppp_xmit_process(ppp);
1909 read_unlock(&pch->upl); 1922 read_unlock(&pch->upl);
1910 } 1923 }
1911} 1924}
@@ -1914,9 +1927,7 @@ static void ppp_channel_push(struct channel *pch)
1914{ 1927{
1915 local_bh_disable(); 1928 local_bh_disable();
1916 1929
1917 __this_cpu_inc(ppp_xmit_recursion);
1918 __ppp_channel_push(pch); 1930 __ppp_channel_push(pch);
1919 __this_cpu_dec(ppp_xmit_recursion);
1920 1931
1921 local_bh_enable(); 1932 local_bh_enable();
1922} 1933}
@@ -3057,6 +3068,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
3057#endif /* CONFIG_PPP_FILTER */ 3068#endif /* CONFIG_PPP_FILTER */
3058 3069
3059 kfree_skb(ppp->xmit_pending); 3070 kfree_skb(ppp->xmit_pending);
3071 free_percpu(ppp->xmit_recursion);
3060 3072
3061 free_netdev(ppp->dev); 3073 free_netdev(ppp->dev);
3062} 3074}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d103a1d4fb36..8f572b9f3625 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -768,8 +768,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
768 u8 *buf; 768 u8 *buf;
769 int len; 769 int len;
770 int temp; 770 int temp;
771 int err;
771 u8 iface_no; 772 u8 iface_no;
772 struct usb_cdc_parsed_header hdr; 773 struct usb_cdc_parsed_header hdr;
774 u16 curr_ntb_format;
773 775
774 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 776 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
775 if (!ctx) 777 if (!ctx)
@@ -874,6 +876,32 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
874 goto error2; 876 goto error2;
875 } 877 }
876 878
879 /*
880 * Some Huawei devices have been observed to come out of reset in NDP32 mode.
881 * Let's check if this is the case, and set the device to NDP16 mode again if
882 * needed.
883 */
884 if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
885 err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
886 USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
887 0, iface_no, &curr_ntb_format, 2);
888 if (err < 0) {
889 goto error2;
890 }
891
892 if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
893 dev_info(&intf->dev, "resetting NTB format to 16-bit");
894 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
895 USB_TYPE_CLASS | USB_DIR_OUT
896 | USB_RECIP_INTERFACE,
897 USB_CDC_NCM_NTB16_FORMAT,
898 iface_no, NULL, 0);
899
900 if (err < 0)
901 goto error2;
902 }
903 }
904
877 cdc_ncm_find_endpoints(dev, ctx->data); 905 cdc_ncm_find_endpoints(dev, ctx->data);
878 cdc_ncm_find_endpoints(dev, ctx->control); 906 cdc_ncm_find_endpoints(dev, ctx->control);
879 if (!dev->in || !dev->out || !dev->status) { 907 if (!dev->in || !dev->out || !dev->status) {
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 2680a65cd5e4..63f28908afda 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -80,6 +80,12 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
80 * be at the end of the frame. 80 * be at the end of the frame.
81 */ 81 */
82 drvflags |= CDC_NCM_FLAG_NDP_TO_END; 82 drvflags |= CDC_NCM_FLAG_NDP_TO_END;
83
84 /* Additionally, it has been reported that some Huawei E3372H devices, with
85 * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence
86 * needing to be set to the NTB16 one again.
87 */
88 drvflags |= CDC_NCM_FLAG_RESET_NTB16;
83 ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags); 89 ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
84 if (ret) 90 if (ret)
85 goto err; 91 goto err;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 2dfca96a63b6..340c13484e5c 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -898,6 +898,7 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
898 .set_wol = smsc95xx_ethtool_set_wol, 898 .set_wol = smsc95xx_ethtool_set_wol,
899 .get_link_ksettings = smsc95xx_get_link_ksettings, 899 .get_link_ksettings = smsc95xx_get_link_ksettings,
900 .set_link_ksettings = smsc95xx_set_link_ksettings, 900 .set_link_ksettings = smsc95xx_set_link_ksettings,
901 .get_ts_info = ethtool_op_get_ts_info,
901}; 902};
902 903
903static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 904static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index ba1c9f93592b..9c51b8be0038 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -311,7 +311,7 @@ struct vmxnet3_intr {
311 u8 num_intrs; /* # of intr vectors */ 311 u8 num_intrs; /* # of intr vectors */
312 u8 event_intr_idx; /* idx of the intr vector for event */ 312 u8 event_intr_idx; /* idx of the intr vector for event */
313 u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */ 313 u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
314 char event_msi_vector_name[IFNAMSIZ+11]; 314 char event_msi_vector_name[IFNAMSIZ+17];
315#ifdef CONFIG_PCI_MSI 315#ifdef CONFIG_PCI_MSI
316 struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT]; 316 struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
317#endif 317#endif
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 6e2e760d98b1..0b75def39c6c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -5704,7 +5704,7 @@ static void rt2800_init_freq_calibration(struct rt2x00_dev *rt2x00dev)
5704 5704
5705static void rt2800_init_bbp_5592_glrt(struct rt2x00_dev *rt2x00dev) 5705static void rt2800_init_bbp_5592_glrt(struct rt2x00_dev *rt2x00dev)
5706{ 5706{
5707 const u8 glrt_table[] = { 5707 static const u8 glrt_table[] = {
5708 0xE0, 0x1F, 0X38, 0x32, 0x08, 0x28, 0x19, 0x0A, 0xFF, 0x00, /* 128 ~ 137 */ 5708 0xE0, 0x1F, 0X38, 0x32, 0x08, 0x28, 0x19, 0x0A, 0xFF, 0x00, /* 128 ~ 137 */
5709 0x16, 0x10, 0x10, 0x0B, 0x36, 0x2C, 0x26, 0x24, 0x42, 0x36, /* 138 ~ 147 */ 5709 0x16, 0x10, 0x10, 0x0B, 0x36, 0x2C, 0x26, 0x24, 0x42, 0x36, /* 138 ~ 147 */
5710 0x30, 0x2D, 0x4C, 0x46, 0x3D, 0x40, 0x3E, 0x42, 0x3D, 0x40, /* 148 ~ 157 */ 5710 0x30, 0x2D, 0x4C, 0x46, 0x3D, 0x40, 0x3E, 0x42, 0x3D, 0x40, /* 148 ~ 157 */
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 7cd99b1f8596..75bc08c6838c 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -421,14 +421,15 @@ static void set_badblock(struct badblocks *bb, sector_t s, int num)
421static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len) 421static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
422{ 422{
423 const unsigned int sector_size = 512; 423 const unsigned int sector_size = 512;
424 sector_t start_sector; 424 sector_t start_sector, end_sector;
425 u64 num_sectors; 425 u64 num_sectors;
426 u32 rem; 426 u32 rem;
427 427
428 start_sector = div_u64(ns_offset, sector_size); 428 start_sector = div_u64(ns_offset, sector_size);
429 num_sectors = div_u64_rem(len, sector_size, &rem); 429 end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
430 if (rem) 430 if (rem)
431 num_sectors++; 431 end_sector++;
432 num_sectors = end_sector - start_sector;
432 433
433 if (unlikely(num_sectors > (u64)INT_MAX)) { 434 if (unlikely(num_sectors > (u64)INT_MAX)) {
434 u64 remaining = num_sectors; 435 u64 remaining = num_sectors;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 077f62e208aa..6a4367cc9caa 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3401,9 +3401,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3401 if (is_write) { 3401 if (is_write) {
3402 req_flags |= SISL_REQ_FLAGS_HOST_WRITE; 3402 req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3403 3403
3404 rc = copy_from_user(kbuf, ubuf, ulen); 3404 if (copy_from_user(kbuf, ubuf, ulen)) {
3405 if (unlikely(rc)) 3405 rc = -EFAULT;
3406 goto out; 3406 goto out;
3407 }
3407 } 3408 }
3408 } 3409 }
3409 3410
@@ -3431,8 +3432,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3431 goto out; 3432 goto out;
3432 } 3433 }
3433 3434
3434 if (ulen && !is_write) 3435 if (ulen && !is_write) {
3435 rc = copy_to_user(ubuf, kbuf, ulen); 3436 if (copy_to_user(ubuf, kbuf, ulen))
3437 rc = -EFAULT;
3438 }
3436out: 3439out:
3437 kfree(buf); 3440 kfree(buf);
3438 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 3441 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 551d103c27f1..2bfea7082e3a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1693,7 +1693,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
1693 1693
1694static int parse_trans_tx_err_code_v2_hw(u32 err_msk) 1694static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
1695{ 1695{
1696 const u8 trans_tx_err_code_prio[] = { 1696 static const u8 trans_tx_err_code_prio[] = {
1697 TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS, 1697 TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS,
1698 TRANS_TX_ERR_PHY_NOT_ENABLE, 1698 TRANS_TX_ERR_PHY_NOT_ENABLE,
1699 TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, 1699 TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION,
@@ -1738,7 +1738,7 @@ static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
1738 1738
1739static int parse_trans_rx_err_code_v2_hw(u32 err_msk) 1739static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
1740{ 1740{
1741 const u8 trans_rx_err_code_prio[] = { 1741 static const u8 trans_rx_err_code_prio[] = {
1742 TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR, 1742 TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR,
1743 TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, 1743 TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR,
1744 TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, 1744 TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM,
@@ -1784,7 +1784,7 @@ static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
1784 1784
1785static int parse_dma_tx_err_code_v2_hw(u32 err_msk) 1785static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
1786{ 1786{
1787 const u8 dma_tx_err_code_prio[] = { 1787 static const u8 dma_tx_err_code_prio[] = {
1788 DMA_TX_UNEXP_XFER_ERR, 1788 DMA_TX_UNEXP_XFER_ERR,
1789 DMA_TX_UNEXP_RETRANS_ERR, 1789 DMA_TX_UNEXP_RETRANS_ERR,
1790 DMA_TX_XFER_LEN_OVERFLOW, 1790 DMA_TX_XFER_LEN_OVERFLOW,
@@ -1810,7 +1810,7 @@ static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
1810 1810
1811static int parse_sipc_rx_err_code_v2_hw(u32 err_msk) 1811static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
1812{ 1812{
1813 const u8 sipc_rx_err_code_prio[] = { 1813 static const u8 sipc_rx_err_code_prio[] = {
1814 SIPC_RX_FIS_STATUS_ERR_BIT_VLD, 1814 SIPC_RX_FIS_STATUS_ERR_BIT_VLD,
1815 SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, 1815 SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR,
1816 SIPC_RX_FIS_STATUS_BSY_BIT_ERR, 1816 SIPC_RX_FIS_STATUS_BSY_BIT_ERR,
@@ -1836,7 +1836,7 @@ static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
1836 1836
1837static int parse_dma_rx_err_code_v2_hw(u32 err_msk) 1837static int parse_dma_rx_err_code_v2_hw(u32 err_msk)
1838{ 1838{
1839 const u8 dma_rx_err_code_prio[] = { 1839 static const u8 dma_rx_err_code_prio[] = {
1840 DMA_RX_UNKNOWN_FRM_ERR, 1840 DMA_RX_UNKNOWN_FRM_ERR,
1841 DMA_RX_DATA_LEN_OVERFLOW, 1841 DMA_RX_DATA_LEN_OVERFLOW,
1842 DMA_RX_DATA_LEN_UNDERFLOW, 1842 DMA_RX_DATA_LEN_UNDERFLOW,
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 47f66e949745..ed197bc8e801 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -213,7 +213,7 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
213 * @task_context: 213 * @task_context:
214 * 214 *
215 */ 215 */
216static void scu_ssp_reqeust_construct_task_context( 216static void scu_ssp_request_construct_task_context(
217 struct isci_request *ireq, 217 struct isci_request *ireq,
218 struct scu_task_context *task_context) 218 struct scu_task_context *task_context)
219{ 219{
@@ -425,7 +425,7 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
425 u8 prot_type = scsi_get_prot_type(scmd); 425 u8 prot_type = scsi_get_prot_type(scmd);
426 u8 prot_op = scsi_get_prot_op(scmd); 426 u8 prot_op = scsi_get_prot_op(scmd);
427 427
428 scu_ssp_reqeust_construct_task_context(ireq, task_context); 428 scu_ssp_request_construct_task_context(ireq, task_context);
429 429
430 task_context->ssp_command_iu_length = 430 task_context->ssp_command_iu_length =
431 sizeof(struct ssp_cmd_iu) / sizeof(u32); 431 sizeof(struct ssp_cmd_iu) / sizeof(u32);
@@ -472,7 +472,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
472{ 472{
473 struct scu_task_context *task_context = ireq->tc; 473 struct scu_task_context *task_context = ireq->tc;
474 474
475 scu_ssp_reqeust_construct_task_context(ireq, task_context); 475 scu_ssp_request_construct_task_context(ireq, task_context);
476 476
477 task_context->control_frame = 1; 477 task_context->control_frame = 1;
478 task_context->priority = SCU_TASK_PRIORITY_HIGH; 478 task_context->priority = SCU_TASK_PRIORITY_HIGH;
@@ -495,7 +495,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
495 * the command buffer is complete. none Revisit task context construction to 495 * the command buffer is complete. none Revisit task context construction to
496 * determine what is common for SSP/SMP/STP task context structures. 496 * determine what is common for SSP/SMP/STP task context structures.
497 */ 497 */
498static void scu_sata_reqeust_construct_task_context( 498static void scu_sata_request_construct_task_context(
499 struct isci_request *ireq, 499 struct isci_request *ireq,
500 struct scu_task_context *task_context) 500 struct scu_task_context *task_context)
501{ 501{
@@ -562,7 +562,7 @@ static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq
562{ 562{
563 struct scu_task_context *task_context = ireq->tc; 563 struct scu_task_context *task_context = ireq->tc;
564 564
565 scu_sata_reqeust_construct_task_context(ireq, task_context); 565 scu_sata_request_construct_task_context(ireq, task_context);
566 566
567 task_context->control_frame = 0; 567 task_context->control_frame = 0;
568 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 568 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
@@ -613,7 +613,7 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq,
613 struct scu_task_context *task_context = ireq->tc; 613 struct scu_task_context *task_context = ireq->tc;
614 614
615 /* Build the STP task context structure */ 615 /* Build the STP task context structure */
616 scu_sata_reqeust_construct_task_context(ireq, task_context); 616 scu_sata_request_construct_task_context(ireq, task_context);
617 617
618 /* Copy over the SGL elements */ 618 /* Copy over the SGL elements */
619 sci_request_build_sgl(ireq); 619 sci_request_build_sgl(ireq);
@@ -1401,7 +1401,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
1401 * @data_buffer: The buffer of data to be copied. 1401 * @data_buffer: The buffer of data to be copied.
1402 * @length: The length of the data transfer. 1402 * @length: The length of the data transfer.
1403 * 1403 *
1404 * Copy the data from the buffer for the length specified to the IO reqeust SGL 1404 * Copy the data from the buffer for the length specified to the IO request SGL
1405 * specified data region. enum sci_status 1405 * specified data region. enum sci_status
1406 */ 1406 */
1407static enum sci_status 1407static enum sci_status
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index fd501f8dbb11..8660f923ace0 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -573,7 +573,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
573 event = DISC_EV_FAILED; 573 event = DISC_EV_FAILED;
574 } 574 }
575 if (error) 575 if (error)
576 fc_disc_error(disc, fp); 576 fc_disc_error(disc, ERR_PTR(error));
577 else if (event != DISC_EV_NONE) 577 else if (event != DISC_EV_NONE)
578 fc_disc_done(disc, event); 578 fc_disc_done(disc, event);
579 fc_frame_free(fp); 579 fc_frame_free(fp);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index b58bba4604e8..7786c97e033f 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1227,7 +1227,7 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
1227 1227
1228 if (rdata->spp_type != FC_TYPE_FCP) { 1228 if (rdata->spp_type != FC_TYPE_FCP) {
1229 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 1229 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1230 "Not offlading since since spp type isn't FCP\n"); 1230 "Not offloading since spp type isn't FCP\n");
1231 break; 1231 break;
1232 } 1232 }
1233 if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { 1233 if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 32632c9b2276..91d2f51c351b 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -23,11 +23,17 @@
23#include <linux/qed/qed_iscsi_if.h> 23#include <linux/qed/qed_iscsi_if.h>
24#include <linux/qed/qed_ll2_if.h> 24#include <linux/qed/qed_ll2_if.h>
25#include "qedi_version.h" 25#include "qedi_version.h"
26#include "qedi_nvm_iscsi_cfg.h"
26 27
27#define QEDI_MODULE_NAME "qedi" 28#define QEDI_MODULE_NAME "qedi"
28 29
29struct qedi_endpoint; 30struct qedi_endpoint;
30 31
32#ifndef GET_FIELD2
33#define GET_FIELD2(value, name) \
34 (((value) & (name ## _MASK)) >> (name ## _OFFSET))
35#endif
36
31/* 37/*
32 * PCI function probe defines 38 * PCI function probe defines
33 */ 39 */
@@ -66,6 +72,11 @@ struct qedi_endpoint;
66#define QEDI_HW_DMA_BOUNDARY 0xfff 72#define QEDI_HW_DMA_BOUNDARY 0xfff
67#define QEDI_PATH_HANDLE 0xFE0000000UL 73#define QEDI_PATH_HANDLE 0xFE0000000UL
68 74
75enum qedi_nvm_tgts {
76 QEDI_NVM_TGT_PRI,
77 QEDI_NVM_TGT_SEC,
78};
79
69struct qedi_uio_ctrl { 80struct qedi_uio_ctrl {
70 /* meta data */ 81 /* meta data */
71 u32 uio_hsi_version; 82 u32 uio_hsi_version;
@@ -283,6 +294,8 @@ struct qedi_ctx {
283 void *bdq_pbl_list; 294 void *bdq_pbl_list;
284 dma_addr_t bdq_pbl_list_dma; 295 dma_addr_t bdq_pbl_list_dma;
285 u8 bdq_pbl_list_num_entries; 296 u8 bdq_pbl_list_num_entries;
297 struct nvm_iscsi_cfg *iscsi_cfg;
298 dma_addr_t nvm_buf_dma;
286 void __iomem *bdq_primary_prod; 299 void __iomem *bdq_primary_prod;
287 void __iomem *bdq_secondary_prod; 300 void __iomem *bdq_secondary_prod;
288 u16 bdq_prod_idx; 301 u16 bdq_prod_idx;
@@ -337,6 +350,10 @@ struct qedi_ctx {
337 bool use_fast_sge; 350 bool use_fast_sge;
338 351
339 atomic_t num_offloads; 352 atomic_t num_offloads;
353#define SYSFS_FLAG_FW_SEL_BOOT 2
354#define IPV6_LEN 41
355#define IPV4_LEN 17
356 struct iscsi_boot_kset *boot_kset;
340}; 357};
341 358
342struct qedi_work { 359struct qedi_work {
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 19254bd739d9..93d54acd4a22 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -1411,7 +1411,7 @@ static void qedi_tmf_work(struct work_struct *work)
1411 1411
1412 list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC); 1412 list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
1413 if (!list_work) { 1413 if (!list_work) {
1414 QEDI_ERR(&qedi->dbg_ctx, "Memory alloction failed\n"); 1414 QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
1415 goto abort_ret; 1415 goto abort_ret;
1416 } 1416 }
1417 1417
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5f5a4ef2e529..2c3783684815 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -19,6 +19,7 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/if_vlan.h> 20#include <linux/if_vlan.h>
21#include <linux/cpu.h> 21#include <linux/cpu.h>
22#include <linux/iscsi_boot_sysfs.h>
22 23
23#include <scsi/scsi_cmnd.h> 24#include <scsi/scsi_cmnd.h>
24#include <scsi/scsi_device.h> 25#include <scsi/scsi_device.h>
@@ -1143,6 +1144,30 @@ exit_setup_int:
1143 return rc; 1144 return rc;
1144} 1145}
1145 1146
1147static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1148{
1149 if (qedi->iscsi_cfg)
1150 dma_free_coherent(&qedi->pdev->dev,
1151 sizeof(struct nvm_iscsi_cfg),
1152 qedi->iscsi_cfg, qedi->nvm_buf_dma);
1153}
1154
1155static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1156{
1157 qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev,
1158 sizeof(struct nvm_iscsi_cfg),
1159 &qedi->nvm_buf_dma, GFP_KERNEL);
1160 if (!qedi->iscsi_cfg) {
1161 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
1162 return -ENOMEM;
1163 }
1164 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1165 "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg,
1166 qedi->nvm_buf_dma);
1167
1168 return 0;
1169}
1170
1146static void qedi_free_bdq(struct qedi_ctx *qedi) 1171static void qedi_free_bdq(struct qedi_ctx *qedi)
1147{ 1172{
1148 int i; 1173 int i;
@@ -1183,6 +1208,7 @@ static void qedi_free_global_queues(struct qedi_ctx *qedi)
1183 kfree(gl[i]); 1208 kfree(gl[i]);
1184 } 1209 }
1185 qedi_free_bdq(qedi); 1210 qedi_free_bdq(qedi);
1211 qedi_free_nvm_iscsi_cfg(qedi);
1186} 1212}
1187 1213
1188static int qedi_alloc_bdq(struct qedi_ctx *qedi) 1214static int qedi_alloc_bdq(struct qedi_ctx *qedi)
@@ -1309,6 +1335,11 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
1309 if (rc) 1335 if (rc)
1310 goto mem_alloc_failure; 1336 goto mem_alloc_failure;
1311 1337
1338 /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
1339 rc = qedi_alloc_nvm_iscsi_cfg(qedi);
1340 if (rc)
1341 goto mem_alloc_failure;
1342
1312 /* Allocate a CQ and an associated PBL for each MSI-X 1343 /* Allocate a CQ and an associated PBL for each MSI-X
1313 * vector. 1344 * vector.
1314 */ 1345 */
@@ -1671,6 +1702,387 @@ void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
1671 qedi_ops->ll2->start(qedi->cdev, &params); 1702 qedi_ops->ll2->start(qedi->cdev, &params);
1672} 1703}
1673 1704
1705/**
1706 * qedi_get_nvram_block: - Scan through the iSCSI NVRAM block (while accounting
1707 * for gaps) for the matching absolute-pf-id of the QEDI device.
1708 */
1709static struct nvm_iscsi_block *
1710qedi_get_nvram_block(struct qedi_ctx *qedi)
1711{
1712 int i;
1713 u8 pf;
1714 u32 flags;
1715 struct nvm_iscsi_block *block;
1716
1717 pf = qedi->dev_info.common.abs_pf_id;
1718 block = &qedi->iscsi_cfg->block[0];
1719 for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
1720 flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
1721 NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
1722 if (flags & (NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY |
1723 NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED) &&
1724 (pf == (block->id & NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK)
1725 >> NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET))
1726 return block;
1727 }
1728 return NULL;
1729}
1730
1731static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
1732{
1733 struct qedi_ctx *qedi = data;
1734 struct nvm_iscsi_initiator *initiator;
1735 char *str = buf;
1736 int rc = 1;
1737 u32 ipv6_en, dhcp_en, ip_len;
1738 struct nvm_iscsi_block *block;
1739 char *fmt, *ip, *sub, *gw;
1740
1741 block = qedi_get_nvram_block(qedi);
1742 if (!block)
1743 return 0;
1744
1745 initiator = &block->initiator;
1746 ipv6_en = block->generic.ctrl_flags &
1747 NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
1748 dhcp_en = block->generic.ctrl_flags &
1749 NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED;
1750 /* Static IP assignments. */
1751 fmt = ipv6_en ? "%pI6\n" : "%pI4\n";
1752 ip = ipv6_en ? initiator->ipv6.addr.byte : initiator->ipv4.addr.byte;
1753 ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
1754 sub = ipv6_en ? initiator->ipv6.subnet_mask.byte :
1755 initiator->ipv4.subnet_mask.byte;
1756 gw = ipv6_en ? initiator->ipv6.gateway.byte :
1757 initiator->ipv4.gateway.byte;
1758 /* DHCP IP adjustments. */
1759 fmt = dhcp_en ? "%s\n" : fmt;
1760 if (dhcp_en) {
1761 ip = ipv6_en ? "0::0" : "0.0.0.0";
1762 sub = ip;
1763 gw = ip;
1764 ip_len = ipv6_en ? 5 : 8;
1765 }
1766
1767 switch (type) {
1768 case ISCSI_BOOT_ETH_IP_ADDR:
1769 rc = snprintf(str, ip_len, fmt, ip);
1770 break;
1771 case ISCSI_BOOT_ETH_SUBNET_MASK:
1772 rc = snprintf(str, ip_len, fmt, sub);
1773 break;
1774 case ISCSI_BOOT_ETH_GATEWAY:
1775 rc = snprintf(str, ip_len, fmt, gw);
1776 break;
1777 case ISCSI_BOOT_ETH_FLAGS:
1778 rc = snprintf(str, 3, "%hhd\n",
1779 SYSFS_FLAG_FW_SEL_BOOT);
1780 break;
1781 case ISCSI_BOOT_ETH_INDEX:
1782 rc = snprintf(str, 3, "0\n");
1783 break;
1784 case ISCSI_BOOT_ETH_MAC:
1785 rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN);
1786 break;
1787 case ISCSI_BOOT_ETH_VLAN:
1788 rc = snprintf(str, 12, "%d\n",
1789 GET_FIELD2(initiator->generic_cont0,
1790 NVM_ISCSI_CFG_INITIATOR_VLAN));
1791 break;
1792 case ISCSI_BOOT_ETH_ORIGIN:
1793 if (dhcp_en)
1794 rc = snprintf(str, 3, "3\n");
1795 break;
1796 default:
1797 rc = 0;
1798 break;
1799 }
1800
1801 return rc;
1802}
1803
1804static umode_t qedi_eth_get_attr_visibility(void *data, int type)
1805{
1806 int rc = 1;
1807
1808 switch (type) {
1809 case ISCSI_BOOT_ETH_FLAGS:
1810 case ISCSI_BOOT_ETH_MAC:
1811 case ISCSI_BOOT_ETH_INDEX:
1812 case ISCSI_BOOT_ETH_IP_ADDR:
1813 case ISCSI_BOOT_ETH_SUBNET_MASK:
1814 case ISCSI_BOOT_ETH_GATEWAY:
1815 case ISCSI_BOOT_ETH_ORIGIN:
1816 case ISCSI_BOOT_ETH_VLAN:
1817 rc = 0444;
1818 break;
1819 default:
1820 rc = 0;
1821 break;
1822 }
1823 return rc;
1824}
1825
1826static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
1827{
1828 struct qedi_ctx *qedi = data;
1829 struct nvm_iscsi_initiator *initiator;
1830 char *str = buf;
1831 int rc;
1832 struct nvm_iscsi_block *block;
1833
1834 block = qedi_get_nvram_block(qedi);
1835 if (!block)
1836 return 0;
1837
1838 initiator = &block->initiator;
1839
1840 switch (type) {
1841 case ISCSI_BOOT_INI_INITIATOR_NAME:
1842 rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
1843 initiator->initiator_name.byte);
1844 break;
1845 default:
1846 rc = 0;
1847 break;
1848 }
1849 return rc;
1850}
1851
1852static umode_t qedi_ini_get_attr_visibility(void *data, int type)
1853{
1854 int rc;
1855
1856 switch (type) {
1857 case ISCSI_BOOT_INI_INITIATOR_NAME:
1858 rc = 0444;
1859 break;
1860 default:
1861 rc = 0;
1862 break;
1863 }
1864 return rc;
1865}
1866
1867static ssize_t
1868qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
1869 char *buf, enum qedi_nvm_tgts idx)
1870{
1871 char *str = buf;
1872 int rc = 1;
1873 u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
1874 struct nvm_iscsi_block *block;
1875 char *chap_name, *chap_secret;
1876 char *mchap_name, *mchap_secret;
1877
1878 block = qedi_get_nvram_block(qedi);
1879 if (!block)
1880 goto exit_show_tgt_info;
1881
1882 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
1883 "Port:%d, tgt_idx:%d\n",
1884 GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID), idx);
1885
1886 ctrl_flags = block->target[idx].ctrl_flags &
1887 NVM_ISCSI_CFG_TARGET_ENABLED;
1888
1889 if (!ctrl_flags) {
1890 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
1891 "Target disabled\n");
1892 goto exit_show_tgt_info;
1893 }
1894
1895 ipv6_en = block->generic.ctrl_flags &
1896 NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
1897 ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
1898 chap_en = block->generic.ctrl_flags &
1899 NVM_ISCSI_CFG_GEN_CHAP_ENABLED;
1900 chap_name = chap_en ? block->initiator.chap_name.byte : NULL;
1901 chap_secret = chap_en ? block->initiator.chap_password.byte : NULL;
1902
1903 mchap_en = block->generic.ctrl_flags &
1904 NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED;
1905 mchap_name = mchap_en ? block->target[idx].chap_name.byte : NULL;
1906 mchap_secret = mchap_en ? block->target[idx].chap_password.byte : NULL;
1907
1908 switch (type) {
1909 case ISCSI_BOOT_TGT_NAME:
1910 rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
1911 block->target[idx].target_name.byte);
1912 break;
1913 case ISCSI_BOOT_TGT_IP_ADDR:
1914 if (ipv6_en)
1915 rc = snprintf(str, ip_len, "%pI6\n",
1916 block->target[idx].ipv6_addr.byte);
1917 else
1918 rc = snprintf(str, ip_len, "%pI4\n",
1919 block->target[idx].ipv4_addr.byte);
1920 break;
1921 case ISCSI_BOOT_TGT_PORT:
1922 rc = snprintf(str, 12, "%d\n",
1923 GET_FIELD2(block->target[idx].generic_cont0,
1924 NVM_ISCSI_CFG_TARGET_TCP_PORT));
1925 break;
1926 case ISCSI_BOOT_TGT_LUN:
1927 rc = snprintf(str, 22, "%.*d\n",
1928 block->target[idx].lun.value[1],
1929 block->target[idx].lun.value[0]);
1930 break;
1931 case ISCSI_BOOT_TGT_CHAP_NAME:
1932 rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
1933 chap_name);
1934 break;
1935 case ISCSI_BOOT_TGT_CHAP_SECRET:
1936 rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
1937 chap_secret);
1938 break;
1939 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
1940 rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
1941 mchap_name);
1942 break;
1943 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
1944 rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
1945 mchap_secret);
1946 break;
1947 case ISCSI_BOOT_TGT_FLAGS:
1948 rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
1949 break;
1950 case ISCSI_BOOT_TGT_NIC_ASSOC:
1951 rc = snprintf(str, 3, "0\n");
1952 break;
1953 default:
1954 rc = 0;
1955 break;
1956 }
1957
1958exit_show_tgt_info:
1959 return rc;
1960}
1961
1962static ssize_t qedi_show_boot_tgt_pri_info(void *data, int type, char *buf)
1963{
1964 struct qedi_ctx *qedi = data;
1965
1966 return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_PRI);
1967}
1968
1969static ssize_t qedi_show_boot_tgt_sec_info(void *data, int type, char *buf)
1970{
1971 struct qedi_ctx *qedi = data;
1972
1973 return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_SEC);
1974}
1975
1976static umode_t qedi_tgt_get_attr_visibility(void *data, int type)
1977{
1978 int rc;
1979
1980 switch (type) {
1981 case ISCSI_BOOT_TGT_NAME:
1982 case ISCSI_BOOT_TGT_IP_ADDR:
1983 case ISCSI_BOOT_TGT_PORT:
1984 case ISCSI_BOOT_TGT_LUN:
1985 case ISCSI_BOOT_TGT_CHAP_NAME:
1986 case ISCSI_BOOT_TGT_CHAP_SECRET:
1987 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
1988 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
1989 case ISCSI_BOOT_TGT_NIC_ASSOC:
1990 case ISCSI_BOOT_TGT_FLAGS:
1991 rc = 0444;
1992 break;
1993 default:
1994 rc = 0;
1995 break;
1996 }
1997 return rc;
1998}
1999
2000static void qedi_boot_release(void *data)
2001{
2002 struct qedi_ctx *qedi = data;
2003
2004 scsi_host_put(qedi->shost);
2005}
2006
2007static int qedi_get_boot_info(struct qedi_ctx *qedi)
2008{
2009 int ret = 1;
2010 u16 len;
2011
2012 len = sizeof(struct nvm_iscsi_cfg);
2013
2014 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2015 "Get NVM iSCSI CFG image\n");
2016 ret = qedi_ops->common->nvm_get_image(qedi->cdev,
2017 QED_NVM_IMAGE_ISCSI_CFG,
2018 (char *)qedi->iscsi_cfg, len);
2019 if (ret)
2020 QEDI_ERR(&qedi->dbg_ctx,
2021 "Could not get NVM image. ret = %d\n", ret);
2022
2023 return ret;
2024}
2025
2026static int qedi_setup_boot_info(struct qedi_ctx *qedi)
2027{
2028 struct iscsi_boot_kobj *boot_kobj;
2029
2030 if (qedi_get_boot_info(qedi))
2031 return -EPERM;
2032
2033 qedi->boot_kset = iscsi_boot_create_host_kset(qedi->shost->host_no);
2034 if (!qedi->boot_kset)
2035 goto kset_free;
2036
2037 if (!scsi_host_get(qedi->shost))
2038 goto kset_free;
2039
2040 boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 0, qedi,
2041 qedi_show_boot_tgt_pri_info,
2042 qedi_tgt_get_attr_visibility,
2043 qedi_boot_release);
2044 if (!boot_kobj)
2045 goto put_host;
2046
2047 if (!scsi_host_get(qedi->shost))
2048 goto kset_free;
2049
2050 boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 1, qedi,
2051 qedi_show_boot_tgt_sec_info,
2052 qedi_tgt_get_attr_visibility,
2053 qedi_boot_release);
2054 if (!boot_kobj)
2055 goto put_host;
2056
2057 if (!scsi_host_get(qedi->shost))
2058 goto kset_free;
2059
2060 boot_kobj = iscsi_boot_create_initiator(qedi->boot_kset, 0, qedi,
2061 qedi_show_boot_ini_info,
2062 qedi_ini_get_attr_visibility,
2063 qedi_boot_release);
2064 if (!boot_kobj)
2065 goto put_host;
2066
2067 if (!scsi_host_get(qedi->shost))
2068 goto kset_free;
2069
2070 boot_kobj = iscsi_boot_create_ethernet(qedi->boot_kset, 0, qedi,
2071 qedi_show_boot_eth_info,
2072 qedi_eth_get_attr_visibility,
2073 qedi_boot_release);
2074 if (!boot_kobj)
2075 goto put_host;
2076
2077 return 0;
2078
2079put_host:
2080 scsi_host_put(qedi->shost);
2081kset_free:
2082 iscsi_boot_destroy_kset(qedi->boot_kset);
2083 return -ENOMEM;
2084}
2085
1674static void __qedi_remove(struct pci_dev *pdev, int mode) 2086static void __qedi_remove(struct pci_dev *pdev, int mode)
1675{ 2087{
1676 struct qedi_ctx *qedi = pci_get_drvdata(pdev); 2088 struct qedi_ctx *qedi = pci_get_drvdata(pdev);
@@ -1724,6 +2136,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
1724 qedi->ll2_recv_thread = NULL; 2136 qedi->ll2_recv_thread = NULL;
1725 } 2137 }
1726 qedi_ll2_free_skbs(qedi); 2138 qedi_ll2_free_skbs(qedi);
2139
2140 if (qedi->boot_kset)
2141 iscsi_boot_destroy_kset(qedi->boot_kset);
1727 } 2142 }
1728} 2143}
1729 2144
@@ -1967,6 +2382,10 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
1967 /* F/w needs 1st task context memory entry for performance */ 2382 /* F/w needs 1st task context memory entry for performance */
1968 set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map); 2383 set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
1969 atomic_set(&qedi->num_offloads, 0); 2384 atomic_set(&qedi->num_offloads, 0);
2385
2386 if (qedi_setup_boot_info(qedi))
2387 QEDI_ERR(&qedi->dbg_ctx,
2388 "No iSCSI boot target configured\n");
1970 } 2389 }
1971 2390
1972 return 0; 2391 return 0;
diff --git a/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
new file mode 100644
index 000000000000..df39b69b366d
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
@@ -0,0 +1,210 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#ifndef NVM_ISCSI_CFG_H
11#define NVM_ISCSI_CFG_H
12
 13#define NUM_OF_ISCSI_TARGET_PER_PF 4 /* Defined as per the
 14 * iSCSI iBFT constraint
 15 */
16#define NUM_OF_ISCSI_PF_SUPPORTED 4 /* One PF per Port -
17 * assuming 4 port card
18 */
19
20#define NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN 256
21
22union nvm_iscsi_dhcp_vendor_id {
23 u32 value[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN / 4];
24 u8 byte[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN];
25};
26
27#define NVM_ISCSI_IPV4_ADDR_BYTE_LEN 4
28union nvm_iscsi_ipv4_addr {
29 u32 addr;
30 u8 byte[NVM_ISCSI_IPV4_ADDR_BYTE_LEN];
31};
32
33#define NVM_ISCSI_IPV6_ADDR_BYTE_LEN 16
34union nvm_iscsi_ipv6_addr {
35 u32 addr[4];
36 u8 byte[NVM_ISCSI_IPV6_ADDR_BYTE_LEN];
37};
38
39struct nvm_iscsi_initiator_ipv4 {
40 union nvm_iscsi_ipv4_addr addr; /* 0x0 */
41 union nvm_iscsi_ipv4_addr subnet_mask; /* 0x4 */
42 union nvm_iscsi_ipv4_addr gateway; /* 0x8 */
43 union nvm_iscsi_ipv4_addr primary_dns; /* 0xC */
44 union nvm_iscsi_ipv4_addr secondary_dns; /* 0x10 */
45 union nvm_iscsi_ipv4_addr dhcp_addr; /* 0x14 */
46
47 union nvm_iscsi_ipv4_addr isns_server; /* 0x18 */
48 union nvm_iscsi_ipv4_addr slp_server; /* 0x1C */
49 union nvm_iscsi_ipv4_addr primay_radius_server; /* 0x20 */
50 union nvm_iscsi_ipv4_addr secondary_radius_server; /* 0x24 */
51
52 union nvm_iscsi_ipv4_addr rsvd[4]; /* 0x28 */
53};
54
55struct nvm_iscsi_initiator_ipv6 {
56 union nvm_iscsi_ipv6_addr addr; /* 0x0 */
57 union nvm_iscsi_ipv6_addr subnet_mask; /* 0x10 */
58 union nvm_iscsi_ipv6_addr gateway; /* 0x20 */
59 union nvm_iscsi_ipv6_addr primary_dns; /* 0x30 */
60 union nvm_iscsi_ipv6_addr secondary_dns; /* 0x40 */
61 union nvm_iscsi_ipv6_addr dhcp_addr; /* 0x50 */
62
63 union nvm_iscsi_ipv6_addr isns_server; /* 0x60 */
64 union nvm_iscsi_ipv6_addr slp_server; /* 0x70 */
65 union nvm_iscsi_ipv6_addr primay_radius_server; /* 0x80 */
66 union nvm_iscsi_ipv6_addr secondary_radius_server; /* 0x90 */
67
68 union nvm_iscsi_ipv6_addr rsvd[3]; /* 0xA0 */
69
70 u32 config; /* 0xD0 */
71#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_MASK 0x000000FF
72#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_OFFSET 0
73
74 u32 rsvd_1[3];
75};
76
77#define NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN 256
78union nvm_iscsi_name {
79 u32 value[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN / 4];
80 u8 byte[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN];
81};
82
83#define NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN 256
84union nvm_iscsi_chap_name {
85 u32 value[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN / 4];
86 u8 byte[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN];
87};
88
 89#define NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN 16 /* CHAP with MD5 (RFC 1994,
 90 * RFC 1321) uses a 16-octet
 91 * digest
 92 */
92union nvm_iscsi_chap_password {
93 u32 value[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN / 4];
94 u8 byte[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN];
95};
96
97union nvm_iscsi_lun {
98 u8 byte[8];
99 u32 value[2];
100};
101
102struct nvm_iscsi_generic {
103 u32 ctrl_flags; /* 0x0 */
104#define NVM_ISCSI_CFG_GEN_CHAP_ENABLED BIT(0)
105#define NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED BIT(1)
106#define NVM_ISCSI_CFG_GEN_DHCP_ISCSI_CONFIG_ENABLED BIT(2)
107#define NVM_ISCSI_CFG_GEN_IPV6_ENABLED BIT(3)
108#define NVM_ISCSI_CFG_GEN_IPV4_FALLBACK_ENABLED BIT(4)
109#define NVM_ISCSI_CFG_GEN_ISNS_WORLD_LOGIN BIT(5)
110#define NVM_ISCSI_CFG_GEN_ISNS_SELECTIVE_LOGIN BIT(6)
111#define NVM_ISCSI_CFG_GEN_ADDR_REDIRECT_ENABLED BIT(7)
112#define NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED BIT(8)
113
114 u32 timeout; /* 0x4 */
115#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_MASK 0x0000FFFF
116#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_OFFSET 0
117#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_MASK 0xFFFF0000
118#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_OFFSET 16
119
120 union nvm_iscsi_dhcp_vendor_id dhcp_vendor_id; /* 0x8 */
121 u32 rsvd[62]; /* 0x108 */
122};
123
124struct nvm_iscsi_initiator {
125 struct nvm_iscsi_initiator_ipv4 ipv4; /* 0x0 */
126 struct nvm_iscsi_initiator_ipv6 ipv6; /* 0x38 */
127
128 union nvm_iscsi_name initiator_name; /* 0x118 */
129 union nvm_iscsi_chap_name chap_name; /* 0x218 */
130 union nvm_iscsi_chap_password chap_password; /* 0x318 */
131
132 u32 generic_cont0; /* 0x398 */
133#define NVM_ISCSI_CFG_INITIATOR_VLAN_MASK 0x0000FFFF
134#define NVM_ISCSI_CFG_INITIATOR_VLAN_OFFSET 0
135#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_MASK 0x00030000
136#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_OFFSET 16
137#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4 1
138#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_6 2
139#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4_AND_6 3
140
141 u32 ctrl_flags;
142#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_PRIORITY_V6 BIT(0)
143#define NVM_ISCSI_CFG_INITIATOR_VLAN_ENABLED BIT(1)
144
145 u32 rsvd[116]; /* 0x32C */
146};
147
148struct nvm_iscsi_target {
149 u32 ctrl_flags; /* 0x0 */
150#define NVM_ISCSI_CFG_TARGET_ENABLED BIT(0)
151#define NVM_ISCSI_CFG_BOOT_TIME_LOGIN_STATUS BIT(1)
152
153 u32 generic_cont0; /* 0x4 */
154#define NVM_ISCSI_CFG_TARGET_TCP_PORT_MASK 0x0000FFFF
155#define NVM_ISCSI_CFG_TARGET_TCP_PORT_OFFSET 0
156
157 u32 ip_ver;
158#define NVM_ISCSI_CFG_IPv4 4
159#define NVM_ISCSI_CFG_IPv6 6
160
161 u32 rsvd_1[7]; /* 0x24 */
162 union nvm_iscsi_ipv4_addr ipv4_addr; /* 0x28 */
163 union nvm_iscsi_ipv6_addr ipv6_addr; /* 0x2C */
164 union nvm_iscsi_lun lun; /* 0x3C */
165
166 union nvm_iscsi_name target_name; /* 0x44 */
167 union nvm_iscsi_chap_name chap_name; /* 0x144 */
168 union nvm_iscsi_chap_password chap_password; /* 0x244 */
169
170 u32 rsvd_2[107]; /* 0x2C4 */
171};
172
173struct nvm_iscsi_block {
174 u32 id; /* 0x0 */
175#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK 0x0000000F
176#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET 0
177#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK 0x00000FF0
178#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET 4
179#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY BIT(0)
180#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED BIT(1)
181
182 u32 rsvd_1[5]; /* 0x4 */
183
184 struct nvm_iscsi_generic generic; /* 0x18 */
185 struct nvm_iscsi_initiator initiator; /* 0x218 */
186 struct nvm_iscsi_target target[NUM_OF_ISCSI_TARGET_PER_PF];
187 /* 0x718 */
188
189 u32 rsvd_2[58]; /* 0x1718 */
190 /* total size - 0x1800 - 6K block */
191};
192
193struct nvm_iscsi_cfg {
194 u32 id; /* 0x0 */
195#define NVM_ISCSI_CFG_BLK_VERSION_MINOR_MASK 0x000000FF
196#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR_MASK 0x0000FF00
197#define NVM_ISCSI_CFG_BLK_SIGNATURE_MASK 0xFFFF0000
198#define NVM_ISCSI_CFG_BLK_SIGNATURE 0x49430000 /* IC - iSCSI
199 * Config
200 */
201
202#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR 0
203#define NVM_ISCSI_CFG_BLK_VERSION_MINOR 10
204#define NVM_ISCSI_CFG_BLK_VERSION ((NVM_ISCSI_CFG_BLK_VERSION_MAJOR << 8) | \
205 NVM_ISCSI_CFG_BLK_VERSION_MINOR)
206
207 struct nvm_iscsi_block block[NUM_OF_ISCSI_PF_SUPPORTED]; /* 0x4 */
208};
209
210#endif
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index c2dc836dc484..e101cd3043b9 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3727,7 +3727,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3727 h &= QLA_CMD_HANDLE_MASK; 3727 h &= QLA_CMD_HANDLE_MASK;
3728 3728
3729 if (h != QLA_TGT_NULL_HANDLE) { 3729 if (h != QLA_TGT_NULL_HANDLE) {
3730 if (unlikely(h > req->num_outstanding_cmds)) { 3730 if (unlikely(h >= req->num_outstanding_cmds)) {
3731 ql_dbg(ql_dbg_tgt, vha, 0xe052, 3731 ql_dbg(ql_dbg_tgt, vha, 0xe052,
3732 "qla_target(%d): Wrong handle %x received\n", 3732 "qla_target(%d): Wrong handle %x received\n",
3733 vha->vp_idx, handle); 3733 vha->vp_idx, handle);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 21225d62b0c1..1e82d4128a84 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -758,8 +758,11 @@ static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
758 if (hp->dxferp || hp->dxfer_len > 0) 758 if (hp->dxferp || hp->dxfer_len > 0)
759 return false; 759 return false;
760 return true; 760 return true;
761 case SG_DXFER_TO_DEV:
762 case SG_DXFER_FROM_DEV: 761 case SG_DXFER_FROM_DEV:
762 if (hp->dxfer_len < 0)
763 return false;
764 return true;
765 case SG_DXFER_TO_DEV:
763 case SG_DXFER_TO_FROM_DEV: 766 case SG_DXFER_TO_FROM_DEV:
764 if (!hp->dxferp || hp->dxfer_len == 0) 767 if (!hp->dxferp || hp->dxfer_len == 0)
765 return false; 768 return false;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8b93197daefe..9be211d68b15 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -837,6 +837,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
837 .eh_abort_handler = virtscsi_abort, 837 .eh_abort_handler = virtscsi_abort,
838 .eh_device_reset_handler = virtscsi_device_reset, 838 .eh_device_reset_handler = virtscsi_device_reset,
839 .eh_timed_out = virtscsi_eh_timed_out, 839 .eh_timed_out = virtscsi_eh_timed_out,
840 .slave_alloc = virtscsi_device_alloc,
840 841
841 .can_queue = 1024, 842 .can_queue = 1024,
842 .dma_boundary = UINT_MAX, 843 .dma_boundary = UINT_MAX,