Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ac.c | 4
-rw-r--r--  drivers/acpi/acpi_extlog.c | 18
-rw-r--r--  drivers/acpi/acpi_lpss.c | 1
-rw-r--r--  drivers/acpi/acpi_pad.c | 5
-rw-r--r--  drivers/acpi/apei/apei-base.c | 4
-rw-r--r--  drivers/acpi/apei/einj.c | 58
-rw-r--r--  drivers/acpi/apei/erst.c | 2
-rw-r--r--  drivers/acpi/apei/ghes.c | 39
-rw-r--r--  drivers/acpi/battery.c | 22
-rw-r--r--  drivers/acpi/bus.c | 10
-rw-r--r--  drivers/acpi/processor_idle.c | 15
-rw-r--r--  drivers/ata/ahci.c | 3
-rw-r--r--  drivers/ata/sata_sis.c | 4
-rw-r--r--  drivers/block/null_blk.c | 10
-rw-r--r--  drivers/block/z2ram.c | 7
-rw-r--r--  drivers/bluetooth/ath3k.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 1
-rw-r--r--  drivers/char/tpm/tpm_ppi.c | 15
-rw-r--r--  drivers/clk/clk-divider.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos-audss.c | 10
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 2
-rw-r--r--  drivers/clk/samsung/clk-exynos5250.c | 14
-rw-r--r--  drivers/clocksource/Kconfig | 4
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/arm_global_timer.c | 4
-rw-r--r--  drivers/clocksource/bcm_kona_timer.c | 6
-rw-r--r--  drivers/clocksource/cadence_ttc_timer.c | 26
-rw-r--r--  drivers/clocksource/clksrc-of.c | 4
-rw-r--r--  drivers/clocksource/cs5535-clockevt.c | 2
-rw-r--r--  drivers/clocksource/dw_apb_timer.c | 3
-rw-r--r--  drivers/clocksource/nomadik-mtu.c | 2
-rw-r--r--  drivers/clocksource/samsung_pwm_timer.c | 2
-rw-r--r--  drivers/clocksource/sh_cmt.c | 23
-rw-r--r--  drivers/clocksource/sh_mtu2.c | 4
-rw-r--r--  drivers/clocksource/sh_tmu.c | 4
-rw-r--r--  drivers/clocksource/sun4i_timer.c | 11
-rw-r--r--  drivers/clocksource/tegra20_timer.c | 2
-rw-r--r--  drivers/clocksource/time-armada-370-xp.c | 18
-rw-r--r--  drivers/clocksource/time-orion.c | 4
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 192
-rw-r--r--  drivers/clocksource/vt8500_timer.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 37
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 8
-rw-r--r--  drivers/cpuidle/cpuidle-calxeda.c | 2
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 4
-rw-r--r--  drivers/dma/ioat/dma.c | 11
-rw-r--r--  drivers/edac/amd64_edac.c | 135
-rw-r--r--  drivers/edac/amd76x_edac.c | 2
-rw-r--r--  drivers/edac/e752x_edac.c | 6
-rw-r--r--  drivers/edac/e7xxx_edac.c | 2
-rw-r--r--  drivers/edac/edac_device.c | 3
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 2
-rw-r--r--  drivers/edac/edac_stub.c | 19
-rw-r--r--  drivers/edac/i3000_edac.c | 2
-rw-r--r--  drivers/edac/i3200_edac.c | 2
-rw-r--r--  drivers/edac/i5000_edac.c | 2
-rw-r--r--  drivers/edac/i5100_edac.c | 2
-rw-r--r--  drivers/edac/i5400_edac.c | 2
-rw-r--r--  drivers/edac/i7300_edac.c | 2
-rw-r--r--  drivers/edac/i7core_edac.c | 2
-rw-r--r--  drivers/edac/i82443bxgx_edac.c | 2
-rw-r--r--  drivers/edac/i82860_edac.c | 2
-rw-r--r--  drivers/edac/i82875p_edac.c | 2
-rw-r--r--  drivers/edac/i82975x_edac.c | 2
-rw-r--r--  drivers/edac/mpc85xx_edac.c | 98
-rw-r--r--  drivers/edac/mpc85xx_edac.h | 7
-rw-r--r--  drivers/edac/r82600_edac.c | 2
-rw-r--r--  drivers/edac/sb_edac.c | 10
-rw-r--r--  drivers/edac/x38_edac.c | 2
-rw-r--r--  drivers/firmware/efi/Kconfig | 11
-rw-r--r--  drivers/firmware/efi/Makefile | 1
-rw-r--r--  drivers/firmware/efi/efi.c | 45
-rw-r--r--  drivers/firmware/efi/runtime-map.c | 181
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/subdev.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/fb.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/instmem.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dfp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 83
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 12
-rw-r--r--  drivers/hwmon/coretemp.c | 60
-rw-r--r--  drivers/hwmon/da9052-hwmon.c | 4
-rw-r--r--  drivers/hwmon/fam15h_power.c | 2
-rw-r--r--  drivers/hwmon/k10temp.c | 3
-rw-r--r--  drivers/hwmon/k8temp.c | 2
-rw-r--r--  drivers/hwmon/nct6775.c | 38
-rw-r--r--  drivers/hwmon/sis5595.c | 2
-rw-r--r--  drivers/hwmon/via686a.c | 2
-rw-r--r--  drivers/hwmon/vt8231.c | 2
-rw-r--r--  drivers/ide/buddha.c | 2
-rw-r--r--  drivers/idle/intel_idle.c | 25
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 78
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_netlink.c | 3
-rw-r--r--  drivers/input/input.c | 4
-rw-r--r--  drivers/input/touchscreen/zforce_ts.c | 21
-rw-r--r--  drivers/isdn/hisax/hfc_pci.c | 4
-rw-r--r--  drivers/isdn/hisax/telespci.c | 4
-rw-r--r--  drivers/leds/leds-lp5521.c | 12
-rw-r--r--  drivers/leds/leds-lp5523.c | 12
-rw-r--r--  drivers/macintosh/Kconfig | 2
-rw-r--r--  drivers/md/md.c | 18
-rw-r--r--  drivers/md/md.h | 3
-rw-r--r--  drivers/md/raid1.c | 3
-rw-r--r--  drivers/md/raid10.c | 12
-rw-r--r--  drivers/md/raid5.c | 7
-rw-r--r--  drivers/mfd/rtsx_pcr.c | 10
-rw-r--r--  drivers/mtd/maps/pxa2xx-flash.c | 2
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 45
-rw-r--r--  drivers/net/bonding/bond_main.c | 5
-rw-r--r--  drivers/net/ethernet/8390/hydra.c | 2
-rw-r--r--  drivers/net/ethernet/8390/zorro8390.c | 4
-rw-r--r--  drivers/net/ethernet/amd/a2065.c | 13
-rw-r--r--  drivers/net/ethernet/amd/ariadne.c | 13
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 51
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 28
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 94
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 35
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 15
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 254
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 28
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 21
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 59
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.c | 35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/l2t.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 103
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 73
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 33
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 40
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 7
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 33
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r--  drivers/net/ethernet/natsemi/macsonic.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 41
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 20
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 4
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 3
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 1
-rw-r--r--  drivers/net/hamradio/hdlcdrv.c | 2
-rw-r--r--  drivers/net/hamradio/yam.c | 1
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 20
-rw-r--r--  drivers/net/macvlan.c | 26
-rw-r--r--  drivers/net/phy/phy.c | 6
-rw-r--r--  drivers/net/team/team.c | 3
-rw-r--r--  drivers/net/tun.c | 3
-rw-r--r--  drivers/net/usb/Kconfig | 6
-rw-r--r--  drivers/net/usb/dm9601.c | 56
-rw-r--r--  drivers/net/usb/hso.c | 13
-rw-r--r--  drivers/net/usb/mcs7830.c | 19
-rw-r--r--  drivers/net/usb/usbnet.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 11
-rw-r--r--  drivers/net/vxlan.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_mac.c | 52
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 25
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 10
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 4
-rw-r--r--  drivers/net/xen-netback/common.h | 19
-rw-r--r--  drivers/net/xen-netback/interface.c | 11
-rw-r--r--  drivers/net/xen-netback/netback.c | 18
-rw-r--r--  drivers/of/Kconfig | 2
-rw-r--r--  drivers/of/address.c | 8
-rw-r--r--  drivers/of/fdt.c | 12
-rw-r--r--  drivers/of/irq.c | 5
-rw-r--r--  drivers/parport/parport_mfc3.c | 2
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 30
-rw-r--r--  drivers/pci/pci-acpi.c | 21
-rw-r--r--  drivers/pinctrl/pinctrl-baytrail.c | 1
-rw-r--r--  drivers/power/Kconfig | 1
-rw-r--r--  drivers/power/power_supply_core.c | 12
-rw-r--r--  drivers/rtc/rtc-cmos.c | 52
-rw-r--r--  drivers/s390/block/dasd.c | 2
-rw-r--r--  drivers/s390/char/sclp.h | 1
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 2
-rw-r--r--  drivers/s390/char/sclp_early.c | 125
-rw-r--r--  drivers/s390/char/tty3270.c | 9
-rw-r--r--  drivers/s390/cio/blacklist.c | 6
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 12
-rw-r--r--  drivers/s390/cio/chsc.c | 73
-rw-r--r--  drivers/s390/cio/chsc.h | 51
-rw-r--r--  drivers/s390/cio/css.c | 26
-rw-r--r--  drivers/s390/cio/css.h | 1
-rw-r--r--  drivers/s390/cio/device.c | 29
-rw-r--r--  drivers/s390/cio/qdio_main.c | 91
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 31
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 109
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.c | 20
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h | 18
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.c | 12
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c | 260
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.h | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c | 11
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c | 12
-rw-r--r--  drivers/scsi/a2091.c | 2
-rw-r--r--  drivers/scsi/a3000.c | 2
-rw-r--r--  drivers/scsi/a4000t.c | 2
-rw-r--r--  drivers/scsi/gvp11.c | 2
-rw-r--r--  drivers/scsi/zorro7xx.c | 2
-rw-r--r--  drivers/staging/bcm/Bcmnet.c | 3
-rw-r--r--  drivers/staging/netlogic/xlr_net.c | 3
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/os_intfs.c | 3
-rw-r--r--  drivers/thermal/intel_powerclamp.c | 6
-rw-r--r--  drivers/video/amifb.c | 2
-rw-r--r--  drivers/video/cirrusfb.c | 4
-rw-r--r--  drivers/video/macfb.c | 1
-rw-r--r--  drivers/video/valkyriefb.c | 1
-rw-r--r--  drivers/zorro/Makefile | 3
-rw-r--r--  drivers/zorro/names.c | 11
-rw-r--r--  drivers/zorro/proc.c | 10
-rw-r--r--  drivers/zorro/zorro-driver.c | 11
-rw-r--r--  drivers/zorro/zorro-sysfs.c | 22
-rw-r--r--  drivers/zorro/zorro.c | 27
-rw-r--r--  drivers/zorro/zorro.h | 5
264 files changed, 3234 insertions, 1217 deletions
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 8711e3797165..3c2e4aa529c4 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -207,7 +207,7 @@ static int acpi_ac_probe(struct platform_device *pdev)
 		goto end;
 
 	result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
-			ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac);
+			ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
 	if (result) {
 		power_supply_unregister(&ac->charger);
 		goto end;
@@ -255,7 +255,7 @@ static int acpi_ac_remove(struct platform_device *pdev)
 		return -EINVAL;
 
 	acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
-			ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler);
+			ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
 
 	ac = platform_get_drvdata(pdev);
 	if (ac->charger.dev)
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index a6869e110ce5..5d33c5415405 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -12,6 +12,7 @@
 #include <acpi/acpi_bus.h>
 #include <linux/cper.h>
 #include <linux/ratelimit.h>
+#include <linux/edac.h>
 #include <asm/cpu.h>
 #include <asm/mce.h>
 
@@ -43,6 +44,8 @@ struct extlog_l1_head {
 	u8 rev1[12];
 };
 
+static int old_edac_report_status;
+
 static u8 extlog_dsm_uuid[] = "663E35AF-CC10-41A4-88EA-5470AF055295";
 
 /* L1 table related physical address */
@@ -150,7 +153,7 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
 
 	rc = print_extlog_rcd(NULL, (struct acpi_generic_status *)elog_buf, cpu);
 
-	return NOTIFY_DONE;
+	return NOTIFY_STOP;
 }
 
 static int extlog_get_dsm(acpi_handle handle, int rev, int func, u64 *ret)
@@ -231,8 +234,12 @@ static int __init extlog_init(void)
 	u64 cap;
 	int rc;
 
-	rc = -ENODEV;
+	if (get_edac_report_status() == EDAC_REPORTING_FORCE) {
+		pr_warn("Not loading eMCA, error reporting force-enabled through EDAC.\n");
+		return -EPERM;
+	}
 
+	rc = -ENODEV;
 	rdmsrl(MSR_IA32_MCG_CAP, cap);
 	if (!(cap & MCG_ELOG_P))
 		return rc;
@@ -287,6 +294,12 @@ static int __init extlog_init(void)
 	if (elog_buf == NULL)
 		goto err_release_elog;
 
+	/*
+	 * eMCA event report method has higher priority than EDAC method,
+	 * unless EDAC event report method is mandatory.
+	 */
+	old_edac_report_status = get_edac_report_status();
+	set_edac_report_status(EDAC_REPORTING_DISABLED);
 	mce_register_decode_chain(&extlog_mce_dec);
 	/* enable OS to be involved to take over management from BIOS */
 	((struct extlog_l1_head *)extlog_l1_addr)->flags |= FLAG_OS_OPTIN;
@@ -308,6 +321,7 @@ err:
 
 static void __exit extlog_exit(void)
 {
+	set_edac_report_status(old_edac_report_status);
 	mce_unregister_decode_chain(&extlog_mce_dec);
 	((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN;
 	if (extlog_l1_addr)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index e60390597372..6745fe137b9e 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -162,7 +162,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
 	{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
 	{ "80860F41", (unsigned long)&byt_i2c_dev_desc },
 	{ "INT33B2", },
-	{ "INT33FC", },
 
 	{ "INT3430", (unsigned long)&lpt_dev_desc },
 	{ "INT3431", (unsigned long)&lpt_dev_desc },
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index fc6008fbce35..509452a62f96 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -193,10 +193,7 @@ static int power_saving_thread(void *data)
 			CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 		stop_critical_timings();
 
-		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		smp_mb();
-		if (!need_resched())
-			__mwait(power_saving_mwait_eax, 1);
+		mwait_idle_with_hints(power_saving_mwait_eax, 1);
 
 		start_critical_timings();
 		if (lapic_marked_unstable)
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 6d2c49b86b7f..e55584a072c6 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -41,6 +41,7 @@
 #include <linux/rculist.h>
 #include <linux/interrupt.h>
 #include <linux/debugfs.h>
+#include <asm/unaligned.h>
 
 #include "apei-internal.h"
 
@@ -567,8 +568,7 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
 	bit_offset = reg->bit_offset;
 	access_size_code = reg->access_width;
 	space_id = reg->space_id;
-	/* Handle possible alignment issues */
-	memcpy(paddr, &reg->address, sizeof(*paddr));
+	*paddr = get_unaligned(&reg->address);
 	if (!*paddr) {
 		pr_warning(FW_BUG APEI_PFX
 			   "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index fb57d03e698b..7dcc8a824aae 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -34,6 +34,7 @@
 #include <linux/delay.h>
 #include <linux/mm.h>
 #include <acpi/acpi.h>
+#include <asm/unaligned.h>
 
 #include "apei-internal.h"
 
@@ -216,7 +217,7 @@ static void check_vendor_extension(u64 paddr,
 static void *einj_get_parameter_address(void)
 {
 	int i;
-	u64 paddrv4 = 0, paddrv5 = 0;
+	u64 pa_v4 = 0, pa_v5 = 0;
 	struct acpi_whea_header *entry;
 
 	entry = EINJ_TAB_ENTRY(einj_tab);
@@ -225,30 +226,28 @@ static void *einj_get_parameter_address(void)
 		    entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
 		    entry->register_region.space_id ==
 		    ACPI_ADR_SPACE_SYSTEM_MEMORY)
-			memcpy(&paddrv4, &entry->register_region.address,
-			       sizeof(paddrv4));
+			pa_v4 = get_unaligned(&entry->register_region.address);
 		if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
 		    entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
 		    entry->register_region.space_id ==
 		    ACPI_ADR_SPACE_SYSTEM_MEMORY)
-			memcpy(&paddrv5, &entry->register_region.address,
-			       sizeof(paddrv5));
+			pa_v5 = get_unaligned(&entry->register_region.address);
 		entry++;
 	}
-	if (paddrv5) {
+	if (pa_v5) {
 		struct set_error_type_with_address *v5param;
 
-		v5param = acpi_os_map_memory(paddrv5, sizeof(*v5param));
+		v5param = acpi_os_map_memory(pa_v5, sizeof(*v5param));
 		if (v5param) {
 			acpi5 = 1;
-			check_vendor_extension(paddrv5, v5param);
+			check_vendor_extension(pa_v5, v5param);
 			return v5param;
 		}
 	}
-	if (param_extension && paddrv4) {
+	if (param_extension && pa_v4) {
 		struct einj_parameter *v4param;
 
-		v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param));
+		v4param = acpi_os_map_memory(pa_v4, sizeof(*v4param));
 		if (!v4param)
 			return NULL;
 		if (v4param->reserved1 || v4param->reserved2) {
@@ -416,7 +415,8 @@ out:
 	return rc;
 }
 
-static int __einj_error_inject(u32 type, u64 param1, u64 param2)
+static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
+			       u64 param3, u64 param4)
 {
 	struct apei_exec_context ctx;
 	u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
@@ -446,6 +446,12 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
 			break;
 		}
 		v5param->flags = vendor_flags;
+	} else if (flags) {
+		v5param->flags = flags;
+		v5param->memory_address = param1;
+		v5param->memory_address_range = param2;
+		v5param->apicid = param3;
+		v5param->pcie_sbdf = param4;
 	} else {
 		switch (type) {
 		case ACPI_EINJ_PROCESSOR_CORRECTABLE:
@@ -514,11 +520,17 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
 }
 
 /* Inject the specified hardware error */
-static int einj_error_inject(u32 type, u64 param1, u64 param2)
+static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
+			     u64 param3, u64 param4)
 {
 	int rc;
 	unsigned long pfn;
 
+	/* If user manually set "flags", make sure it is legal */
+	if (flags && (flags &
+		~(SETWA_FLAGS_APICID|SETWA_FLAGS_MEM|SETWA_FLAGS_PCIE_SBDF)))
+		return -EINVAL;
+
 	/*
 	 * We need extra sanity checks for memory errors.
 	 * Other types leap directly to injection.
@@ -532,7 +544,7 @@ static int einj_error_inject(u32 type, u64 param1, u64 param2)
 	if (type & ACPI5_VENDOR_BIT) {
 		if (vendor_flags != SETWA_FLAGS_MEM)
 			goto inject;
-	} else if (!(type & MEM_ERROR_MASK))
+	} else if (!(type & MEM_ERROR_MASK) && !(flags & SETWA_FLAGS_MEM))
 		goto inject;
 
 	/*
@@ -546,15 +558,18 @@ static int einj_error_inject(u32 type, u64 param1, u64 param2)
 
 inject:
 	mutex_lock(&einj_mutex);
-	rc = __einj_error_inject(type, param1, param2);
+	rc = __einj_error_inject(type, flags, param1, param2, param3, param4);
 	mutex_unlock(&einj_mutex);
 
 	return rc;
 }
 
 static u32 error_type;
+static u32 error_flags;
 static u64 error_param1;
 static u64 error_param2;
+static u64 error_param3;
+static u64 error_param4;
 static struct dentry *einj_debug_dir;
 
 static int available_error_type_show(struct seq_file *m, void *v)
@@ -648,7 +663,8 @@ static int error_inject_set(void *data, u64 val)
 	if (!error_type)
 		return -EINVAL;
 
-	return einj_error_inject(error_type, error_param1, error_param2);
+	return einj_error_inject(error_type, error_flags, error_param1, error_param2,
+		error_param3, error_param4);
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
@@ -729,6 +745,10 @@ static int __init einj_init(void)
 	rc = -ENOMEM;
 	einj_param = einj_get_parameter_address();
 	if ((param_extension || acpi5) && einj_param) {
+		fentry = debugfs_create_x32("flags", S_IRUSR | S_IWUSR,
+					    einj_debug_dir, &error_flags);
+		if (!fentry)
+			goto err_unmap;
 		fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
 					    einj_debug_dir, &error_param1);
 		if (!fentry)
@@ -737,6 +757,14 @@ static int __init einj_init(void)
 					    einj_debug_dir, &error_param2);
 		if (!fentry)
 			goto err_unmap;
+		fentry = debugfs_create_x64("param3", S_IRUSR | S_IWUSR,
+					    einj_debug_dir, &error_param3);
+		if (!fentry)
+			goto err_unmap;
+		fentry = debugfs_create_x64("param4", S_IRUSR | S_IWUSR,
+					    einj_debug_dir, &error_param4);
+		if (!fentry)
+			goto err_unmap;
 
 		fentry = debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
 					    einj_debug_dir, &notrigger);
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index cb1d557fc22c..ed65e9c4b5b0 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -611,7 +611,7 @@ static void __erst_record_id_cache_compact(void)
 		if (entries[i] == APEI_ERST_INVALID_RECORD_ID)
 			continue;
 		if (wpos != i)
-			memcpy(&entries[wpos], &entries[i], sizeof(entries[i]));
+			entries[wpos] = entries[i];
 		wpos++;
 	}
 	erst_record_id_cache.len = wpos;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index a30bc313787b..46766ef7ef5d 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -413,27 +413,31 @@ static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
 {
 #ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
 	unsigned long pfn;
+	int flags = -1;
 	int sec_sev = ghes_severity(gdata->error_severity);
 	struct cper_sec_mem_err *mem_err;
 	mem_err = (struct cper_sec_mem_err *)(gdata + 1);
 
-	if (sec_sev == GHES_SEV_CORRECTED &&
-	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED) &&
-	    (mem_err->validation_bits & CPER_MEM_VALID_PA)) {
-		pfn = mem_err->physical_addr >> PAGE_SHIFT;
-		if (pfn_valid(pfn))
-			memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
-		else if (printk_ratelimit())
-			pr_warn(FW_WARN GHES_PFX
-			"Invalid address in generic error data: %#llx\n",
-			mem_err->physical_addr);
-	}
-	if (sev == GHES_SEV_RECOVERABLE &&
-	    sec_sev == GHES_SEV_RECOVERABLE &&
-	    mem_err->validation_bits & CPER_MEM_VALID_PA) {
-		pfn = mem_err->physical_addr >> PAGE_SHIFT;
-		memory_failure_queue(pfn, 0, 0);
+	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
+		return;
+
+	pfn = mem_err->physical_addr >> PAGE_SHIFT;
+	if (!pfn_valid(pfn)) {
+		pr_warn_ratelimited(FW_WARN GHES_PFX
+		"Invalid address in generic error data: %#llx\n",
+		mem_err->physical_addr);
+		return;
 	}
+
+	/* iff following two events can be handled properly by now */
+	if (sec_sev == GHES_SEV_CORRECTED &&
+	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
+		flags = MF_SOFT_OFFLINE;
+	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
+		flags = 0;
+
+	if (flags != -1)
+		memory_failure_queue(pfn, 0, flags);
 #endif
 }
 
@@ -453,8 +457,7 @@ static void ghes_do_proc(struct ghes *ghes,
 			ghes_edac_report_mem_error(ghes, sev, mem_err);
 
 #ifdef CONFIG_X86_MCE
-			apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
-						  mem_err);
+			apei_mce_report_mem_error(sev, mem_err);
 #endif
 			ghes_handle_memory_failure(gdata, sev);
 		}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index fbf1aceda8b8..5876a49dfd38 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -62,6 +62,7 @@ MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
 MODULE_DESCRIPTION("ACPI Battery Driver");
 MODULE_LICENSE("GPL");
 
+static int battery_bix_broken_package;
 static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -416,7 +417,12 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
 		ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name));
 		return -ENODEV;
 	}
-	if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
+
+	if (battery_bix_broken_package)
+		result = extract_package(battery, buffer.pointer,
+				extended_info_offsets + 1,
+				ARRAY_SIZE(extended_info_offsets) - 1);
+	else if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags))
 		result = extract_package(battery, buffer.pointer,
 				extended_info_offsets,
 				ARRAY_SIZE(extended_info_offsets));
@@ -754,6 +760,17 @@ static int battery_notify(struct notifier_block *nb,
 	return 0;
 }
 
+static struct dmi_system_id bat_dmi_table[] = {
+	{
+		.ident = "NEC LZ750/LS",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
+		},
+	},
+	{},
+};
+
 static int acpi_battery_add(struct acpi_device *device)
 {
 	int result = 0;
@@ -846,6 +863,9 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
 {
 	if (acpi_disabled)
 		return;
+
+	if (dmi_check_system(bat_dmi_table))
+		battery_bix_broken_package = 1;
 	acpi_bus_register_driver(&acpi_battery_driver);
 }
 
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index bba9b72e25f8..0710004055c8 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
 }
 EXPORT_SYMBOL(acpi_bus_get_private_data);
 
+void acpi_bus_no_hotplug(acpi_handle handle)
+{
+	struct acpi_device *adev = NULL;
+
+	acpi_bus_get_device(handle, &adev);
+	if (adev)
+		adev->flags.no_hotplug = true;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
+
 static void acpi_print_osc_error(acpi_handle handle,
 	struct acpi_osc_context *context, char *error)
 {
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 644516d9bde6..f90c56c8379e 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -727,11 +727,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	lapic_timer_state_broadcast(pr, cx, 1);
 	acpi_idle_do_entry(cx);
 
@@ -785,11 +780,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	if (unlikely(!pr))
 		return -EINVAL;
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
@@ -841,11 +831,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		}
 	}
 
-	if (cx->entry_method == ACPI_CSTATE_FFH) {
-		if (current_set_polling_and_test())
-			return -EINVAL;
-	}
-
 	acpi_unlazy_tlb(smp_processor_id());
 
 	/* Tell the scheduler that we are going deep-idle: */
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index c0ed4f273cf2..e3a92a6da39a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	  .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
 	  .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
+			 PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
+	  .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
 	  .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index fe3ca0989b14..1ad2f62d34b9 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -83,6 +83,10 @@ static struct pci_driver sis_pci_driver = {
 	.id_table = sis_pci_tbl,
 	.probe = sis_init_one,
 	.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend = ata_pci_device_suspend,
+	.resume = ata_pci_device_resume,
+#endif
 };
 
 static struct scsi_host_template sis_sht = {
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a2e69d26266d..83a598ebb65a 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -425,10 +425,7 @@ static void null_del_dev(struct nullb *nullb)
 	list_del_init(&nullb->list);
 
 	del_gendisk(nullb->disk);
-	if (queue_mode == NULL_Q_MQ)
-		blk_mq_free_queue(nullb->q);
-	else
-		blk_cleanup_queue(nullb->q);
+	blk_cleanup_queue(nullb->q);
 	put_disk(nullb->disk);
 	kfree(nullb);
 }
@@ -578,10 +575,7 @@ static int null_add_dev(void)
 	disk = nullb->disk = alloc_disk_node(1, home_node);
 	if (!disk) {
 queue_fail:
-		if (queue_mode == NULL_Q_MQ)
-			blk_mq_free_queue(nullb->q);
-		else
-			blk_cleanup_queue(nullb->q);
+		blk_cleanup_queue(nullb->q);
 		cleanup_queues(nullb);
 err:
 		kfree(nullb);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 5a95baf4b104..27de5046708a 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -43,9 +43,6 @@
 #include <linux/zorro.h>
 
 
-extern int m68k_realnum_memory;
-extern struct mem_info m68k_memory[NUM_MEMINFO];
-
 #define Z2MINOR_COMBINED (0)
 #define Z2MINOR_Z2ONLY (1)
 #define Z2MINOR_CHIPONLY (2)
@@ -116,8 +113,8 @@ get_z2ram( void )
 	    if ( test_bit( i, zorro_unused_z2ram ) )
 	    {
 		z2_count++;
-		z2ram_map[ z2ram_size++ ] =
-		    ZTWO_VADDR( Z2RAM_START ) + ( i << Z2RAM_CHUNKSHIFT );
+		z2ram_map[z2ram_size++] = (unsigned long)ZTWO_VADDR(Z2RAM_START) +
+					  (i << Z2RAM_CHUNKSHIFT);
 		clear_bit( i, zorro_unused_z2ram );
 	    }
 	}
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 6bfc1bb318f6..dceb85f8d9a8 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
 	{ USB_DEVICE(0x0CF3, 0xE004) },
 	{ USB_DEVICE(0x0CF3, 0xE005) },
 	{ USB_DEVICE(0x0930, 0x0219) },
+	{ USB_DEVICE(0x0930, 0x0220) },
 	{ USB_DEVICE(0x0489, 0xe057) },
 	{ USB_DEVICE(0x13d3, 0x3393) },
 	{ USB_DEVICE(0x0489, 0xe04e) },
@@ -129,6 +130,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c0ff34f2d2df..3980fd18f6ea 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -154,6 +154,7 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 8e562dc65601..e1f3337a0cf9 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -27,15 +27,18 @@ static char *tpm_device_name = "TPM";
 static acpi_status ppi_callback(acpi_handle handle, u32 level, void *context,
 				void **return_value)
 {
-	acpi_status status;
+	acpi_status status = AE_OK;
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
-	if (strstr(buffer.pointer, context) != NULL) {
-		*return_value = handle;
+
+	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer))) {
+		if (strstr(buffer.pointer, context) != NULL) {
+			*return_value = handle;
+			status = AE_CTRL_TERMINATE;
+		}
 		kfree(buffer.pointer);
-		return AE_CTRL_TERMINATE;
 	}
-	return AE_OK;
+
+	return status;
 }
 
 static inline void ppi_assign_params(union acpi_object params[4],
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 8d3009e44fba..5543b7df8e16 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
 	return 0;
 }
 
-static unsigned int _get_val(struct clk_divider *divider, u8 div)
+static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
 {
 	if (divider->flags & CLK_DIVIDER_ONE_BASED)
 		return div;
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 39b40aaede2b..68e515d093d8 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -26,17 +26,17 @@ static struct clk_onecell_data clk_data;
 #define ASS_CLK_DIV 0x4
 #define ASS_CLK_GATE 0x8
 
+/* list of all parent clock list */
+static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
+static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
+
+#ifdef CONFIG_PM_SLEEP
 static unsigned long reg_save[][2] = {
 	{ASS_CLK_SRC, 0},
 	{ASS_CLK_DIV, 0},
 	{ASS_CLK_GATE, 0},
 };
 
-/* list of all parent clock list */
-static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
-static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
-
-#ifdef CONFIG_PM_SLEEP
 static int exynos_audss_clk_suspend(void)
 {
 	int i;
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index ad5ff50c5f28..1a7c1b929c69 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -39,7 +39,7 @@
 #define SRC_TOP1 0xc214
 #define SRC_CAM 0xc220
 #define SRC_TV 0xc224
-#define SRC_MFC 0xcc28
+#define SRC_MFC 0xc228
 #define SRC_G3D 0xc22c
 #define E4210_SRC_IMAGE 0xc230
 #define SRC_LCD0 0xc234
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index adf32343c9f9..e52359cf9b6f 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -25,6 +25,7 @@
 #define MPLL_LOCK 0x4000
 #define MPLL_CON0 0x4100
 #define SRC_CORE1 0x4204
+#define GATE_IP_ACP 0x8800
 #define CPLL_LOCK 0x10020
 #define EPLL_LOCK 0x10030
 #define VPLL_LOCK 0x10040
@@ -75,7 +76,6 @@
 #define SRC_CDREX 0x20200
 #define PLL_DIV2_SEL 0x20a24
 #define GATE_IP_DISP1 0x10928
-#define GATE_IP_ACP 0x10000
 
 /* list of PLLs to be registered */
 enum exynos5250_plls {
@@ -120,7 +120,8 @@ enum exynos5250_clks {
 	spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
 	hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
 	tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
-	wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d,
+	wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0,
+	smmu_mdma0,
 
 	/* mux clocks */
 	mout_hdmi = 1024,
@@ -354,8 +355,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
 	GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
 	GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
 	GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
-	GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
-	GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
+	GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0),
+	GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0),
 	GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
 	GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
 	GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
@@ -406,7 +407,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
 	GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
 	GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
 	GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
-	GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
+	GATE(sysreg, "sysreg", "aclk66",
+			GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
 	GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
 	GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
 	GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
@@ -492,6 +494,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
 	GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
 	GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
 	GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
+	GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0),
+	GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0),
 };
 
 static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 634c4d6dd45a..cd6950fd8caf 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -37,6 +37,10 @@ config SUN4I_TIMER
 	select CLKSRC_MMIO
 	bool
 
+config SUN5I_HSTIMER
+	select CLKSRC_MMIO
+	bool
+
 config VT8500_TIMER
 	bool
 
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 33621efb9148..358358d87b6d 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
 obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
 obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
 obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
+obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
 obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o
 obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
 obj-$(CONFIG_ARCH_NSPIRE) += zevio-timer.o
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index c639b1a9e996..0fc31d029e52 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -202,7 +202,7 @@ static struct clocksource gt_clocksource = {
 };
 
 #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
-static u32 notrace gt_sched_clock_read(void)
+static u64 notrace gt_sched_clock_read(void)
 {
 	return gt_counter_read();
 }
@@ -217,7 +217,7 @@ static void __init gt_clocksource_init(void)
 	writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
 
 #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
-	setup_sched_clock(gt_sched_clock_read, 32, gt_clk_rate);
+	sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
 #endif
 	clocksource_register_hz(&gt_clocksource, gt_clk_rate);
 }
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 0d7d8c3ed6b2..5176e761166b 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -98,12 +98,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
 	return;
 }
 
-static const struct of_device_id bcm_timer_ids[] __initconst = {
-	{.compatible = "brcm,kona-timer"},
-	{.compatible = "bcm,kona-timer"}, /* deprecated name */
-	{},
-};
-
 static void __init kona_timers_init(struct device_node *node)
 {
 	u32 freq;
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index b2bb3a4bc205..63f176de0d02 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -67,11 +67,13 @@
67 * struct ttc_timer - This definition defines local timer structure 67 * struct ttc_timer - This definition defines local timer structure
68 * 68 *
69 * @base_addr: Base address of timer 69 * @base_addr: Base address of timer
70 * @freq: Timer input clock frequency
70 * @clk: Associated clock source 71 * @clk: Associated clock source
71 * @clk_rate_change_nb Notifier block for clock rate changes 72 * @clk_rate_change_nb Notifier block for clock rate changes
72 */ 73 */
73struct ttc_timer { 74struct ttc_timer {
74 void __iomem *base_addr; 75 void __iomem *base_addr;
76 unsigned long freq;
75 struct clk *clk; 77 struct clk *clk;
76 struct notifier_block clk_rate_change_nb; 78 struct notifier_block clk_rate_change_nb;
77}; 79};
@@ -158,7 +160,7 @@ static cycle_t __ttc_clocksource_read(struct clocksource *cs)
158 TTC_COUNT_VAL_OFFSET); 160 TTC_COUNT_VAL_OFFSET);
159} 161}
160 162
161static u32 notrace ttc_sched_clock_read(void) 163static u64 notrace ttc_sched_clock_read(void)
162{ 164{
163 return __raw_readl(ttc_sched_clock_val_reg); 165 return __raw_readl(ttc_sched_clock_val_reg);
164} 166}
@@ -196,9 +198,8 @@ static void ttc_set_mode(enum clock_event_mode mode,
196 198
197 switch (mode) { 199 switch (mode) {
198 case CLOCK_EVT_MODE_PERIODIC: 200 case CLOCK_EVT_MODE_PERIODIC:
199 ttc_set_interval(timer, 201 ttc_set_interval(timer, DIV_ROUND_CLOSEST(ttce->ttc.freq,
200 DIV_ROUND_CLOSEST(clk_get_rate(ttce->ttc.clk), 202 PRESCALE * HZ));
201 PRESCALE * HZ));
202 break; 203 break;
203 case CLOCK_EVT_MODE_ONESHOT: 204 case CLOCK_EVT_MODE_ONESHOT:
204 case CLOCK_EVT_MODE_UNUSED: 205 case CLOCK_EVT_MODE_UNUSED:
@@ -273,6 +274,8 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
273 return; 274 return;
274 } 275 }
275 276
277 ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
278
276 ttccs->ttc.clk_rate_change_nb.notifier_call = 279 ttccs->ttc.clk_rate_change_nb.notifier_call =
277 ttc_rate_change_clocksource_cb; 280 ttc_rate_change_clocksource_cb;
278 ttccs->ttc.clk_rate_change_nb.next = NULL; 281 ttccs->ttc.clk_rate_change_nb.next = NULL;
@@ -298,16 +301,14 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
298 __raw_writel(CNT_CNTRL_RESET, 301 __raw_writel(CNT_CNTRL_RESET,
299 ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); 302 ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
300 303
301 err = clocksource_register_hz(&ttccs->cs, 304 err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
302 clk_get_rate(ttccs->ttc.clk) / PRESCALE);
303 if (WARN_ON(err)) { 305 if (WARN_ON(err)) {
304 kfree(ttccs); 306 kfree(ttccs);
305 return; 307 return;
306 } 308 }
307 309
308 ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET; 310 ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
309 setup_sched_clock(ttc_sched_clock_read, 16, 311 sched_clock_register(ttc_sched_clock_read, 16, ttccs->ttc.freq / PRESCALE);
310 clk_get_rate(ttccs->ttc.clk) / PRESCALE);
311} 312}
312 313
313static int ttc_rate_change_clockevent_cb(struct notifier_block *nb, 314static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
@@ -334,6 +335,9 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
334 ndata->new_rate / PRESCALE); 335 ndata->new_rate / PRESCALE);
335 local_irq_restore(flags); 336 local_irq_restore(flags);
336 337
338 /* update cached frequency */
339 ttc->freq = ndata->new_rate;
340
337 /* fall through */ 341 /* fall through */
338 } 342 }
339 case PRE_RATE_CHANGE: 343 case PRE_RATE_CHANGE:
@@ -367,6 +371,7 @@ static void __init ttc_setup_clockevent(struct clk *clk,
367 if (clk_notifier_register(ttcce->ttc.clk, 371 if (clk_notifier_register(ttcce->ttc.clk,
368 &ttcce->ttc.clk_rate_change_nb)) 372 &ttcce->ttc.clk_rate_change_nb))
369 pr_warn("Unable to register clock notifier.\n"); 373 pr_warn("Unable to register clock notifier.\n");
374 ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
370 375
371 ttcce->ttc.base_addr = base; 376 ttcce->ttc.base_addr = base;
372 ttcce->ce.name = "ttc_clockevent"; 377 ttcce->ce.name = "ttc_clockevent";
@@ -388,15 +393,14 @@ static void __init ttc_setup_clockevent(struct clk *clk,
388 __raw_writel(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET); 393 __raw_writel(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);
389 394
390 err = request_irq(irq, ttc_clock_event_interrupt, 395 err = request_irq(irq, ttc_clock_event_interrupt,
391 IRQF_DISABLED | IRQF_TIMER, 396 IRQF_TIMER, ttcce->ce.name, ttcce);
392 ttcce->ce.name, ttcce);
393 if (WARN_ON(err)) { 397 if (WARN_ON(err)) {
394 kfree(ttcce); 398 kfree(ttcce);
395 return; 399 return;
396 } 400 }
397 401
398 clockevents_config_and_register(&ttcce->ce, 402 clockevents_config_and_register(&ttcce->ce,
399 clk_get_rate(ttcce->ttc.clk) / PRESCALE, 1, 0xfffe); 403 ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
400} 404}
401 405
402/** 406/**
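The cadence_ttc hunks above cache the input clock rate in ttc->freq (refreshing it from the rate-change notifier) so the hot paths stop calling clk_get_rate(); the periodic reload value is then just that cached rate divided, with rounding, by PRESCALE * HZ. A minimal standalone sketch of the arithmetic, using a simplified DIV_ROUND_CLOSEST valid for positive operands only and made-up PRESCALE, HZ and clock-rate values (the driver's real constants may differ):

#include <stdio.h>

/* simplified version of the kernel macro, positive values only */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

#define PRESCALE 32      /* placeholder prescaler value */
#define HZ       100     /* placeholder tick rate */

int main(void)
{
        unsigned long freq = 133000000UL;       /* example cached input clock */
        unsigned long interval = DIV_ROUND_CLOSEST(freq, PRESCALE * HZ);

        /* 133000000 / (32 * 100) = 41562.5, which rounds to 41563 */
        printf("reload value per tick: %lu\n", interval);
        return 0;
}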
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index b9ddd9e3a2f5..ae2e4278c42a 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -28,6 +28,7 @@ void __init clocksource_of_init(void)
28 struct device_node *np; 28 struct device_node *np;
29 const struct of_device_id *match; 29 const struct of_device_id *match;
30 clocksource_of_init_fn init_func; 30 clocksource_of_init_fn init_func;
31 unsigned clocksources = 0;
31 32
32 for_each_matching_node_and_match(np, __clksrc_of_table, &match) { 33 for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
33 if (!of_device_is_available(np)) 34 if (!of_device_is_available(np))
@@ -35,5 +36,8 @@ void __init clocksource_of_init(void)
35 36
36 init_func = match->data; 37 init_func = match->data;
37 init_func(np); 38 init_func(np);
39 clocksources++;
38 } 40 }
41 if (!clocksources)
42 pr_crit("%s: no matching clocksources found\n", __func__);
39} 43}
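The clksrc-of change simply counts how many matching init functions actually ran and complains when none did. A small standalone sketch of the same count-and-warn pattern, with an invented function table standing in for the OF match table:

#include <stdio.h>

typedef void (*clocksource_init_fn)(void);

static void dummy_timer_init(void) { puts("dummy timer initialised"); }

static clocksource_init_fn init_table[] = {
        dummy_timer_init,
        NULL,   /* an empty slot stands in for a node that is not available */
};

int main(void)
{
        unsigned clocksources = 0;
        size_t i;

        for (i = 0; i < sizeof(init_table) / sizeof(init_table[0]); i++) {
                if (!init_table[i])
                        continue;       /* like !of_device_is_available(np) */
                init_table[i]();
                clocksources++;
        }

        if (!clocksources)
                fprintf(stderr, "%s: no matching clocksources found\n", __func__);

        return 0;
}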
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index ea210482dd20..db2105290898 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -131,7 +131,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
131 131
132static struct irqaction mfgptirq = { 132static struct irqaction mfgptirq = {
133 .handler = mfgpt_tick, 133 .handler = mfgpt_tick,
134 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED, 134 .flags = IRQF_NOBALANCING | IRQF_TIMER | IRQF_SHARED,
135 .name = DRV_NAME, 135 .name = DRV_NAME,
136}; 136};
137 137
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index e54ca1062d8e..f3656a6b0382 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -243,8 +243,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
243 dw_ced->irqaction.dev_id = &dw_ced->ced; 243 dw_ced->irqaction.dev_id = &dw_ced->ced;
244 dw_ced->irqaction.irq = irq; 244 dw_ced->irqaction.irq = irq;
245 dw_ced->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | 245 dw_ced->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL |
246 IRQF_NOBALANCING | 246 IRQF_NOBALANCING;
247 IRQF_DISABLED;
248 247
249 dw_ced->eoi = apbt_eoi; 248 dw_ced->eoi = apbt_eoi;
250 err = setup_irq(irq, &dw_ced->irqaction); 249 err = setup_irq(irq, &dw_ced->irqaction);
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index ed7b73b508e0..152a3f3875ee 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -187,7 +187,7 @@ static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
187 187
188static struct irqaction nmdk_timer_irq = { 188static struct irqaction nmdk_timer_irq = {
189 .name = "Nomadik Timer Tick", 189 .name = "Nomadik Timer Tick",
190 .flags = IRQF_DISABLED | IRQF_TIMER, 190 .flags = IRQF_TIMER,
191 .handler = nmdk_timer_interrupt, 191 .handler = nmdk_timer_interrupt,
192 .dev_id = &nmdk_clkevt, 192 .dev_id = &nmdk_clkevt,
193}; 193};
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 85082e8d3052..5645cfc90c41 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -264,7 +264,7 @@ static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
264 264
265static struct irqaction samsung_clock_event_irq = { 265static struct irqaction samsung_clock_event_irq = {
266 .name = "samsung_time_irq", 266 .name = "samsung_time_irq",
267 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, 267 .flags = IRQF_TIMER | IRQF_IRQPOLL,
268 .handler = samsung_clock_event_isr, 268 .handler = samsung_clock_event_isr,
269 .dev_id = &time_event_device, 269 .dev_id = &time_event_device,
270}; 270};
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 0965e9848b3d..0b1836a6c539 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -634,12 +634,18 @@ static int sh_cmt_clock_event_next(unsigned long delta,
634 634
635static void sh_cmt_clock_event_suspend(struct clock_event_device *ced) 635static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
636{ 636{
637 pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev); 637 struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
638
639 pm_genpd_syscore_poweroff(&p->pdev->dev);
640 clk_unprepare(p->clk);
638} 641}
639 642
640static void sh_cmt_clock_event_resume(struct clock_event_device *ced) 643static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
641{ 644{
642 pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev); 645 struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
646
647 clk_prepare(p->clk);
648 pm_genpd_syscore_poweron(&p->pdev->dev);
643} 649}
644 650
645static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, 651static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
@@ -726,8 +732,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
726 p->irqaction.name = dev_name(&p->pdev->dev); 732 p->irqaction.name = dev_name(&p->pdev->dev);
727 p->irqaction.handler = sh_cmt_interrupt; 733 p->irqaction.handler = sh_cmt_interrupt;
728 p->irqaction.dev_id = p; 734 p->irqaction.dev_id = p;
729 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ 735 p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
730 IRQF_IRQPOLL | IRQF_NOBALANCING;
731 736
732 /* get hold of clock */ 737 /* get hold of clock */
733 p->clk = clk_get(&p->pdev->dev, "cmt_fck"); 738 p->clk = clk_get(&p->pdev->dev, "cmt_fck");
@@ -737,6 +742,10 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
737 goto err2; 742 goto err2;
738 } 743 }
739 744
745 ret = clk_prepare(p->clk);
746 if (ret < 0)
747 goto err3;
748
740 if (res2 && (resource_size(res2) == 4)) { 749 if (res2 && (resource_size(res2) == 4)) {
741 /* assume both CMSTR and CMCSR to be 32-bit */ 750 /* assume both CMSTR and CMCSR to be 32-bit */
742 p->read_control = sh_cmt_read32; 751 p->read_control = sh_cmt_read32;
@@ -773,19 +782,21 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
773 cfg->clocksource_rating); 782 cfg->clocksource_rating);
774 if (ret) { 783 if (ret) {
775 dev_err(&p->pdev->dev, "registration failed\n"); 784 dev_err(&p->pdev->dev, "registration failed\n");
776 goto err3; 785 goto err4;
777 } 786 }
778 p->cs_enabled = false; 787 p->cs_enabled = false;
779 788
780 ret = setup_irq(irq, &p->irqaction); 789 ret = setup_irq(irq, &p->irqaction);
781 if (ret) { 790 if (ret) {
782 dev_err(&p->pdev->dev, "failed to request irq %d\n", irq); 791 dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
783 goto err3; 792 goto err4;
784 } 793 }
785 794
786 platform_set_drvdata(pdev, p); 795 platform_set_drvdata(pdev, p);
787 796
788 return 0; 797 return 0;
798err4:
799 clk_unprepare(p->clk);
789err3: 800err3:
790 clk_put(p->clk); 801 clk_put(p->clk);
791err2: 802err2:
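Besides dropping IRQF_DISABLED, the sh_cmt change prepares the clock during setup and introduces a dedicated unwind label (err4) so any later failure unprepares the clock before putting it; suspend/resume gain the matching clk_unprepare()/clk_prepare() calls. A standalone sketch of that goto-ladder ordering, using stand-in functions rather than the real clk API:

#include <stdio.h>

static int clk_get_fake(void)        { puts("clk_get");       return 0; }
static int clk_prepare_fake(void)    { puts("clk_prepare");   return 0; }
static int register_fake(void)       { puts("register");      return -1; } /* force a failure */
static void clk_unprepare_fake(void) { puts("clk_unprepare"); }
static void clk_put_fake(void)       { puts("clk_put"); }

static int setup(void)
{
        int ret;

        ret = clk_get_fake();
        if (ret)
                goto err_out;

        ret = clk_prepare_fake();
        if (ret)
                goto err_put;

        ret = register_fake();
        if (ret)
                goto err_unprepare;     /* corresponds to the new err4 label */

        return 0;

err_unprepare:
        clk_unprepare_fake();
err_put:
        clk_put_fake();
err_out:
        return ret;
}

int main(void)
{
        return setup() ? 1 : 0;
}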
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 3cf12834681e..e30d76e0a6fa 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -302,8 +302,7 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
302 p->irqaction.handler = sh_mtu2_interrupt; 302 p->irqaction.handler = sh_mtu2_interrupt;
303 p->irqaction.dev_id = p; 303 p->irqaction.dev_id = p;
304 p->irqaction.irq = irq; 304 p->irqaction.irq = irq;
305 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ 305 p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
306 IRQF_IRQPOLL | IRQF_NOBALANCING;
307 306
308 /* get hold of clock */ 307 /* get hold of clock */
309 p->clk = clk_get(&p->pdev->dev, "mtu2_fck"); 308 p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
@@ -358,7 +357,6 @@ static int sh_mtu2_probe(struct platform_device *pdev)
358 ret = sh_mtu2_setup(p, pdev); 357 ret = sh_mtu2_setup(p, pdev);
359 if (ret) { 358 if (ret) {
360 kfree(p); 359 kfree(p);
361 platform_set_drvdata(pdev, NULL);
362 pm_runtime_idle(&pdev->dev); 360 pm_runtime_idle(&pdev->dev);
363 return ret; 361 return ret;
364 } 362 }
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 63557cda0a7d..ecd7b60bfdfa 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -462,8 +462,7 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
462 p->irqaction.handler = sh_tmu_interrupt; 462 p->irqaction.handler = sh_tmu_interrupt;
463 p->irqaction.dev_id = p; 463 p->irqaction.dev_id = p;
464 p->irqaction.irq = irq; 464 p->irqaction.irq = irq;
465 p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ 465 p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
466 IRQF_IRQPOLL | IRQF_NOBALANCING;
467 466
468 /* get hold of clock */ 467 /* get hold of clock */
469 p->clk = clk_get(&p->pdev->dev, "tmu_fck"); 468 p->clk = clk_get(&p->pdev->dev, "tmu_fck");
@@ -523,7 +522,6 @@ static int sh_tmu_probe(struct platform_device *pdev)
523 ret = sh_tmu_setup(p, pdev); 522 ret = sh_tmu_setup(p, pdev);
524 if (ret) { 523 if (ret) {
525 kfree(p); 524 kfree(p);
526 platform_set_drvdata(pdev, NULL);
527 pm_runtime_idle(&pdev->dev); 525 pm_runtime_idle(&pdev->dev);
528 return ret; 526 return ret;
529 } 527 }
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index a4f6119aafd8..bf497afba9ad 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -114,7 +114,7 @@ static int sun4i_clkevt_next_event(unsigned long evt,
114 114
115static struct clock_event_device sun4i_clockevent = { 115static struct clock_event_device sun4i_clockevent = {
116 .name = "sun4i_tick", 116 .name = "sun4i_tick",
117 .rating = 300, 117 .rating = 350,
118 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 118 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
119 .set_mode = sun4i_clkevt_mode, 119 .set_mode = sun4i_clkevt_mode,
120 .set_next_event = sun4i_clkevt_next_event, 120 .set_next_event = sun4i_clkevt_next_event,
@@ -138,7 +138,7 @@ static struct irqaction sun4i_timer_irq = {
138 .dev_id = &sun4i_clockevent, 138 .dev_id = &sun4i_clockevent,
139}; 139};
140 140
141static u32 sun4i_timer_sched_read(void) 141static u64 notrace sun4i_timer_sched_read(void)
142{ 142{
143 return ~readl(timer_base + TIMER_CNTVAL_REG(1)); 143 return ~readl(timer_base + TIMER_CNTVAL_REG(1));
144} 144}
@@ -170,9 +170,9 @@ static void __init sun4i_timer_init(struct device_node *node)
170 TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M), 170 TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
171 timer_base + TIMER_CTL_REG(1)); 171 timer_base + TIMER_CTL_REG(1));
172 172
173 setup_sched_clock(sun4i_timer_sched_read, 32, rate); 173 sched_clock_register(sun4i_timer_sched_read, 32, rate);
174 clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name, 174 clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
175 rate, 300, 32, clocksource_mmio_readl_down); 175 rate, 350, 32, clocksource_mmio_readl_down);
176 176
177 ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); 177 ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
178 178
@@ -190,7 +190,8 @@ static void __init sun4i_timer_init(struct device_node *node)
190 val = readl(timer_base + TIMER_IRQ_EN_REG); 190 val = readl(timer_base + TIMER_IRQ_EN_REG);
191 writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); 191 writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
192 192
193 sun4i_clockevent.cpumask = cpumask_of(0); 193 sun4i_clockevent.cpumask = cpu_possible_mask;
194 sun4i_clockevent.irq = irq;
194 195
195 clockevents_config_and_register(&sun4i_clockevent, rate, 196 clockevents_config_and_register(&sun4i_clockevent, rate,
196 TIMER_SYNC_TICKS, 0xffffffff); 197 TIMER_SYNC_TICKS, 0xffffffff);
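In the sun4i conversion the sched_clock hook now returns a 64-bit value (marked notrace) and is registered through sched_clock_register(); the hardware counter still counts down, so the driver keeps returning the bitwise complement, and only 32 of the returned bits are declared significant. A standalone sketch of that read path with a simulated register:

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_cntval = 0xfffffff0u;      /* pretend down-counting register */

static uint64_t timer_sched_read(void)
{
        /* only the low 32 bits carry information, as declared to the core */
        return (uint32_t)~fake_cntval;
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                printf("raw=0x%08x  sched_clock ticks=%llu\n",
                       (unsigned int)fake_cntval,
                       (unsigned long long)timer_sched_read());
                fake_cntval -= 5;       /* the hardware would decrement on its own */
        }
        return 0;
}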
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 642849256d82..d1869f02051c 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -149,7 +149,7 @@ static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
149 149
150static struct irqaction tegra_timer_irq = { 150static struct irqaction tegra_timer_irq = {
151 .name = "timer0", 151 .name = "timer0",
152 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_HIGH, 152 .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH,
153 .handler = tegra_timer_interrupt, 153 .handler = tegra_timer_interrupt,
154 .dev_id = &tegra_clockevent, 154 .dev_id = &tegra_clockevent,
155}; 155};
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 4e7f6802e840..ee8691b89944 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -76,6 +76,7 @@
76static void __iomem *timer_base, *local_base; 76static void __iomem *timer_base, *local_base;
77static unsigned int timer_clk; 77static unsigned int timer_clk;
78static bool timer25Mhz = true; 78static bool timer25Mhz = true;
79static u32 enable_mask;
79 80
80/* 81/*
81 * Number of timer ticks per jiffy. 82 * Number of timer ticks per jiffy.
@@ -121,8 +122,7 @@ armada_370_xp_clkevt_next_event(unsigned long delta,
121 /* 122 /*
122 * Enable the timer. 123 * Enable the timer.
123 */ 124 */
124 local_timer_ctrl_clrset(TIMER0_RELOAD_EN, 125 local_timer_ctrl_clrset(TIMER0_RELOAD_EN, enable_mask);
125 TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT));
126 return 0; 126 return 0;
127} 127}
128 128
@@ -141,9 +141,7 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
141 /* 141 /*
142 * Enable timer. 142 * Enable timer.
143 */ 143 */
144 local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | 144 local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
145 TIMER0_EN |
146 TIMER0_DIV(TIMER_DIVIDER_SHIFT));
147 } else { 145 } else {
148 /* 146 /*
149 * Disable timer. 147 * Disable timer.
@@ -240,10 +238,13 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
240 WARN_ON(!timer_base); 238 WARN_ON(!timer_base);
241 local_base = of_iomap(np, 1); 239 local_base = of_iomap(np, 1);
242 240
243 if (timer25Mhz) 241 if (timer25Mhz) {
244 set = TIMER0_25MHZ; 242 set = TIMER0_25MHZ;
245 else 243 enable_mask = TIMER0_EN;
244 } else {
246 clr = TIMER0_25MHZ; 245 clr = TIMER0_25MHZ;
246 enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
247 }
247 timer_ctrl_clrset(clr, set); 248 timer_ctrl_clrset(clr, set);
248 local_timer_ctrl_clrset(clr, set); 249 local_timer_ctrl_clrset(clr, set);
249 250
@@ -262,8 +263,7 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
262 writel(0xffffffff, timer_base + TIMER0_VAL_OFF); 263 writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
263 writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); 264 writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
264 265
265 timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN | 266 timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
266 TIMER0_DIV(TIMER_DIVIDER_SHIFT));
267 267
268 /* 268 /*
269 * Set scale and timer for sched_clock. 269 * Set scale and timer for sched_clock.
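The Armada 370/XP change precomputes enable_mask once: when the timer is clocked from the fixed 25 MHz reference the divider field is omitted, otherwise TIMER0_DIV(TIMER_DIVIDER_SHIFT) is included, and every enable site then just ORs the mask in through the clrset helper. A standalone sketch of that read-modify-write pattern, with invented bit positions:

#include <stdio.h>
#include <stdint.h>

#define TIMER0_EN           (1u << 0)
#define TIMER0_RELOAD_EN    (1u << 1)
#define TIMER0_DIV(x)       ((uint32_t)(x) << 19)   /* invented field position */
#define TIMER_DIVIDER_SHIFT 5

static uint32_t ctrl_reg;       /* stands in for the memory-mapped control register */

static void timer_ctrl_clrset(uint32_t clr, uint32_t set)
{
        ctrl_reg = (ctrl_reg & ~clr) | set;
}

int main(void)
{
        int timer25Mhz = 0;     /* pretend the 25 MHz reference is not available */
        uint32_t enable_mask;

        enable_mask = timer25Mhz ? TIMER0_EN
                                 : TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);

        timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
        /* prints 0x00280003 with these invented bit positions */
        printf("ctrl = 0x%08x\n", (unsigned int)ctrl_reg);
        return 0;
}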
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index 9c7f018a67ca..20066222f3f2 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -53,7 +53,7 @@ EXPORT_SYMBOL(orion_timer_ctrl_clrset);
53/* 53/*
54 * Free-running clocksource handling. 54 * Free-running clocksource handling.
55 */ 55 */
56static u32 notrace orion_read_sched_clock(void) 56static u64 notrace orion_read_sched_clock(void)
57{ 57{
58 return ~readl(timer_base + TIMER0_VAL); 58 return ~readl(timer_base + TIMER0_VAL);
59} 59}
@@ -135,7 +135,7 @@ static void __init orion_timer_init(struct device_node *np)
135 clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource", 135 clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
136 clk_get_rate(clk), 300, 32, 136 clk_get_rate(clk), 300, 32,
137 clocksource_mmio_readl_down); 137 clocksource_mmio_readl_down);
138 setup_sched_clock(orion_read_sched_clock, 32, clk_get_rate(clk)); 138 sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk));
139 139
140 /* setup timer1 as clockevent timer */ 140 /* setup timer1 as clockevent timer */
141 if (setup_irq(irq, &orion_clkevt_irq)) 141 if (setup_irq(irq, &orion_clkevt_irq))
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
new file mode 100644
index 000000000000..deebcd6469fc
--- /dev/null
+++ b/drivers/clocksource/timer-sun5i.c
@@ -0,0 +1,192 @@
1/*
2 * Allwinner SoCs hstimer driver.
3 *
4 * Copyright (C) 2013 Maxime Ripard
5 *
6 * Maxime Ripard <maxime.ripard@free-electrons.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/clk.h>
14#include <linux/clockchips.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/irq.h>
18#include <linux/irqreturn.h>
19#include <linux/sched_clock.h>
20#include <linux/of.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
23
24#define TIMER_IRQ_EN_REG 0x00
25#define TIMER_IRQ_EN(val) BIT(val)
26#define TIMER_IRQ_ST_REG 0x04
27#define TIMER_CTL_REG(val) (0x20 * (val) + 0x10)
28#define TIMER_CTL_ENABLE BIT(0)
29#define TIMER_CTL_RELOAD BIT(1)
30#define TIMER_CTL_CLK_PRES(val) (((val) & 0x7) << 4)
31#define TIMER_CTL_ONESHOT BIT(7)
32#define TIMER_INTVAL_LO_REG(val) (0x20 * (val) + 0x14)
33#define TIMER_INTVAL_HI_REG(val) (0x20 * (val) + 0x18)
34#define TIMER_CNTVAL_LO_REG(val) (0x20 * (val) + 0x1c)
35#define TIMER_CNTVAL_HI_REG(val) (0x20 * (val) + 0x20)
36
37#define TIMER_SYNC_TICKS 3
38
39static void __iomem *timer_base;
40static u32 ticks_per_jiffy;
41
42/*
43 * When we disable a timer, we need to wait at least for 2 cycles of
44 * the timer source clock. For that we use the clocksource timer, which is
45 * already set up, runs at the same frequency as the other timers, and will
46 * never be disabled.
47 */
48static void sun5i_clkevt_sync(void)
49{
50 u32 old = readl(timer_base + TIMER_CNTVAL_LO_REG(1));
51
52 while ((old - readl(timer_base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS)
53 cpu_relax();
54}
55
56static void sun5i_clkevt_time_stop(u8 timer)
57{
58 u32 val = readl(timer_base + TIMER_CTL_REG(timer));
59 writel(val & ~TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(timer));
60
61 sun5i_clkevt_sync();
62}
63
64static void sun5i_clkevt_time_setup(u8 timer, u32 delay)
65{
66 writel(delay, timer_base + TIMER_INTVAL_LO_REG(timer));
67}
68
69static void sun5i_clkevt_time_start(u8 timer, bool periodic)
70{
71 u32 val = readl(timer_base + TIMER_CTL_REG(timer));
72
73 if (periodic)
74 val &= ~TIMER_CTL_ONESHOT;
75 else
76 val |= TIMER_CTL_ONESHOT;
77
78 writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
79 timer_base + TIMER_CTL_REG(timer));
80}
81
82static void sun5i_clkevt_mode(enum clock_event_mode mode,
83 struct clock_event_device *clk)
84{
85 switch (mode) {
86 case CLOCK_EVT_MODE_PERIODIC:
87 sun5i_clkevt_time_stop(0);
88 sun5i_clkevt_time_setup(0, ticks_per_jiffy);
89 sun5i_clkevt_time_start(0, true);
90 break;
91 case CLOCK_EVT_MODE_ONESHOT:
92 sun5i_clkevt_time_stop(0);
93 sun5i_clkevt_time_start(0, false);
94 break;
95 case CLOCK_EVT_MODE_UNUSED:
96 case CLOCK_EVT_MODE_SHUTDOWN:
97 default:
98 sun5i_clkevt_time_stop(0);
99 break;
100 }
101}
102
103static int sun5i_clkevt_next_event(unsigned long evt,
104 struct clock_event_device *unused)
105{
106 sun5i_clkevt_time_stop(0);
107 sun5i_clkevt_time_setup(0, evt - TIMER_SYNC_TICKS);
108 sun5i_clkevt_time_start(0, false);
109
110 return 0;
111}
112
113static struct clock_event_device sun5i_clockevent = {
114 .name = "sun5i_tick",
115 .rating = 340,
116 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
117 .set_mode = sun5i_clkevt_mode,
118 .set_next_event = sun5i_clkevt_next_event,
119};
120
121
122static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
123{
124 struct clock_event_device *evt = (struct clock_event_device *)dev_id;
125
126 writel(0x1, timer_base + TIMER_IRQ_ST_REG);
127 evt->event_handler(evt);
128
129 return IRQ_HANDLED;
130}
131
132static struct irqaction sun5i_timer_irq = {
133 .name = "sun5i_timer0",
134 .flags = IRQF_TIMER | IRQF_IRQPOLL,
135 .handler = sun5i_timer_interrupt,
136 .dev_id = &sun5i_clockevent,
137};
138
139static u64 sun5i_timer_sched_read(void)
140{
141 return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
142}
143
144static void __init sun5i_timer_init(struct device_node *node)
145{
146 unsigned long rate;
147 struct clk *clk;
148 int ret, irq;
149 u32 val;
150
151 timer_base = of_iomap(node, 0);
152 if (!timer_base)
153 panic("Can't map registers");
154
155 irq = irq_of_parse_and_map(node, 0);
156 if (irq <= 0)
157 panic("Can't parse IRQ");
158
159 clk = of_clk_get(node, 0);
160 if (IS_ERR(clk))
161 panic("Can't get timer clock");
162 clk_prepare_enable(clk);
163 rate = clk_get_rate(clk);
164
165 writel(~0, timer_base + TIMER_INTVAL_LO_REG(1));
166 writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
167 timer_base + TIMER_CTL_REG(1));
168
169 sched_clock_register(sun5i_timer_sched_read, 32, rate);
170 clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
171 rate, 340, 32, clocksource_mmio_readl_down);
172
173 ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
174
175 ret = setup_irq(irq, &sun5i_timer_irq);
176 if (ret)
177 pr_warn("failed to setup irq %d\n", irq);
178
179 /* Enable timer0 interrupt */
180 val = readl(timer_base + TIMER_IRQ_EN_REG);
181 writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
182
183 sun5i_clockevent.cpumask = cpu_possible_mask;
184 sun5i_clockevent.irq = irq;
185
186 clockevents_config_and_register(&sun5i_clockevent, rate,
187 TIMER_SYNC_TICKS, 0xffffffff);
188}
189CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
190 sun5i_timer_init);
191CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
192 sun5i_timer_init);
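One detail of the new sun5i driver worth calling out is sun5i_clkevt_sync(): since the hstimer needs about two source-clock cycles after a disable, the driver spins on the free-running clocksource channel (timer 1), and the unsigned subtraction old - current measures elapsed ticks of a down-counter correctly even across a wrap. A standalone sketch with a simulated counter:

#include <stdio.h>
#include <stdint.h>

#define TIMER_SYNC_TICKS 3

static uint32_t fake_counter = 1;       /* about to wrap through 0xffffffff */

static uint32_t read_counter(void)
{
        return fake_counter--;          /* a real timer decrements on its own */
}

int main(void)
{
        uint32_t old = read_counter();

        /* same structure as the driver's loop, minus cpu_relax() */
        while ((uint32_t)(old - read_counter()) < TIMER_SYNC_TICKS)
                ;

        printf("waited at least %d ticks across the wrap\n", TIMER_SYNC_TICKS);
        return 0;
}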
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index ad3c0e83a779..1098ed3b9b89 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -124,7 +124,7 @@ static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
124 124
125static struct irqaction irq = { 125static struct irqaction irq = {
126 .name = "vt8500_timer", 126 .name = "vt8500_timer",
127 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, 127 .flags = IRQF_TIMER | IRQF_IRQPOLL,
128 .handler = vt8500_timer_interrupt, 128 .handler = vt8500_timer_interrupt,
129 .dev_id = &clockevent, 129 .dev_id = &clockevent,
130}; 130};
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 16d7b4ac94be..8d19f7c06010 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -839,9 +839,6 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
839 839
840 /* set default policy */ 840 /* set default policy */
841 ret = cpufreq_set_policy(policy, &new_policy); 841 ret = cpufreq_set_policy(policy, &new_policy);
842 policy->user_policy.policy = policy->policy;
843 policy->user_policy.governor = policy->governor;
844
845 if (ret) { 842 if (ret) {
846 pr_debug("setting policy failed\n"); 843 pr_debug("setting policy failed\n");
847 if (cpufreq_driver->exit) 844 if (cpufreq_driver->exit)
@@ -1016,15 +1013,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1016 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1013 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1017#endif 1014#endif
1018 1015
1019 if (frozen) 1016 /*
1020 /* Restore the saved policy when doing light-weight init */ 1017 * Restore the saved policy when doing light-weight init and fall back
1021 policy = cpufreq_policy_restore(cpu); 1018 * to the full init if that fails.
1022 else 1019 */
1020 policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
1021 if (!policy) {
1022 frozen = false;
1023 policy = cpufreq_policy_alloc(); 1023 policy = cpufreq_policy_alloc();
1024 1024 if (!policy)
1025 if (!policy) 1025 goto nomem_out;
1026 goto nomem_out; 1026 }
1027
1028 1027
1029 /* 1028 /*
1030 * In the resume path, since we restore a saved policy, the assignment 1029 * In the resume path, since we restore a saved policy, the assignment
@@ -1069,8 +1068,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1069 */ 1068 */
1070 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 1069 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1071 1070
1072 policy->user_policy.min = policy->min; 1071 if (!frozen) {
1073 policy->user_policy.max = policy->max; 1072 policy->user_policy.min = policy->min;
1073 policy->user_policy.max = policy->max;
1074 }
1074 1075
1075 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1076 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1076 CPUFREQ_START, policy); 1077 CPUFREQ_START, policy);
@@ -1101,6 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1101 1102
1102 cpufreq_init_policy(policy); 1103 cpufreq_init_policy(policy);
1103 1104
1105 if (!frozen) {
1106 policy->user_policy.policy = policy->policy;
1107 policy->user_policy.governor = policy->governor;
1108 }
1109
1104 kobject_uevent(&policy->kobj, KOBJ_ADD); 1110 kobject_uevent(&policy->kobj, KOBJ_ADD);
1105 up_read(&cpufreq_rwsem); 1111 up_read(&cpufreq_rwsem);
1106 1112
@@ -1118,8 +1124,11 @@ err_get_freq:
1118 if (cpufreq_driver->exit) 1124 if (cpufreq_driver->exit)
1119 cpufreq_driver->exit(policy); 1125 cpufreq_driver->exit(policy);
1120err_set_policy_cpu: 1126err_set_policy_cpu:
1121 if (frozen) 1127 if (frozen) {
1128 /* Do not leave stale fallback data behind. */
1129 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1122 cpufreq_policy_put_kobj(policy); 1130 cpufreq_policy_put_kobj(policy);
1131 }
1123 cpufreq_policy_free(policy); 1132 cpufreq_policy_free(policy);
1124 1133
1125nomem_out: 1134nomem_out:
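The restructured __cpufreq_add_dev() now treats a failed light-weight restore as non-fatal: if cpufreq_policy_restore() returns nothing, frozen is cleared and the code falls back to allocating a fresh policy, and the user_policy fields are only captured on that full path. A standalone sketch of the restore-or-allocate fallback, with invented policy helpers:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct policy { int cpu; };

static struct policy *policy_restore(int cpu)
{
        (void)cpu;
        return NULL;            /* pretend nothing was saved for this CPU */
}

static struct policy *policy_alloc(void)
{
        return calloc(1, sizeof(struct policy));
}

int main(void)
{
        bool frozen = true;
        int cpu = 0;
        struct policy *policy;

        policy = frozen ? policy_restore(cpu) : NULL;
        if (!policy) {
                frozen = false;         /* fall back to the full init path */
                policy = policy_alloc();
                if (!policy)
                        return 1;       /* the driver would jump to nomem_out */
        }

        printf("frozen=%d policy=%p\n", frozen, (void *)policy);
        free(policy);
        return 0;
}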
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 5f1cbae36961..d51f17ed691e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -581,7 +581,8 @@ static void intel_pstate_timer_func(unsigned long __data)
581} 581}
582 582
583#define ICPU(model, policy) \ 583#define ICPU(model, policy) \
584 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } 584 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
585 (unsigned long)&policy }
585 586
586static const struct x86_cpu_id intel_pstate_cpu_ids[] = { 587static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
587 ICPU(0x2a, core_params), 588 ICPU(0x2a, core_params),
@@ -614,6 +615,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
614 cpu = all_cpu_data[cpunum]; 615 cpu = all_cpu_data[cpunum];
615 616
616 intel_pstate_get_cpu_pstates(cpu); 617 intel_pstate_get_cpu_pstates(cpu);
618 if (!cpu->pstate.current_pstate) {
619 all_cpu_data[cpunum] = NULL;
620 kfree(cpu);
621 return -ENODATA;
622 }
617 623
618 cpu->cpu = cpunum; 624 cpu->cpu = cpunum;
619 625
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index 36795639df0d..6e51114057d0 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -65,7 +65,7 @@ static struct cpuidle_driver calxeda_idle_driver = {
65 .state_count = 2, 65 .state_count = 2,
66}; 66};
67 67
68static int __init calxeda_cpuidle_probe(struct platform_device *pdev) 68static int calxeda_cpuidle_probe(struct platform_device *pdev)
69{ 69{
70 return cpuidle_register(&calxeda_idle_driver, NULL); 70 return cpuidle_register(&calxeda_idle_driver, NULL);
71} 71}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 9dd6e01eac33..f757a0f428bd 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1410,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = {
1410static int __init ixp_module_init(void) 1410static int __init ixp_module_init(void)
1411{ 1411{
1412 int num = ARRAY_SIZE(ixp4xx_algos); 1412 int num = ARRAY_SIZE(ixp4xx_algos);
1413 int i, err ; 1413 int i, err;
1414 1414
1415 pdev = platform_device_register_full(&ixp_dev_info); 1415 pdev = platform_device_register_full(&ixp_dev_info);
1416 if (IS_ERR(pdev)) 1416 if (IS_ERR(pdev))
1417 return PTR_ERR(pdev); 1417 return PTR_ERR(pdev);
1418 1418
1419 dev = &pdev->dev;
1420
1421 spin_lock_init(&desc_lock); 1419 spin_lock_init(&desc_lock);
1422 spin_lock_init(&emerg_lock); 1420 spin_lock_init(&emerg_lock);
1423 1421
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a49c777607c..87529181efcc 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -817,7 +817,15 @@ int ioat_dma_self_test(struct ioatdma_device *device)
817 } 817 }
818 818
819 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); 819 dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
820 if (dma_mapping_error(dev, dma_src)) {
821 dev_err(dev, "mapping src buffer failed\n");
822 goto free_resources;
823 }
820 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 824 dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
825 if (dma_mapping_error(dev, dma_dest)) {
826 dev_err(dev, "mapping dest buffer failed\n");
827 goto unmap_src;
828 }
821 flags = DMA_PREP_INTERRUPT; 829 flags = DMA_PREP_INTERRUPT;
822 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, 830 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
823 IOAT_TEST_SIZE, flags); 831 IOAT_TEST_SIZE, flags);
@@ -855,8 +863,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
855 } 863 }
856 864
857unmap_dma: 865unmap_dma:
858 dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
859 dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 866 dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
867unmap_src:
868 dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
860free_resources: 869free_resources:
861 dma->device_free_chan_resources(dma_chan); 870 dma->device_free_chan_resources(dma_chan);
862out: 871out:
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index b53d0de17e15..98e14ee4833c 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,7 +1,7 @@
1#include "amd64_edac.h" 1#include "amd64_edac.h"
2#include <asm/amd_nb.h> 2#include <asm/amd_nb.h>
3 3
4static struct edac_pci_ctl_info *amd64_ctl_pci; 4static struct edac_pci_ctl_info *pci_ctl;
5 5
6static int report_gart_errors; 6static int report_gart_errors;
7module_param(report_gart_errors, int, 0644); 7module_param(report_gart_errors, int, 0644);
@@ -162,7 +162,7 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
162 * scan the scrub rate mapping table for a close or matching bandwidth value to 162 * scan the scrub rate mapping table for a close or matching bandwidth value to
163 * issue. If requested is too big, then use last maximum value found. 163 * issue. If requested is too big, then use last maximum value found.
164 */ 164 */
165static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) 165static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
166{ 166{
167 u32 scrubval; 167 u32 scrubval;
168 int i; 168 int i;
@@ -198,7 +198,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
198 return 0; 198 return 0;
199} 199}
200 200
201static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) 201static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
202{ 202{
203 struct amd64_pvt *pvt = mci->pvt_info; 203 struct amd64_pvt *pvt = mci->pvt_info;
204 u32 min_scrubrate = 0x5; 204 u32 min_scrubrate = 0x5;
@@ -210,10 +210,10 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
210 if (pvt->fam == 0x15 && pvt->model < 0x10) 210 if (pvt->fam == 0x15 && pvt->model < 0x10)
211 f15h_select_dct(pvt, 0); 211 f15h_select_dct(pvt, 0);
212 212
213 return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate); 213 return __set_scrub_rate(pvt->F3, bw, min_scrubrate);
214} 214}
215 215
216static int amd64_get_scrub_rate(struct mem_ctl_info *mci) 216static int get_scrub_rate(struct mem_ctl_info *mci)
217{ 217{
218 struct amd64_pvt *pvt = mci->pvt_info; 218 struct amd64_pvt *pvt = mci->pvt_info;
219 u32 scrubval = 0; 219 u32 scrubval = 0;
@@ -240,8 +240,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
240 * returns true if the SysAddr given by sys_addr matches the 240 * returns true if the SysAddr given by sys_addr matches the
241 * DRAM base/limit associated with node_id 241 * DRAM base/limit associated with node_id
242 */ 242 */
243static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, 243static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
244 u8 nid)
245{ 244{
246 u64 addr; 245 u64 addr;
247 246
@@ -285,7 +284,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
285 284
286 if (intlv_en == 0) { 285 if (intlv_en == 0) {
287 for (node_id = 0; node_id < DRAM_RANGES; node_id++) { 286 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
288 if (amd64_base_limit_match(pvt, sys_addr, node_id)) 287 if (base_limit_match(pvt, sys_addr, node_id))
289 goto found; 288 goto found;
290 } 289 }
291 goto err_no_match; 290 goto err_no_match;
@@ -309,7 +308,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
309 } 308 }
310 309
311 /* sanity test for sys_addr */ 310 /* sanity test for sys_addr */
312 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { 311 if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
313 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address" 312 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
314 "range for node %d with node interleaving enabled.\n", 313 "range for node %d with node interleaving enabled.\n",
315 __func__, sys_addr, node_id); 314 __func__, sys_addr, node_id);
@@ -660,7 +659,7 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
660 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs 659 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
661 * are ECC capable. 660 * are ECC capable.
662 */ 661 */
663static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt) 662static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
664{ 663{
665 u8 bit; 664 u8 bit;
666 unsigned long edac_cap = EDAC_FLAG_NONE; 665 unsigned long edac_cap = EDAC_FLAG_NONE;
@@ -675,9 +674,9 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
675 return edac_cap; 674 return edac_cap;
676} 675}
677 676
678static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8); 677static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
679 678
680static void amd64_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan) 679static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
681{ 680{
682 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); 681 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
683 682
@@ -711,7 +710,7 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
711 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", 710 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
712 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); 711 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
713 712
714 amd64_dump_dramcfg_low(pvt, pvt->dclr0, 0); 713 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
715 714
716 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); 715 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
717 716
@@ -722,19 +721,19 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
722 721
723 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); 722 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
724 723
725 amd64_debug_display_dimm_sizes(pvt, 0); 724 debug_display_dimm_sizes(pvt, 0);
726 725
727 /* everything below this point is Fam10h and above */ 726 /* everything below this point is Fam10h and above */
728 if (pvt->fam == 0xf) 727 if (pvt->fam == 0xf)
729 return; 728 return;
730 729
731 amd64_debug_display_dimm_sizes(pvt, 1); 730 debug_display_dimm_sizes(pvt, 1);
732 731
733 amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4")); 732 amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
734 733
735 /* Only if NOT ganged does dclr1 have valid info */ 734 /* Only if NOT ganged does dclr1 have valid info */
736 if (!dct_ganging_enabled(pvt)) 735 if (!dct_ganging_enabled(pvt))
737 amd64_dump_dramcfg_low(pvt, pvt->dclr1, 1); 736 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
738} 737}
739 738
740/* 739/*
@@ -800,7 +799,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
800 } 799 }
801} 800}
802 801
803static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs) 802static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs)
804{ 803{
805 enum mem_type type; 804 enum mem_type type;
806 805
@@ -1578,7 +1577,7 @@ static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1578 num_dcts_intlv, dct_sel); 1577 num_dcts_intlv, dct_sel);
1579 1578
1580 /* Verify we stay within the MAX number of channels allowed */ 1579 /* Verify we stay within the MAX number of channels allowed */
1581 if (channel > 4 || channel < 0) 1580 if (channel > 3)
1582 return -EINVAL; 1581 return -EINVAL;
1583 1582
1584 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0)); 1583 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
@@ -1702,7 +1701,7 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1702 * debug routine to display the memory sizes of all logical DIMMs and its 1701 * debug routine to display the memory sizes of all logical DIMMs and its
1703 * CSROWs 1702 * CSROWs
1704 */ 1703 */
1705static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) 1704static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1706{ 1705{
1707 int dimm, size0, size1; 1706 int dimm, size0, size1;
1708 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; 1707 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
@@ -1744,7 +1743,7 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1744 } 1743 }
1745} 1744}
1746 1745
1747static struct amd64_family_type amd64_family_types[] = { 1746static struct amd64_family_type family_types[] = {
1748 [K8_CPUS] = { 1747 [K8_CPUS] = {
1749 .ctl_name = "K8", 1748 .ctl_name = "K8",
1750 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, 1749 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
@@ -2005,9 +2004,9 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
2005 string, ""); 2004 string, "");
2006} 2005}
2007 2006
2008static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, 2007static inline void decode_bus_error(int node_id, struct mce *m)
2009 struct mce *m)
2010{ 2008{
2009 struct mem_ctl_info *mci = mcis[node_id];
2011 struct amd64_pvt *pvt = mci->pvt_info; 2010 struct amd64_pvt *pvt = mci->pvt_info;
2012 u8 ecc_type = (m->status >> 45) & 0x3; 2011 u8 ecc_type = (m->status >> 45) & 0x3;
2013 u8 xec = XEC(m->status, 0x1f); 2012 u8 xec = XEC(m->status, 0x1f);
@@ -2035,11 +2034,6 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
2035 __log_bus_error(mci, &err, ecc_type); 2034 __log_bus_error(mci, &err, ecc_type);
2036} 2035}
2037 2036
2038void amd64_decode_bus_error(int node_id, struct mce *m)
2039{
2040 __amd64_decode_bus_error(mcis[node_id], m);
2041}
2042
2043/* 2037/*
2044 * Use pvt->F2 which contains the F2 CPU PCI device to get the related 2038 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
2045 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error. 2039 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
@@ -2196,7 +2190,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
2196 * encompasses 2190 * encompasses
2197 * 2191 *
2198 */ 2192 */
2199static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) 2193static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2200{ 2194{
2201 u32 cs_mode, nr_pages; 2195 u32 cs_mode, nr_pages;
2202 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; 2196 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
@@ -2263,19 +2257,19 @@ static int init_csrows(struct mem_ctl_info *mci)
2263 pvt->mc_node_id, i); 2257 pvt->mc_node_id, i);
2264 2258
2265 if (row_dct0) { 2259 if (row_dct0) {
2266 nr_pages = amd64_csrow_nr_pages(pvt, 0, i); 2260 nr_pages = get_csrow_nr_pages(pvt, 0, i);
2267 csrow->channels[0]->dimm->nr_pages = nr_pages; 2261 csrow->channels[0]->dimm->nr_pages = nr_pages;
2268 } 2262 }
2269 2263
2270 /* K8 has only one DCT */ 2264 /* K8 has only one DCT */
2271 if (pvt->fam != 0xf && row_dct1) { 2265 if (pvt->fam != 0xf && row_dct1) {
2272 int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i); 2266 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
2273 2267
2274 csrow->channels[1]->dimm->nr_pages = row_dct1_pages; 2268 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2275 nr_pages += row_dct1_pages; 2269 nr_pages += row_dct1_pages;
2276 } 2270 }
2277 2271
2278 mtype = amd64_determine_memory_type(pvt, i); 2272 mtype = determine_memory_type(pvt, i);
2279 2273
2280 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages); 2274 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
2281 2275
@@ -2309,7 +2303,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
2309} 2303}
2310 2304
2311/* check MCG_CTL on all the cpus on this node */ 2305/* check MCG_CTL on all the cpus on this node */
2312static bool amd64_nb_mce_bank_enabled_on_node(u16 nid) 2306static bool nb_mce_bank_enabled_on_node(u16 nid)
2313{ 2307{
2314 cpumask_var_t mask; 2308 cpumask_var_t mask;
2315 int cpu, nbe; 2309 int cpu, nbe;
@@ -2482,7 +2476,7 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
2482 ecc_en = !!(value & NBCFG_ECC_ENABLE); 2476 ecc_en = !!(value & NBCFG_ECC_ENABLE);
2483 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled")); 2477 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2484 2478
2485 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid); 2479 nb_mce_en = nb_mce_bank_enabled_on_node(nid);
2486 if (!nb_mce_en) 2480 if (!nb_mce_en)
2487 amd64_notice("NB MCE bank disabled, set MSR " 2481 amd64_notice("NB MCE bank disabled, set MSR "
2488 "0x%08x[4] on node %d to enable.\n", 2482 "0x%08x[4] on node %d to enable.\n",
@@ -2537,7 +2531,7 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2537 if (pvt->nbcap & NBCAP_CHIPKILL) 2531 if (pvt->nbcap & NBCAP_CHIPKILL)
2538 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; 2532 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2539 2533
2540 mci->edac_cap = amd64_determine_edac_cap(pvt); 2534 mci->edac_cap = determine_edac_cap(pvt);
2541 mci->mod_name = EDAC_MOD_STR; 2535 mci->mod_name = EDAC_MOD_STR;
2542 mci->mod_ver = EDAC_AMD64_VERSION; 2536 mci->mod_ver = EDAC_AMD64_VERSION;
2543 mci->ctl_name = fam->ctl_name; 2537 mci->ctl_name = fam->ctl_name;
@@ -2545,14 +2539,14 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2545 mci->ctl_page_to_phys = NULL; 2539 mci->ctl_page_to_phys = NULL;
2546 2540
2547 /* memory scrubber interface */ 2541 /* memory scrubber interface */
2548 mci->set_sdram_scrub_rate = amd64_set_scrub_rate; 2542 mci->set_sdram_scrub_rate = set_scrub_rate;
2549 mci->get_sdram_scrub_rate = amd64_get_scrub_rate; 2543 mci->get_sdram_scrub_rate = get_scrub_rate;
2550} 2544}
2551 2545
2552/* 2546/*
2553 * returns a pointer to the family descriptor on success, NULL otherwise. 2547 * returns a pointer to the family descriptor on success, NULL otherwise.
2554 */ 2548 */
2555static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) 2549static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
2556{ 2550{
2557 struct amd64_family_type *fam_type = NULL; 2551 struct amd64_family_type *fam_type = NULL;
2558 2552
@@ -2563,29 +2557,29 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
2563 2557
2564 switch (pvt->fam) { 2558 switch (pvt->fam) {
2565 case 0xf: 2559 case 0xf:
2566 fam_type = &amd64_family_types[K8_CPUS]; 2560 fam_type = &family_types[K8_CPUS];
2567 pvt->ops = &amd64_family_types[K8_CPUS].ops; 2561 pvt->ops = &family_types[K8_CPUS].ops;
2568 break; 2562 break;
2569 2563
2570 case 0x10: 2564 case 0x10:
2571 fam_type = &amd64_family_types[F10_CPUS]; 2565 fam_type = &family_types[F10_CPUS];
2572 pvt->ops = &amd64_family_types[F10_CPUS].ops; 2566 pvt->ops = &family_types[F10_CPUS].ops;
2573 break; 2567 break;
2574 2568
2575 case 0x15: 2569 case 0x15:
2576 if (pvt->model == 0x30) { 2570 if (pvt->model == 0x30) {
2577 fam_type = &amd64_family_types[F15_M30H_CPUS]; 2571 fam_type = &family_types[F15_M30H_CPUS];
2578 pvt->ops = &amd64_family_types[F15_M30H_CPUS].ops; 2572 pvt->ops = &family_types[F15_M30H_CPUS].ops;
2579 break; 2573 break;
2580 } 2574 }
2581 2575
2582 fam_type = &amd64_family_types[F15_CPUS]; 2576 fam_type = &family_types[F15_CPUS];
2583 pvt->ops = &amd64_family_types[F15_CPUS].ops; 2577 pvt->ops = &family_types[F15_CPUS].ops;
2584 break; 2578 break;
2585 2579
2586 case 0x16: 2580 case 0x16:
2587 fam_type = &amd64_family_types[F16_CPUS]; 2581 fam_type = &family_types[F16_CPUS];
2588 pvt->ops = &amd64_family_types[F16_CPUS].ops; 2582 pvt->ops = &family_types[F16_CPUS].ops;
2589 break; 2583 break;
2590 2584
2591 default: 2585 default:
@@ -2601,7 +2595,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
2601 return fam_type; 2595 return fam_type;
2602} 2596}
2603 2597
2604static int amd64_init_one_instance(struct pci_dev *F2) 2598static int init_one_instance(struct pci_dev *F2)
2605{ 2599{
2606 struct amd64_pvt *pvt = NULL; 2600 struct amd64_pvt *pvt = NULL;
2607 struct amd64_family_type *fam_type = NULL; 2601 struct amd64_family_type *fam_type = NULL;
@@ -2619,7 +2613,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
2619 pvt->F2 = F2; 2613 pvt->F2 = F2;
2620 2614
2621 ret = -EINVAL; 2615 ret = -EINVAL;
2622 fam_type = amd64_per_family_init(pvt); 2616 fam_type = per_family_init(pvt);
2623 if (!fam_type) 2617 if (!fam_type)
2624 goto err_free; 2618 goto err_free;
2625 2619
@@ -2680,7 +2674,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
2680 if (report_gart_errors) 2674 if (report_gart_errors)
2681 amd_report_gart_errors(true); 2675 amd_report_gart_errors(true);
2682 2676
2683 amd_register_ecc_decoder(amd64_decode_bus_error); 2677 amd_register_ecc_decoder(decode_bus_error);
2684 2678
2685 mcis[nid] = mci; 2679 mcis[nid] = mci;
2686 2680
@@ -2703,8 +2697,8 @@ err_ret:
2703 return ret; 2697 return ret;
2704} 2698}
2705 2699
2706static int amd64_probe_one_instance(struct pci_dev *pdev, 2700static int probe_one_instance(struct pci_dev *pdev,
2707 const struct pci_device_id *mc_type) 2701 const struct pci_device_id *mc_type)
2708{ 2702{
2709 u16 nid = amd_get_node_id(pdev); 2703 u16 nid = amd_get_node_id(pdev);
2710 struct pci_dev *F3 = node_to_amd_nb(nid)->misc; 2704 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
@@ -2736,7 +2730,7 @@ static int amd64_probe_one_instance(struct pci_dev *pdev,
2736 goto err_enable; 2730 goto err_enable;
2737 } 2731 }
2738 2732
2739 ret = amd64_init_one_instance(pdev); 2733 ret = init_one_instance(pdev);
2740 if (ret < 0) { 2734 if (ret < 0) {
2741 amd64_err("Error probing instance: %d\n", nid); 2735 amd64_err("Error probing instance: %d\n", nid);
2742 restore_ecc_error_reporting(s, nid, F3); 2736 restore_ecc_error_reporting(s, nid, F3);
@@ -2752,7 +2746,7 @@ err_out:
2752 return ret; 2746 return ret;
2753} 2747}
2754 2748
2755static void amd64_remove_one_instance(struct pci_dev *pdev) 2749static void remove_one_instance(struct pci_dev *pdev)
2756{ 2750{
2757 struct mem_ctl_info *mci; 2751 struct mem_ctl_info *mci;
2758 struct amd64_pvt *pvt; 2752 struct amd64_pvt *pvt;
@@ -2777,7 +2771,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
2777 2771
2778 /* unregister from EDAC MCE */ 2772 /* unregister from EDAC MCE */
2779 amd_report_gart_errors(false); 2773 amd_report_gart_errors(false);
2780 amd_unregister_ecc_decoder(amd64_decode_bus_error); 2774 amd_unregister_ecc_decoder(decode_bus_error);
2781 2775
2782 kfree(ecc_stngs[nid]); 2776 kfree(ecc_stngs[nid]);
2783 ecc_stngs[nid] = NULL; 2777 ecc_stngs[nid] = NULL;
@@ -2795,7 +2789,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
2795 * PCI core identifies what devices are on a system during boot, and then 2789 * PCI core identifies what devices are on a system during boot, and then
2796 * inquiry this table to see if this driver is for a given device found. 2790 * inquiry this table to see if this driver is for a given device found.
2797 */ 2791 */
2798static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = { 2792static const struct pci_device_id amd64_pci_table[] = {
2799 { 2793 {
2800 .vendor = PCI_VENDOR_ID_AMD, 2794 .vendor = PCI_VENDOR_ID_AMD,
2801 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, 2795 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
@@ -2843,8 +2837,8 @@ MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2843 2837
2844static struct pci_driver amd64_pci_driver = { 2838static struct pci_driver amd64_pci_driver = {
2845 .name = EDAC_MOD_STR, 2839 .name = EDAC_MOD_STR,
2846 .probe = amd64_probe_one_instance, 2840 .probe = probe_one_instance,
2847 .remove = amd64_remove_one_instance, 2841 .remove = remove_one_instance,
2848 .id_table = amd64_pci_table, 2842 .id_table = amd64_pci_table,
2849}; 2843};
2850 2844
@@ -2853,23 +2847,18 @@ static void setup_pci_device(void)
2853 struct mem_ctl_info *mci; 2847 struct mem_ctl_info *mci;
2854 struct amd64_pvt *pvt; 2848 struct amd64_pvt *pvt;
2855 2849
2856 if (amd64_ctl_pci) 2850 if (pci_ctl)
2857 return; 2851 return;
2858 2852
2859 mci = mcis[0]; 2853 mci = mcis[0];
2860 if (mci) { 2854 if (!mci)
2861 2855 return;
2862 pvt = mci->pvt_info;
2863 amd64_ctl_pci =
2864 edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2865
2866 if (!amd64_ctl_pci) {
2867 pr_warning("%s(): Unable to create PCI control\n",
2868 __func__);
2869 2856
2870 pr_warning("%s(): PCI error report via EDAC not set\n", 2857 pvt = mci->pvt_info;
2871 __func__); 2858 pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2872 } 2859 if (!pci_ctl) {
2860 pr_warn("%s(): Unable to create PCI control\n", __func__);
2861 pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
2873 } 2862 }
2874} 2863}
2875 2864
@@ -2925,8 +2914,8 @@ err_ret:
2925 2914
2926static void __exit amd64_edac_exit(void) 2915static void __exit amd64_edac_exit(void)
2927{ 2916{
2928 if (amd64_ctl_pci) 2917 if (pci_ctl)
2929 edac_pci_release_generic_ctl(amd64_ctl_pci); 2918 edac_pci_release_generic_ctl(pci_ctl);
2930 2919
2931 pci_unregister_driver(&amd64_pci_driver); 2920 pci_unregister_driver(&amd64_pci_driver);
2932 2921
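Most of the amd64_edac diff is mechanical (dropping the redundant amd64_ prefix from static symbols and renaming the table to family_types[]), but the per_family_init() switch is the piece that picks the right descriptor from that table by CPU family and model. A standalone sketch of that lookup; the enum slots mirror the diff, while the ctl_name strings other than "K8" are placeholders:

#include <stdio.h>

enum { K8_CPUS, F10_CPUS, F15_CPUS, F15_M30H_CPUS, F16_CPUS };

struct family_type { const char *ctl_name; };

static const struct family_type family_types[] = {
        [K8_CPUS]       = { "K8" },
        [F10_CPUS]      = { "F10h" },
        [F15_CPUS]      = { "F15h" },
        [F15_M30H_CPUS] = { "F15h_M30h" },
        [F16_CPUS]      = { "F16h" },
};

static const struct family_type *per_family_init(unsigned fam, unsigned model)
{
        switch (fam) {
        case 0xf:  return &family_types[K8_CPUS];
        case 0x10: return &family_types[F10_CPUS];
        case 0x15: return model == 0x30 ? &family_types[F15_M30H_CPUS]
                                        : &family_types[F15_CPUS];
        case 0x16: return &family_types[F16_CPUS];
        default:   return NULL;
        }
}

int main(void)
{
        const struct family_type *ft = per_family_init(0x15, 0x30);

        printf("%s\n", ft ? ft->ctl_name : "unsupported family");
        return 0;
}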
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 96e3ee3460a5..3a501b530e11 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -333,7 +333,7 @@ static void amd76x_remove_one(struct pci_dev *pdev)
333 edac_mc_free(mci); 333 edac_mc_free(mci);
334} 334}
335 335
336static DEFINE_PCI_DEVICE_TABLE(amd76x_pci_tbl) = { 336static const struct pci_device_id amd76x_pci_tbl[] = {
337 { 337 {
338 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 338 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
339 AMD762}, 339 AMD762},
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 644fec54681f..92d54fa65f93 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -1182,9 +1182,11 @@ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
1182 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 1182 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
1183 pvt->dev_info->err_dev, pvt->bridge_ck); 1183 pvt->dev_info->err_dev, pvt->bridge_ck);
1184 1184
1185 if (pvt->bridge_ck == NULL) 1185 if (pvt->bridge_ck == NULL) {
1186 pvt->bridge_ck = pci_scan_single_device(pdev->bus, 1186 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
1187 PCI_DEVFN(0, 1)); 1187 PCI_DEVFN(0, 1));
1188 pci_dev_get(pvt->bridge_ck);
1189 }
1188 1190
1189 if (pvt->bridge_ck == NULL) { 1191 if (pvt->bridge_ck == NULL) {
1190 e752x_printk(KERN_ERR, "error reporting device not found:" 1192 e752x_printk(KERN_ERR, "error reporting device not found:"
@@ -1421,7 +1423,7 @@ static void e752x_remove_one(struct pci_dev *pdev)
1421 edac_mc_free(mci); 1423 edac_mc_free(mci);
1422} 1424}
1423 1425
1424static DEFINE_PCI_DEVICE_TABLE(e752x_pci_tbl) = { 1426static const struct pci_device_id e752x_pci_tbl[] = {
1425 { 1427 {
1426 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1428 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1427 E7520}, 1429 E7520},
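The e752x change adds a pci_dev_get() when the bridge device had to be found via pci_scan_single_device(), which (unlike pci_get_device()) presumably does not take a reference of its own, so the later release path drops exactly one reference either way. A toy standalone sketch of that balancing idea, not the kernel's real refcounting:

#include <stdio.h>

struct dev { int refcount; };

static struct dev bridge = { .refcount = 1 };   /* owned by the "bus" */

static struct dev *get_device_with_ref(void)    /* like pci_get_device() */
{
        return NULL;    /* pretend the managed lookup found nothing */
}

static struct dev *scan_device_no_ref(void)     /* like pci_scan_single_device() */
{
        return &bridge;
}

static void dev_get(struct dev *d) { d->refcount++; }
static void dev_put(struct dev *d) { d->refcount--; }

int main(void)
{
        struct dev *d = get_device_with_ref();

        if (!d) {
                d = scan_device_no_ref();
                dev_get(d);     /* balance the dev_put() in the teardown path */
        }

        /* common teardown path */
        dev_put(d);
        printf("refcount back to %d\n", bridge.refcount);
        return 0;
}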
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 1c4056a50383..3cda79bc8b00 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -555,7 +555,7 @@ static void e7xxx_remove_one(struct pci_dev *pdev)
555 edac_mc_free(mci); 555 edac_mc_free(mci);
556} 556}
557 557
558static DEFINE_PCI_DEVICE_TABLE(e7xxx_pci_tbl) = { 558static const struct pci_device_id e7xxx_pci_tbl[] = {
559 { 559 {
560 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 560 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
561 E7205}, 561 E7205},
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 102674346035..592af5f0cf39 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -437,6 +437,9 @@ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
437{ 437{
438 int status; 438 int status;
439 439
440 if (!edac_dev->edac_check)
441 return;
442
440 status = cancel_delayed_work(&edac_dev->work); 443 status = cancel_delayed_work(&edac_dev->work);
441 if (status == 0) { 444 if (status == 0) {
442 /* workq instance might be running, wait for it */ 445 /* workq instance might be running, wait for it */
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 9f7e0e609516..51c0362acf5c 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -914,7 +914,7 @@ void __exit edac_debugfs_exit(void)
914 debugfs_remove(edac_debugfs); 914 debugfs_remove(edac_debugfs);
915} 915}
916 916
917int edac_create_debug_nodes(struct mem_ctl_info *mci) 917static int edac_create_debug_nodes(struct mem_ctl_info *mci)
918{ 918{
919 struct dentry *d, *parent; 919 struct dentry *d, *parent;
920 char name[80]; 920 char name[80];
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
index 351945fa2ecd..9d9e18aefaaa 100644
--- a/drivers/edac/edac_stub.c
+++ b/drivers/edac/edac_stub.c
@@ -29,6 +29,25 @@ EXPORT_SYMBOL_GPL(edac_err_assert);
29 29
30static atomic_t edac_subsys_valid = ATOMIC_INIT(0); 30static atomic_t edac_subsys_valid = ATOMIC_INIT(0);
31 31
32int edac_report_status = EDAC_REPORTING_ENABLED;
33EXPORT_SYMBOL_GPL(edac_report_status);
34
35static int __init edac_report_setup(char *str)
36{
37 if (!str)
38 return -EINVAL;
39
40 if (!strncmp(str, "on", 2))
41 set_edac_report_status(EDAC_REPORTING_ENABLED);
42 else if (!strncmp(str, "off", 3))
43 set_edac_report_status(EDAC_REPORTING_DISABLED);
44 else if (!strncmp(str, "force", 5))
45 set_edac_report_status(EDAC_REPORTING_FORCE);
46
47 return 0;
48}
49__setup("edac_report=", edac_report_setup);
50
32/* 51/*
33 * called to determine if there is an EDAC driver interested in 52 * called to determine if there is an EDAC driver interested in
34 * knowing an event (such as NMI) occurred 53 * knowing an event (such as NMI) occurred
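The new edac_report= boot parameter is parsed with a plain strncmp() cascade over the three accepted keywords. A standalone sketch of the same dispatcher, with the reporting states reduced to local constants instead of the kernel's enum:

#include <stdio.h>
#include <string.h>

enum { REPORTING_ENABLED, REPORTING_DISABLED, REPORTING_FORCE };

static int report_status = REPORTING_ENABLED;

static int report_setup(const char *str)
{
        if (!str)
                return -1;

        if (!strncmp(str, "on", 2))
                report_status = REPORTING_ENABLED;
        else if (!strncmp(str, "off", 3))
                report_status = REPORTING_DISABLED;
        else if (!strncmp(str, "force", 5))
                report_status = REPORTING_FORCE;

        return 0;
}

int main(void)
{
        report_setup("force");
        printf("edac_report status: %d\n", report_status);
        return 0;
}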
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 694efcbf19c0..cd28b968e5c7 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -487,7 +487,7 @@ static void i3000_remove_one(struct pci_dev *pdev)
487 edac_mc_free(mci); 487 edac_mc_free(mci);
488} 488}
489 489
490static DEFINE_PCI_DEVICE_TABLE(i3000_pci_tbl) = { 490static const struct pci_device_id i3000_pci_tbl[] = {
491 { 491 {
492 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 492 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
493 I3000}, 493 I3000},
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index be10a74b16ea..fa1326e5a4b0 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -466,7 +466,7 @@ static void i3200_remove_one(struct pci_dev *pdev)
466 edac_mc_free(mci); 466 edac_mc_free(mci);
467} 467}
468 468
469static DEFINE_PCI_DEVICE_TABLE(i3200_pci_tbl) = { 469static const struct pci_device_id i3200_pci_tbl[] = {
470 { 470 {
471 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 471 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
472 I3200}, 472 I3200},
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 63b2194e8c20..72e07e3cf718 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1530,7 +1530,7 @@ static void i5000_remove_one(struct pci_dev *pdev)
1530 * 1530 *
1531 * The "E500P" device is the first device supported. 1531 * The "E500P" device is the first device supported.
1532 */ 1532 */
1533static DEFINE_PCI_DEVICE_TABLE(i5000_pci_tbl) = { 1533static const struct pci_device_id i5000_pci_tbl[] = {
1534 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16), 1534 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
1535 .driver_data = I5000P}, 1535 .driver_data = I5000P},
1536 1536
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 157b934e8ce3..36a38ee94fa8 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -1213,7 +1213,7 @@ static void i5100_remove_one(struct pci_dev *pdev)
1213 edac_mc_free(mci); 1213 edac_mc_free(mci);
1214} 1214}
1215 1215
1216static DEFINE_PCI_DEVICE_TABLE(i5100_pci_tbl) = { 1216static const struct pci_device_id i5100_pci_tbl[] = {
1217 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */ 1217 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
1218 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) }, 1218 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
1219 { 0, } 1219 { 0, }
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 0a05bbceb08f..e080cbfa8fc9 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -1416,7 +1416,7 @@ static void i5400_remove_one(struct pci_dev *pdev)
1416 * 1416 *
1417 * The "E500P" device is the first device supported. 1417 * The "E500P" device is the first device supported.
1418 */ 1418 */
1419static DEFINE_PCI_DEVICE_TABLE(i5400_pci_tbl) = { 1419static const struct pci_device_id i5400_pci_tbl[] = {
1420 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)}, 1420 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
1421 {0,} /* 0 terminated list. */ 1421 {0,} /* 0 terminated list. */
1422}; 1422};
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 9004c64b169e..d63f4798f7d0 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -1160,7 +1160,7 @@ static void i7300_remove_one(struct pci_dev *pdev)
1160 * 1160 *
1161 * Has only 8086:360c PCI ID 1161 * Has only 8086:360c PCI ID
1162 */ 1162 */
1163static DEFINE_PCI_DEVICE_TABLE(i7300_pci_tbl) = { 1163static const struct pci_device_id i7300_pci_tbl[] = {
1164 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)}, 1164 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
1165 {0,} /* 0 terminated list. */ 1165 {0,} /* 0 terminated list. */
1166}; 1166};
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 80a963d64e58..87533ca7752e 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -394,7 +394,7 @@ static const struct pci_id_table pci_dev_table[] = {
394/* 394/*
395 * pci_device_id table for which devices we are looking for 395 * pci_device_id table for which devices we are looking for
396 */ 396 */
397static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = { 397static const struct pci_device_id i7core_pci_tbl[] = {
398 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)}, 398 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
399 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)}, 399 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
400 {0,} /* 0 terminated list. */ 400 {0,} /* 0 terminated list. */
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 57fdb77903ba..d730e276d1a8 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -386,7 +386,7 @@ static void i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
386 386
387EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one); 387EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
388 388
389static DEFINE_PCI_DEVICE_TABLE(i82443bxgx_pci_tbl) = { 389static const struct pci_device_id i82443bxgx_pci_tbl[] = {
390 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)}, 390 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
391 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)}, 391 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
392 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)}, 392 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 3e3e431c8301..3382f6344e42 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -288,7 +288,7 @@ static void i82860_remove_one(struct pci_dev *pdev)
288 edac_mc_free(mci); 288 edac_mc_free(mci);
289} 289}
290 290
291static DEFINE_PCI_DEVICE_TABLE(i82860_pci_tbl) = { 291static const struct pci_device_id i82860_pci_tbl[] = {
292 { 292 {
293 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 293 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
294 I82860}, 294 I82860},
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 2f8535fc451e..80573df0a4d7 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -527,7 +527,7 @@ static void i82875p_remove_one(struct pci_dev *pdev)
527 edac_mc_free(mci); 527 edac_mc_free(mci);
528} 528}
529 529
530static DEFINE_PCI_DEVICE_TABLE(i82875p_pci_tbl) = { 530static const struct pci_device_id i82875p_pci_tbl[] = {
531 { 531 {
532 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 532 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
533 I82875P}, 533 I82875P},
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 0c8d4b0eaa32..10b10521f62e 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -628,7 +628,7 @@ static void i82975x_remove_one(struct pci_dev *pdev)
628 edac_mc_free(mci); 628 edac_mc_free(mci);
629} 629}
630 630
631static DEFINE_PCI_DEVICE_TABLE(i82975x_pci_tbl) = { 631static const struct pci_device_id i82975x_pci_tbl[] = {
632 { 632 {
633 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 633 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
634 I82975X 634 I82975X
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index fd46b0bd5f2a..8f9182179a7c 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -1,6 +1,8 @@
1/* 1/*
2 * Freescale MPC85xx Memory Controller kenel module 2 * Freescale MPC85xx Memory Controller kenel module
3 * 3 *
4 * Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
5 *
4 * Author: Dave Jiang <djiang@mvista.com> 6 * Author: Dave Jiang <djiang@mvista.com>
5 * 7 *
6 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under 8 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
@@ -196,6 +198,42 @@ static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
196 edac_pci_handle_npe(pci, pci->ctl_name); 198 edac_pci_handle_npe(pci, pci->ctl_name);
197} 199}
198 200
201static void mpc85xx_pcie_check(struct edac_pci_ctl_info *pci)
202{
203 struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
204 u32 err_detect;
205
206 err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
207
208 pr_err("PCIe error(s) detected\n");
209 pr_err("PCIe ERR_DR register: 0x%08x\n", err_detect);
210 pr_err("PCIe ERR_CAP_STAT register: 0x%08x\n",
211 in_be32(pdata->pci_vbase + MPC85XX_PCI_GAS_TIMR));
212 pr_err("PCIe ERR_CAP_R0 register: 0x%08x\n",
213 in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R0));
214 pr_err("PCIe ERR_CAP_R1 register: 0x%08x\n",
215 in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R1));
216 pr_err("PCIe ERR_CAP_R2 register: 0x%08x\n",
217 in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R2));
218 pr_err("PCIe ERR_CAP_R3 register: 0x%08x\n",
219 in_be32(pdata->pci_vbase + MPC85XX_PCIE_ERR_CAP_R3));
220
221 /* clear error bits */
222 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
223}
224
225static int mpc85xx_pcie_find_capability(struct device_node *np)
226{
227 struct pci_controller *hose;
228
229 if (!np)
230 return -EINVAL;
231
232 hose = pci_find_hose_for_OF_device(np);
233
234 return early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
235}
236
199static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id) 237static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
200{ 238{
201 struct edac_pci_ctl_info *pci = dev_id; 239 struct edac_pci_ctl_info *pci = dev_id;
@@ -207,7 +245,10 @@ static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
207 if (!err_detect) 245 if (!err_detect)
208 return IRQ_NONE; 246 return IRQ_NONE;
209 247
210 mpc85xx_pci_check(pci); 248 if (pdata->is_pcie)
249 mpc85xx_pcie_check(pci);
250 else
251 mpc85xx_pci_check(pci);
211 252
212 return IRQ_HANDLED; 253 return IRQ_HANDLED;
213} 254}
@@ -239,14 +280,22 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
239 pdata = pci->pvt_info; 280 pdata = pci->pvt_info;
240 pdata->name = "mpc85xx_pci_err"; 281 pdata->name = "mpc85xx_pci_err";
241 pdata->irq = NO_IRQ; 282 pdata->irq = NO_IRQ;
283
284 if (mpc85xx_pcie_find_capability(op->dev.of_node) > 0)
285 pdata->is_pcie = true;
286
242 dev_set_drvdata(&op->dev, pci); 287 dev_set_drvdata(&op->dev, pci);
243 pci->dev = &op->dev; 288 pci->dev = &op->dev;
244 pci->mod_name = EDAC_MOD_STR; 289 pci->mod_name = EDAC_MOD_STR;
245 pci->ctl_name = pdata->name; 290 pci->ctl_name = pdata->name;
246 pci->dev_name = dev_name(&op->dev); 291 pci->dev_name = dev_name(&op->dev);
247 292
248 if (edac_op_state == EDAC_OPSTATE_POLL) 293 if (edac_op_state == EDAC_OPSTATE_POLL) {
249 pci->edac_check = mpc85xx_pci_check; 294 if (pdata->is_pcie)
295 pci->edac_check = mpc85xx_pcie_check;
296 else
297 pci->edac_check = mpc85xx_pci_check;
298 }
250 299
251 pdata->edac_idx = edac_pci_idx++; 300 pdata->edac_idx = edac_pci_idx++;
252 301
@@ -275,16 +324,26 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
275 goto err; 324 goto err;
276 } 325 }
277 326
278 orig_pci_err_cap_dr = 327 if (pdata->is_pcie) {
279 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR); 328 orig_pci_err_cap_dr =
329 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR);
330 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, ~0);
331 orig_pci_err_en =
332 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
333 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, 0);
334 } else {
335 orig_pci_err_cap_dr =
336 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
280 337
281 /* PCI master abort is expected during config cycles */ 338 /* PCI master abort is expected during config cycles */
282 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40); 339 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
283 340
284 orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN); 341 orig_pci_err_en =
342 in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
285 343
286 /* disable master abort reporting */ 344 /* disable master abort reporting */
287 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40); 345 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
346 }
288 347
289 /* clear error bits */ 348 /* clear error bits */
290 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0); 349 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
@@ -297,7 +356,8 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
297 if (edac_op_state == EDAC_OPSTATE_INT) { 356 if (edac_op_state == EDAC_OPSTATE_INT) {
298 pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0); 357 pdata->irq = irq_of_parse_and_map(op->dev.of_node, 0);
299 res = devm_request_irq(&op->dev, pdata->irq, 358 res = devm_request_irq(&op->dev, pdata->irq,
300 mpc85xx_pci_isr, IRQF_DISABLED, 359 mpc85xx_pci_isr,
360 IRQF_DISABLED | IRQF_SHARED,
301 "[EDAC] PCI err", pci); 361 "[EDAC] PCI err", pci);
302 if (res < 0) { 362 if (res < 0) {
303 printk(KERN_ERR 363 printk(KERN_ERR
@@ -312,6 +372,22 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
312 pdata->irq); 372 pdata->irq);
313 } 373 }
314 374
375 if (pdata->is_pcie) {
376 /*
377 * Enable all PCIe error interrupt & error detect except invalid
378 * PEX_CONFIG_ADDR/PEX_CONFIG_DATA access interrupt generation
379 * enable bit and invalid PEX_CONFIG_ADDR/PEX_CONFIG_DATA access
380 * detection enable bit. Because PCIe bus code to initialize and
381 * configure these PCIe devices on booting will use some invalid
382 * PEX_CONFIG_ADDR/PEX_CONFIG_DATA, edac driver prints the much
383 * notice information. So disable this detect to fix ugly print.
384 */
385 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0
386 & ~PEX_ERR_ICCAIE_EN_BIT);
387 out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR, 0
388 | PEX_ERR_ICCAD_DISR_BIT);
389 }
390
315 devres_remove_group(&op->dev, mpc85xx_pci_err_probe); 391 devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
316 edac_dbg(3, "success\n"); 392 edac_dbg(3, "success\n");
317 printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n"); 393 printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
index 932016f2cf06..8c6256436227 100644
--- a/drivers/edac/mpc85xx_edac.h
+++ b/drivers/edac/mpc85xx_edac.h
@@ -134,13 +134,19 @@
134#define MPC85XX_PCI_ERR_DR 0x0000 134#define MPC85XX_PCI_ERR_DR 0x0000
135#define MPC85XX_PCI_ERR_CAP_DR 0x0004 135#define MPC85XX_PCI_ERR_CAP_DR 0x0004
136#define MPC85XX_PCI_ERR_EN 0x0008 136#define MPC85XX_PCI_ERR_EN 0x0008
137#define PEX_ERR_ICCAIE_EN_BIT 0x00020000
137#define MPC85XX_PCI_ERR_ATTRIB 0x000c 138#define MPC85XX_PCI_ERR_ATTRIB 0x000c
138#define MPC85XX_PCI_ERR_ADDR 0x0010 139#define MPC85XX_PCI_ERR_ADDR 0x0010
140#define PEX_ERR_ICCAD_DISR_BIT 0x00020000
139#define MPC85XX_PCI_ERR_EXT_ADDR 0x0014 141#define MPC85XX_PCI_ERR_EXT_ADDR 0x0014
140#define MPC85XX_PCI_ERR_DL 0x0018 142#define MPC85XX_PCI_ERR_DL 0x0018
141#define MPC85XX_PCI_ERR_DH 0x001c 143#define MPC85XX_PCI_ERR_DH 0x001c
142#define MPC85XX_PCI_GAS_TIMR 0x0020 144#define MPC85XX_PCI_GAS_TIMR 0x0020
143#define MPC85XX_PCI_PCIX_TIMR 0x0024 145#define MPC85XX_PCI_PCIX_TIMR 0x0024
146#define MPC85XX_PCIE_ERR_CAP_R0 0x0028
147#define MPC85XX_PCIE_ERR_CAP_R1 0x002c
148#define MPC85XX_PCIE_ERR_CAP_R2 0x0030
149#define MPC85XX_PCIE_ERR_CAP_R3 0x0034
144 150
145struct mpc85xx_mc_pdata { 151struct mpc85xx_mc_pdata {
146 char *name; 152 char *name;
@@ -158,6 +164,7 @@ struct mpc85xx_l2_pdata {
158 164
159struct mpc85xx_pci_pdata { 165struct mpc85xx_pci_pdata {
160 char *name; 166 char *name;
167 bool is_pcie;
161 int edac_idx; 168 int edac_idx;
162 void __iomem *pci_vbase; 169 void __iomem *pci_vbase;
163 int irq; 170 int irq;
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 2fd6a5490905..8f936bc7a010 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -383,7 +383,7 @@ static void r82600_remove_one(struct pci_dev *pdev)
383 edac_mc_free(mci); 383 edac_mc_free(mci);
384} 384}
385 385
386static DEFINE_PCI_DEVICE_TABLE(r82600_pci_tbl) = { 386static const struct pci_device_id r82600_pci_tbl[] = {
387 { 387 {
388 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID) 388 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
389 }, 389 },
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index d7f1b57bd3be..54e2abe671f7 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -461,7 +461,7 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
461/* 461/*
462 * pci_device_id table for which devices we are looking for 462 * pci_device_id table for which devices we are looking for
463 */ 463 */
464static DEFINE_PCI_DEVICE_TABLE(sbridge_pci_tbl) = { 464static const struct pci_device_id sbridge_pci_tbl[] = {
465 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)}, 465 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
466 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)}, 466 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
467 {0,} /* 0 terminated list. */ 467 {0,} /* 0 terminated list. */
@@ -915,7 +915,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
915 } 915 }
916} 916}
917 917
918struct mem_ctl_info *get_mci_for_node_id(u8 node_id) 918static struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
919{ 919{
920 struct sbridge_dev *sbridge_dev; 920 struct sbridge_dev *sbridge_dev;
921 921
@@ -1829,6 +1829,9 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
1829 struct mem_ctl_info *mci; 1829 struct mem_ctl_info *mci;
1830 struct sbridge_pvt *pvt; 1830 struct sbridge_pvt *pvt;
1831 1831
1832 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
1833 return NOTIFY_DONE;
1834
1832 mci = get_mci_for_node_id(mce->socketid); 1835 mci = get_mci_for_node_id(mce->socketid);
1833 if (!mci) 1836 if (!mci)
1834 return NOTIFY_BAD; 1837 return NOTIFY_BAD;
@@ -2142,9 +2145,10 @@ static int __init sbridge_init(void)
2142 opstate_init(); 2145 opstate_init();
2143 2146
2144 pci_rc = pci_register_driver(&sbridge_driver); 2147 pci_rc = pci_register_driver(&sbridge_driver);
2145
2146 if (pci_rc >= 0) { 2148 if (pci_rc >= 0) {
2147 mce_register_decode_chain(&sbridge_mce_dec); 2149 mce_register_decode_chain(&sbridge_mce_dec);
2150 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
2151 sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
2148 return 0; 2152 return 0;
2149 } 2153 }
2150 2154
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 1a4df82376ba..4891b450830b 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -448,7 +448,7 @@ static void x38_remove_one(struct pci_dev *pdev)
448 edac_mc_free(mci); 448 edac_mc_free(mci);
449} 449}
450 450
451static DEFINE_PCI_DEVICE_TABLE(x38_pci_tbl) = { 451static const struct pci_device_id x38_pci_tbl[] = {
452 { 452 {
453 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 453 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
454 X38}, 454 X38},
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 6aecbc86ec94..1e75f48b61f8 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -36,6 +36,17 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
36 backend for pstore by default. This setting can be overridden 36 backend for pstore by default. This setting can be overridden
37 using the efivars module's pstore_disable parameter. 37 using the efivars module's pstore_disable parameter.
38 38
39config EFI_RUNTIME_MAP
40 bool "Export efi runtime maps to sysfs"
41 depends on X86 && EFI && KEXEC
42 default y
43 help
44 Export efi runtime memory maps to /sys/firmware/efi/runtime-map.
45 That memory map is used for example by kexec to set up efi virtual
46 mapping the 2nd kernel, but can also be used for debugging purposes.
47
48 See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map.
49
39endmenu 50endmenu
40 51
41config UEFI_CPER 52config UEFI_CPER
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 6c2a41ec21ba..9553496b0f43 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_EFI) += efi.o vars.o
5obj-$(CONFIG_EFI_VARS) += efivars.o 5obj-$(CONFIG_EFI_VARS) += efivars.o
6obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o 6obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
7obj-$(CONFIG_UEFI_CPER) += cper.o 7obj-$(CONFIG_UEFI_CPER) += cper.o
8obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 2e2fbdec0845..4753bac65279 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -32,6 +32,9 @@ struct efi __read_mostly efi = {
32 .hcdp = EFI_INVALID_TABLE_ADDR, 32 .hcdp = EFI_INVALID_TABLE_ADDR,
33 .uga = EFI_INVALID_TABLE_ADDR, 33 .uga = EFI_INVALID_TABLE_ADDR,
34 .uv_systab = EFI_INVALID_TABLE_ADDR, 34 .uv_systab = EFI_INVALID_TABLE_ADDR,
35 .fw_vendor = EFI_INVALID_TABLE_ADDR,
36 .runtime = EFI_INVALID_TABLE_ADDR,
37 .config_table = EFI_INVALID_TABLE_ADDR,
35}; 38};
36EXPORT_SYMBOL(efi); 39EXPORT_SYMBOL(efi);
37 40
@@ -71,13 +74,49 @@ static ssize_t systab_show(struct kobject *kobj,
71static struct kobj_attribute efi_attr_systab = 74static struct kobj_attribute efi_attr_systab =
72 __ATTR(systab, 0400, systab_show, NULL); 75 __ATTR(systab, 0400, systab_show, NULL);
73 76
77#define EFI_FIELD(var) efi.var
78
79#define EFI_ATTR_SHOW(name) \
80static ssize_t name##_show(struct kobject *kobj, \
81 struct kobj_attribute *attr, char *buf) \
82{ \
83 return sprintf(buf, "0x%lx\n", EFI_FIELD(name)); \
84}
85
86EFI_ATTR_SHOW(fw_vendor);
87EFI_ATTR_SHOW(runtime);
88EFI_ATTR_SHOW(config_table);
89
90static struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor);
91static struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime);
92static struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table);
93
74static struct attribute *efi_subsys_attrs[] = { 94static struct attribute *efi_subsys_attrs[] = {
75 &efi_attr_systab.attr, 95 &efi_attr_systab.attr,
76 NULL, /* maybe more in the future? */ 96 &efi_attr_fw_vendor.attr,
97 &efi_attr_runtime.attr,
98 &efi_attr_config_table.attr,
99 NULL,
77}; 100};
78 101
102static umode_t efi_attr_is_visible(struct kobject *kobj,
103 struct attribute *attr, int n)
104{
105 umode_t mode = attr->mode;
106
107 if (attr == &efi_attr_fw_vendor.attr)
108 return (efi.fw_vendor == EFI_INVALID_TABLE_ADDR) ? 0 : mode;
109 else if (attr == &efi_attr_runtime.attr)
110 return (efi.runtime == EFI_INVALID_TABLE_ADDR) ? 0 : mode;
111 else if (attr == &efi_attr_config_table.attr)
112 return (efi.config_table == EFI_INVALID_TABLE_ADDR) ? 0 : mode;
113
114 return mode;
115}
116
79static struct attribute_group efi_subsys_attr_group = { 117static struct attribute_group efi_subsys_attr_group = {
80 .attrs = efi_subsys_attrs, 118 .attrs = efi_subsys_attrs,
119 .is_visible = efi_attr_is_visible,
81}; 120};
82 121
83static struct efivars generic_efivars; 122static struct efivars generic_efivars;
@@ -128,6 +167,10 @@ static int __init efisubsys_init(void)
128 goto err_unregister; 167 goto err_unregister;
129 } 168 }
130 169
170 error = efi_runtime_map_init(efi_kobj);
171 if (error)
172 goto err_remove_group;
173
131 /* and the standard mountpoint for efivarfs */ 174 /* and the standard mountpoint for efivarfs */
132 efivars_kobj = kobject_create_and_add("efivars", efi_kobj); 175 efivars_kobj = kobject_create_and_add("efivars", efi_kobj);
133 if (!efivars_kobj) { 176 if (!efivars_kobj) {
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
new file mode 100644
index 000000000000..97cdd16a2169
--- /dev/null
+++ b/drivers/firmware/efi/runtime-map.c
@@ -0,0 +1,181 @@
1/*
2 * linux/drivers/efi/runtime-map.c
3 * Copyright (C) 2013 Red Hat, Inc., Dave Young <dyoung@redhat.com>
4 *
5 * This file is released under the GPLv2.
6 */
7
8#include <linux/string.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/efi.h>
13#include <linux/slab.h>
14
15#include <asm/setup.h>
16
17static void *efi_runtime_map;
18static int nr_efi_runtime_map;
19static u32 efi_memdesc_size;
20
21struct efi_runtime_map_entry {
22 efi_memory_desc_t md;
23 struct kobject kobj; /* kobject for each entry */
24};
25
26static struct efi_runtime_map_entry **map_entries;
27
28struct map_attribute {
29 struct attribute attr;
30 ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf);
31};
32
33static inline struct map_attribute *to_map_attr(struct attribute *attr)
34{
35 return container_of(attr, struct map_attribute, attr);
36}
37
38static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf)
39{
40 return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type);
41}
42
43#define EFI_RUNTIME_FIELD(var) entry->md.var
44
45#define EFI_RUNTIME_U64_ATTR_SHOW(name) \
46static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \
47{ \
48 return snprintf(buf, PAGE_SIZE, "0x%llx\n", EFI_RUNTIME_FIELD(name)); \
49}
50
51EFI_RUNTIME_U64_ATTR_SHOW(phys_addr);
52EFI_RUNTIME_U64_ATTR_SHOW(virt_addr);
53EFI_RUNTIME_U64_ATTR_SHOW(num_pages);
54EFI_RUNTIME_U64_ATTR_SHOW(attribute);
55
56static inline struct efi_runtime_map_entry *to_map_entry(struct kobject *kobj)
57{
58 return container_of(kobj, struct efi_runtime_map_entry, kobj);
59}
60
61static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
62 char *buf)
63{
64 struct efi_runtime_map_entry *entry = to_map_entry(kobj);
65 struct map_attribute *map_attr = to_map_attr(attr);
66
67 return map_attr->show(entry, buf);
68}
69
70static struct map_attribute map_type_attr = __ATTR_RO(type);
71static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr);
72static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr);
73static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages);
74static struct map_attribute map_attribute_attr = __ATTR_RO(attribute);
75
76/*
77 * These are default attributes that are added for every memmap entry.
78 */
79static struct attribute *def_attrs[] = {
80 &map_type_attr.attr,
81 &map_phys_addr_attr.attr,
82 &map_virt_addr_attr.attr,
83 &map_num_pages_attr.attr,
84 &map_attribute_attr.attr,
85 NULL
86};
87
88static const struct sysfs_ops map_attr_ops = {
89 .show = map_attr_show,
90};
91
92static void map_release(struct kobject *kobj)
93{
94 struct efi_runtime_map_entry *entry;
95
96 entry = to_map_entry(kobj);
97 kfree(entry);
98}
99
100static struct kobj_type __refdata map_ktype = {
101 .sysfs_ops = &map_attr_ops,
102 .default_attrs = def_attrs,
103 .release = map_release,
104};
105
106static struct kset *map_kset;
107
108static struct efi_runtime_map_entry *
109add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
110{
111 int ret;
112 struct efi_runtime_map_entry *entry;
113
114 if (!map_kset) {
115 map_kset = kset_create_and_add("runtime-map", NULL, kobj);
116 if (!map_kset)
117 return ERR_PTR(-ENOMEM);
118 }
119
120 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
121 if (!entry) {
122 kset_unregister(map_kset);
123 return entry;
124 }
125
126 memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size,
127 sizeof(efi_memory_desc_t));
128
129 kobject_init(&entry->kobj, &map_ktype);
130 entry->kobj.kset = map_kset;
131 ret = kobject_add(&entry->kobj, NULL, "%d", nr);
132 if (ret) {
133 kobject_put(&entry->kobj);
134 kset_unregister(map_kset);
135 return ERR_PTR(ret);
136 }
137
138 return entry;
139}
140
141void efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size)
142{
143 efi_runtime_map = map;
144 nr_efi_runtime_map = nr_entries;
145 efi_memdesc_size = desc_size;
146}
147
148int __init efi_runtime_map_init(struct kobject *efi_kobj)
149{
150 int i, j, ret = 0;
151 struct efi_runtime_map_entry *entry;
152
153 if (!efi_runtime_map)
154 return 0;
155
156 map_entries = kzalloc(nr_efi_runtime_map * sizeof(entry), GFP_KERNEL);
157 if (!map_entries) {
158 ret = -ENOMEM;
159 goto out;
160 }
161
162 for (i = 0; i < nr_efi_runtime_map; i++) {
163 entry = add_sysfs_runtime_map_entry(efi_kobj, i);
164 if (IS_ERR(entry)) {
165 ret = PTR_ERR(entry);
166 goto out_add_entry;
167 }
168 *(map_entries + i) = entry;
169 }
170
171 return 0;
172out_add_entry:
173 for (j = i - 1; j > 0; j--) {
174 entry = *(map_entries + j);
175 kobject_put(&entry->kobj);
176 }
177 if (map_kset)
178 kset_unregister(map_kset);
179out:
180 return ret;
181}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 85071a1c4547..b0733153dfd2 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1041,7 +1041,7 @@ void drm_mode_connector_list_update(struct drm_connector *connector)
1041 /* if equal delete the probed mode */ 1041 /* if equal delete the probed mode */
1042 mode->status = pmode->status; 1042 mode->status = pmode->status;
1043 /* Merge type bits together */ 1043 /* Merge type bits together */
1044 mode->type = pmode->type; 1044 mode->type |= pmode->type;
1045 list_del(&pmode->head); 1045 list_del(&pmode->head);
1046 drm_mode_destroy(connector->dev, pmode); 1046 drm_mode_destroy(connector->dev, pmode);
1047 break; 1047 break;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c79dd2b1f70e..d3c3b5b15824 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -906,14 +906,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
906 WARN_ON(readq(&gtt_entries[i-1]) 906 WARN_ON(readq(&gtt_entries[i-1])
907 != gen8_pte_encode(addr, level, true)); 907 != gen8_pte_encode(addr, level, true));
908 908
909#if 0 /* TODO: Still needed on GEN8? */
910 /* This next bit makes the above posting read even more important. We 909 /* This next bit makes the above posting read even more important. We
911 * want to flush the TLBs only after we're certain all the PTE updates 910 * want to flush the TLBs only after we're certain all the PTE updates
912 * have finished. 911 * have finished.
913 */ 912 */
914 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 913 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
915 POSTING_READ(GFX_FLSH_CNTL_GEN6); 914 POSTING_READ(GFX_FLSH_CNTL_GEN6);
916#endif
917} 915}
918 916
919/* 917/*
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5d1dedc02f15..f13d5edc39d5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2713,6 +2713,8 @@ static void gen8_irq_preinstall(struct drm_device *dev)
2713#undef GEN8_IRQ_INIT_NDX 2713#undef GEN8_IRQ_INIT_NDX
2714 2714
2715 POSTING_READ(GEN8_PCU_IIR); 2715 POSTING_READ(GEN8_PCU_IIR);
2716
2717 ibx_irq_preinstall(dev);
2716} 2718}
2717 2719
2718static void ibx_hpd_irq_setup(struct drm_device *dev) 2720static void ibx_hpd_irq_setup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 526c8ded16b0..b69dc3e66c16 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1057,12 +1057,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
1057 enum pipe pipe; 1057 enum pipe pipe;
1058 struct intel_crtc *intel_crtc; 1058 struct intel_crtc *intel_crtc;
1059 1059
1060 dev_priv->ddi_plls.spll_refcount = 0;
1061 dev_priv->ddi_plls.wrpll1_refcount = 0;
1062 dev_priv->ddi_plls.wrpll2_refcount = 0;
1063
1060 for_each_pipe(pipe) { 1064 for_each_pipe(pipe) {
1061 intel_crtc = 1065 intel_crtc =
1062 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 1066 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1063 1067
1064 if (!intel_crtc->active) 1068 if (!intel_crtc->active) {
1069 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
1065 continue; 1070 continue;
1071 }
1066 1072
1067 intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv, 1073 intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
1068 pipe); 1074 pipe);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 54e82a80cf50..2bde35d34eb9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10541,11 +10541,20 @@ static struct intel_quirk intel_quirks[] = {
10541 /* Sony Vaio Y cannot use SSC on LVDS */ 10541 /* Sony Vaio Y cannot use SSC on LVDS */
10542 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 10542 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
10543 10543
10544 /* 10544 /* Acer Aspire 5734Z must invert backlight brightness */
10545 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops 10545 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
10546 * seem to use inverted backlight PWM. 10546
10547 */ 10547 /* Acer/eMachines G725 */
10548 { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness }, 10548 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
10549
10550 /* Acer/eMachines e725 */
10551 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
10552
10553 /* Acer/Packard Bell NCL20 */
10554 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
10555
10556 /* Acer Aspire 4736Z */
10557 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
10549 10558
10550 /* Dell XPS13 HD Sandy Bridge */ 10559 /* Dell XPS13 HD Sandy Bridge */
10551 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, 10560 { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
@@ -11044,10 +11053,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
11044 11053
11045 intel_setup_overlay(dev); 11054 intel_setup_overlay(dev);
11046 11055
11047 drm_modeset_lock_all(dev); 11056 mutex_lock(&dev->mode_config.mutex);
11048 drm_mode_config_reset(dev); 11057 drm_mode_config_reset(dev);
11049 intel_modeset_setup_hw_state(dev, false); 11058 intel_modeset_setup_hw_state(dev, false);
11050 drm_modeset_unlock_all(dev); 11059 mutex_unlock(&dev->mode_config.mutex);
11051} 11060}
11052 11061
11053void intel_modeset_cleanup(struct drm_device *dev) 11062void intel_modeset_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index 48f06378d3f9..2ea5568b6cf5 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent,
104 104
105 if (parent) { 105 if (parent) {
106 struct nouveau_device *device = nv_device(parent); 106 struct nouveau_device *device = nv_device(parent);
107 int subidx = nv_hclass(subdev) & 0xff;
108
109 subdev->debug = nouveau_dbgopt(device->dbgopt, subname); 107 subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
110 subdev->mmio = nv_subdev(device)->mmio; 108 subdev->mmio = nv_subdev(device)->mmio;
111 device->subdev[subidx] = *pobject;
112 } 109 }
113 110
114 return 0; 111 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 9135b25a29d0..dd01c6c435d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
268 if (ret) 268 if (ret)
269 return ret; 269 return ret;
270 270
271 device->subdev[i] = devobj->subdev[i];
272
271 /* note: can't init *any* subdevs until devinit has been run 273 /* note: can't init *any* subdevs until devinit has been run
272 * due to not knowing exactly what the vbios init tables will 274 * due to not knowing exactly what the vbios init tables will
273 * mess with. devinit also can't be run until all of its 275 * mess with. devinit also can't be run until all of its
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d06eef2b9ee..dbc5e33de94f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device)
161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass; 161 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; 162 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass; 163 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
164 device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass; 164 device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; 165 device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 166 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass; 167 device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 434bb4b0fa2e..5c8a63dc506a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds)
334 while ((mthd = &mthds[i++]) && (init = mthd->init)) { 334 while ((mthd = &mthds[i++]) && (init = mthd->init)) {
335 u32 addr = 0x80000000 | mthd->oclass; 335 u32 addr = 0x80000000 | mthd->oclass;
336 for (data = 0; init->count; init++) { 336 for (data = 0; init->count; init++) {
337 if (data != init->data) { 337 if (init == mthd->init || data != init->data) {
338 nv_wr32(priv, 0x40448c, init->data); 338 nv_wr32(priv, 0x40448c, init->data);
339 data = init->data; 339 data = init->data;
340 } 340 }
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 8541aa382ff2..d89dbdf39b0d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -75,6 +75,11 @@ struct nouveau_fb {
75static inline struct nouveau_fb * 75static inline struct nouveau_fb *
76nouveau_fb(void *obj) 76nouveau_fb(void *obj)
77{ 77{
78 /* fbram uses this before device subdev pointer is valid */
79 if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
80 nv_subidx(obj) == NVDEV_SUBDEV_FB)
81 return obj;
82
78 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB]; 83 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
79} 84}
80 85
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 9fa5da723871..7f50a858b16f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -73,7 +73,7 @@ struct nouveau_i2c {
73 int (*identify)(struct nouveau_i2c *, int index, 73 int (*identify)(struct nouveau_i2c *, int index,
74 const char *what, struct nouveau_i2c_board_info *, 74 const char *what, struct nouveau_i2c_board_info *,
75 bool (*match)(struct nouveau_i2c_port *, 75 bool (*match)(struct nouveau_i2c_port *,
76 struct i2c_board_info *)); 76 struct i2c_board_info *, void *), void *);
77 struct list_head ports; 77 struct list_head ports;
78}; 78};
79 79
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
index ec7a54e91a08..4aca33887aaa 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -50,6 +50,13 @@ struct nouveau_instmem {
50static inline struct nouveau_instmem * 50static inline struct nouveau_instmem *
51nouveau_instmem(void *obj) 51nouveau_instmem(void *obj)
52{ 52{
53 /* nv04/nv40 impls need to create objects in their constructor,
54 * which is before the subdev pointer is valid
55 */
56 if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
57 nv_subidx(obj) == NVDEV_SUBDEV_INSTMEM)
58 return obj;
59
53 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM]; 60 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
54} 61}
55 62
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 420908cb82b6..df1b1b423093 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -365,13 +365,13 @@ static u16
365init_script(struct nouveau_bios *bios, int index) 365init_script(struct nouveau_bios *bios, int index)
366{ 366{
367 struct nvbios_init init = { .bios = bios }; 367 struct nvbios_init init = { .bios = bios };
368 u16 data; 368 u16 bmp_ver = bmp_version(bios), data;
369 369
370 if (bmp_version(bios) && bmp_version(bios) < 0x0510) { 370 if (bmp_ver && bmp_ver < 0x0510) {
371 if (index > 1) 371 if (index > 1 || bmp_ver < 0x0100)
372 return 0x0000; 372 return 0x0000;
373 373
374 data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18); 374 data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
375 return nv_ro16(bios, data + (index * 2)); 375 return nv_ro16(bios, data + (index * 2));
376 } 376 }
377 377
@@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init)
1294 u16 offset = nv_ro16(bios, init->offset + 1); 1294 u16 offset = nv_ro16(bios, init->offset + 1);
1295 1295
1296 trace("JUMP\t0x%04x\n", offset); 1296 trace("JUMP\t0x%04x\n", offset);
1297 init->offset = offset; 1297
1298 if (init_exec(init))
1299 init->offset = offset;
1300 else
1301 init->offset += 3;
1298} 1302}
1299 1303
1300/** 1304/**
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 041fd5edaebf..c33c03d2f4af 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -197,7 +197,7 @@ static int
197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what, 197nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
198 struct nouveau_i2c_board_info *info, 198 struct nouveau_i2c_board_info *info,
199 bool (*match)(struct nouveau_i2c_port *, 199 bool (*match)(struct nouveau_i2c_port *,
200 struct i2c_board_info *)) 200 struct i2c_board_info *, void *), void *data)
201{ 201{
202 struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index); 202 struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
203 int i; 203 int i;
@@ -221,7 +221,7 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
221 } 221 }
222 222
223 if (nv_probe_i2c(port, info[i].dev.addr) && 223 if (nv_probe_i2c(port, info[i].dev.addr) &&
224 (!match || match(port, &info[i].dev))) { 224 (!match || match(port, &info[i].dev, data))) {
225 nv_info(i2c, "detected %s: %s\n", what, 225 nv_info(i2c, "detected %s: %s\n", what,
226 info[i].dev.type); 226 info[i].dev.type);
227 return i; 227 return i;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
index af129c2e8113..64f8b4702bf7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
@@ -100,7 +100,7 @@ mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
100static int 100static int
101mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb) 101mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
102{ 102{
103 struct nouveau_mxm *mxm = nouveau_mxm(bios); 103 struct nouveau_mxm *mxm = data;
104 struct context ctx = { .outp = (u32 *)(bios->data + pdcb) }; 104 struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
105 u8 type, i2cidx, link, ver, len; 105 u8 type, i2cidx, link, ver, len;
106 u8 *conn; 106 u8 *conn;
@@ -199,7 +199,7 @@ mxm_dcb_sanitise(struct nouveau_mxm *mxm)
199 return; 199 return;
200 } 200 }
201 201
202 dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry); 202 dcb_outp_foreach(bios, mxm, mxm_dcb_sanitise_entry);
203 mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL); 203 mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
204} 204}
205 205
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e44ed7b93c6d..7610fc5f8fa2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -29,9 +29,9 @@
29 29
30static bool 30static bool
31probe_monitoring_device(struct nouveau_i2c_port *i2c, 31probe_monitoring_device(struct nouveau_i2c_port *i2c,
32 struct i2c_board_info *info) 32 struct i2c_board_info *info, void *data)
33{ 33{
34 struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); 34 struct nouveau_therm_priv *priv = data;
35 struct nvbios_therm_sensor *sensor = &priv->bios_sensor; 35 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
36 struct i2c_client *client; 36 struct i2c_client *client;
37 37
@@ -96,7 +96,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
96 }; 96 };
97 97
98 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 98 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
99 board, probe_monitoring_device); 99 board, probe_monitoring_device, therm);
100 if (priv->ic) 100 if (priv->ic)
101 return; 101 return;
102 } 102 }
@@ -108,7 +108,7 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
108 }; 108 };
109 109
110 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 110 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
111 board, probe_monitoring_device); 111 board, probe_monitoring_device, therm);
112 if (priv->ic) 112 if (priv->ic)
113 return; 113 return;
114 } 114 }
@@ -117,5 +117,5 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
117 device. Let's try our static list. 117 device. Let's try our static list.
118 */ 118 */
119 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", 119 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
120 nv_board_infos, probe_monitoring_device); 120 nv_board_infos, probe_monitoring_device, therm);
121} 121}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 936a71c59080..7fdc51e2a571 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -643,7 +643,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
643 get_tmds_slave(encoder)) 643 get_tmds_slave(encoder))
644 return; 644 return;
645 645
646 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL); 646 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL, NULL);
647 if (type < 0) 647 if (type < 0)
648 return; 648 return;
649 649
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index cc4b208ce546..244822df8ffc 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -59,7 +59,7 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
59 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 59 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
60 60
61 return i2c->identify(i2c, i2c_index, "TV encoder", 61 return i2c->identify(i2c, i2c_index, "TV encoder",
62 nv04_tv_encoder_info, NULL); 62 nv04_tv_encoder_info, NULL, NULL);
63} 63}
64 64
65 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 6828d81ed7b9..900fae01793e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
447 if (ret) 447 if (ret)
448 goto done; 448 goto done;
449 449
450 info->offset = ntfy->node->offset;
451
450done: 452done:
451 if (ret) 453 if (ret)
452 nouveau_abi16_ntfy_fini(chan, ntfy); 454 nouveau_abi16_ntfy_fini(chan, ntfy);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 95c740454049..ba0183fb84f3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
51 bool dsm_detected; 51 bool dsm_detected;
52 bool optimus_detected; 52 bool optimus_detected;
53 acpi_handle dhandle; 53 acpi_handle dhandle;
54 acpi_handle other_handle;
54 acpi_handle rom_handle; 55 acpi_handle rom_handle;
55} nouveau_dsm_priv; 56} nouveau_dsm_priv;
56 57
@@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
260 if (!dhandle) 261 if (!dhandle)
261 return false; 262 return false;
262 263
263 if (!acpi_has_method(dhandle, "_DSM")) 264 if (!acpi_has_method(dhandle, "_DSM")) {
265 nouveau_dsm_priv.other_handle = dhandle;
264 return false; 266 return false;
265 267 }
266 if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER)) 268 if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
267 retval |= NOUVEAU_DSM_HAS_MUX; 269 retval |= NOUVEAU_DSM_HAS_MUX;
268 270
@@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void)
338 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", 340 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
339 acpi_method_name); 341 acpi_method_name);
340 nouveau_dsm_priv.dsm_detected = true; 342 nouveau_dsm_priv.dsm_detected = true;
343 /*
344 * On some systems hotplug events are generated for the device
345 * being switched off when _DSM is executed. They cause ACPI
346 * hotplug to trigger and attempt to remove the device from
347 * the system, which causes it to break down. Prevent that from
348 * happening by setting the no_hotplug flag for the involved
349 * ACPI device objects.
350 */
351 acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
352 acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
341 ret = true; 353 ret = true;
342 } 354 }
343 355
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 29c3efdfc7dd..25ea82f8def3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -610,7 +610,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
610 ret = nouveau_fence_sync(fence, chan); 610 ret = nouveau_fence_sync(fence, chan);
611 nouveau_fence_unref(&fence); 611 nouveau_fence_unref(&fence);
612 if (ret) 612 if (ret)
613 return ret; 613 goto fail_free;
614 614
615 if (new_bo != old_bo) { 615 if (new_bo != old_bo) {
616 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); 616 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b1970596a782..0b9621c9aeea 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1143 } 1143 }
1144 1144
1145 if (tiling_flags & RADEON_TILING_MACRO) { 1145 if (tiling_flags & RADEON_TILING_MACRO) {
1146 if (rdev->family >= CHIP_BONAIRE) 1146 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
1147 tmp = rdev->config.cik.tile_config;
1148 else if (rdev->family >= CHIP_TAHITI)
1149 tmp = rdev->config.si.tile_config;
1150 else if (rdev->family >= CHIP_CAYMAN)
1151 tmp = rdev->config.cayman.tile_config;
1152 else
1153 tmp = rdev->config.evergreen.tile_config;
1154 1147
1155 switch ((tmp & 0xf0) >> 4) { 1148 /* Set NUM_BANKS. */
1156 case 0: /* 4 banks */ 1149 if (rdev->family >= CHIP_BONAIRE) {
1157 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK); 1150 unsigned tileb, index, num_banks, tile_split_bytes;
1158 break; 1151
1159 case 1: /* 8 banks */ 1152 /* Calculate the macrotile mode index. */
1160 default: 1153 tile_split_bytes = 64 << tile_split;
1161 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK); 1154 tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
1162 break; 1155 tileb = min(tile_split_bytes, tileb);
1163 case 2: /* 16 banks */ 1156
1164 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK); 1157 for (index = 0; tileb > 64; index++) {
1165 break; 1158 tileb >>= 1;
1159 }
1160
1161 if (index >= 16) {
1162 DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
1163 target_fb->bits_per_pixel, tile_split);
1164 return -EINVAL;
1165 }
1166
1167 num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
1168 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
1169 } else {
1170 /* SI and older. */
1171 if (rdev->family >= CHIP_TAHITI)
1172 tmp = rdev->config.si.tile_config;
1173 else if (rdev->family >= CHIP_CAYMAN)
1174 tmp = rdev->config.cayman.tile_config;
1175 else
1176 tmp = rdev->config.evergreen.tile_config;
1177
1178 switch ((tmp & 0xf0) >> 4) {
1179 case 0: /* 4 banks */
1180 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
1181 break;
1182 case 1: /* 8 banks */
1183 default:
1184 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
1185 break;
1186 case 2: /* 16 banks */
1187 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
1188 break;
1189 }
1166 } 1190 }
1167 1191
1168 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); 1192 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
1169
1170 evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
1171 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split); 1193 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
1172 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw); 1194 fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
1173 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh); 1195 fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
@@ -1180,19 +1202,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1180 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1202 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1181 1203
1182 if (rdev->family >= CHIP_BONAIRE) { 1204 if (rdev->family >= CHIP_BONAIRE) {
1183 u32 num_pipe_configs = rdev->config.cik.max_tile_pipes; 1205 /* Read the pipe config from the 2D TILED SCANOUT mode.
1184 u32 num_rb = rdev->config.cik.max_backends_per_se; 1206 * It should be the same for the other modes too, but not all
1185 if (num_pipe_configs > 8) 1207 * modes set the pipe config field. */
1186 num_pipe_configs = 8; 1208 u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
1187 if (num_pipe_configs == 8) 1209
1188 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16); 1210 fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
1189 else if (num_pipe_configs == 4) {
1190 if (num_rb == 4)
1191 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
1192 else if (num_rb < 4)
1193 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
1194 } else if (num_pipe_configs == 2)
1195 fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
1196 } else if ((rdev->family == CHIP_TAHITI) || 1211 } else if ((rdev->family == CHIP_TAHITI) ||
1197 (rdev->family == CHIP_PITCAIRN)) 1212 (rdev->family == CHIP_PITCAIRN))
1198 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16); 1213 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b43a3a3c9067..e950fabd7f5e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
3057 * Returns the disabled RB bitmask. 3057 * Returns the disabled RB bitmask.
3058 */ 3058 */
3059static u32 cik_get_rb_disabled(struct radeon_device *rdev, 3059static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3060 u32 max_rb_num, u32 se_num, 3060 u32 max_rb_num_per_se,
3061 u32 sh_per_se) 3061 u32 sh_per_se)
3062{ 3062{
3063 u32 data, mask; 3063 u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3071 3071
3072 data >>= BACKEND_DISABLE_SHIFT; 3072 data >>= BACKEND_DISABLE_SHIFT;
3073 3073
3074 mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se); 3074 mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
3075 3075
3076 return data & mask; 3076 return data & mask;
3077} 3077}
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3088 */ 3088 */
3089static void cik_setup_rb(struct radeon_device *rdev, 3089static void cik_setup_rb(struct radeon_device *rdev,
3090 u32 se_num, u32 sh_per_se, 3090 u32 se_num, u32 sh_per_se,
3091 u32 max_rb_num) 3091 u32 max_rb_num_per_se)
3092{ 3092{
3093 int i, j; 3093 int i, j;
3094 u32 data, mask; 3094 u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
3098 for (i = 0; i < se_num; i++) { 3098 for (i = 0; i < se_num; i++) {
3099 for (j = 0; j < sh_per_se; j++) { 3099 for (j = 0; j < sh_per_se; j++) {
3100 cik_select_se_sh(rdev, i, j); 3100 cik_select_se_sh(rdev, i, j);
3101 data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); 3101 data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
3102 if (rdev->family == CHIP_HAWAII) 3102 if (rdev->family == CHIP_HAWAII)
3103 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH); 3103 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
3104 else 3104 else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
3108 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 3108 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3109 3109
3110 mask = 1; 3110 mask = 1;
3111 for (i = 0; i < max_rb_num; i++) { 3111 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
3112 if (!(disabled_rbs & mask)) 3112 if (!(disabled_rbs & mask))
3113 enabled_rbs |= mask; 3113 enabled_rbs |= mask;
3114 mask <<= 1; 3114 mask <<= 1;
3115 } 3115 }
3116 3116
3117 rdev->config.cik.backend_enable_mask = enabled_rbs;
3118
3117 for (i = 0; i < se_num; i++) { 3119 for (i = 0; i < se_num; i++) {
3118 cik_select_se_sh(rdev, i, 0xffffffff); 3120 cik_select_se_sh(rdev, i, 0xffffffff);
3119 data = 0; 3121 data = 0;
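cik_get_rb_disabled()/cik_setup_rb() above now treat the RB count as a per-shader-engine value: the per-SH mask divides only by sh_per_se, the enable loop walks max_rb_num_per_se * se_num bits, and the result is stored in backend_enable_mask for the new query. A small model of the mask arithmetic with made-up counts:

/*
 * Model of the render-backend mask arithmetic in cik_setup_rb()/si_setup_rb():
 * per-SH disable bits are collected, then every bit not set in the disabled
 * mask over max_rb_num_per_se * se_num backends becomes an enabled backend.
 * The counts and the disabled mask below are illustrative, not hardware reads.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t create_bitmask(uint32_t bit_width)
{
	return (1u << bit_width) - 1;
}

int main(void)
{
	uint32_t se_num = 2, sh_per_se = 1, max_rb_num_per_se = 4;
	uint32_t disabled_rbs = 0x22;	/* pretend one RB per SE is fused off */
	uint32_t enabled_rbs = 0, mask = 1, i;

	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	printf("per-SH disable mask = 0x%x\n",
	       (unsigned)create_bitmask(max_rb_num_per_se / sh_per_se));
	printf("backend_enable_mask = 0x%x\n", (unsigned)enabled_rbs);	/* 0xdd */
	return 0;
}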
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b1f990d0eaa1..45e1f447bc79 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1940,7 +1940,7 @@ struct si_asic {
1940 unsigned sc_earlyz_tile_fifo_size; 1940 unsigned sc_earlyz_tile_fifo_size;
1941 1941
1942 unsigned num_tile_pipes; 1942 unsigned num_tile_pipes;
1943 unsigned num_backends_per_se; 1943 unsigned backend_enable_mask;
1944 unsigned backend_disable_mask_per_asic; 1944 unsigned backend_disable_mask_per_asic;
1945 unsigned backend_map; 1945 unsigned backend_map;
1946 unsigned num_texture_channel_caches; 1946 unsigned num_texture_channel_caches;
@@ -1970,7 +1970,7 @@ struct cik_asic {
1970 unsigned sc_earlyz_tile_fifo_size; 1970 unsigned sc_earlyz_tile_fifo_size;
1971 1971
1972 unsigned num_tile_pipes; 1972 unsigned num_tile_pipes;
1973 unsigned num_backends_per_se; 1973 unsigned backend_enable_mask;
1974 unsigned backend_disable_mask_per_asic; 1974 unsigned backend_disable_mask_per_asic;
1975 unsigned backend_map; 1975 unsigned backend_map;
1976 unsigned num_texture_channel_caches; 1976 unsigned num_texture_channel_caches;
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 9d302eaeea15..485848f889f5 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,6 +33,7 @@ static struct radeon_atpx_priv {
33 bool atpx_detected; 33 bool atpx_detected;
34 /* handle for device - and atpx */ 34 /* handle for device - and atpx */
35 acpi_handle dhandle; 35 acpi_handle dhandle;
36 acpi_handle other_handle;
36 struct radeon_atpx atpx; 37 struct radeon_atpx atpx;
37} radeon_atpx_priv; 38} radeon_atpx_priv;
38 39
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
451 return false; 452 return false;
452 453
453 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); 454 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
454 if (ACPI_FAILURE(status)) 455 if (ACPI_FAILURE(status)) {
456 radeon_atpx_priv.other_handle = dhandle;
455 return false; 457 return false;
456 458 }
457 radeon_atpx_priv.dhandle = dhandle; 459 radeon_atpx_priv.dhandle = dhandle;
458 radeon_atpx_priv.atpx.handle = atpx_handle; 460 radeon_atpx_priv.atpx.handle = atpx_handle;
459 return true; 461 return true;
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void)
530 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", 532 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
531 acpi_method_name); 533 acpi_method_name);
532 radeon_atpx_priv.atpx_detected = true; 534 radeon_atpx_priv.atpx_detected = true;
535 /*
536 * On some systems hotplug events are generated for the device
537 * being switched off when ATPX is executed. They cause ACPI
538 * hotplug to trigger and attempt to remove the device from
539 * the system, which causes it to break down. Prevent that from
540 * happening by setting the no_hotplug flag for the involved
541 * ACPI device objects.
542 */
543 acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
544 acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
533 return true; 545 return true;
534 } 546 }
535 return false; 547 return false;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 1958b36ad0e5..db39ea36bf22 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -77,9 +77,10 @@
77 * 2.33.0 - Add SI tiling mode array query 77 * 2.33.0 - Add SI tiling mode array query
78 * 2.34.0 - Add CIK tiling mode array query 78 * 2.34.0 - Add CIK tiling mode array query
79 * 2.35.0 - Add CIK macrotile mode array query 79 * 2.35.0 - Add CIK macrotile mode array query
80 * 2.36.0 - Fix CIK DCE tiling setup
80 */ 81 */
81#define KMS_DRIVER_MAJOR 2 82#define KMS_DRIVER_MAJOR 2
82#define KMS_DRIVER_MINOR 35 83#define KMS_DRIVER_MINOR 36
83#define KMS_DRIVER_PATCHLEVEL 0 84#define KMS_DRIVER_PATCHLEVEL 0
84int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 85int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
85int radeon_driver_unload_kms(struct drm_device *dev); 86int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 55d0b474bd37..21d593c0ecaf 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
461 case RADEON_INFO_SI_CP_DMA_COMPUTE: 461 case RADEON_INFO_SI_CP_DMA_COMPUTE:
462 *value = 1; 462 *value = 1;
463 break; 463 break;
464 case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
465 if (rdev->family >= CHIP_BONAIRE) {
466 *value = rdev->config.cik.backend_enable_mask;
467 } else if (rdev->family >= CHIP_TAHITI) {
468 *value = rdev->config.si.backend_enable_mask;
469 } else {
470 DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
471 }
472 break;
464 default: 473 default:
465 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 474 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
466 return -EINVAL; 475 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 373d088bac66..b9c0529b4a2e 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
473 return -EINVAL; 473 return -EINVAL;
474 } 474 }
475 475
476 if ((start >> 28) != (end >> 28)) { 476 if ((start >> 28) != ((end - 1) >> 28)) {
477 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n", 477 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
478 start, end); 478 start, end);
479 return -EINVAL; 479 return -EINVAL;
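The UVD relocation check above now compares the 256 MB segment of the last byte (end - 1) rather than of the exclusive end address, so a buffer that stops exactly on a segment boundary is no longer rejected. A tiny standalone check with example addresses:

/*
 * Model of the 256 MB segment check: 'end' is exclusive, so the last byte of
 * the range is end - 1. Addresses are arbitrary examples.
 */
#include <stdint.h>
#include <stdio.h>

static int crosses_256mb(uint64_t start, uint64_t end)
{
	return (start >> 28) != ((end - 1) >> 28);
}

int main(void)
{
	uint64_t start = 0x0ff00000, end = 0x10000000;	/* ends exactly on a boundary */

	/* The old check, (start >> 28) != (end >> 28), would reject this range. */
	printf("crosses: %d\n", crosses_256mb(start, end));	/* prints 0 */
	return 0;
}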
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index a36736dab5e0..85e1edfaa3be 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev,
2811} 2811}
2812 2812
2813static u32 si_get_rb_disabled(struct radeon_device *rdev, 2813static u32 si_get_rb_disabled(struct radeon_device *rdev,
2814 u32 max_rb_num, u32 se_num, 2814 u32 max_rb_num_per_se,
2815 u32 sh_per_se) 2815 u32 sh_per_se)
2816{ 2816{
2817 u32 data, mask; 2817 u32 data, mask;
@@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
2825 2825
2826 data >>= BACKEND_DISABLE_SHIFT; 2826 data >>= BACKEND_DISABLE_SHIFT;
2827 2827
2828 mask = si_create_bitmask(max_rb_num / se_num / sh_per_se); 2828 mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
2829 2829
2830 return data & mask; 2830 return data & mask;
2831} 2831}
2832 2832
2833static void si_setup_rb(struct radeon_device *rdev, 2833static void si_setup_rb(struct radeon_device *rdev,
2834 u32 se_num, u32 sh_per_se, 2834 u32 se_num, u32 sh_per_se,
2835 u32 max_rb_num) 2835 u32 max_rb_num_per_se)
2836{ 2836{
2837 int i, j; 2837 int i, j;
2838 u32 data, mask; 2838 u32 data, mask;
@@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev,
2842 for (i = 0; i < se_num; i++) { 2842 for (i = 0; i < se_num; i++) {
2843 for (j = 0; j < sh_per_se; j++) { 2843 for (j = 0; j < sh_per_se; j++) {
2844 si_select_se_sh(rdev, i, j); 2844 si_select_se_sh(rdev, i, j);
2845 data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se); 2845 data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
2846 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH); 2846 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
2847 } 2847 }
2848 } 2848 }
2849 si_select_se_sh(rdev, 0xffffffff, 0xffffffff); 2849 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
2850 2850
2851 mask = 1; 2851 mask = 1;
2852 for (i = 0; i < max_rb_num; i++) { 2852 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
2853 if (!(disabled_rbs & mask)) 2853 if (!(disabled_rbs & mask))
2854 enabled_rbs |= mask; 2854 enabled_rbs |= mask;
2855 mask <<= 1; 2855 mask <<= 1;
2856 } 2856 }
2857 2857
2858 rdev->config.si.backend_enable_mask = enabled_rbs;
2859
2858 for (i = 0; i < se_num; i++) { 2860 for (i = 0; i < se_num; i++) {
2859 si_select_se_sh(rdev, i, 0xffffffff); 2861 si_select_se_sh(rdev, i, 0xffffffff);
2860 data = 0; 2862 data = 0;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 78be66176840..bbb0b0d463f7 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,6 +36,7 @@
36#include <linux/cpu.h> 36#include <linux/cpu.h>
37#include <linux/smp.h> 37#include <linux/smp.h>
38#include <linux/moduleparam.h> 38#include <linux/moduleparam.h>
39#include <linux/pci.h>
39#include <asm/msr.h> 40#include <asm/msr.h>
40#include <asm/processor.h> 41#include <asm/processor.h>
41#include <asm/cpu_device_id.h> 42#include <asm/cpu_device_id.h>
@@ -52,7 +53,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
52 53
53#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ 54#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
54#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */ 55#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
55#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ 56#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */
56#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ 57#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
57#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) 58#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
58#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) 59#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
@@ -176,20 +177,33 @@ static ssize_t show_temp(struct device *dev,
176 /* Check whether the time interval has elapsed */ 177 /* Check whether the time interval has elapsed */
177 if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) { 178 if (!tdata->valid || time_after(jiffies, tdata->last_updated + HZ)) {
178 rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); 179 rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx);
179 tdata->valid = 0; 180 /*
180 /* Check whether the data is valid */ 181 * Ignore the valid bit. In all observed cases the register
181 if (eax & 0x80000000) { 182 * value is either low or zero if the valid bit is 0.
182 tdata->temp = tdata->tjmax - 183 * Return it instead of reporting an error which doesn't
183 ((eax >> 16) & 0x7f) * 1000; 184 * really help at all.
184 tdata->valid = 1; 185 */
185 } 186 tdata->temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
187 tdata->valid = 1;
186 tdata->last_updated = jiffies; 188 tdata->last_updated = jiffies;
187 } 189 }
188 190
189 mutex_unlock(&tdata->update_lock); 191 mutex_unlock(&tdata->update_lock);
190 return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN; 192 return sprintf(buf, "%d\n", tdata->temp);
191} 193}
192 194
195struct tjmax_pci {
196 unsigned int device;
197 int tjmax;
198};
199
200static const struct tjmax_pci tjmax_pci_table[] = {
201 { 0x0708, 110000 }, /* CE41x0 (Sodaville ) */
202 { 0x0c72, 102000 }, /* Atom S1240 (Centerton) */
203 { 0x0c73, 95000 }, /* Atom S1220 (Centerton) */
204 { 0x0c75, 95000 }, /* Atom S1260 (Centerton) */
205};
206
193struct tjmax { 207struct tjmax {
194 char const *id; 208 char const *id;
195 int tjmax; 209 int tjmax;
@@ -198,9 +212,6 @@ struct tjmax {
198static const struct tjmax tjmax_table[] = { 212static const struct tjmax tjmax_table[] = {
199 { "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */ 213 { "CPU 230", 100000 }, /* Model 0x1c, stepping 2 */
200 { "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */ 214 { "CPU 330", 125000 }, /* Model 0x1c, stepping 2 */
201 { "CPU CE4110", 110000 }, /* Model 0x1c, stepping 10 Sodaville */
202 { "CPU CE4150", 110000 }, /* Model 0x1c, stepping 10 */
203 { "CPU CE4170", 110000 }, /* Model 0x1c, stepping 10 */
204}; 215};
205 216
206struct tjmax_model { 217struct tjmax_model {
@@ -222,8 +233,11 @@ static const struct tjmax_model tjmax_model_table[] = {
222 * is undetectable by software 233 * is undetectable by software
223 */ 234 */
224 { 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */ 235 { 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */
225 { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z2760) */ 236 { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z27x0) */
226 { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx) */ 237 { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx)
238 * Also matches S12x0 (stepping 9), covered by
239 * PCI table
240 */
227}; 241};
228 242
229static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) 243static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
@@ -236,8 +250,20 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
236 int err; 250 int err;
237 u32 eax, edx; 251 u32 eax, edx;
238 int i; 252 int i;
253 struct pci_dev *host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
254
255 /*
256 * Explicit tjmax table entries override heuristics.
257 * First try PCI host bridge IDs, followed by model ID strings
258 * and model/stepping information.
259 */
260 if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL) {
261 for (i = 0; i < ARRAY_SIZE(tjmax_pci_table); i++) {
262 if (host_bridge->device == tjmax_pci_table[i].device)
263 return tjmax_pci_table[i].tjmax;
264 }
265 }
239 266
240 /* explicit tjmax table entries override heuristics */
241 for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) { 267 for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
242 if (strstr(c->x86_model_id, tjmax_table[i].id)) 268 if (strstr(c->x86_model_id, tjmax_table[i].id))
243 return tjmax_table[i].tjmax; 269 return tjmax_table[i].tjmax;
@@ -343,12 +369,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
343 if (cpu_has_tjmax(c)) 369 if (cpu_has_tjmax(c))
344 dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); 370 dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
345 } else { 371 } else {
346 val = (eax >> 16) & 0xff; 372 val = (eax >> 16) & 0x7f;
347 /* 373 /*
348 * If the TjMax is not plausible, an assumption 374 * If the TjMax is not plausible, an assumption
349 * will be used 375 * will be used
350 */ 376 */
351 if (val) { 377 if (val >= 85) {
352 dev_dbg(dev, "TjMax is %d degrees C\n", val); 378 dev_dbg(dev, "TjMax is %d degrees C\n", val);
353 return val * 1000; 379 return val * 1000;
354 } 380 }
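Two of the coretemp changes above are pure arithmetic: show_temp() now uses the 7-bit readout even when the hardware valid bit is clear, and get_tjmax() masks the reported TjMax to 7 bits and only trusts values of at least 85 degC. A standalone model with made-up register contents:

/*
 * Model of the two coretemp computations above; register values are made up.
 * (1) show_temp(): the 7-bit readout is degrees below TjMax and is now used
 *     even when the hardware "valid" bit is clear.
 * (2) get_tjmax(): the reported TjMax is masked to 7 bits and only trusted
 *     when it is at least 85 degC.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int tjmax = 100000;				/* millidegrees Celsius */
	uint32_t status_eax = 0x88310000;		/* example status readout */
	int below = (status_eax >> 16) & 0x7f;		/* 0x31 = 49 degC below TjMax */

	printf("temp = %d mC\n", tjmax - below * 1000);	/* 51000 */

	uint32_t target_eax = 0x00690000;		/* example TjMax readout */
	int val = (target_eax >> 16) & 0x7f;		/* 0x69 = 105 degC */

	printf("TjMax %d degC %s\n", val, val >= 85 ? "accepted" : "rejected");
	return 0;
}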
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 960fac3fb166..afd31042b452 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -45,7 +45,7 @@ static const char * const input_names[] = {
45/* Conversion function for VDDOUT and VBAT */ 45/* Conversion function for VDDOUT and VBAT */
46static inline int volt_reg_to_mv(int value) 46static inline int volt_reg_to_mv(int value)
47{ 47{
48 return DIV_ROUND_CLOSEST(value * 1000, 512) + 2500; 48 return DIV_ROUND_CLOSEST(value * 2000, 1023) + 2500;
49} 49}
50 50
51/* Conversion function for ADC channels 4, 5 and 6 */ 51/* Conversion function for ADC channels 4, 5 and 6 */
@@ -57,7 +57,7 @@ static inline int input_reg_to_mv(int value)
57/* Conversion function for VBBAT */ 57/* Conversion function for VBBAT */
58static inline int vbbat_reg_to_mv(int value) 58static inline int vbbat_reg_to_mv(int value)
59{ 59{
60 return DIV_ROUND_CLOSEST(value * 2500, 512); 60 return DIV_ROUND_CLOSEST(value * 5000, 1023);
61} 61}
62 62
63static inline int da9052_enable_vddout_channel(struct da9052 *da9052) 63static inline int da9052_enable_vddout_channel(struct da9052 *da9052)
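The DA9052 conversions above now divide by 1023, the full-scale code of the 10-bit ADC, instead of 512; by these formulas a full-scale reading maps VDDOUT/VBAT to 4500 mV and VBBAT to 5000 mV. A standalone check, with DIV_ROUND_CLOSEST modelled for non-negative values and example readings:

/*
 * Model of the corrected DA9052 conversions above: the divisor is now 1023,
 * the full-scale code of the 10-bit ADC. DIV_ROUND_CLOSEST is modelled for
 * non-negative values only; the readings are examples.
 */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static int volt_reg_to_mv(int value)	/* VDDOUT and VBAT */
{
	return DIV_ROUND_CLOSEST(value * 2000, 1023) + 2500;
}

static int vbbat_reg_to_mv(int value)	/* VBBAT */
{
	return DIV_ROUND_CLOSEST(value * 5000, 1023);
}

int main(void)
{
	printf("VBAT  code 1023 -> %d mV\n", volt_reg_to_mv(1023));	/* 4500 */
	printf("VBBAT code 1023 -> %d mV\n", vbbat_reg_to_mv(1023));	/* 5000 */
	printf("VBBAT code  512 -> %d mV\n", vbbat_reg_to_mv(512));	/* 2502 */
	return 0;
}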
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index dff841085baf..6040121a405a 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -249,7 +249,7 @@ static void fam15h_power_remove(struct pci_dev *pdev)
249 sysfs_remove_group(&dev->kobj, &fam15h_power_attr_group); 249 sysfs_remove_group(&dev->kobj, &fam15h_power_attr_group);
250} 250}
251 251
252static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = { 252static const struct pci_device_id fam15h_power_id_table[] = {
253 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, 253 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
254 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, 254 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
255 {} 255 {}
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index d65f3fd895dd..baf375b5ab0d 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -204,12 +204,13 @@ static void k10temp_remove(struct pci_dev *pdev)
204 &sensor_dev_attr_temp1_crit_hyst.dev_attr); 204 &sensor_dev_attr_temp1_crit_hyst.dev_attr);
205} 205}
206 206
207static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = { 207static const struct pci_device_id k10temp_id_table[] = {
208 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 208 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
209 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, 209 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
210 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, 210 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
211 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) }, 211 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
212 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) }, 212 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
213 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
213 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, 214 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
214 {} 215 {}
215}; 216};
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 5b50e9e4f96b..734d55d48cc8 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -135,7 +135,7 @@ static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 1, 0);
135static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1); 135static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1);
136static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); 136static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
137 137
138static DEFINE_PCI_DEVICE_TABLE(k8temp_ids) = { 138static const struct pci_device_id k8temp_ids[] = {
139 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, 139 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
140 { 0 }, 140 { 0 },
141}; 141};
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index cf811c1a1475..8686e966fa28 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -3936,6 +3936,18 @@ static int nct6775_probe(struct platform_device *pdev)
3936 return PTR_ERR_OR_ZERO(hwmon_dev); 3936 return PTR_ERR_OR_ZERO(hwmon_dev);
3937} 3937}
3938 3938
3939static void nct6791_enable_io_mapping(int sioaddr)
3940{
3941 int val;
3942
3943 val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE);
3944 if (val & 0x10) {
3945 pr_info("Enabling hardware monitor logical device mappings.\n");
3946 superio_outb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
3947 val & ~0x10);
3948 }
3949}
3950
3939#ifdef CONFIG_PM 3951#ifdef CONFIG_PM
3940static int nct6775_suspend(struct device *dev) 3952static int nct6775_suspend(struct device *dev)
3941{ 3953{
@@ -3955,11 +3967,20 @@ static int nct6775_suspend(struct device *dev)
3955static int nct6775_resume(struct device *dev) 3967static int nct6775_resume(struct device *dev)
3956{ 3968{
3957 struct nct6775_data *data = dev_get_drvdata(dev); 3969 struct nct6775_data *data = dev_get_drvdata(dev);
3958 int i, j; 3970 int i, j, err = 0;
3959 3971
3960 mutex_lock(&data->update_lock); 3972 mutex_lock(&data->update_lock);
3961 data->bank = 0xff; /* Force initial bank selection */ 3973 data->bank = 0xff; /* Force initial bank selection */
3962 3974
3975 if (data->kind == nct6791) {
3976 err = superio_enter(data->sioreg);
3977 if (err)
3978 goto abort;
3979
3980 nct6791_enable_io_mapping(data->sioreg);
3981 superio_exit(data->sioreg);
3982 }
3983
3963 /* Restore limits */ 3984 /* Restore limits */
3964 for (i = 0; i < data->in_num; i++) { 3985 for (i = 0; i < data->in_num; i++) {
3965 if (!(data->have_in & (1 << i))) 3986 if (!(data->have_in & (1 << i)))
@@ -3996,11 +4017,12 @@ static int nct6775_resume(struct device *dev)
3996 nct6775_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2); 4017 nct6775_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
3997 } 4018 }
3998 4019
4020abort:
3999 /* Force re-reading all values */ 4021 /* Force re-reading all values */
4000 data->valid = false; 4022 data->valid = false;
4001 mutex_unlock(&data->update_lock); 4023 mutex_unlock(&data->update_lock);
4002 4024
4003 return 0; 4025 return err;
4004} 4026}
4005 4027
4006static const struct dev_pm_ops nct6775_dev_pm_ops = { 4028static const struct dev_pm_ops nct6775_dev_pm_ops = {
@@ -4088,15 +4110,9 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
4088 pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n"); 4110 pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n");
4089 superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01); 4111 superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
4090 } 4112 }
4091 if (sio_data->kind == nct6791) { 4113
4092 val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE); 4114 if (sio_data->kind == nct6791)
4093 if (val & 0x10) { 4115 nct6791_enable_io_mapping(sioaddr);
4094 pr_info("Enabling hardware monitor logical device mappings.\n");
4095 superio_outb(sioaddr,
4096 NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
4097 val & ~0x10);
4098 }
4099 }
4100 4116
4101 superio_exit(sioaddr); 4117 superio_exit(sioaddr);
4102 pr_info("Found %s or compatible chip at %#x:%#x\n", 4118 pr_info("Found %s or compatible chip at %#x:%#x\n",
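The NCT6791 I/O-space unlock above is factored into nct6791_enable_io_mapping() so the resume path can repeat it, presumably because the setting does not survive a suspend cycle on some boards. A standalone model with stubbed Super-I/O accessors; the register index and its initial value are illustrative, not the driver's constants:

/*
 * Standalone model of the factored-out helper above: read the lock/enable
 * register and clear bit 4 if it is set. The superio_* accessors are stubbed
 * with an array standing in for the Super-I/O register file.
 */
#include <stdio.h>

#define REG_HM_IO_SPACE_LOCK_ENABLE	0x28	/* illustrative index */

static unsigned char sio_regs[256] = {
	[REG_HM_IO_SPACE_LOCK_ENABLE] = 0x10,	/* pretend the mapping is locked */
};

static int superio_inb(int sioaddr, int reg)
{
	(void)sioaddr;
	return sio_regs[reg];
}

static void superio_outb(int sioaddr, int reg, int val)
{
	(void)sioaddr;
	sio_regs[reg] = (unsigned char)val;
}

static void nct6791_enable_io_mapping(int sioaddr)
{
	int val = superio_inb(sioaddr, REG_HM_IO_SPACE_LOCK_ENABLE);

	if (val & 0x10) {
		printf("Enabling hardware monitor logical device mappings.\n");
		superio_outb(sioaddr, REG_HM_IO_SPACE_LOCK_ENABLE, val & ~0x10);
	}
}

int main(void)
{
	nct6791_enable_io_mapping(0x2e);	/* probe-time detection path ... */
	nct6791_enable_io_mapping(0x2e);	/* ... and again on resume; now a no-op */
	return 0;
}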
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 72a889702f0d..e74bd7e620e8 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -754,7 +754,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev)
754 return data; 754 return data;
755} 755}
756 756
757static DEFINE_PCI_DEVICE_TABLE(sis5595_pci_ids) = { 757static const struct pci_device_id sis5595_pci_ids[] = {
758 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, 758 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
759 { 0, } 759 { 0, }
760}; 760};
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index c9dcce8c3dc3..babd732b4e18 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -824,7 +824,7 @@ static struct via686a_data *via686a_update_device(struct device *dev)
824 return data; 824 return data;
825} 825}
826 826
827static DEFINE_PCI_DEVICE_TABLE(via686a_pci_ids) = { 827static const struct pci_device_id via686a_pci_ids[] = {
828 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) }, 828 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
829 { } 829 { }
830}; 830};
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index aee14e2192f8..b3babe3326fb 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -766,7 +766,7 @@ static struct platform_driver vt8231_driver = {
766 .remove = vt8231_remove, 766 .remove = vt8231_remove,
767}; 767};
768 768
769static DEFINE_PCI_DEVICE_TABLE(vt8231_pci_ids) = { 769static const struct pci_device_id vt8231_pci_ids[] = {
770 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) }, 770 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
771 { 0, } 771 { 0, }
772}; 772};
diff --git a/drivers/ide/buddha.c b/drivers/ide/buddha.c
index b1d38590ac01..46eaf58d881b 100644
--- a/drivers/ide/buddha.c
+++ b/drivers/ide/buddha.c
@@ -198,7 +198,7 @@ fail_base2:
198 continue; 198 continue;
199 } 199 }
200 } 200 }
201 buddha_board = ZTWO_VADDR(board); 201 buddha_board = (unsigned long)ZTWO_VADDR(board);
202 202
203 /* write to BUDDHA_IRQ_MR to enable the board IRQ */ 203 /* write to BUDDHA_IRQ_MR to enable the board IRQ */
204 /* X-Surf doesn't have this. IRQs are always on */ 204 /* X-Surf doesn't have this. IRQs are always on */
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index f80b700f821c..6c0e0452dd9b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table;
123 * which is also the index into the MWAIT hint array. 123 * which is also the index into the MWAIT hint array.
124 * Thus C0 is a dummy. 124 * Thus C0 is a dummy.
125 */ 125 */
126static struct cpuidle_state nehalem_cstates[] __initdata = { 126static struct cpuidle_state nehalem_cstates[] = {
127 { 127 {
128 .name = "C1-NHM", 128 .name = "C1-NHM",
129 .desc = "MWAIT 0x00", 129 .desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
156 .enter = NULL } 156 .enter = NULL }
157}; 157};
158 158
159static struct cpuidle_state snb_cstates[] __initdata = { 159static struct cpuidle_state snb_cstates[] = {
160 { 160 {
161 .name = "C1-SNB", 161 .name = "C1-SNB",
162 .desc = "MWAIT 0x00", 162 .desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
196 .enter = NULL } 196 .enter = NULL }
197}; 197};
198 198
199static struct cpuidle_state ivb_cstates[] __initdata = { 199static struct cpuidle_state ivb_cstates[] = {
200 { 200 {
201 .name = "C1-IVB", 201 .name = "C1-IVB",
202 .desc = "MWAIT 0x00", 202 .desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
236 .enter = NULL } 236 .enter = NULL }
237}; 237};
238 238
239static struct cpuidle_state hsw_cstates[] __initdata = { 239static struct cpuidle_state hsw_cstates[] = {
240 { 240 {
241 .name = "C1-HSW", 241 .name = "C1-HSW",
242 .desc = "MWAIT 0x00", 242 .desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
297 .enter = NULL } 297 .enter = NULL }
298}; 298};
299 299
300static struct cpuidle_state atom_cstates[] __initdata = { 300static struct cpuidle_state atom_cstates[] = {
301 { 301 {
302 .name = "C1E-ATM", 302 .name = "C1E-ATM",
303 .desc = "MWAIT 0x00", 303 .desc = "MWAIT 0x00",
@@ -329,7 +329,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
329 { 329 {
330 .enter = NULL } 330 .enter = NULL }
331}; 331};
332static struct cpuidle_state avn_cstates[] __initdata = { 332static struct cpuidle_state avn_cstates[] = {
333 { 333 {
334 .name = "C1-AVN", 334 .name = "C1-AVN",
335 .desc = "MWAIT 0x00", 335 .desc = "MWAIT 0x00",
@@ -344,6 +344,8 @@ static struct cpuidle_state avn_cstates[] __initdata = {
344 .exit_latency = 15, 344 .exit_latency = 15,
345 .target_residency = 45, 345 .target_residency = 45,
346 .enter = &intel_idle }, 346 .enter = &intel_idle },
347 {
348 .enter = NULL }
347}; 349};
348 350
349/** 351/**
@@ -375,16 +377,7 @@ static int intel_idle(struct cpuidle_device *dev,
375 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 377 if (!(lapic_timer_reliable_states & (1 << (cstate))))
376 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); 378 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
377 379
378 if (!current_set_polling_and_test()) { 380 mwait_idle_with_hints(eax, ecx);
379
380 if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
381 clflush((void *)&current_thread_info()->flags);
382
383 __monitor((void *)&current_thread_info()->flags, 0, 0);
384 smp_mb();
385 if (!need_resched())
386 __mwait(eax, ecx);
387 }
388 381
389 if (!(lapic_timer_reliable_states & (1 << (cstate)))) 382 if (!(lapic_timer_reliable_states & (1 << (cstate))))
390 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); 383 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 12fef76c791c..45126879ad28 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
524 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 524 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
525} 525}
526 526
527#define VLAN_NONE 0xfff
528#define FILTER_SEL_VLAN_NONE 0xffff
529#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
530#define FILTER_SEL_WIDTH_VIN_P_FC \
531 (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
532#define FILTER_SEL_WIDTH_TAG_P_FC \
533 (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
534#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
535
536static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
537 struct l2t_entry *l2t)
538{
539 unsigned int ntuple = 0;
540 u32 viid;
541
542 switch (dev->rdev.lldi.filt_mode) {
543
544 /* default filter mode */
545 case HW_TPL_FR_MT_PR_IV_P_FC:
546 if (l2t->vlan == VLAN_NONE)
547 ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
548 else {
549 ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
550 ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
551 }
552 ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
553 FILTER_SEL_WIDTH_VLD_TAG_P_FC;
554 break;
555 case HW_TPL_FR_MT_PR_OV_P_FC: {
556 viid = cxgb4_port_viid(l2t->neigh->dev);
557
558 ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
559 ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
560 ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
561 ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
562 FILTER_SEL_WIDTH_VLD_TAG_P_FC;
563 break;
564 }
565 default:
566 break;
567 }
568 return ntuple;
569}
570
571static int send_connect(struct c4iw_ep *ep) 527static int send_connect(struct c4iw_ep *ep)
572{ 528{
573 struct cpl_act_open_req *req; 529 struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
641 req->local_ip = la->sin_addr.s_addr; 597 req->local_ip = la->sin_addr.s_addr;
642 req->peer_ip = ra->sin_addr.s_addr; 598 req->peer_ip = ra->sin_addr.s_addr;
643 req->opt0 = cpu_to_be64(opt0); 599 req->opt0 = cpu_to_be64(opt0);
644 req->params = cpu_to_be32(select_ntuple(ep->com.dev, 600 req->params = cpu_to_be32(cxgb4_select_ntuple(
645 ep->dst, ep->l2t)); 601 ep->com.dev->rdev.lldi.ports[0],
602 ep->l2t));
646 req->opt2 = cpu_to_be32(opt2); 603 req->opt2 = cpu_to_be32(opt2);
647 } else { 604 } else {
648 req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); 605 req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
662 req6->peer_ip_lo = *((__be64 *) 619 req6->peer_ip_lo = *((__be64 *)
663 (ra6->sin6_addr.s6_addr + 8)); 620 (ra6->sin6_addr.s6_addr + 8));
664 req6->opt0 = cpu_to_be64(opt0); 621 req6->opt0 = cpu_to_be64(opt0);
665 req6->params = cpu_to_be32( 622 req6->params = cpu_to_be32(cxgb4_select_ntuple(
666 select_ntuple(ep->com.dev, ep->dst, 623 ep->com.dev->rdev.lldi.ports[0],
667 ep->l2t)); 624 ep->l2t));
668 req6->opt2 = cpu_to_be32(opt2); 625 req6->opt2 = cpu_to_be32(opt2);
669 } 626 }
670 } else { 627 } else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
681 t5_req->peer_ip = ra->sin_addr.s_addr; 638 t5_req->peer_ip = ra->sin_addr.s_addr;
682 t5_req->opt0 = cpu_to_be64(opt0); 639 t5_req->opt0 = cpu_to_be64(opt0);
683 t5_req->params = cpu_to_be64(V_FILTER_TUPLE( 640 t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
684 select_ntuple(ep->com.dev, 641 cxgb4_select_ntuple(
685 ep->dst, ep->l2t))); 642 ep->com.dev->rdev.lldi.ports[0],
643 ep->l2t)));
686 t5_req->opt2 = cpu_to_be32(opt2); 644 t5_req->opt2 = cpu_to_be32(opt2);
687 } else { 645 } else {
688 t5_req6 = (struct cpl_t5_act_open_req6 *) 646 t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
703 (ra6->sin6_addr.s6_addr + 8)); 661 (ra6->sin6_addr.s6_addr + 8));
704 t5_req6->opt0 = cpu_to_be64(opt0); 662 t5_req6->opt0 = cpu_to_be64(opt0);
705 t5_req6->params = (__force __be64)cpu_to_be32( 663 t5_req6->params = (__force __be64)cpu_to_be32(
706 select_ntuple(ep->com.dev, ep->dst, ep->l2t)); 664 cxgb4_select_ntuple(
665 ep->com.dev->rdev.lldi.ports[0],
666 ep->l2t));
707 t5_req6->opt2 = cpu_to_be32(opt2); 667 t5_req6->opt2 = cpu_to_be32(opt2);
708 } 668 }
709 } 669 }
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1630 memset(req, 0, sizeof(*req)); 1590 memset(req, 0, sizeof(*req));
1631 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); 1591 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
1632 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 1592 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
1633 req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, 1593 req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
1594 ep->com.dev->rdev.lldi.ports[0],
1634 ep->l2t)); 1595 ep->l2t));
1635 sin = (struct sockaddr_in *)&ep->com.local_addr; 1596 sin = (struct sockaddr_in *)&ep->com.local_addr;
1636 req->le.lport = sin->sin_port; 1597 req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2938 /* 2899 /*
2939 * Allocate a server TID. 2900 * Allocate a server TID.
2940 */ 2901 */
2941 if (dev->rdev.lldi.enable_fw_ofld_conn) 2902 if (dev->rdev.lldi.enable_fw_ofld_conn &&
2903 ep->com.local_addr.ss_family == AF_INET)
2942 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, 2904 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
2943 cm_id->local_addr.ss_family, ep); 2905 cm_id->local_addr.ss_family, ep);
2944 else 2906 else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3323 /* 3285 /*
3324 * Calculate the server tid from filter hit index from cpl_rx_pkt. 3286 * Calculate the server tid from filter hit index from cpl_rx_pkt.
3325 */ 3287 */
3326 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val) 3288 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
3327 - dev->rdev.lldi.tids->sftid_base
3328 + dev->rdev.lldi.tids->nstids;
3329 3289
3330 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); 3290 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
3331 if (!lep) { 3291 if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3397 window = (__force u16) htons((__force u16)tcph->window); 3357 window = (__force u16) htons((__force u16)tcph->window);
3398 3358
3399 /* Calcuate filter portion for LE region. */ 3359 /* Calcuate filter portion for LE region. */
3400 filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e)); 3360 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
3361 dev->rdev.lldi.ports[0],
3362 e));
3401 3363
3402 /* 3364 /*
3403 * Synthesize the cpl_pass_accept_req. We have everything except the 3365 * Synthesize the cpl_pass_accept_req. We have everything except the
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index c29b5c838833..cdc7df4fdb8a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -31,6 +31,7 @@
31 */ 31 */
32 32
33#include <linux/netdevice.h> 33#include <linux/netdevice.h>
34#include <linux/if_arp.h> /* For ARPHRD_xxx */
34#include <linux/module.h> 35#include <linux/module.h>
35#include <net/rtnetlink.h> 36#include <net/rtnetlink.h>
36#include "ipoib.h" 37#include "ipoib.h"
@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
103 return -EINVAL; 104 return -EINVAL;
104 105
105 pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); 106 pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
106 if (!pdev) 107 if (!pdev || pdev->type != ARPHRD_INFINIBAND)
107 return -ENODEV; 108 return -ENODEV;
108 109
109 ppriv = netdev_priv(pdev); 110 ppriv = netdev_priv(pdev);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 846ccdd905b1..d2965e4b3224 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
1871 break; 1871 break;
1872 1872
1873 case EV_ABS: 1873 case EV_ABS:
1874 input_alloc_absinfo(dev);
1875 if (!dev->absinfo)
1876 return;
1877
1874 __set_bit(code, dev->absbit); 1878 __set_bit(code, dev->absbit);
1875 break; 1879 break;
1876 1880
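input_set_capability() above now allocates the absinfo table before setting an EV_ABS capability bit and quietly bails if the allocation fails, so later per-axis setup cannot dereference a NULL table. A standalone model of that allocate-before-flagging pattern; types and names are stand-ins, not the input core's:

/*
 * Model of the allocate-before-flagging pattern above: make sure the backing
 * table exists, cope with allocation failure, and only then advertise the
 * capability.
 */
#include <stdio.h>
#include <stdlib.h>

#define ABS_CNT 64

struct absinfo_model { int minimum, maximum; };

struct dev_model {
	unsigned long absbit;			/* modelled capability bitmap */
	struct absinfo_model *absinfo;		/* lazily allocated table */
};

static void set_abs_capability(struct dev_model *dev, unsigned int code)
{
	if (!dev->absinfo)
		dev->absinfo = calloc(ABS_CNT, sizeof(*dev->absinfo));
	if (!dev->absinfo)
		return;				/* allocation failed: leave the bit clear */

	dev->absbit |= 1ul << code;
}

int main(void)
{
	struct dev_model dev = { 0 };

	set_abs_capability(&dev, 0);
	printf("absbit=%#lx absinfo=%p\n", dev.absbit, (void *)dev.absinfo);
	free(dev.absinfo);
	return 0;
}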
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 75762d6ff3ba..aa127ba392a4 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -455,7 +455,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
455 } 455 }
456} 456}
457 457
458static irqreturn_t zforce_interrupt(int irq, void *dev_id) 458static irqreturn_t zforce_irq(int irq, void *dev_id)
459{
460 struct zforce_ts *ts = dev_id;
461 struct i2c_client *client = ts->client;
462
463 if (ts->suspended && device_may_wakeup(&client->dev))
464 pm_wakeup_event(&client->dev, 500);
465
466 return IRQ_WAKE_THREAD;
467}
468
469static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
459{ 470{
460 struct zforce_ts *ts = dev_id; 471 struct zforce_ts *ts = dev_id;
461 struct i2c_client *client = ts->client; 472 struct i2c_client *client = ts->client;
@@ -465,12 +476,10 @@ static irqreturn_t zforce_interrupt(int irq, void *dev_id)
465 u8 *payload; 476 u8 *payload;
466 477
467 /* 478 /*
468 * When suspended, emit a wakeup signal if necessary and return. 479 * When still suspended, return.
469 * Due to the level-interrupt we will get re-triggered later. 480 * Due to the level-interrupt we will get re-triggered later.
470 */ 481 */
471 if (ts->suspended) { 482 if (ts->suspended) {
472 if (device_may_wakeup(&client->dev))
473 pm_wakeup_event(&client->dev, 500);
474 msleep(20); 483 msleep(20);
475 return IRQ_HANDLED; 484 return IRQ_HANDLED;
476 } 485 }
@@ -763,8 +772,8 @@ static int zforce_probe(struct i2c_client *client,
763 * Therefore we can trigger the interrupt anytime it is low and do 772 * Therefore we can trigger the interrupt anytime it is low and do
764 * not need to limit it to the interrupt edge. 773 * not need to limit it to the interrupt edge.
765 */ 774 */
766 ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, 775 ret = devm_request_threaded_irq(&client->dev, client->irq,
767 zforce_interrupt, 776 zforce_irq, zforce_irq_thread,
768 IRQF_TRIGGER_LOW | IRQF_ONESHOT, 777 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
769 input_dev->name, ts); 778 input_dev->name, ts);
770 if (ret) { 779 if (ret) {
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 497bd026c237..4a4825528188 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card)
1643 int i; 1643 int i;
1644 struct pci_dev *tmp_hfcpci = NULL; 1644 struct pci_dev *tmp_hfcpci = NULL;
1645 1645
1646#ifdef __BIG_ENDIAN
1647#error "not running on big endian machines now"
1648#endif
1649
1650 strcpy(tmp, hfcpci_revision); 1646 strcpy(tmp, hfcpci_revision);
1651 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp)); 1647 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
1652 1648
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index f6ab63aa6995..33eeb4602c7e 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card)
290 struct IsdnCardState *cs = card->cs; 290 struct IsdnCardState *cs = card->cs;
291 char tmp[64]; 291 char tmp[64];
292 292
293#ifdef __BIG_ENDIAN
294#error "not running on big endian machines now"
295#endif
296
297 strcpy(tmp, telespci_revision); 293 strcpy(tmp, telespci_revision);
298 printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp)); 294 printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
299 if (cs->typ != ISDN_CTYPE_TELESPCI) 295 if (cs->typ != ISDN_CTYPE_TELESPCI)
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 05188351711d..a97263e902ff 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -244,18 +244,12 @@ static int lp5521_update_program_memory(struct lp55xx_chip *chip,
244 if (i % 2) 244 if (i % 2)
245 goto err; 245 goto err;
246 246
247 mutex_lock(&chip->lock);
248
249 for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) { 247 for (i = 0; i < LP5521_PROGRAM_LENGTH; i++) {
250 ret = lp55xx_write(chip, addr[idx] + i, pattern[i]); 248 ret = lp55xx_write(chip, addr[idx] + i, pattern[i]);
251 if (ret) { 249 if (ret)
252 mutex_unlock(&chip->lock);
253 return -EINVAL; 250 return -EINVAL;
254 }
255 } 251 }
256 252
257 mutex_unlock(&chip->lock);
258
259 return size; 253 return size;
260 254
261err: 255err:
@@ -427,15 +421,17 @@ static ssize_t store_engine_load(struct device *dev,
427{ 421{
428 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); 422 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
429 struct lp55xx_chip *chip = led->chip; 423 struct lp55xx_chip *chip = led->chip;
424 int ret;
430 425
431 mutex_lock(&chip->lock); 426 mutex_lock(&chip->lock);
432 427
433 chip->engine_idx = nr; 428 chip->engine_idx = nr;
434 lp5521_load_engine(chip); 429 lp5521_load_engine(chip);
430 ret = lp5521_update_program_memory(chip, buf, len);
435 431
436 mutex_unlock(&chip->lock); 432 mutex_unlock(&chip->lock);
437 433
438 return lp5521_update_program_memory(chip, buf, len); 434 return ret;
439} 435}
440store_load(1) 436store_load(1)
441store_load(2) 437store_load(2)
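Both LP55xx hunks above move the mutex out of the program-memory writer and into the sysfs store callback, which now captures the helper's result before unlocking instead of calling back into the helper after dropping the lock. A pthread model of that calling convention; the "program memory" and sizes are illustrative:

/*
 * Model of the locking convention above: the program-memory writer assumes
 * the caller holds the lock, and the caller captures the result before
 * unlocking.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t chip_lock = PTHREAD_MUTEX_INITIALIZER;
static char program_memory[32];

/* Must be called with chip_lock held. */
static int update_program_memory(const char *buf, size_t len)
{
	if (len >= sizeof(program_memory))
		return -1;			/* stands in for -EINVAL */
	memcpy(program_memory, buf, len);
	return (int)len;			/* stands in for "return size" */
}

static int store_engine_load(const char *buf, size_t len)
{
	int ret;

	pthread_mutex_lock(&chip_lock);
	/* ... select and load the engine here ... */
	ret = update_program_memory(buf, len);
	pthread_mutex_unlock(&chip_lock);

	return ret;
}

int main(void)
{
	printf("stored %d bytes\n", store_engine_load("9d804000", 8));
	return 0;
}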
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 6b553d9f4266..fd9ab5f61441 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -337,18 +337,12 @@ static int lp5523_update_program_memory(struct lp55xx_chip *chip,
337 if (i % 2) 337 if (i % 2)
338 goto err; 338 goto err;
339 339
340 mutex_lock(&chip->lock);
341
342 for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) { 340 for (i = 0; i < LP5523_PROGRAM_LENGTH; i++) {
343 ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]); 341 ret = lp55xx_write(chip, LP5523_REG_PROG_MEM + i, pattern[i]);
344 if (ret) { 342 if (ret)
345 mutex_unlock(&chip->lock);
346 return -EINVAL; 343 return -EINVAL;
347 }
348 } 344 }
349 345
350 mutex_unlock(&chip->lock);
351
352 return size; 346 return size;
353 347
354err: 348err:
@@ -548,15 +542,17 @@ static ssize_t store_engine_load(struct device *dev,
548{ 542{
549 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); 543 struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev));
550 struct lp55xx_chip *chip = led->chip; 544 struct lp55xx_chip *chip = led->chip;
545 int ret;
551 546
552 mutex_lock(&chip->lock); 547 mutex_lock(&chip->lock);
553 548
554 chip->engine_idx = nr; 549 chip->engine_idx = nr;
555 lp5523_load_engine_and_select_page(chip); 550 lp5523_load_engine_and_select_page(chip);
551 ret = lp5523_update_program_memory(chip, buf, len);
556 552
557 mutex_unlock(&chip->lock); 553 mutex_unlock(&chip->lock);
558 554
559 return lp5523_update_program_memory(chip, buf, len); 555 return ret;
560} 556}
561store_load(1) 557store_load(1)
562store_load(2) 558store_load(2)
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index d26a312f117a..3067d56b11a6 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -32,7 +32,7 @@ config ADB_MACII
32 32
33config ADB_MACIISI 33config ADB_MACIISI
34 bool "Include Mac IIsi ADB driver" 34 bool "Include Mac IIsi ADB driver"
35 depends on ADB && MAC 35 depends on ADB && MAC && BROKEN
36 help 36 help
37 Say Y here if want your kernel to support Macintosh systems that use 37 Say Y here if want your kernel to support Macintosh systems that use
38 the Mac IIsi style ADB. This includes the IIsi, IIvi, IIvx, Classic 38 the Mac IIsi style ADB. This includes the IIsi, IIvi, IIvx, Classic
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 21f4d7ff0da2..369d919bdafe 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1077,6 +1077,7 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1077 rdev->raid_disk = -1; 1077 rdev->raid_disk = -1;
1078 clear_bit(Faulty, &rdev->flags); 1078 clear_bit(Faulty, &rdev->flags);
1079 clear_bit(In_sync, &rdev->flags); 1079 clear_bit(In_sync, &rdev->flags);
1080 clear_bit(Bitmap_sync, &rdev->flags);
1080 clear_bit(WriteMostly, &rdev->flags); 1081 clear_bit(WriteMostly, &rdev->flags);
1081 1082
1082 if (mddev->raid_disks == 0) { 1083 if (mddev->raid_disks == 0) {
@@ -1155,6 +1156,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1155 */ 1156 */
1156 if (ev1 < mddev->bitmap->events_cleared) 1157 if (ev1 < mddev->bitmap->events_cleared)
1157 return 0; 1158 return 0;
1159 if (ev1 < mddev->events)
1160 set_bit(Bitmap_sync, &rdev->flags);
1158 } else { 1161 } else {
1159 if (ev1 < mddev->events) 1162 if (ev1 < mddev->events)
1160 /* just a hot-add of a new device, leave raid_disk at -1 */ 1163 /* just a hot-add of a new device, leave raid_disk at -1 */
@@ -1563,6 +1566,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1563 rdev->raid_disk = -1; 1566 rdev->raid_disk = -1;
1564 clear_bit(Faulty, &rdev->flags); 1567 clear_bit(Faulty, &rdev->flags);
1565 clear_bit(In_sync, &rdev->flags); 1568 clear_bit(In_sync, &rdev->flags);
1569 clear_bit(Bitmap_sync, &rdev->flags);
1566 clear_bit(WriteMostly, &rdev->flags); 1570 clear_bit(WriteMostly, &rdev->flags);
1567 1571
1568 if (mddev->raid_disks == 0) { 1572 if (mddev->raid_disks == 0) {
@@ -1645,6 +1649,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1645 */ 1649 */
1646 if (ev1 < mddev->bitmap->events_cleared) 1650 if (ev1 < mddev->bitmap->events_cleared)
1647 return 0; 1651 return 0;
1652 if (ev1 < mddev->events)
1653 set_bit(Bitmap_sync, &rdev->flags);
1648 } else { 1654 } else {
1649 if (ev1 < mddev->events) 1655 if (ev1 < mddev->events)
1650 /* just a hot-add of a new device, leave raid_disk at -1 */ 1656 /* just a hot-add of a new device, leave raid_disk at -1 */
@@ -2788,6 +2794,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2788 else 2794 else
2789 rdev->saved_raid_disk = -1; 2795 rdev->saved_raid_disk = -1;
2790 clear_bit(In_sync, &rdev->flags); 2796 clear_bit(In_sync, &rdev->flags);
2797 clear_bit(Bitmap_sync, &rdev->flags);
2791 err = rdev->mddev->pers-> 2798 err = rdev->mddev->pers->
2792 hot_add_disk(rdev->mddev, rdev); 2799 hot_add_disk(rdev->mddev, rdev);
2793 if (err) { 2800 if (err) {
@@ -5760,6 +5767,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
5760 info->raid_disk < mddev->raid_disks) { 5767 info->raid_disk < mddev->raid_disks) {
5761 rdev->raid_disk = info->raid_disk; 5768 rdev->raid_disk = info->raid_disk;
5762 set_bit(In_sync, &rdev->flags); 5769 set_bit(In_sync, &rdev->flags);
5770 clear_bit(Bitmap_sync, &rdev->flags);
5763 } else 5771 } else
5764 rdev->raid_disk = -1; 5772 rdev->raid_disk = -1;
5765 } else 5773 } else
@@ -7706,7 +7714,8 @@ static int remove_and_add_spares(struct mddev *mddev,
7706 if (test_bit(Faulty, &rdev->flags)) 7714 if (test_bit(Faulty, &rdev->flags))
7707 continue; 7715 continue;
7708 if (mddev->ro && 7716 if (mddev->ro &&
7709 rdev->saved_raid_disk < 0) 7717 ! (rdev->saved_raid_disk >= 0 &&
7718 !test_bit(Bitmap_sync, &rdev->flags)))
7710 continue; 7719 continue;
7711 7720
7712 rdev->recovery_offset = 0; 7721 rdev->recovery_offset = 0;
@@ -7787,9 +7796,12 @@ void md_check_recovery(struct mddev *mddev)
7787 * As we only add devices that are already in-sync, 7796 * As we only add devices that are already in-sync,
7788 * we can activate the spares immediately. 7797 * we can activate the spares immediately.
7789 */ 7798 */
7790 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7791 remove_and_add_spares(mddev, NULL); 7799 remove_and_add_spares(mddev, NULL);
7792 mddev->pers->spare_active(mddev); 7800 /* There is no thread, but we need to call
7801 * ->spare_active and clear saved_raid_disk
7802 */
7803 md_reap_sync_thread(mddev);
7804 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7793 goto unlock; 7805 goto unlock;
7794 } 7806 }
7795 7807
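Bitmap_sync marks a device whose event count is behind the array but whose gap is covered by the write-intent bitmap; with the hunk above, remove_and_add_spares() on a read-only array only re-adds a device that still has a remembered slot and is not flagged Bitmap_sync. A compact model of that predicate; struct and field names are stand-ins:

/*
 * Model of the read-only spare re-add predicate above: on a read-only array a
 * device is only re-added when it has a remembered slot and does not need a
 * bitmap-based resync.
 */
#include <stdbool.h>
#include <stdio.h>

struct rdev_model {
	int saved_raid_disk;	/* -1 if no remembered slot */
	bool faulty;
	bool bitmap_sync;	/* behind the array, needs bitmap recovery */
};

static bool can_readd_readonly(const struct rdev_model *rdev)
{
	if (rdev->faulty)
		return false;
	return rdev->saved_raid_disk >= 0 && !rdev->bitmap_sync;
}

int main(void)
{
	struct rdev_model clean  = { .saved_raid_disk = 2 };
	struct rdev_model behind = { .saved_raid_disk = 2, .bitmap_sync = true };

	printf("clean: %d  behind: %d\n",
	       can_readd_readonly(&clean), can_readd_readonly(&behind));
	return 0;
}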
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2f5cc8a7ef3e..0095ec84ffc7 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -129,6 +129,9 @@ struct md_rdev {
129enum flag_bits { 129enum flag_bits {
130 Faulty, /* device is known to have a fault */ 130 Faulty, /* device is known to have a fault */
131 In_sync, /* device is in_sync with rest of array */ 131 In_sync, /* device is in_sync with rest of array */
132 Bitmap_sync, /* ..actually, not quite In_sync. Need a
133 * bitmap-based recovery to get fully in sync
134 */
132 Unmerged, /* device is being added to array and should 135 Unmerged, /* device is being added to array and should
133 * be considerred for bvec_merge_fn but not 136 * be considerred for bvec_merge_fn but not
134 * yet for actual IO 137 * yet for actual IO
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1e5a540995e9..a49cfcc7a343 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -924,9 +924,8 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
924 conf->next_window_requests++; 924 conf->next_window_requests++;
925 else 925 else
926 conf->current_window_requests++; 926 conf->current_window_requests++;
927 }
928 if (bio->bi_sector >= conf->start_next_window)
929 sector = conf->start_next_window; 927 sector = conf->start_next_window;
928 }
930 } 929 }
931 930
932 conf->nr_pending++; 931 conf->nr_pending++;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c504e8389e69..06eeb99ea6fc 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1319,7 +1319,7 @@ read_again:
1319 /* Could not read all from this device, so we will 1319 /* Could not read all from this device, so we will
1320 * need another r10_bio. 1320 * need another r10_bio.
1321 */ 1321 */
1322 sectors_handled = (r10_bio->sectors + max_sectors 1322 sectors_handled = (r10_bio->sector + max_sectors
1323 - bio->bi_sector); 1323 - bio->bi_sector);
1324 r10_bio->sectors = max_sectors; 1324 r10_bio->sectors = max_sectors;
1325 spin_lock_irq(&conf->device_lock); 1325 spin_lock_irq(&conf->device_lock);
@@ -1327,7 +1327,7 @@ read_again:
1327 bio->bi_phys_segments = 2; 1327 bio->bi_phys_segments = 2;
1328 else 1328 else
1329 bio->bi_phys_segments++; 1329 bio->bi_phys_segments++;
1330 spin_unlock(&conf->device_lock); 1330 spin_unlock_irq(&conf->device_lock);
1331 /* Cannot call generic_make_request directly 1331 /* Cannot call generic_make_request directly
1332 * as that will be queued in __generic_make_request 1332 * as that will be queued in __generic_make_request
1333 * and subsequent mempool_alloc might block 1333 * and subsequent mempool_alloc might block
@@ -3218,10 +3218,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3218 if (j == conf->copies) { 3218 if (j == conf->copies) {
3219 /* Cannot recover, so abort the recovery or 3219 /* Cannot recover, so abort the recovery or
3220 * record a bad block */ 3220 * record a bad block */
3221 put_buf(r10_bio);
3222 if (rb2)
3223 atomic_dec(&rb2->remaining);
3224 r10_bio = rb2;
3225 if (any_working) { 3221 if (any_working) {
3226 /* problem is that there are bad blocks 3222 /* problem is that there are bad blocks
3227 * on other device(s) 3223 * on other device(s)
@@ -3253,6 +3249,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3253 mirror->recovery_disabled 3249 mirror->recovery_disabled
3254 = mddev->recovery_disabled; 3250 = mddev->recovery_disabled;
3255 } 3251 }
3252 put_buf(r10_bio);
3253 if (rb2)
3254 atomic_dec(&rb2->remaining);
3255 r10_bio = rb2;
3256 break; 3256 break;
3257 } 3257 }
3258 } 3258 }
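The raid10 read-retry fix above measures how much of the original bio has already been handled from the sub-request's starting sector (r10_bio->sector), not from its size (r10_bio->sectors); the same hunk also pairs spin_lock_irq() with spin_unlock_irq(). A quick arithmetic check with example sector numbers:

/*
 * Arithmetic check for the sectors_handled fix: the handled amount is the
 * sub-request's start plus what it could serve, minus the original bio's
 * start. Sector numbers below are arbitrary examples.
 */
#include <stdio.h>

int main(void)
{
	long long bi_sector = 100;	/* start of the original bio */
	long long r10_sector = 100;	/* start of this sub-request */
	long long r10_sectors = 256;	/* size of this sub-request */
	long long max_sectors = 64;	/* what the chosen device could serve */

	long long old_formula = (r10_sectors + max_sectors) - bi_sector;
	long long fixed       = (r10_sector + max_sectors) - bi_sector;

	printf("old: %lld (wrong), fixed: %lld sectors handled\n",
	       old_formula, fixed);
	return 0;
}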
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cc055da02e2a..cbb15716a5db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -687,7 +687,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
687 } else { 687 } else {
688 if (!test_bit(STRIPE_HANDLE, &sh->state)) 688 if (!test_bit(STRIPE_HANDLE, &sh->state))
689 atomic_inc(&conf->active_stripes); 689 atomic_inc(&conf->active_stripes);
690 BUG_ON(list_empty(&sh->lru)); 690 BUG_ON(list_empty(&sh->lru) &&
691 !test_bit(STRIPE_EXPANDING, &sh->state));
691 list_del_init(&sh->lru); 692 list_del_init(&sh->lru);
692 if (sh->group) { 693 if (sh->group) {
693 sh->group->stripes_cnt--; 694 sh->group->stripes_cnt--;
@@ -3608,7 +3609,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3608 */ 3609 */
3609 set_bit(R5_Insync, &dev->flags); 3610 set_bit(R5_Insync, &dev->flags);
3610 3611
3611 if (rdev && test_bit(R5_WriteError, &dev->flags)) { 3612 if (test_bit(R5_WriteError, &dev->flags)) {
3612 /* This flag does not apply to '.replacement' 3613 /* This flag does not apply to '.replacement'
3613 * only to .rdev, so make sure to check that*/ 3614 * only to .rdev, so make sure to check that*/
3614 struct md_rdev *rdev2 = rcu_dereference( 3615 struct md_rdev *rdev2 = rcu_dereference(
@@ -3621,7 +3622,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
3621 } else 3622 } else
3622 clear_bit(R5_WriteError, &dev->flags); 3623 clear_bit(R5_WriteError, &dev->flags);
3623 } 3624 }
3624 if (rdev && test_bit(R5_MadeGood, &dev->flags)) { 3625 if (test_bit(R5_MadeGood, &dev->flags)) {
3625 /* This flag does not apply to '.replacement' 3626 /* This flag does not apply to '.replacement'
3626 * only to .rdev, so make sure to check that*/ 3627 * only to .rdev, so make sure to check that*/
3627 struct md_rdev *rdev2 = rcu_dereference( 3628 struct md_rdev *rdev2 = rcu_dereference(
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 11e20afbdcac..705698fd2c7e 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -1228,8 +1228,14 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
1228 1228
1229 pcr->remove_pci = true; 1229 pcr->remove_pci = true;
1230 1230
1231 cancel_delayed_work(&pcr->carddet_work); 1231 /* Disable interrupts at the pcr level */
1232 cancel_delayed_work(&pcr->idle_work); 1232 spin_lock_irq(&pcr->lock);
1233 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1234 pcr->bier = 0;
1235 spin_unlock_irq(&pcr->lock);
1236
1237 cancel_delayed_work_sync(&pcr->carddet_work);
1238 cancel_delayed_work_sync(&pcr->idle_work);
1233 1239
1234 mfd_remove_devices(&pcidev->dev); 1240 mfd_remove_devices(&pcidev->dev);
1235 1241
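The rtsx remove path above masks the interrupt-enable register under the lock before the now-synchronous work cancellation, so a late interrupt can no longer re-queue card-detect or idle work during teardown. A rough pthread model of that ordering; all names and the flag are illustrative:

/*
 * Model of the teardown ordering above: mask the interrupt source under the
 * lock first, then a synchronous cancel cannot race with a re-queue.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int bier = 0x1;		/* modelled interrupt-enable register */
static bool work_queued;

static void irq_handler(void)		/* would run from interrupt context */
{
	pthread_mutex_lock(&lock);
	if (bier)			/* only schedule work while enabled */
		work_queued = true;
	pthread_mutex_unlock(&lock);
}

static void remove_device(void)
{
	/* Disable interrupts at the device level first ... */
	pthread_mutex_lock(&lock);
	bier = 0;
	pthread_mutex_unlock(&lock);

	/* ... then the synchronous work cancellation cannot be undone by a
	 * late interrupt. */
	work_queued = false;
}

int main(void)
{
	remove_device();
	irq_handler();			/* late interrupt: no work is queued */
	printf("work_queued=%d\n", work_queued);
	return 0;
}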
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index d210d131fef2..0f55589a56b8 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -73,7 +73,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
73 return -ENOMEM; 73 return -ENOMEM;
74 } 74 }
75 info->map.cached = 75 info->map.cached =
76 ioremap_cached(info->map.phys, info->map.size); 76 ioremap_cache(info->map.phys, info->map.size);
77 if (!info->map.cached) 77 if (!info->map.cached)
78 printk(KERN_WARNING "Failed to ioremap cached %s\n", 78 printk(KERN_WARNING "Failed to ioremap cached %s\n",
79 info->map.name); 79 info->map.name);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 187b1b7772ef..4ced59436558 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2201,20 +2201,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
2201 2201
2202 port = &(SLAVE_AD_INFO(slave).port); 2202 port = &(SLAVE_AD_INFO(slave).port);
2203 2203
2204 // if slave is null, the whole port is not initialized 2204 /* if slave is null, the whole port is not initialized */
2205 if (!port->slave) { 2205 if (!port->slave) {
2206 pr_warning("Warning: %s: speed changed for uninitialized port on %s\n", 2206 pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
2207 slave->bond->dev->name, slave->dev->name); 2207 slave->bond->dev->name, slave->dev->name);
2208 return; 2208 return;
2209 } 2209 }
2210 2210
2211 __get_state_machine_lock(port);
2212
2211 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; 2213 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
2212 port->actor_oper_port_key = port->actor_admin_port_key |= 2214 port->actor_oper_port_key = port->actor_admin_port_key |=
2213 (__get_link_speed(port) << 1); 2215 (__get_link_speed(port) << 1);
2214 pr_debug("Port %d changed speed\n", port->actor_port_number); 2216 pr_debug("Port %d changed speed\n", port->actor_port_number);
2215 // there is no need to reselect a new aggregator, just signal the 2217 /* there is no need to reselect a new aggregator, just signal the
2216 // state machines to reinitialize 2218 * state machines to reinitialize
2219 */
2217 port->sm_vars |= AD_PORT_BEGIN; 2220 port->sm_vars |= AD_PORT_BEGIN;
2221
2222 __release_state_machine_lock(port);
2218} 2223}
2219 2224
2220/** 2225/**
@@ -2229,20 +2234,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
2229 2234
2230 port = &(SLAVE_AD_INFO(slave).port); 2235 port = &(SLAVE_AD_INFO(slave).port);
2231 2236
2232 // if slave is null, the whole port is not initialized 2237 /* if slave is null, the whole port is not initialized */
2233 if (!port->slave) { 2238 if (!port->slave) {
2234 pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n", 2239 pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
2235 slave->bond->dev->name, slave->dev->name); 2240 slave->bond->dev->name, slave->dev->name);
2236 return; 2241 return;
2237 } 2242 }
2238 2243
2244 __get_state_machine_lock(port);
2245
2239 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; 2246 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
2240 port->actor_oper_port_key = port->actor_admin_port_key |= 2247 port->actor_oper_port_key = port->actor_admin_port_key |=
2241 __get_duplex(port); 2248 __get_duplex(port);
2242 pr_debug("Port %d changed duplex\n", port->actor_port_number); 2249 pr_debug("Port %d changed duplex\n", port->actor_port_number);
2243 // there is no need to reselect a new aggregator, just signal the 2250 /* there is no need to reselect a new aggregator, just signal the
2244 // state machines to reinitialize 2251 * state machines to reinitialize
2252 */
2245 port->sm_vars |= AD_PORT_BEGIN; 2253 port->sm_vars |= AD_PORT_BEGIN;
2254
2255 __release_state_machine_lock(port);
2246} 2256}
2247 2257
2248/** 2258/**
@@ -2258,15 +2268,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2258 2268
2259 port = &(SLAVE_AD_INFO(slave).port); 2269 port = &(SLAVE_AD_INFO(slave).port);
2260 2270
2261 // if slave is null, the whole port is not initialized 2271 /* if slave is null, the whole port is not initialized */
2262 if (!port->slave) { 2272 if (!port->slave) {
2263 pr_warning("Warning: %s: link status changed for uninitialized port on %s\n", 2273 pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
2264 slave->bond->dev->name, slave->dev->name); 2274 slave->bond->dev->name, slave->dev->name);
2265 return; 2275 return;
2266 } 2276 }
2267 2277
2268 // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed) 2278 __get_state_machine_lock(port);
2269 // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report 2279 /* on link down we are zeroing duplex and speed since
2280 * some of the adaptors(ce1000.lan) report full duplex/speed
2281 * instead of N/A(duplex) / 0(speed).
2282 *
2283 * on link up we are forcing recheck on the duplex and speed since
 2284 * some of the adaptors (ce1000.lan) report.
2285 */
2270 if (link == BOND_LINK_UP) { 2286 if (link == BOND_LINK_UP) {
2271 port->is_enabled = true; 2287 port->is_enabled = true;
2272 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; 2288 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@ -2282,10 +2298,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2282 port->actor_oper_port_key = (port->actor_admin_port_key &= 2298 port->actor_oper_port_key = (port->actor_admin_port_key &=
2283 ~AD_SPEED_KEY_BITS); 2299 ~AD_SPEED_KEY_BITS);
2284 } 2300 }
2285 //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN"))); 2301 pr_debug("Port %d changed link status to %s",
2286 // there is no need to reselect a new aggregator, just signal the 2302 port->actor_port_number,
2287 // state machines to reinitialize 2303 (link == BOND_LINK_UP) ? "UP" : "DOWN");
2304 /* there is no need to reselect a new aggregator, just signal the
2305 * state machines to reinitialize
2306 */
2288 port->sm_vars |= AD_PORT_BEGIN; 2307 port->sm_vars |= AD_PORT_BEGIN;
2308
2309 __release_state_machine_lock(port);
2289} 2310}
2290 2311
2291/* 2312/*
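
The three bond_3ad handlers above now bracket their updates of actor_admin_port_key/actor_oper_port_key and the AD_PORT_BEGIN trigger with __get_state_machine_lock()/__release_state_machine_lock(), the driver's internal wrappers around the per-port state-machine lock, so a concurrent run of the periodic state machine cannot observe a half-updated key. The shape of that fix, reduced to a generic sketch with made-up names and masks:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	#define SPEED_KEY_MASK	0x3eU		/* illustrative bit layout */
	#define PORT_BEGIN	(1UL << 0)	/* "re-run the state machine" */

	struct port_state {
		spinlock_t lock;		/* protects keys and sm_vars */
		u16 admin_key;
		u16 oper_key;
		unsigned long sm_vars;
	};

	static void port_speed_changed(struct port_state *port, u16 speed_bits)
	{
		spin_lock_bh(&port->lock);	/* serialize with the SM worker */
		port->admin_key = (port->admin_key & ~SPEED_KEY_MASK) | speed_bits;
		port->oper_key = port->admin_key;
		port->sm_vars |= PORT_BEGIN;	/* picked up on the next SM run */
		spin_unlock_bh(&port->lock);
	}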
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 398e299ee1bd..6191b551a0e8 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1763,7 +1763,7 @@ static int __bond_release_one(struct net_device *bond_dev,
1763 } 1763 }
1764 1764
1765 if (all) { 1765 if (all) {
1766 rcu_assign_pointer(bond->curr_active_slave, NULL); 1766 RCU_INIT_POINTER(bond->curr_active_slave, NULL);
1767 } else if (oldcurrent == slave) { 1767 } else if (oldcurrent == slave) {
1768 /* 1768 /*
1769 * Note that we hold RTNL over this sequence, so there 1769 * Note that we hold RTNL over this sequence, so there
@@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond,
3732} 3732}
3733 3733
3734 3734
3735static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) 3735static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
3736 void *accel_priv)
3736{ 3737{
3737 /* 3738 /*
3738 * This helper function exists to help dev_pick_tx get the correct 3739 * This helper function exists to help dev_pick_tx get the correct
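
Both bonding (here) and bnx2x (further down) gain a third void *accel_priv argument in their .ndo_select_queue implementations, tracking the updated netdev_ops prototype of this kernel generation; drivers that do not implement accelerated-forwarding offload simply ignore it. A minimal sketch, assuming that three-argument prototype (my_select_queue is illustrative):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
				   void *accel_priv)
	{
		/* accel_priv only matters for macvlan-offload style paths; a
		 * plain driver ignores it and falls back to the stack's
		 * generic hash helper (two-argument form in this era).
		 */
		return skb_tx_hash(dev, skb);
	}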
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index fb3dd4399cf3..f615fdec0f1b 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -113,7 +113,7 @@ static const struct net_device_ops hydra_netdev_ops = {
113static int hydra_init(struct zorro_dev *z) 113static int hydra_init(struct zorro_dev *z)
114{ 114{
115 struct net_device *dev; 115 struct net_device *dev;
116 unsigned long board = ZTWO_VADDR(z->resource.start); 116 unsigned long board = (unsigned long)ZTWO_VADDR(z->resource.start);
117 unsigned long ioaddr = board+HYDRA_NIC_BASE; 117 unsigned long ioaddr = board+HYDRA_NIC_BASE;
118 const char name[] = "NE2000"; 118 const char name[] = "NE2000";
119 int start_page, stop_page; 119 int start_page, stop_page;
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 85ec4c2d2645..ae2a12b7db62 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -287,7 +287,7 @@ static const struct net_device_ops zorro8390_netdev_ops = {
287}; 287};
288 288
289static int zorro8390_init(struct net_device *dev, unsigned long board, 289static int zorro8390_init(struct net_device *dev, unsigned long board,
290 const char *name, unsigned long ioaddr) 290 const char *name, void __iomem *ioaddr)
291{ 291{
292 int i; 292 int i;
293 int err; 293 int err;
@@ -354,7 +354,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
354 start_page = NESM_START_PG; 354 start_page = NESM_START_PG;
355 stop_page = NESM_STOP_PG; 355 stop_page = NESM_STOP_PG;
356 356
357 dev->base_addr = ioaddr; 357 dev->base_addr = (unsigned long)ioaddr;
358 dev->irq = IRQ_AMIGA_PORTS; 358 dev->irq = IRQ_AMIGA_PORTS;
359 359
360 /* Install the Interrupt handler */ 360 /* Install the Interrupt handler */
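
In the two 8390 hunks above, ZTWO_VADDR() now hands back a void __iomem * so sparse can type-check MMIO accesses, and the explicit casts appear only where legacy net_device fields still store addresses as unsigned long. A small sketch of that boundary (attach_mmio is a made-up helper):

	#include <linux/io.h>
	#include <linux/netdevice.h>

	static void attach_mmio(struct net_device *dev, void __iomem *regs)
	{
		/* Keep the typed __iomem pointer for readb()/writeb() work;
		 * cast only when filling the legacy unsigned long field.
		 */
		dev->base_addr = (unsigned long)regs;
	}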
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 0866e7627433..56139184b801 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -57,6 +57,7 @@
57#include <linux/zorro.h> 57#include <linux/zorro.h>
58#include <linux/bitops.h> 58#include <linux/bitops.h>
59 59
60#include <asm/byteorder.h>
60#include <asm/irq.h> 61#include <asm/irq.h>
61#include <asm/amigaints.h> 62#include <asm/amigaints.h>
62#include <asm/amigahw.h> 63#include <asm/amigahw.h>
@@ -678,6 +679,7 @@ static int a2065_init_one(struct zorro_dev *z,
678 unsigned long base_addr = board + A2065_LANCE; 679 unsigned long base_addr = board + A2065_LANCE;
679 unsigned long mem_start = board + A2065_RAM; 680 unsigned long mem_start = board + A2065_RAM;
680 struct resource *r1, *r2; 681 struct resource *r1, *r2;
682 u32 serial;
681 int err; 683 int err;
682 684
683 r1 = request_mem_region(base_addr, sizeof(struct lance_regs), 685 r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
@@ -702,6 +704,7 @@ static int a2065_init_one(struct zorro_dev *z,
702 r1->name = dev->name; 704 r1->name = dev->name;
703 r2->name = dev->name; 705 r2->name = dev->name;
704 706
707 serial = be32_to_cpu(z->rom.er_SerialNumber);
705 dev->dev_addr[0] = 0x00; 708 dev->dev_addr[0] = 0x00;
706 if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */ 709 if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
707 dev->dev_addr[1] = 0x80; 710 dev->dev_addr[1] = 0x80;
@@ -710,11 +713,11 @@ static int a2065_init_one(struct zorro_dev *z,
710 dev->dev_addr[1] = 0x00; 713 dev->dev_addr[1] = 0x00;
711 dev->dev_addr[2] = 0x9f; 714 dev->dev_addr[2] = 0x9f;
712 } 715 }
713 dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff; 716 dev->dev_addr[3] = (serial >> 16) & 0xff;
714 dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff; 717 dev->dev_addr[4] = (serial >> 8) & 0xff;
715 dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff; 718 dev->dev_addr[5] = serial & 0xff;
716 dev->base_addr = ZTWO_VADDR(base_addr); 719 dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
717 dev->mem_start = ZTWO_VADDR(mem_start); 720 dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
718 dev->mem_end = dev->mem_start + A2065_RAM_SIZE; 721 dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
719 722
720 priv->ll = (volatile struct lance_regs *)dev->base_addr; 723 priv->ll = (volatile struct lance_regs *)dev->base_addr;
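
The Zorro expansion ROM stores er_SerialNumber big-endian, so a2065 (and ariadne just below) now convert it once with be32_to_cpu() and slice the MAC bytes from the host-order value instead of shifting the raw __be32. A self-contained sketch of the extraction (the 00:80:10 prefix is only an example OUI):

	#include <asm/byteorder.h>
	#include <linux/types.h>

	static void serial_to_mac(__be32 rom_serial, u8 mac[6])
	{
		u32 serial = be32_to_cpu(rom_serial);	/* to host order once */

		mac[0] = 0x00;				/* example vendor prefix */
		mac[1] = 0x80;
		mac[2] = 0x10;
		mac[3] = (serial >> 16) & 0xff;
		mac[4] = (serial >> 8) & 0xff;
		mac[5] = serial & 0xff;
	}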
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index c178eb4c8166..b08101b31b8b 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -51,6 +51,7 @@
51#include <linux/zorro.h> 51#include <linux/zorro.h>
52#include <linux/bitops.h> 52#include <linux/bitops.h>
53 53
54#include <asm/byteorder.h>
54#include <asm/amigaints.h> 55#include <asm/amigaints.h>
55#include <asm/amigahw.h> 56#include <asm/amigahw.h>
56#include <asm/irq.h> 57#include <asm/irq.h>
@@ -718,6 +719,7 @@ static int ariadne_init_one(struct zorro_dev *z,
718 struct resource *r1, *r2; 719 struct resource *r1, *r2;
719 struct net_device *dev; 720 struct net_device *dev;
720 struct ariadne_private *priv; 721 struct ariadne_private *priv;
722 u32 serial;
721 int err; 723 int err;
722 724
723 r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960"); 725 r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
@@ -741,14 +743,15 @@ static int ariadne_init_one(struct zorro_dev *z,
741 r1->name = dev->name; 743 r1->name = dev->name;
742 r2->name = dev->name; 744 r2->name = dev->name;
743 745
746 serial = be32_to_cpu(z->rom.er_SerialNumber);
744 dev->dev_addr[0] = 0x00; 747 dev->dev_addr[0] = 0x00;
745 dev->dev_addr[1] = 0x60; 748 dev->dev_addr[1] = 0x60;
746 dev->dev_addr[2] = 0x30; 749 dev->dev_addr[2] = 0x30;
747 dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff; 750 dev->dev_addr[3] = (serial >> 16) & 0xff;
748 dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff; 751 dev->dev_addr[4] = (serial >> 8) & 0xff;
749 dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff; 752 dev->dev_addr[5] = serial & 0xff;
750 dev->base_addr = ZTWO_VADDR(base_addr); 753 dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
751 dev->mem_start = ZTWO_VADDR(mem_start); 754 dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
752 dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE; 755 dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
753 756
754 dev->netdev_ops = &ariadne_netdev_ops; 757 dev->netdev_ops = &ariadne_netdev_ops;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b2ffad1304d2..248baf6273fb 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
565 /* Make sure pointer to data buffer is set */ 565 /* Make sure pointer to data buffer is set */
566 wmb(); 566 wmb();
567 567
568 skb_tx_timestamp(skb);
569
568 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); 570 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
569 571
570 /* Increment index to point to the next BD */ 572 /* Increment index to point to the next BD */
@@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
579 581
580 arc_reg_set(priv, R_STATUS, TXPL_MASK); 582 arc_reg_set(priv, R_STATUS, TXPL_MASK);
581 583
582 skb_tx_timestamp(skb);
583
584 return NETDEV_TX_OK; 584 return NETDEV_TX_OK;
585} 585}
586 586
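
The arc_emac change is purely about ordering: skb_tx_timestamp() must run before the descriptor's info word hands ownership to the MAC, because once FOR_EMAC is set the completion path may free the skb at any time. A hedged sketch of that publish sequence, with an invented descriptor layout (tx_desc, DESC_OWNED_BY_HW and queue_for_tx are illustrative):

	#include <asm/barrier.h>
	#include <asm/byteorder.h>
	#include <linux/skbuff.h>
	#include <linux/types.h>

	struct tx_desc {			/* illustrative layout only */
		__le32 info;
		__le32 addr;
	};
	#define DESC_OWNED_BY_HW	(1U << 31)

	static void queue_for_tx(struct tx_desc *desc, struct sk_buff *skb,
				 dma_addr_t dma, unsigned int len)
	{
		desc->addr = cpu_to_le32((u32)dma);

		skb_tx_timestamp(skb);	/* before HW/TX completion can free it */

		wmb();			/* address visible before ownership flips */
		desc->info = cpu_to_le32(DESC_OWNED_BY_HW | len);
		/* ...then kick the DMA / set the TX-pending bit... */
	}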
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index a36a760ada28..29801750f239 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
145 * Mask some pcie error bits 145 * Mask some pcie error bits
146 */ 146 */
147 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); 147 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
148 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); 148 if (pos) {
149 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); 149 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
150 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); 150 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
151 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
152 }
151 /* clear error status */ 153 /* clear error status */
152 pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, 154 pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
153 PCI_EXP_DEVSTA_NFED | 155 PCI_EXP_DEVSTA_NFED |
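
pci_find_ext_capability() returns 0 when the requested extended capability is absent, so atl1c now guards the AER severity read-modify-write with if (pos) instead of poking offset 0 of config space on AER-less devices. A minimal sketch of the same guard (mask_aer_bits is a made-up wrapper):

	#include <linux/pci.h>

	static void mask_aer_bits(struct pci_dev *pdev, u32 clear_mask)
	{
		int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
		u32 sever;

		if (!pos)		/* device has no AER capability */
			return;

		pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &sever);
		sever &= ~clear_mask;
		pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, sever);
	}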
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a1f66e2c9a86..ec6119089b82 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -520,10 +520,12 @@ struct bnx2x_fastpath {
520#define BNX2X_FP_STATE_IDLE 0 520#define BNX2X_FP_STATE_IDLE 0
521#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ 521#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
522#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ 522#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
523#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */ 523#define BNX2X_FP_STATE_DISABLED (1 << 2)
524#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */ 524#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
525#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
526#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
525#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) 527#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
526#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) 528#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
527#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) 529#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
528 /* protect state */ 530 /* protect state */
529 spinlock_t lock; 531 spinlock_t lock;
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
613{ 615{
614 bool rc = true; 616 bool rc = true;
615 617
616 spin_lock(&fp->lock); 618 spin_lock_bh(&fp->lock);
617 if (fp->state & BNX2X_FP_LOCKED) { 619 if (fp->state & BNX2X_FP_LOCKED) {
618 WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); 620 WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
619 fp->state |= BNX2X_FP_STATE_NAPI_YIELD; 621 fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
622 /* we don't care if someone yielded */ 624 /* we don't care if someone yielded */
623 fp->state = BNX2X_FP_STATE_NAPI; 625 fp->state = BNX2X_FP_STATE_NAPI;
624 } 626 }
625 spin_unlock(&fp->lock); 627 spin_unlock_bh(&fp->lock);
626 return rc; 628 return rc;
627} 629}
628 630
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
631{ 633{
632 bool rc = false; 634 bool rc = false;
633 635
634 spin_lock(&fp->lock); 636 spin_lock_bh(&fp->lock);
635 WARN_ON(fp->state & 637 WARN_ON(fp->state &
636 (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); 638 (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
637 639
638 if (fp->state & BNX2X_FP_STATE_POLL_YIELD) 640 if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
639 rc = true; 641 rc = true;
640 fp->state = BNX2X_FP_STATE_IDLE; 642
641 spin_unlock(&fp->lock); 643 /* state ==> idle, unless currently disabled */
644 fp->state &= BNX2X_FP_STATE_DISABLED;
645 spin_unlock_bh(&fp->lock);
642 return rc; 646 return rc;
643} 647}
644 648
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
669 673
670 if (fp->state & BNX2X_FP_STATE_POLL_YIELD) 674 if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
671 rc = true; 675 rc = true;
672 fp->state = BNX2X_FP_STATE_IDLE; 676
677 /* state ==> idle, unless currently disabled */
678 fp->state &= BNX2X_FP_STATE_DISABLED;
673 spin_unlock_bh(&fp->lock); 679 spin_unlock_bh(&fp->lock);
674 return rc; 680 return rc;
675} 681}
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
677/* true if a socket is polling, even if it did not get the lock */ 683/* true if a socket is polling, even if it did not get the lock */
678static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) 684static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
679{ 685{
680 WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); 686 WARN_ON(!(fp->state & BNX2X_FP_OWNED));
681 return fp->state & BNX2X_FP_USER_PEND; 687 return fp->state & BNX2X_FP_USER_PEND;
682} 688}
689
690/* false if fp is currently owned */
691static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
692{
693 int rc = true;
694
695 spin_lock_bh(&fp->lock);
696 if (fp->state & BNX2X_FP_OWNED)
697 rc = false;
698 fp->state |= BNX2X_FP_STATE_DISABLED;
699 spin_unlock_bh(&fp->lock);
700
701 return rc;
702}
683#else 703#else
684static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) 704static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
685{ 705{
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
709{ 729{
710 return false; 730 return false;
711} 731}
732static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
733{
734 return true;
735}
712#endif /* CONFIG_NET_RX_BUSY_POLL */ 736#endif /* CONFIG_NET_RX_BUSY_POLL */
713 737
714/* Use 2500 as a mini-jumbo MTU for FCoE */ 738/* Use 2500 as a mini-jumbo MTU for FCoE */
@@ -1250,7 +1274,10 @@ struct bnx2x_slowpath {
1250 * Therefore, if they would have been defined in the same union, 1274 * Therefore, if they would have been defined in the same union,
1251 * data can get corrupted. 1275 * data can get corrupted.
1252 */ 1276 */
1253 struct afex_vif_list_ramrod_data func_afex_rdata; 1277 union {
1278 struct afex_vif_list_ramrod_data viflist_data;
1279 struct function_update_data func_update;
1280 } func_afex_rdata;
1254 1281
1255 /* used by dmae command executer */ 1282 /* used by dmae command executer */
1256 struct dmae_command dmae[MAX_DMAE_C]; 1283 struct dmae_command dmae[MAX_DMAE_C];
@@ -2499,4 +2526,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
2499#define MCPR_SCRATCH_BASE(bp) \ 2526#define MCPR_SCRATCH_BASE(bp) \
2500 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) 2527 (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
2501 2528
2529#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
2530
2502#endif /* bnx2x.h */ 2531#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec96130533cc..bf811565ee24 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
160 struct sk_buff *skb = tx_buf->skb; 160 struct sk_buff *skb = tx_buf->skb;
161 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; 161 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
162 int nbd; 162 int nbd;
163 u16 split_bd_len = 0;
163 164
164 /* prefetch skb end pointer to speedup dev_kfree_skb() */ 165 /* prefetch skb end pointer to speedup dev_kfree_skb() */
165 prefetch(&skb->end); 166 prefetch(&skb->end);
@@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
167 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", 168 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
168 txdata->txq_index, idx, tx_buf, skb); 169 txdata->txq_index, idx, tx_buf, skb);
169 170
170 /* unmap first bd */
171 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; 171 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
172 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
173 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
174 172
175 nbd = le16_to_cpu(tx_start_bd->nbd) - 1; 173 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
176#ifdef BNX2X_STOP_ON_ERROR 174#ifdef BNX2X_STOP_ON_ERROR
@@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
188 --nbd; 186 --nbd;
189 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); 187 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
190 188
191 /* ...and the TSO split header bd since they have no mapping */ 189 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
192 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { 190 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
191 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
192 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
193 --nbd; 193 --nbd;
194 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); 194 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
195 } 195 }
196 196
197 /* unmap first bd */
198 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
199 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
200 DMA_TO_DEVICE);
201
197 /* now free frags */ 202 /* now free frags */
198 while (nbd > 0) { 203 while (nbd > 0) {
199 204
@@ -1790,26 +1795,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1790{ 1795{
1791 int i; 1796 int i;
1792 1797
1793 local_bh_disable();
1794 for_each_rx_queue_cnic(bp, i) { 1798 for_each_rx_queue_cnic(bp, i) {
1795 napi_disable(&bnx2x_fp(bp, i, napi)); 1799 napi_disable(&bnx2x_fp(bp, i, napi));
1796 while (!bnx2x_fp_lock_napi(&bp->fp[i])) 1800 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1797 mdelay(1); 1801 usleep_range(1000, 2000);
1798 } 1802 }
1799 local_bh_enable();
1800} 1803}
1801 1804
1802static void bnx2x_napi_disable(struct bnx2x *bp) 1805static void bnx2x_napi_disable(struct bnx2x *bp)
1803{ 1806{
1804 int i; 1807 int i;
1805 1808
1806 local_bh_disable();
1807 for_each_eth_queue(bp, i) { 1809 for_each_eth_queue(bp, i) {
1808 napi_disable(&bnx2x_fp(bp, i, napi)); 1810 napi_disable(&bnx2x_fp(bp, i, napi));
1809 while (!bnx2x_fp_lock_napi(&bp->fp[i])) 1811 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1810 mdelay(1); 1812 usleep_range(1000, 2000);
1811 } 1813 }
1812 local_bh_enable();
1813} 1814}
1814 1815
1815void bnx2x_netif_start(struct bnx2x *bp) 1816void bnx2x_netif_start(struct bnx2x *bp)
@@ -1832,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1832 bnx2x_napi_disable_cnic(bp); 1833 bnx2x_napi_disable_cnic(bp);
1833} 1834}
1834 1835
1835u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) 1836u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1837 void *accel_priv)
1836{ 1838{
1837 struct bnx2x *bp = netdev_priv(dev); 1839 struct bnx2x *bp = netdev_priv(dev);
1838 1840
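
The bnx2x busy-poll rework replaces the old local_bh_disable()/lock_napi loop with a dedicated BNX2X_FP_STATE_DISABLED bit: bnx2x_fp_ll_disable() marks the ring so no new owner (NAPI or a busy-polling socket) can take it, and the disable path simply retries until the current owner has dropped out. The handshake, reduced to a sketch that mirrors the idea rather than the driver verbatim:

	#include <linux/delay.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	#define FP_NAPI		(1 << 0)	/* NAPI owns the ring */
	#define FP_POLL		(1 << 1)	/* busy-polling socket owns it */
	#define FP_DISABLED	(1 << 2)	/* no new owners allowed */
	#define FP_OWNED	(FP_NAPI | FP_POLL)

	struct fp { spinlock_t lock; unsigned int state; };

	static bool fp_try_disable(struct fp *fp)
	{
		bool idle = true;

		spin_lock_bh(&fp->lock);
		if (fp->state & FP_OWNED)
			idle = false;		/* someone still holds it */
		fp->state |= FP_DISABLED;	/* block future lock attempts */
		spin_unlock_bh(&fp->lock);
		return idle;
	}

	static void fp_disable(struct fp *fp)
	{
		while (!fp_try_disable(fp))
			usleep_range(1000, 2000); /* wait for the owner to exit */
	}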
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index da8fcaa74495..41f3ca5ad972 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
524int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); 524int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
525 525
526/* select_queue callback */ 526/* select_queue callback */
527u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); 527u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
528 void *accel_priv);
528 529
529static inline void bnx2x_update_rx_prod(struct bnx2x *bp, 530static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
530 struct bnx2x_fastpath *fp, 531 struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 20dcc02431ca..11fc79585491 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3865 3865
3866 bnx2x_warpcore_enable_AN_KR2(phy, params, vars); 3866 bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
3867 } else { 3867 } else {
3868 /* Enable Auto-Detect to support 1G over CL37 as well */
3869 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3870 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
3871
3872 /* Force cl48 sync_status LOW to avoid getting stuck in CL73
3873 * parallel-detect loop when CL73 and CL37 are enabled.
3874 */
3875 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
3876 MDIO_AER_BLOCK_AER_REG, 0);
3877 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3878 MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
3879 bnx2x_set_aer_mmd(params, phy);
3880
3868 bnx2x_disable_kr2(params, vars, phy); 3881 bnx2x_disable_kr2(params, vars, phy);
3869 } 3882 }
3870 3883
@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8120 *edc_mode = EDC_MODE_ACTIVE_DAC; 8133 *edc_mode = EDC_MODE_ACTIVE_DAC;
8121 else 8134 else
8122 check_limiting_mode = 1; 8135 check_limiting_mode = 1;
8123 } else if (copper_module_type & 8136 } else {
8124 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) { 8137 *edc_mode = EDC_MODE_PASSIVE_DAC;
8138 /* Even in case PASSIVE_DAC indication is not set,
8139 * treat it as a passive DAC cable, since some cables
8140 * don't have this indication.
8141 */
8142 if (copper_module_type &
8143 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
8125 DP(NETIF_MSG_LINK, 8144 DP(NETIF_MSG_LINK,
8126 "Passive Copper cable detected\n"); 8145 "Passive Copper cable detected\n");
8127 *edc_mode = 8146 } else {
8128 EDC_MODE_PASSIVE_DAC; 8147 DP(NETIF_MSG_LINK,
8129 } else { 8148 "Unknown copper-cable-type\n");
8130 DP(NETIF_MSG_LINK, 8149 }
8131 "Unknown copper-cable-type 0x%x !!!\n",
8132 copper_module_type);
8133 return -EINVAL;
8134 } 8150 }
8135 break; 8151 break;
8136 } 8152 }
@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10825 (1<<11)); 10841 (1<<11));
10826 10842
10827 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10843 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10828 (phy->speed_cap_mask & 10844 (phy->speed_cap_mask &
10829 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || 10845 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
10830 (phy->req_line_speed == SPEED_1000)) { 10846 (phy->req_line_speed == SPEED_1000)) {
10831 an_1000_val |= (1<<8); 10847 an_1000_val |= (1<<8);
10832 autoneg_val |= (1<<9 | 1<<12); 10848 autoneg_val |= (1<<9 | 1<<12);
10833 if (phy->req_duplex == DUPLEX_FULL) 10849 if (phy->req_duplex == DUPLEX_FULL)
@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10843 0x09, 10859 0x09,
10844 &an_1000_val); 10860 &an_1000_val);
10845 10861
10846 /* Set 100 speed advertisement */ 10862 /* Advertise 10/100 link speed */
10847 if (((phy->req_line_speed == SPEED_AUTO_NEG) && 10863 if (phy->req_line_speed == SPEED_AUTO_NEG) {
10848 (phy->speed_cap_mask & 10864 if (phy->speed_cap_mask &
10849 (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | 10865 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
10850 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { 10866 an_10_100_val |= (1<<5);
10851 an_10_100_val |= (1<<7); 10867 autoneg_val |= (1<<9 | 1<<12);
10852 /* Enable autoneg and restart autoneg for legacy speeds */ 10868 DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
10853 autoneg_val |= (1<<9 | 1<<12); 10869 }
10854 10870 if (phy->speed_cap_mask &
10855 if (phy->req_duplex == DUPLEX_FULL) 10871 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
10856 an_10_100_val |= (1<<8);
10857 DP(NETIF_MSG_LINK, "Advertising 100M\n");
10858 }
10859
10860 /* Set 10 speed advertisement */
10861 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
10862 (phy->speed_cap_mask &
10863 (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
10864 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
10865 an_10_100_val |= (1<<5);
10866 autoneg_val |= (1<<9 | 1<<12);
10867 if (phy->req_duplex == DUPLEX_FULL)
10868 an_10_100_val |= (1<<6); 10872 an_10_100_val |= (1<<6);
10869 DP(NETIF_MSG_LINK, "Advertising 10M\n"); 10873 autoneg_val |= (1<<9 | 1<<12);
10874 DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
10875 }
10876 if (phy->speed_cap_mask &
10877 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
10878 an_10_100_val |= (1<<7);
10879 autoneg_val |= (1<<9 | 1<<12);
10880 DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
10881 }
10882 if (phy->speed_cap_mask &
10883 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
10884 an_10_100_val |= (1<<8);
10885 autoneg_val |= (1<<9 | 1<<12);
10886 DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
10887 }
10870 } 10888 }
10871 10889
10872 /* Only 10/100 are allowed to work in FORCE mode */ 10890 /* Only 10/100 are allowed to work in FORCE mode */
@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
13342 DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, 13360 DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
13343 old_status, status); 13361 old_status, status);
13344 13362
13363 /* Do not touch the link in case physical link down */
13364 if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
13365 return 1;
13366
13345 /* a. Update shmem->link_status accordingly 13367 /* a. Update shmem->link_status accordingly
13346 * b. Update link_vars->link_up 13368 * b. Update link_vars->link_up
13347 */ 13369 */
@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13550 */ 13572 */
13551 not_kr2_device = (((base_page & 0x8000) == 0) || 13573 not_kr2_device = (((base_page & 0x8000) == 0) ||
13552 (((base_page & 0x8000) && 13574 (((base_page & 0x8000) &&
13553 ((next_page & 0xe0) == 0x2)))); 13575 ((next_page & 0xe0) == 0x20))));
13554 13576
13555 /* In case KR2 is already disabled, check if we need to re-enable it */ 13577 /* In case KR2 is already disabled, check if we need to re-enable it */
13556 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { 13578 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 814d0eca9b33..0067b975873f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
11447 } 11447 }
11448 } 11448 }
11449 11449
11450 /* adjust igu_sb_cnt to MF for E1x */ 11450 /* adjust igu_sb_cnt to MF for E1H */
11451 if (CHIP_IS_E1x(bp) && IS_MF(bp)) 11451 if (CHIP_IS_E1H(bp) && IS_MF(bp))
11452 bp->igu_sb_cnt /= E1HVN_MAX; 11452 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
11453 11453
11454 /* port info */ 11454 /* port info */
11455 bnx2x_get_port_hwinfo(bp); 11455 bnx2x_get_port_hwinfo(bp);
@@ -12942,25 +12942,26 @@ static void __bnx2x_remove(struct pci_dev *pdev,
12942 pci_set_power_state(pdev, PCI_D3hot); 12942 pci_set_power_state(pdev, PCI_D3hot);
12943 } 12943 }
12944 12944
12945 if (bp->regview) 12945 if (remove_netdev) {
12946 iounmap(bp->regview); 12946 if (bp->regview)
12947 iounmap(bp->regview);
12947 12948
12948 /* for vf doorbells are part of the regview and were unmapped along with 12949 /* For vfs, doorbells are part of the regview and were unmapped
12949 * it. FW is only loaded by PF. 12950 * along with it. FW is only loaded by PF.
12950 */ 12951 */
12951 if (IS_PF(bp)) { 12952 if (IS_PF(bp)) {
12952 if (bp->doorbells) 12953 if (bp->doorbells)
12953 iounmap(bp->doorbells); 12954 iounmap(bp->doorbells);
12954 12955
12955 bnx2x_release_firmware(bp); 12956 bnx2x_release_firmware(bp);
12956 } 12957 }
12957 bnx2x_free_mem_bp(bp); 12958 bnx2x_free_mem_bp(bp);
12958 12959
12959 if (remove_netdev)
12960 free_netdev(dev); 12960 free_netdev(dev);
12961 12961
12962 if (atomic_read(&pdev->enable_cnt) == 1) 12962 if (atomic_read(&pdev->enable_cnt) == 1)
12963 pci_release_regions(pdev); 12963 pci_release_regions(pdev);
12964 }
12964 12965
12965 pci_disable_device(pdev); 12966 pci_disable_device(pdev);
12966} 12967}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 3efbb35267c8..14ffb6e56e59 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7179,6 +7179,7 @@ Theotherbitsarereservedandshouldbezero*/
7179#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca 7179#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
7180#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da 7180#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
7181#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea 7181#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
7182#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
7182#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 7183#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
7183#define MDIO_WC_REG_XGXS_STATUS3 0x8129 7184#define MDIO_WC_REG_XGXS_STATUS3 0x8129
7184#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 7185#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 32c92abf5094..18438a504d57 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2038 struct bnx2x_vlan_mac_ramrod_params p; 2038 struct bnx2x_vlan_mac_ramrod_params p;
2039 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; 2039 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
2040 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; 2040 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
2041 unsigned long flags;
2041 int read_lock; 2042 int read_lock;
2042 int rc = 0; 2043 int rc = 0;
2043 2044
@@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2046 spin_lock_bh(&exeq->lock); 2047 spin_lock_bh(&exeq->lock);
2047 2048
2048 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { 2049 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
2049 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == 2050 flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
2050 *vlan_mac_flags) { 2051 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2052 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2051 rc = exeq->remove(bp, exeq->owner, exeq_pos); 2053 rc = exeq->remove(bp, exeq->owner, exeq_pos);
2052 if (rc) { 2054 if (rc) {
2053 BNX2X_ERR("Failed to remove command\n"); 2055 BNX2X_ERR("Failed to remove command\n");
@@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
2080 return read_lock; 2082 return read_lock;
2081 2083
2082 list_for_each_entry(pos, &o->head, link) { 2084 list_for_each_entry(pos, &o->head, link) {
2083 if (pos->vlan_mac_flags == *vlan_mac_flags) { 2085 flags = pos->vlan_mac_flags;
2086 if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
2087 BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2084 p.user_req.vlan_mac_flags = pos->vlan_mac_flags; 2088 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2085 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); 2089 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
2086 rc = bnx2x_config_vlan_mac(bp, &p); 2090 rc = bnx2x_config_vlan_mac(bp, &p);
@@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
4382 struct bnx2x_raw_obj *r = &o->raw; 4386 struct bnx2x_raw_obj *r = &o->raw;
4383 4387
4384 /* Do nothing if only driver cleanup was requested */ 4388 /* Do nothing if only driver cleanup was requested */
4385 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) 4389 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4390 DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
4391 p->ramrod_flags);
4386 return 0; 4392 return 0;
4393 }
4387 4394
4388 r->set_pending(r); 4395 r->set_pending(r);
4389 4396
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 658f4e33abf9..6a53c15c85a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -266,6 +266,13 @@ enum {
266 BNX2X_DONT_CONSUME_CAM_CREDIT, 266 BNX2X_DONT_CONSUME_CAM_CREDIT,
267 BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, 267 BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
268}; 268};
269/* When looking for matching filters, some flags are not interesting */
270#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
271 1 << BNX2X_ETH_MAC | \
272 1 << BNX2X_ISCSI_ETH_MAC | \
273 1 << BNX2X_NETQ_ETH_MAC)
274#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
275 ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
269 276
270struct bnx2x_vlan_mac_ramrod_params { 277struct bnx2x_vlan_mac_ramrod_params {
271 /* Object to run the command from */ 278 /* Object to run the command from */
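
bnx2x_vlan_mac_del_all() previously demanded an exact match on vlan_mac_flags, so entries that differed only in bookkeeping bits (such as BNX2X_DONT_CONSUME_CAM_CREDIT) escaped deletion; the new BNX2X_VLAN_MAC_CMP_MASK/BNX2X_VLAN_MAC_CMP_FLAGS macros compare just the classification bits. The underlying idiom is a masked bitmap comparison; a tiny standalone illustration with invented flag names:

	#include <stdbool.h>
	#include <stdio.h>

	#define F_UC_MAC	(1u << 0)	/* classification bits that matter */
	#define F_ETH_MAC	(1u << 1)
	#define F_NO_CREDIT	(1u << 4)	/* bookkeeping, ignored for matching */

	#define CMP_MASK	(F_UC_MAC | F_ETH_MAC)
	#define CMP(flags)	((flags) & CMP_MASK)

	static bool filters_match(unsigned long a, unsigned long b)
	{
		return CMP(a) == CMP(b);
	}

	int main(void)
	{
		/* same ETH_MAC class, differing only in the credit flag: match */
		printf("%d\n", filters_match(F_ETH_MAC | F_NO_CREDIT, F_ETH_MAC));
		return 0;
	}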
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 2e46c28fc601..e7845e5be1c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
1209 /* next state */ 1209 /* next state */
1210 vfop->state = BNX2X_VFOP_RXMODE_DONE; 1210 vfop->state = BNX2X_VFOP_RXMODE_DONE;
1211 1211
1212 /* record the accept flags in vfdb so hypervisor can modify them
1213 * if necessary
1214 */
1215 bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
1216 ramrod->rx_accept_flags;
1212 vfop->rc = bnx2x_config_rx_mode(bp, ramrod); 1217 vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
1213 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 1218 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
1214op_err: 1219op_err:
@@ -1224,39 +1229,43 @@ op_pending:
1224 return; 1229 return;
1225} 1230}
1226 1231
1232static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
1233 struct bnx2x_rx_mode_ramrod_params *ramrod,
1234 struct bnx2x_virtf *vf,
1235 unsigned long accept_flags)
1236{
1237 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1238
1239 memset(ramrod, 0, sizeof(*ramrod));
1240 ramrod->cid = vfq->cid;
1241 ramrod->cl_id = vfq_cl_id(vf, vfq);
1242 ramrod->rx_mode_obj = &bp->rx_mode_obj;
1243 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1244 ramrod->rx_accept_flags = accept_flags;
1245 ramrod->tx_accept_flags = accept_flags;
1246 ramrod->pstate = &vf->filter_state;
1247 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1248
1249 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1250 set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1251 set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1252
1253 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1254 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1255}
1256
1227int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, 1257int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
1228 struct bnx2x_virtf *vf, 1258 struct bnx2x_virtf *vf,
1229 struct bnx2x_vfop_cmd *cmd, 1259 struct bnx2x_vfop_cmd *cmd,
1230 int qid, unsigned long accept_flags) 1260 int qid, unsigned long accept_flags)
1231{ 1261{
1232 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
1233 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 1262 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
1234 1263
1235 if (vfop) { 1264 if (vfop) {
1236 struct bnx2x_rx_mode_ramrod_params *ramrod = 1265 struct bnx2x_rx_mode_ramrod_params *ramrod =
1237 &vf->op_params.rx_mode; 1266 &vf->op_params.rx_mode;
1238 1267
1239 memset(ramrod, 0, sizeof(*ramrod)); 1268 bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
1240
1241 /* Prepare ramrod parameters */
1242 ramrod->cid = vfq->cid;
1243 ramrod->cl_id = vfq_cl_id(vf, vfq);
1244 ramrod->rx_mode_obj = &bp->rx_mode_obj;
1245 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
1246
1247 ramrod->rx_accept_flags = accept_flags;
1248 ramrod->tx_accept_flags = accept_flags;
1249 ramrod->pstate = &vf->filter_state;
1250 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
1251
1252 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1253 set_bit(RAMROD_RX, &ramrod->ramrod_flags);
1254 set_bit(RAMROD_TX, &ramrod->ramrod_flags);
1255
1256 ramrod->rdata =
1257 bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
1258 ramrod->rdata_mapping =
1259 bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
1260 1269
1261 bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG, 1270 bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
1262 bnx2x_vfop_rxmode, cmd->done); 1271 bnx2x_vfop_rxmode, cmd->done);
@@ -3202,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
3202 bnx2x_iov_static_resc(bp, vf); 3211 bnx2x_iov_static_resc(bp, vf);
3203 } 3212 }
3204 3213
3205 /* prepare msix vectors in VF configuration space */ 3214 /* prepare msix vectors in VF configuration space - the value in the
3215 * PCI configuration space should be the index of the last entry,
3216 * namely one less than the actual size of the table
3217 */
3206 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 3218 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
3207 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3219 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
3208 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3220 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
3209 num_vf_queues); 3221 num_vf_queues - 1);
3210 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", 3222 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
3211 vf_idx, num_vf_queues); 3223 vf_idx, num_vf_queues - 1);
3212 } 3224 }
3213 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3225 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
3214 3226
@@ -3436,10 +3448,18 @@ out:
3436 3448
3437int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 3449int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3438{ 3450{
3451 struct bnx2x_queue_state_params q_params = {NULL};
3452 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3453 struct bnx2x_queue_update_params *update_params;
3454 struct pf_vf_bulletin_content *bulletin = NULL;
3455 struct bnx2x_rx_mode_ramrod_params rx_ramrod;
3439 struct bnx2x *bp = netdev_priv(dev); 3456 struct bnx2x *bp = netdev_priv(dev);
3440 int rc, q_logical_state; 3457 struct bnx2x_vlan_mac_obj *vlan_obj;
3458 unsigned long vlan_mac_flags = 0;
3459 unsigned long ramrod_flags = 0;
3441 struct bnx2x_virtf *vf = NULL; 3460 struct bnx2x_virtf *vf = NULL;
3442 struct pf_vf_bulletin_content *bulletin = NULL; 3461 unsigned long accept_flags;
3462 int rc;
3443 3463
3444 /* sanity and init */ 3464 /* sanity and init */
3445 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3465 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@ -3457,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3457 /* update PF's copy of the VF's bulletin. No point in posting the vlan 3477 /* update PF's copy of the VF's bulletin. No point in posting the vlan
3458 * to the VF since it doesn't have anything to do with it. But it useful 3478 * to the VF since it doesn't have anything to do with it. But it useful
3459 * to store it here in case the VF is not up yet and we can only 3479 * to store it here in case the VF is not up yet and we can only
3460 * configure the vlan later when it does. 3480 * configure the vlan later when it does. Treat vlan id 0 as remove the
3481 * Host tag.
3461 */ 3482 */
3462 bulletin->valid_bitmap |= 1 << VLAN_VALID; 3483 if (vlan > 0)
3484 bulletin->valid_bitmap |= 1 << VLAN_VALID;
3485 else
3486 bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
3463 bulletin->vlan = vlan; 3487 bulletin->vlan = vlan;
3464 3488
3465 /* is vf initialized and queue set up? */ 3489 /* is vf initialized and queue set up? */
3466 q_logical_state = 3490 if (vf->state != VF_ENABLED ||
3467 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); 3491 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
3468 if (vf->state == VF_ENABLED && 3492 BNX2X_Q_LOGICAL_STATE_ACTIVE)
3469 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3493 return rc;
3470 /* configure the vlan in device on this vf's queue */
3471 unsigned long ramrod_flags = 0;
3472 unsigned long vlan_mac_flags = 0;
3473 struct bnx2x_vlan_mac_obj *vlan_obj =
3474 &bnx2x_leading_vfq(vf, vlan_obj);
3475 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3476 struct bnx2x_queue_state_params q_params = {NULL};
3477 struct bnx2x_queue_update_params *update_params;
3478 3494
3479 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); 3495 /* configure the vlan in device on this vf's queue */
3480 if (rc) 3496 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3481 return rc; 3497 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
3482 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3498 if (rc)
3499 return rc;
3483 3500
3484 /* must lock vfpf channel to protect against vf flows */ 3501 /* must lock vfpf channel to protect against vf flows */
3485 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3502 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3486 3503
3487 /* remove existing vlans */ 3504 /* remove existing vlans */
3488 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3505 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3489 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 3506 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3490 &ramrod_flags); 3507 &ramrod_flags);
3491 if (rc) { 3508 if (rc) {
3492 BNX2X_ERR("failed to delete vlans\n"); 3509 BNX2X_ERR("failed to delete vlans\n");
3493 rc = -EINVAL; 3510 rc = -EINVAL;
3494 goto out; 3511 goto out;
3495 } 3512 }
3513
3514 /* need to remove/add the VF's accept_any_vlan bit */
3515 accept_flags = bnx2x_leading_vfq(vf, accept_flags);
3516 if (vlan)
3517 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3518 else
3519 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
3520
3521 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
3522 accept_flags);
3523 bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
3524 bnx2x_config_rx_mode(bp, &rx_ramrod);
3525
3526 /* configure the new vlan to device */
3527 memset(&ramrod_param, 0, sizeof(ramrod_param));
3528 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3529 ramrod_param.vlan_mac_obj = vlan_obj;
3530 ramrod_param.ramrod_flags = ramrod_flags;
3531 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
3532 &ramrod_param.user_req.vlan_mac_flags);
3533 ramrod_param.user_req.u.vlan.vlan = vlan;
3534 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3535 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3536 if (rc) {
3537 BNX2X_ERR("failed to configure vlan\n");
3538 rc = -EINVAL;
3539 goto out;
3540 }
3496 3541
3497 /* send queue update ramrod to configure default vlan and silent 3542 /* send queue update ramrod to configure default vlan and silent
3498 * vlan removal 3543 * vlan removal
3544 */
3545 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3546 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3547 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
3548 update_params = &q_params.params.update;
3549 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3550 &update_params->update_flags);
3551 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3552 &update_params->update_flags);
3553 if (vlan == 0) {
3554 /* if vlan is 0 then we want to leave the VF traffic
3555 * untagged, and leave the incoming traffic untouched
3556 * (i.e. do not remove any vlan tags).
3499 */ 3557 */
3500 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 3558 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3501 q_params.cmd = BNX2X_Q_CMD_UPDATE; 3559 &update_params->update_flags);
3502 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); 3560 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3503 update_params = &q_params.params.update; 3561 &update_params->update_flags);
3504 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 3562 } else {
3563 /* configure default vlan to vf queue and set silent
3564 * vlan removal (the vf remains unaware of this vlan).
3565 */
3566 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3505 &update_params->update_flags); 3567 &update_params->update_flags);
3506 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 3568 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3507 &update_params->update_flags); 3569 &update_params->update_flags);
3570 update_params->def_vlan = vlan;
3571 update_params->silent_removal_value =
3572 vlan & VLAN_VID_MASK;
3573 update_params->silent_removal_mask = VLAN_VID_MASK;
3574 }
3508 3575
3509 if (vlan == 0) { 3576 /* Update the Queue state */
3510 /* if vlan is 0 then we want to leave the VF traffic 3577 rc = bnx2x_queue_state_change(bp, &q_params);
3511 * untagged, and leave the incoming traffic untouched 3578 if (rc) {
3512 * (i.e. do not remove any vlan tags). 3579 BNX2X_ERR("Failed to configure default VLAN\n");
3513 */ 3580 goto out;
3514 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 3581 }
3515 &update_params->update_flags);
3516 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3517 &update_params->update_flags);
3518 } else {
3519 /* configure the new vlan to device */
3520 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3521 ramrod_param.vlan_mac_obj = vlan_obj;
3522 ramrod_param.ramrod_flags = ramrod_flags;
3523 ramrod_param.user_req.u.vlan.vlan = vlan;
3524 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3525 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3526 if (rc) {
3527 BNX2X_ERR("failed to configure vlan\n");
3528 rc = -EINVAL;
3529 goto out;
3530 }
3531
3532 /* configure default vlan to vf queue and set silent
3533 * vlan removal (the vf remains unaware of this vlan).
3534 */
3535 update_params = &q_params.params.update;
3536 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3537 &update_params->update_flags);
3538 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3539 &update_params->update_flags);
3540 update_params->def_vlan = vlan;
3541 }
3542 3582
3543 /* Update the Queue state */
3544 rc = bnx2x_queue_state_change(bp, &q_params);
3545 if (rc) {
3546 BNX2X_ERR("Failed to configure default VLAN\n");
3547 goto out;
3548 }
3549 3583
3550 /* clear the flag indicating that this VF needs its vlan 3584 /* clear the flag indicating that this VF needs its vlan
3551 * (will only be set if the HV configured the Vlan before vf was 3585 * (will only be set if the HV configured the Vlan before vf was
3552 * up and we were called because the VF came up later 3586 * up and we were called because the VF came up later
3553 */ 3587 */
3554out: 3588out:
3555 vf->cfg_flags &= ~VF_CFG_VLAN; 3589 vf->cfg_flags &= ~VF_CFG_VLAN;
3556 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3590 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3557 } 3591
3558 return rc; 3592 return rc;
3559} 3593}
3560 3594
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 1ff6a9366629..8c213fa52174 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
74 /* VLANs object */ 74 /* VLANs object */
75 struct bnx2x_vlan_mac_obj vlan_obj; 75 struct bnx2x_vlan_mac_obj vlan_obj;
76 atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ 76 atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
77 unsigned long accept_flags; /* last accept flags configured */
77 78
78 /* Queue Slow-path State object */ 79 /* Queue Slow-path State object */
79 struct bnx2x_queue_sp_obj sp_obj; 80 struct bnx2x_queue_sp_obj sp_obj;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index efa8a151d789..0756d7dabdd5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
208 return -EINVAL; 208 return -EINVAL;
209 } 209 }
210 210
211 BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg); 211 DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
212 212
213 *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT; 213 *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
214 214
@@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1598 1598
1599 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) { 1599 if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
1600 unsigned long accept = 0; 1600 unsigned long accept = 0;
1601 struct pf_vf_bulletin_content *bulletin =
1602 BP_VF_BULLETIN(bp, vf->index);
1601 1603
 1602 /* convert VF-PF if mask to bnx2x accept flags */ 1604 /* convert VF-PF if mask to bnx2x accept flags */
1603 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST) 1605 if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
1617 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept); 1619 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
1618 1620
1619 /* A packet arriving the vf's mac should be accepted 1621 /* A packet arriving the vf's mac should be accepted
1620 * with any vlan 1622 * with any vlan, unless a vlan has already been
1623 * configured.
1621 */ 1624 */
1622 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept); 1625 if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
1626 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
1623 1627
1624 /* set rx-mode */ 1628 /* set rx-mode */
1625 rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, 1629 rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1710 goto response; 1714 goto response;
1711 } 1715 }
1712 } 1716 }
1717 /* if vlan was set by hypervisor we don't allow guest to config vlan */
1718 if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
1719 int i;
1720
1721 /* search for vlan filters */
1722 for (i = 0; i < filters->n_mac_vlan_filters; i++) {
1723 if (filters->filters[i].flags &
1724 VFPF_Q_FILTER_VLAN_TAG_VALID) {
1725 BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
1726 vf->abs_vfid);
1727 vf->op_rc = -EPERM;
1728 goto response;
1729 }
1730 }
1731 }
1713 1732
1714 /* verify vf_qid */ 1733 /* verify vf_qid */
1715 if (filters->vf_qid > vf_rxq_count(vf)) 1734 if (filters->vf_qid > vf_rxq_count(vf))
@@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
1805 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; 1824 vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
1806 1825
1807 /* flags handled individually for backward/forward compatability */ 1826 /* flags handled individually for backward/forward compatability */
1827 vf_op_params->rss_flags = 0;
1828 vf_op_params->ramrod_flags = 0;
1829
1808 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) 1830 if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
1809 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); 1831 __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
1810 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) 1832 if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f3dd93b4aeaa..15a66e4b1f57 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7622{ 7622{
7623 u32 base = (u32) mapping & 0xffffffff; 7623 u32 base = (u32) mapping & 0xffffffff;
7624 7624
7625 return (base > 0xffffdcc0) && (base + len + 8 < base); 7625 return base + len + 8 < base;
7626} 7626}
7627 7627
7628/* Test for TSO DMA buffers that cross into regions which are within MSS bytes 7628/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
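
The tg3 change above keeps only the unsigned wrap-around test: if base + len + 8 overflows the low 32 bits, the buffer crosses a 4 GB boundary. A standalone illustration of that test (the +8 slack is taken from the driver as-is):

#include <stdint.h>
#include <stdio.h>

/* True if a buffer of 'len' bytes whose DMA address starts at the low
 * 32 bits of 'mapping' wraps past a 4 GB boundary (unsigned overflow). */
static int crosses_4g(uint64_t mapping, uint32_t len)
{
    uint32_t base = (uint32_t)mapping;

    return (uint32_t)(base + len + 8) < base;
}

int main(void)
{
    printf("%d\n", crosses_4g(0xfffffff0ull, 0x100));   /* 1: wraps */
    printf("%d\n", crosses_4g(0x1000ull, 0x100));       /* 0: fits  */
    return 0;
}
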
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6c9308850453..56e0415f8cdf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -228,6 +228,25 @@ struct tp_params {
228 228
229 uint32_t dack_re; /* DACK timer resolution */ 229 uint32_t dack_re; /* DACK timer resolution */
230 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ 230 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
231
232 u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
233 u32 ingress_config; /* cached TP_INGRESS_CONFIG */
234
235 /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
236 * subset of the set of fields which may be present in the Compressed
237 * Filter Tuple portion of filters and TCP TCB connections. The
238 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
239 * Since a variable number of fields may or may not be present, their
240 * shifted field positions within the Compressed Filter Tuple may
241 * vary, or not even be present if the field isn't selected in
242 * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
243 * places we store their offsets here, or a -1 if the field isn't
244 * present.
245 */
246 int vlan_shift;
247 int vnic_shift;
248 int port_shift;
249 int protocol_shift;
231}; 250};
232 251
233struct vpd_params { 252struct vpd_params {
@@ -926,6 +945,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
926 const u8 *fw_data, unsigned int fw_size, 945 const u8 *fw_data, unsigned int fw_size,
927 struct fw_hdr *card_fw, enum dev_state state, int *reset); 946 struct fw_hdr *card_fw, enum dev_state state, int *reset);
928int t4_prep_adapter(struct adapter *adapter); 947int t4_prep_adapter(struct adapter *adapter);
948int t4_init_tp_params(struct adapter *adap);
949int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
929int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); 950int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
930void t4_fatal_err(struct adapter *adapter); 951void t4_fatal_err(struct adapter *adapter);
931int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 952int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
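
The new tp_params members cache TP_VLAN_PRI_MAP, TP_INGRESS_CONFIG and the bit offset of each interesting Compressed Filter Tuple field, with -1 meaning the field is not selected. Consumers then OR a field into the tuple only when its shift is non-negative, as cxgb4_select_ntuple() does further down. A reduced sketch of that consumption pattern, using invented shift values rather than anything read from hardware:

#include <stdint.h>
#include <stdio.h>

struct tp_shifts {
    int vlan_shift;             /* -1 if VLAN isn't in the filter mode */
    int port_shift;
    int protocol_shift;
};

static uint64_t build_ntuple(const struct tp_shifts *tp,
                             unsigned int vlan, unsigned int port,
                             unsigned int proto)
{
    uint64_t ntuple = 0;

    if (tp->vlan_shift >= 0)
        ntuple |= (uint64_t)vlan << tp->vlan_shift;
    if (tp->port_shift >= 0)
        ntuple |= (uint64_t)port << tp->port_shift;
    if (tp->protocol_shift >= 0)
        ntuple |= (uint64_t)proto << tp->protocol_shift;
    return ntuple;
}

int main(void)
{
    /* Invented offsets: PORT at bit 0, VLAN at bit 3, protocol absent. */
    struct tp_shifts tp = { .vlan_shift = 3, .port_shift = 0,
                            .protocol_shift = -1 };

    printf("0x%llx\n",
           (unsigned long long)build_ntuple(&tp, 0x64, 0x2, 6));
    return 0;
}
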
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d6b12e035a7d..fff02ed1295e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2986,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2986 if (stid >= 0) { 2986 if (stid >= 0) {
2987 t->stid_tab[stid].data = data; 2987 t->stid_tab[stid].data = data;
2988 stid += t->stid_base; 2988 stid += t->stid_base;
2989 t->stids_in_use++; 2989 /* IPv6 requires max of 520 bits or 16 cells in TCAM
2990 * This is equivalent to 4 TIDs. With CLIP enabled it
2991 * needs 2 TIDs.
2992 */
2993 if (family == PF_INET)
2994 t->stids_in_use++;
2995 else
2996 t->stids_in_use += 4;
2990 } 2997 }
2991 spin_unlock_bh(&t->stid_lock); 2998 spin_unlock_bh(&t->stid_lock);
2992 return stid; 2999 return stid;
@@ -3012,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3012 } 3019 }
3013 if (stid >= 0) { 3020 if (stid >= 0) {
3014 t->stid_tab[stid].data = data; 3021 t->stid_tab[stid].data = data;
3015 stid += t->stid_base; 3022 stid -= t->nstids;
3023 stid += t->sftid_base;
3016 t->stids_in_use++; 3024 t->stids_in_use++;
3017 } 3025 }
3018 spin_unlock_bh(&t->stid_lock); 3026 spin_unlock_bh(&t->stid_lock);
@@ -3024,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
3024 */ 3032 */
3025void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) 3033void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3026{ 3034{
3027 stid -= t->stid_base; 3035 /* Is it a server filter TID? */
3036 if (t->nsftids && (stid >= t->sftid_base)) {
3037 stid -= t->sftid_base;
3038 stid += t->nstids;
3039 } else {
3040 stid -= t->stid_base;
3041 }
3042
3028 spin_lock_bh(&t->stid_lock); 3043 spin_lock_bh(&t->stid_lock);
3029 if (family == PF_INET) 3044 if (family == PF_INET)
3030 __clear_bit(stid, t->stid_bmap); 3045 __clear_bit(stid, t->stid_bmap);
3031 else 3046 else
3032 bitmap_release_region(t->stid_bmap, stid, 2); 3047 bitmap_release_region(t->stid_bmap, stid, 2);
3033 t->stid_tab[stid].data = NULL; 3048 t->stid_tab[stid].data = NULL;
3034 t->stids_in_use--; 3049 if (family == PF_INET)
3050 t->stids_in_use--;
3051 else
3052 t->stids_in_use -= 4;
3035 spin_unlock_bh(&t->stid_lock); 3053 spin_unlock_bh(&t->stid_lock);
3036} 3054}
3037EXPORT_SYMBOL(cxgb4_free_stid); 3055EXPORT_SYMBOL(cxgb4_free_stid);
@@ -3134,6 +3152,7 @@ static int tid_init(struct tid_info *t)
3134 size_t size; 3152 size_t size;
3135 unsigned int stid_bmap_size; 3153 unsigned int stid_bmap_size;
3136 unsigned int natids = t->natids; 3154 unsigned int natids = t->natids;
3155 struct adapter *adap = container_of(t, struct adapter, tids);
3137 3156
3138 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); 3157 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3139 size = t->ntids * sizeof(*t->tid_tab) + 3158 size = t->ntids * sizeof(*t->tid_tab) +
@@ -3167,6 +3186,11 @@ static int tid_init(struct tid_info *t)
3167 t->afree = t->atid_tab; 3186 t->afree = t->atid_tab;
3168 } 3187 }
3169 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); 3188 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3189 /* Reserve stid 0 for T4/T5 adapters */
3190 if (!t->stid_base &&
3191 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3192 __set_bit(0, t->stid_bmap);
3193
3170 return 0; 3194 return 0;
3171} 3195}
3172 3196
@@ -3731,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
3731 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( 3755 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3732 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> 3756 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3733 (adap->fn * 4)); 3757 (adap->fn * 4));
3734 lli.filt_mode = adap->filter_mode; 3758 lli.filt_mode = adap->params.tp.vlan_pri_map;
3735 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ 3759 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3736 for (i = 0; i < NCHAN; i++) 3760 for (i = 0; i < NCHAN; i++)
3737 lli.tx_modq[i] = i; 3761 lli.tx_modq[i] = i;
@@ -4179,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4179 adap = netdev2adap(dev); 4203 adap = netdev2adap(dev);
4180 4204
4181 /* Adjust stid to correct filter index */ 4205 /* Adjust stid to correct filter index */
4182 stid -= adap->tids.nstids; 4206 stid -= adap->tids.sftid_base;
4183 stid += adap->tids.nftids; 4207 stid += adap->tids.nftids;
4184 4208
4185 /* Check to make sure the filter requested is writable ... 4209 /* Check to make sure the filter requested is writable ...
@@ -4205,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4205 f->fs.val.lip[i] = val[i]; 4229 f->fs.val.lip[i] = val[i];
4206 f->fs.mask.lip[i] = ~0; 4230 f->fs.mask.lip[i] = ~0;
4207 } 4231 }
4208 if (adap->filter_mode & F_PORT) { 4232 if (adap->params.tp.vlan_pri_map & F_PORT) {
4209 f->fs.val.iport = port; 4233 f->fs.val.iport = port;
4210 f->fs.mask.iport = mask; 4234 f->fs.mask.iport = mask;
4211 } 4235 }
4212 } 4236 }
4213 4237
4238 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4239 f->fs.val.proto = IPPROTO_TCP;
4240 f->fs.mask.proto = ~0;
4241 }
4242
4214 f->fs.dirsteer = 1; 4243 f->fs.dirsteer = 1;
4215 f->fs.iq = queue; 4244 f->fs.iq = queue;
4216 /* Mark filter as locked */ 4245 /* Mark filter as locked */
@@ -4237,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4237 adap = netdev2adap(dev); 4266 adap = netdev2adap(dev);
4238 4267
4239 /* Adjust stid to correct filter index */ 4268 /* Adjust stid to correct filter index */
4240 stid -= adap->tids.nstids; 4269 stid -= adap->tids.sftid_base;
4241 stid += adap->tids.nftids; 4270 stid += adap->tids.nftids;
4242 4271
4243 f = &adap->tids.ftid_tab[stid]; 4272 f = &adap->tids.ftid_tab[stid];
@@ -5092,7 +5121,7 @@ static int adap_init0(struct adapter *adap)
5092 enum dev_state state; 5121 enum dev_state state;
5093 u32 params[7], val[7]; 5122 u32 params[7], val[7];
5094 struct fw_caps_config_cmd caps_cmd; 5123 struct fw_caps_config_cmd caps_cmd;
5095 int reset = 1, j; 5124 int reset = 1;
5096 5125
5097 /* 5126 /*
5098 * Contact FW, advertising Master capability (and potentially forcing 5127 * Contact FW, advertising Master capability (and potentially forcing
@@ -5434,21 +5463,11 @@ static int adap_init0(struct adapter *adap)
5434 /* 5463 /*
5435 * These are finalized by FW initialization, load their values now. 5464 * These are finalized by FW initialization, load their values now.
5436 */ 5465 */
5437 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5438 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
5439 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
5440 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 5466 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5441 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 5467 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5442 adap->params.b_wnd); 5468 adap->params.b_wnd);
5443 5469
5444 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ 5470 t4_init_tp_params(adap);
5445 for (j = 0; j < NCHAN; j++)
5446 adap->params.tp.tx_modq[j] = j;
5447
5448 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5449 &adap->filter_mode, 1,
5450 TP_VLAN_PRI_MAP);
5451
5452 adap->flags |= FW_OK; 5471 adap->flags |= FW_OK;
5453 return 0; 5472 return 0;
5454 5473
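
The stid changes above make the accounting reflect hardware cost: an IPv6 server consumes up to 520 match bits (16 TCAM cells), i.e. four TIDs, where IPv4 needs one, and server-filter TIDs are additionally rebased through sftid_base. A compact sketch of just the in-use counter bookkeeping, with stand-in family constants:

#include <stdio.h>

struct tid_info { unsigned int stids_in_use; };

enum { FAM_INET = 2, FAM_INET6 = 10 };  /* stand-ins for PF_INET/PF_INET6 */

static void stid_alloc_account(struct tid_info *t, int family)
{
    /* IPv6 needs 16 TCAM cells, i.e. 4 TIDs; IPv4 needs only one. */
    t->stids_in_use += (family == FAM_INET) ? 1 : 4;
}

static void stid_free_account(struct tid_info *t, int family)
{
    t->stids_in_use -= (family == FAM_INET) ? 1 : 4;
}

int main(void)
{
    struct tid_info t = { 0 };

    stid_alloc_account(&t, FAM_INET6);
    stid_alloc_account(&t, FAM_INET);
    printf("%u\n", t.stids_in_use);     /* 5 */
    stid_free_account(&t, FAM_INET6);
    printf("%u\n", t.stids_in_use);     /* 1 */
    return 0;
}
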
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 6f21f2451c30..4dd0a82533e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
131 131
132static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) 132static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
133{ 133{
134 stid -= t->stid_base; 134 /* Is it a server filter TID? */
135 if (t->nsftids && (stid >= t->sftid_base)) {
136 stid -= t->sftid_base;
137 stid += t->nstids;
138 } else {
139 stid -= t->stid_base;
140 }
141
135 return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; 142 return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
136} 143}
137 144
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 29878098101e..81e8402a74b4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -45,6 +45,7 @@
45#include "l2t.h" 45#include "l2t.h"
46#include "t4_msg.h" 46#include "t4_msg.h"
47#include "t4fw_api.h" 47#include "t4fw_api.h"
48#include "t4_regs.h"
48 49
49#define VLAN_NONE 0xfff 50#define VLAN_NONE 0xfff
50 51
@@ -411,6 +412,40 @@ done:
411} 412}
412EXPORT_SYMBOL(cxgb4_l2t_get); 413EXPORT_SYMBOL(cxgb4_l2t_get);
413 414
415u64 cxgb4_select_ntuple(struct net_device *dev,
416 const struct l2t_entry *l2t)
417{
418 struct adapter *adap = netdev2adap(dev);
419 struct tp_params *tp = &adap->params.tp;
420 u64 ntuple = 0;
421
422 /* Initialize each of the fields which we care about which are present
423 * in the Compressed Filter Tuple.
424 */
425 if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
426 ntuple |= (u64)(F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
427
428 if (tp->port_shift >= 0)
429 ntuple |= (u64)l2t->lport << tp->port_shift;
430
431 if (tp->protocol_shift >= 0)
432 ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
433
434 if (tp->vnic_shift >= 0) {
435 u32 viid = cxgb4_port_viid(dev);
436 u32 vf = FW_VIID_VIN_GET(viid);
437 u32 pf = FW_VIID_PFN_GET(viid);
438 u32 vld = FW_VIID_VIVLD_GET(viid);
439
440 ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
441 V_FT_VNID_ID_PF(pf) |
442 V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
443 }
444
445 return ntuple;
446}
447EXPORT_SYMBOL(cxgb4_select_ntuple);
448
414/* 449/*
415 * Called when address resolution fails for an L2T entry to handle packets 450 * Called when address resolution fails for an L2T entry to handle packets
416 * on the arpq head. If a packet specifies a failure handler it is invoked, 451 * on the arpq head. If a packet specifies a failure handler it is invoked,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 108c0f1fce1c..85eb5c71358d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, 98struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
99 const struct net_device *physdev, 99 const struct net_device *physdev,
100 unsigned int priority); 100 unsigned int priority);
101 101u64 cxgb4_select_ntuple(struct net_device *dev,
102 const struct l2t_entry *l2t);
102void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); 103void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
103struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); 104struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
104int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, 105int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index cc380c36e1a8..cc3511a5cd0c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2581,7 +2581,7 @@ static int t4_sge_init_soft(struct adapter *adap)
2581 #undef READ_FL_BUF 2581 #undef READ_FL_BUF
2582 2582
2583 if (fl_small_pg != PAGE_SIZE || 2583 if (fl_small_pg != PAGE_SIZE ||
2584 (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg || 2584 (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
2585 (fl_large_pg & (fl_large_pg-1)) != 0))) { 2585 (fl_large_pg & (fl_large_pg-1)) != 0))) {
2586 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", 2586 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2587 fl_small_pg, fl_large_pg); 2587 fl_small_pg, fl_large_pg);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 74a6fce5a15a..e1413eacdbd2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3808,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter)
3808 return 0; 3808 return 0;
3809} 3809}
3810 3810
3811/**
3812 * t4_init_tp_params - initialize adap->params.tp
3813 * @adap: the adapter
3814 *
3815 * Initialize various fields of the adapter's TP Parameters structure.
3816 */
3817int t4_init_tp_params(struct adapter *adap)
3818{
3819 int chan;
3820 u32 v;
3821
3822 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3823 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3824 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
3825
3826 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
3827 for (chan = 0; chan < NCHAN; chan++)
3828 adap->params.tp.tx_modq[chan] = chan;
3829
3830 /* Cache the adapter's Compressed Filter Mode and global Ingress
3831 * Configuration.
3832 */
3833 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3834 &adap->params.tp.vlan_pri_map, 1,
3835 TP_VLAN_PRI_MAP);
3836 t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3837 &adap->params.tp.ingress_config, 1,
3838 TP_INGRESS_CONFIG);
3839
3840 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
3841 * shift positions of several elements of the Compressed Filter Tuple
3842 * for this adapter which we need frequently ...
3843 */
3844 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
3845 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
3846 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
3847 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
3848 F_PROTOCOL);
3849
3850 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
3851 * represents the presence of an Outer VLAN instead of a VNIC ID.
3852 */
3853 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
3854 adap->params.tp.vnic_shift = -1;
3855
3856 return 0;
3857}
3858
3859/**
3860 * t4_filter_field_shift - calculate filter field shift
3861 * @adap: the adapter
3862 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
3863 *
3864 * Return the shift position of a filter field within the Compressed
3865 * Filter Tuple. The filter field is specified via its selection bit
3866 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
3867 */
3868int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
3869{
3870 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
3871 unsigned int sel;
3872 int field_shift;
3873
3874 if ((filter_mode & filter_sel) == 0)
3875 return -1;
3876
3877 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
3878 switch (filter_mode & sel) {
3879 case F_FCOE:
3880 field_shift += W_FT_FCOE;
3881 break;
3882 case F_PORT:
3883 field_shift += W_FT_PORT;
3884 break;
3885 case F_VNIC_ID:
3886 field_shift += W_FT_VNIC_ID;
3887 break;
3888 case F_VLAN:
3889 field_shift += W_FT_VLAN;
3890 break;
3891 case F_TOS:
3892 field_shift += W_FT_TOS;
3893 break;
3894 case F_PROTOCOL:
3895 field_shift += W_FT_PROTOCOL;
3896 break;
3897 case F_ETHERTYPE:
3898 field_shift += W_FT_ETHERTYPE;
3899 break;
3900 case F_MACMATCH:
3901 field_shift += W_FT_MACMATCH;
3902 break;
3903 case F_MPSHITTYPE:
3904 field_shift += W_FT_MPSHITTYPE;
3905 break;
3906 case F_FRAGMENTATION:
3907 field_shift += W_FT_FRAGMENTATION;
3908 break;
3909 }
3910 }
3911 return field_shift;
3912}
3913
3811int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) 3914int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3812{ 3915{
3813 u8 addr[6]; 3916 u8 addr[6];
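
t4_filter_field_shift() computes a field's bit offset inside the Compressed Filter Tuple by summing the widths of every lower-order field that is actually enabled in the filter mode, returning -1 when the requested field itself is absent. A cut-down standalone version with only three of the W_FT_* widths, just to show the arithmetic:

#include <stdio.h>

#define SEL_FCOE (1u << 0)      /* width 1  */
#define SEL_PORT (1u << 1)      /* width 3  */
#define SEL_VLAN (1u << 3)      /* width 17 */

static int width_of(unsigned int sel)
{
    switch (sel) {
    case SEL_FCOE: return 1;
    case SEL_PORT: return 3;
    case SEL_VLAN: return 17;
    default:       return 0;
    }
}

/* Shift of 'want' inside the tuple, or -1 if it isn't selected. */
static int field_shift(unsigned int mode, unsigned int want)
{
    unsigned int sel;
    int shift = 0;

    if (!(mode & want))
        return -1;
    for (sel = 1; sel < want; sel <<= 1)
        if (mode & sel)
            shift += width_of(sel);
    return shift;
}

int main(void)
{
    unsigned int mode = SEL_PORT | SEL_VLAN;    /* FCoE not selected */

    printf("%d\n", field_shift(mode, SEL_PORT));    /* 0 */
    printf("%d\n", field_shift(mode, SEL_VLAN));    /* 3: only PORT below it */
    printf("%d\n", field_shift(mode, SEL_FCOE));    /* -1 */
    return 0;
}
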
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 0a8205d69d2c..4082522d8140 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1171,10 +1171,50 @@
1171 1171
1172#define A_TP_TX_SCHED_PCMD 0x25 1172#define A_TP_TX_SCHED_PCMD 0x25
1173 1173
1174#define S_VNIC 11
1175#define V_VNIC(x) ((x) << S_VNIC)
1176#define F_VNIC V_VNIC(1U)
1177
1178#define S_FRAGMENTATION 9
1179#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
1180#define F_FRAGMENTATION V_FRAGMENTATION(1U)
1181
1182#define S_MPSHITTYPE 8
1183#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
1184#define F_MPSHITTYPE V_MPSHITTYPE(1U)
1185
1186#define S_MACMATCH 7
1187#define V_MACMATCH(x) ((x) << S_MACMATCH)
1188#define F_MACMATCH V_MACMATCH(1U)
1189
1190#define S_ETHERTYPE 6
1191#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
1192#define F_ETHERTYPE V_ETHERTYPE(1U)
1193
1194#define S_PROTOCOL 5
1195#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
1196#define F_PROTOCOL V_PROTOCOL(1U)
1197
1198#define S_TOS 4
1199#define V_TOS(x) ((x) << S_TOS)
1200#define F_TOS V_TOS(1U)
1201
1202#define S_VLAN 3
1203#define V_VLAN(x) ((x) << S_VLAN)
1204#define F_VLAN V_VLAN(1U)
1205
1206#define S_VNIC_ID 2
1207#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
1208#define F_VNIC_ID V_VNIC_ID(1U)
1209
1174#define S_PORT 1 1210#define S_PORT 1
1175#define V_PORT(x) ((x) << S_PORT) 1211#define V_PORT(x) ((x) << S_PORT)
1176#define F_PORT V_PORT(1U) 1212#define F_PORT V_PORT(1U)
1177 1213
1214#define S_FCOE 0
1215#define V_FCOE(x) ((x) << S_FCOE)
1216#define F_FCOE V_FCOE(1U)
1217
1178#define NUM_MPS_CLS_SRAM_L_INSTANCES 336 1218#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
1179#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 1219#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
1180 1220
@@ -1213,4 +1253,37 @@
1213#define V_CHIPID(x) ((x) << S_CHIPID) 1253#define V_CHIPID(x) ((x) << S_CHIPID)
1214#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID) 1254#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
1215 1255
1256/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
1257 * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
1258 * selects for a particular field being present. These fields, when present
1259 * in the Compressed Filter Tuple, have the following widths in bits.
1260 */
1261#define W_FT_FCOE 1
1262#define W_FT_PORT 3
1263#define W_FT_VNIC_ID 17
1264#define W_FT_VLAN 17
1265#define W_FT_TOS 8
1266#define W_FT_PROTOCOL 8
1267#define W_FT_ETHERTYPE 16
1268#define W_FT_MACMATCH 9
1269#define W_FT_MPSHITTYPE 3
1270#define W_FT_FRAGMENTATION 1
1271
1272/* Some of the Compressed Filter Tuple fields have internal structure. These
1273 * bit shifts/masks describe those structures. All shifts are relative to the
1274 * base position of the fields within the Compressed Filter Tuple
1275 */
1276#define S_FT_VLAN_VLD 16
1277#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
1278#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
1279
1280#define S_FT_VNID_ID_VF 0
1281#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF)
1282
1283#define S_FT_VNID_ID_PF 7
1284#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF)
1285
1286#define S_FT_VNID_ID_VLD 16
1287#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
1288
1216#endif /* __T4_REGS_H */ 1289#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 5878df619b53..4ccaf9af6fc9 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev)
104#define BE3_MAX_RSS_QS 16 104#define BE3_MAX_RSS_QS 16
105#define BE3_MAX_TX_QS 16 105#define BE3_MAX_TX_QS 16
106#define BE3_MAX_EVT_QS 16 106#define BE3_MAX_EVT_QS 16
107#define BE3_SRIOV_MAX_EVT_QS 8
107 108
108#define MAX_RX_QS 32 109#define MAX_RX_QS 32
109#define MAX_EVT_QS 32 110#define MAX_EVT_QS 32
@@ -480,7 +481,7 @@ struct be_adapter {
480 struct list_head entry; 481 struct list_head entry;
481 482
482 u32 flash_status; 483 u32 flash_status;
483 struct completion flash_compl; 484 struct completion et_cmd_compl;
484 485
485 struct be_resources res; /* resources available for the func */ 486 struct be_resources res; /* resources available for the func */
486 u16 num_vfs; /* Number of VFs provisioned by PF */ 487 u16 num_vfs; /* Number of VFs provisioned by PF */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index e0e8bc1ef14c..94c35c8d799d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
141 subsystem = resp_hdr->subsystem; 141 subsystem = resp_hdr->subsystem;
142 } 142 }
143 143
144 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
145 subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
146 complete(&adapter->et_cmd_compl);
147 return 0;
148 }
149
144 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) || 150 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
145 (opcode == OPCODE_COMMON_WRITE_OBJECT)) && 151 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
146 (subsystem == CMD_SUBSYSTEM_COMMON)) { 152 (subsystem == CMD_SUBSYSTEM_COMMON)) {
147 adapter->flash_status = compl_status; 153 adapter->flash_status = compl_status;
148 complete(&adapter->flash_compl); 154 complete(&adapter->et_cmd_compl);
149 } 155 }
150 156
151 if (compl_status == MCC_STATUS_SUCCESS) { 157 if (compl_status == MCC_STATUS_SUCCESS) {
@@ -2017,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2017 0x3ea83c02, 0x4a110304}; 2023 0x3ea83c02, 0x4a110304};
2018 int status; 2024 int status;
2019 2025
2026 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2027 return 0;
2028
2020 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2029 if (mutex_lock_interruptible(&adapter->mbox_lock))
2021 return -1; 2030 return -1;
2022 2031
@@ -2160,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2160 be_mcc_notify(adapter); 2169 be_mcc_notify(adapter);
2161 spin_unlock_bh(&adapter->mcc_lock); 2170 spin_unlock_bh(&adapter->mcc_lock);
2162 2171
2163 if (!wait_for_completion_timeout(&adapter->flash_compl, 2172 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2164 msecs_to_jiffies(60000))) 2173 msecs_to_jiffies(60000)))
2165 status = -1; 2174 status = -1;
2166 else 2175 else
@@ -2255,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2255 be_mcc_notify(adapter); 2264 be_mcc_notify(adapter);
2256 spin_unlock_bh(&adapter->mcc_lock); 2265 spin_unlock_bh(&adapter->mcc_lock);
2257 2266
2258 if (!wait_for_completion_timeout(&adapter->flash_compl, 2267 if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2259 msecs_to_jiffies(40000))) 2268 msecs_to_jiffies(40000)))
2260 status = -1; 2269 status = -1;
2261 else 2270 else
2262 status = adapter->flash_status; 2271 status = adapter->flash_status;
@@ -2367,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2367{ 2376{
2368 struct be_mcc_wrb *wrb; 2377 struct be_mcc_wrb *wrb;
2369 struct be_cmd_req_loopback_test *req; 2378 struct be_cmd_req_loopback_test *req;
2379 struct be_cmd_resp_loopback_test *resp;
2370 int status; 2380 int status;
2371 2381
2372 spin_lock_bh(&adapter->mcc_lock); 2382 spin_lock_bh(&adapter->mcc_lock);
@@ -2381,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2381 2391
2382 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 2392 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2383 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); 2393 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2384 req->hdr.timeout = cpu_to_le32(4);
2385 2394
2395 req->hdr.timeout = cpu_to_le32(15);
2386 req->pattern = cpu_to_le64(pattern); 2396 req->pattern = cpu_to_le64(pattern);
2387 req->src_port = cpu_to_le32(port_num); 2397 req->src_port = cpu_to_le32(port_num);
2388 req->dest_port = cpu_to_le32(port_num); 2398 req->dest_port = cpu_to_le32(port_num);
@@ -2390,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2390 req->num_pkts = cpu_to_le32(num_pkts); 2400 req->num_pkts = cpu_to_le32(num_pkts);
2391 req->loopback_type = cpu_to_le32(loopback_type); 2401 req->loopback_type = cpu_to_le32(loopback_type);
2392 2402
2393 status = be_mcc_notify_wait(adapter); 2403 be_mcc_notify(adapter);
2394 if (!status) { 2404
2395 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); 2405 spin_unlock_bh(&adapter->mcc_lock);
2396 status = le32_to_cpu(resp->status);
2397 }
2398 2406
2407 wait_for_completion(&adapter->et_cmd_compl);
2408 resp = embedded_payload(wrb);
2409 status = le32_to_cpu(resp->status);
2410
2411 return status;
2399err: 2412err:
2400 spin_unlock_bh(&adapter->mcc_lock); 2413 spin_unlock_bh(&adapter->mcc_lock);
2401 return status; 2414 return status;
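
The loopback-test command above switches from the synchronous be_mcc_notify_wait() to a notify-then-sleep pattern: the request is posted, the MCC lock is dropped, and the caller blocks on a dedicated completion that the command-completion handler signals for OPCODE_LOWLEVEL_LOOPBACK_TEST. The generic shape of that pattern, sketched with the standard completion API (the context structure and function names here are invented):

#include <linux/completion.h>
#include <linux/jiffies.h>

/* Hypothetical command context used only for illustration. */
struct et_cmd_ctx {
    struct completion et_cmd_compl;     /* init_completion() at setup */
    int resp_status;
};

/* Completion side: called from the command-completion handler. */
static void et_cmd_done(struct et_cmd_ctx *ctx, int status)
{
    ctx->resp_status = status;
    complete(&ctx->et_cmd_compl);
}

/* Submit side: post the command, release locks, then sleep until the
 * handler signals completion (or give up after a generous timeout). */
static int et_cmd_wait(struct et_cmd_ctx *ctx)
{
    if (!wait_for_completion_timeout(&ctx->et_cmd_compl,
                                     msecs_to_jiffies(60000)))
        return -1;
    return ctx->resp_status;
}
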
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0fde69d5cb6a..a37039d353c5 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1776,6 +1776,7 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1776 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL; 1776 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1777 struct be_queue_info *rxq = &rxo->q; 1777 struct be_queue_info *rxq = &rxo->q;
1778 struct page *pagep = NULL; 1778 struct page *pagep = NULL;
1779 struct device *dev = &adapter->pdev->dev;
1779 struct be_eth_rx_d *rxd; 1780 struct be_eth_rx_d *rxd;
1780 u64 page_dmaaddr = 0, frag_dmaaddr; 1781 u64 page_dmaaddr = 0, frag_dmaaddr;
1781 u32 posted, page_offset = 0; 1782 u32 posted, page_offset = 0;
@@ -1788,9 +1789,15 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1788 rx_stats(rxo)->rx_post_fail++; 1789 rx_stats(rxo)->rx_post_fail++;
1789 break; 1790 break;
1790 } 1791 }
1791 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep, 1792 page_dmaaddr = dma_map_page(dev, pagep, 0,
1792 0, adapter->big_page_size, 1793 adapter->big_page_size,
1793 DMA_FROM_DEVICE); 1794 DMA_FROM_DEVICE);
1795 if (dma_mapping_error(dev, page_dmaaddr)) {
1796 put_page(pagep);
1797 pagep = NULL;
1798 rx_stats(rxo)->rx_post_fail++;
1799 break;
1800 }
1794 page_info->page_offset = 0; 1801 page_info->page_offset = 0;
1795 } else { 1802 } else {
1796 get_page(pagep); 1803 get_page(pagep);
@@ -2744,13 +2751,16 @@ static int be_rx_qs_create(struct be_adapter *adapter)
2744 if (!BEx_chip(adapter)) 2751 if (!BEx_chip(adapter))
2745 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | 2752 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2746 RSS_ENABLE_UDP_IPV6; 2753 RSS_ENABLE_UDP_IPV6;
2754 } else {
2755 /* Disable RSS, if only default RX Q is created */
2756 adapter->rss_flags = RSS_ENABLE_NONE;
2757 }
2747 2758
2748 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, 2759 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2749 128); 2760 128);
2750 if (rc) { 2761 if (rc) {
2751 adapter->rss_flags = 0; 2762 adapter->rss_flags = RSS_ENABLE_NONE;
2752 return rc; 2763 return rc;
2753 }
2754 } 2764 }
2755 2765
2756 /* First time posting */ 2766 /* First time posting */
@@ -3124,11 +3134,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
3124{ 3134{
3125 struct pci_dev *pdev = adapter->pdev; 3135 struct pci_dev *pdev = adapter->pdev;
3126 bool use_sriov = false; 3136 bool use_sriov = false;
3137 int max_vfs;
3127 3138
3128 if (BE3_chip(adapter) && sriov_want(adapter)) { 3139 max_vfs = pci_sriov_get_totalvfs(pdev);
3129 int max_vfs;
3130 3140
3131 max_vfs = pci_sriov_get_totalvfs(pdev); 3141 if (BE3_chip(adapter) && sriov_want(adapter)) {
3132 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; 3142 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3133 use_sriov = res->max_vfs; 3143 use_sriov = res->max_vfs;
3134 } 3144 }
@@ -3159,7 +3169,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
3159 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 3169 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3160 res->max_rx_qs = res->max_rss_qs + 1; 3170 res->max_rx_qs = res->max_rss_qs + 1;
3161 3171
3162 res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1; 3172 if (be_physfn(adapter))
3173 res->max_evt_qs = (max_vfs > 0) ?
3174 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3175 else
3176 res->max_evt_qs = 1;
3163 3177
3164 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT; 3178 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3165 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS)) 3179 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
@@ -4205,7 +4219,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
4205 spin_lock_init(&adapter->mcc_lock); 4219 spin_lock_init(&adapter->mcc_lock);
4206 spin_lock_init(&adapter->mcc_cq_lock); 4220 spin_lock_init(&adapter->mcc_cq_lock);
4207 4221
4208 init_completion(&adapter->flash_compl); 4222 init_completion(&adapter->et_cmd_compl);
4209 pci_save_state(adapter->pdev); 4223 pci_save_state(adapter->pdev);
4210 return 0; 4224 return 0;
4211 4225
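
be_post_rx_frags() now checks the result of dma_map_page() with dma_mapping_error() and backs out (releases the page, bumps rx_post_fail, stops posting) instead of handing an unusable DMA address to the NIC. A minimal kernel-style sketch of that check; the 0-means-failure return convention is a simplification for illustration:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Map one RX page; on mapping failure release the page and report it.
 * 'dev' and 'pagep' are assumed to come from the caller. */
static dma_addr_t map_rx_page(struct device *dev, struct page *pagep,
                              size_t size)
{
    dma_addr_t addr = dma_map_page(dev, pagep, 0, size, DMA_FROM_DEVICE);

    if (dma_mapping_error(dev, addr)) {
        put_page(pagep);        /* give the page back */
        return 0;               /* caller treats 0 as failure */
    }
    return addr;
}
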
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e7c8b749c5a5..50bb71c663e2 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -428,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
428 /* If this was the last BD in the ring, start at the beginning again. */ 428 /* If this was the last BD in the ring, start at the beginning again. */
429 bdp = fec_enet_get_nextdesc(bdp, fep); 429 bdp = fec_enet_get_nextdesc(bdp, fep);
430 430
431 skb_tx_timestamp(skb);
432
431 fep->cur_tx = bdp; 433 fep->cur_tx = bdp;
432 434
433 if (fep->cur_tx == fep->dirty_tx) 435 if (fep->cur_tx == fep->dirty_tx)
@@ -436,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
436 /* Trigger transmission start */ 438 /* Trigger transmission start */
437 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 439 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
438 440
439 skb_tx_timestamp(skb);
440
441 return NETDEV_TX_OK; 441 return NETDEV_TX_OK;
442} 442}
443 443
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 895450e9bb3c..ff2d806eaef7 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
718 e1000_release_phy_80003es2lan(hw); 718 e1000_release_phy_80003es2lan(hw);
719 719
720 /* Disable IBIST slave mode (far-end loopback) */ 720 /* Disable IBIST slave mode (far-end loopback) */
721 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 721 ret_val =
722 &kum_reg_data); 722 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
723 &kum_reg_data);
724 if (ret_val)
725 return ret_val;
723 kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; 726 kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
724 e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, 727 e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
725 kum_reg_data); 728 kum_reg_data);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8d3945ab7334..6d14eea17918 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7015,13 +7015,11 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
7015}; 7015};
7016MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 7016MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
7017 7017
7018#ifdef CONFIG_PM
7019static const struct dev_pm_ops e1000_pm_ops = { 7018static const struct dev_pm_ops e1000_pm_ops = {
7020 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) 7019 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
7021 SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume, 7020 SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
7022 e1000_idle) 7021 e1000_idle)
7023}; 7022};
7024#endif
7025 7023
7026/* PCI Device API Driver */ 7024/* PCI Device API Driver */
7027static struct pci_driver e1000_driver = { 7025static struct pci_driver e1000_driver = {
@@ -7029,11 +7027,9 @@ static struct pci_driver e1000_driver = {
7029 .id_table = e1000_pci_tbl, 7027 .id_table = e1000_pci_tbl,
7030 .probe = e1000_probe, 7028 .probe = e1000_probe,
7031 .remove = e1000_remove, 7029 .remove = e1000_remove,
7032#ifdef CONFIG_PM
7033 .driver = { 7030 .driver = {
7034 .pm = &e1000_pm_ops, 7031 .pm = &e1000_pm_ops,
7035 }, 7032 },
7036#endif
7037 .shutdown = e1000_shutdown, 7033 .shutdown = e1000_shutdown,
7038 .err_handler = &e1000_err_handler 7034 .err_handler = &e1000_err_handler
7039}; 7035};
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index da2be59505c0..20e71f4ca426 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1757 * it across the board. 1757 * it across the board.
1758 */ 1758 */
1759 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); 1759 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
1760 if (ret_val) 1760 if (ret_val) {
1761 /* If the first read fails, another entity may have 1761 /* If the first read fails, another entity may have
1762 * ownership of the resources, wait and try again to 1762 * ownership of the resources, wait and try again to
1763 * see if they have relinquished the resources yet. 1763 * see if they have relinquished the resources yet.
1764 */ 1764 */
1765 udelay(usec_interval); 1765 if (usec_interval >= 1000)
1766 msleep(usec_interval / 1000);
1767 else
1768 udelay(usec_interval);
1769 }
1766 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); 1770 ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
1767 if (ret_val) 1771 if (ret_val)
1768 break; 1772 break;
1769 if (phy_status & BMSR_LSTATUS) 1773 if (phy_status & BMSR_LSTATUS)
1770 break; 1774 break;
1771 if (usec_interval >= 1000) 1775 if (usec_interval >= 1000)
1772 mdelay(usec_interval / 1000); 1776 msleep(usec_interval / 1000);
1773 else 1777 else
1774 udelay(usec_interval); 1778 udelay(usec_interval);
1775 } 1779 }
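
The phy.c hunk replaces busy-wait delays with msleep() whenever the requested interval is at least one millisecond, keeping udelay() only for sub-millisecond waits; the same split is now also applied when the first BMSR read fails. The rule as a tiny helper:

#include <linux/delay.h>

/* Sleep for 'usec_interval' microseconds: schedule-friendly msleep()
 * for >= 1 ms, busy-wait udelay() only for shorter intervals. */
static void phy_poll_delay(unsigned int usec_interval)
{
    if (usec_interval >= 1000)
        msleep(usec_interval / 1000);
    else
        udelay(usec_interval);
}
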
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cc06854296a3..5bcc870f8367 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
6827 return __ixgbe_maybe_stop_tx(tx_ring, size); 6827 return __ixgbe_maybe_stop_tx(tx_ring, size);
6828} 6828}
6829 6829
6830#ifdef IXGBE_FCOE 6830static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
6831static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) 6831 void *accel_priv)
6832{ 6832{
6833 struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
6834#ifdef IXGBE_FCOE
6833 struct ixgbe_adapter *adapter; 6835 struct ixgbe_adapter *adapter;
6834 struct ixgbe_ring_feature *f; 6836 struct ixgbe_ring_feature *f;
6835 int txq; 6837 int txq;
6838#endif
6839
6840 if (fwd_adapter)
6841 return skb->queue_mapping + fwd_adapter->tx_base_queue;
6842
6843#ifdef IXGBE_FCOE
6836 6844
6837 /* 6845 /*
6838 * only execute the code below if protocol is FCoE 6846 * only execute the code below if protocol is FCoE
@@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6858 txq -= f->indices; 6866 txq -= f->indices;
6859 6867
6860 return txq + f->offset; 6868 return txq + f->offset;
6869#else
6870 return __netdev_pick_tx(dev, skb);
6871#endif
6861} 6872}
6862 6873
6863#endif
6864netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, 6874netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6865 struct ixgbe_adapter *adapter, 6875 struct ixgbe_adapter *adapter,
6866 struct ixgbe_ring *tx_ring) 6876 struct ixgbe_ring *tx_ring)
@@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
7629 kfree(fwd_adapter); 7639 kfree(fwd_adapter);
7630} 7640}
7631 7641
7632static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb,
7633 struct net_device *dev,
7634 void *priv)
7635{
7636 struct ixgbe_fwd_adapter *fwd_adapter = priv;
7637 unsigned int queue;
7638 struct ixgbe_ring *tx_ring;
7639
7640 queue = skb->queue_mapping + fwd_adapter->tx_base_queue;
7641 tx_ring = fwd_adapter->real_adapter->tx_ring[queue];
7642
7643 return __ixgbe_xmit_frame(skb, dev, tx_ring);
7644}
7645
7646static const struct net_device_ops ixgbe_netdev_ops = { 7642static const struct net_device_ops ixgbe_netdev_ops = {
7647 .ndo_open = ixgbe_open, 7643 .ndo_open = ixgbe_open,
7648 .ndo_stop = ixgbe_close, 7644 .ndo_stop = ixgbe_close,
7649 .ndo_start_xmit = ixgbe_xmit_frame, 7645 .ndo_start_xmit = ixgbe_xmit_frame,
7650#ifdef IXGBE_FCOE
7651 .ndo_select_queue = ixgbe_select_queue, 7646 .ndo_select_queue = ixgbe_select_queue,
7652#endif
7653 .ndo_set_rx_mode = ixgbe_set_rx_mode, 7647 .ndo_set_rx_mode = ixgbe_set_rx_mode,
7654 .ndo_validate_addr = eth_validate_addr, 7648 .ndo_validate_addr = eth_validate_addr,
7655 .ndo_set_mac_address = ixgbe_set_mac, 7649 .ndo_set_mac_address = ixgbe_set_mac,
@@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7689 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, 7683 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
7690 .ndo_dfwd_add_station = ixgbe_fwd_add, 7684 .ndo_dfwd_add_station = ixgbe_fwd_add,
7691 .ndo_dfwd_del_station = ixgbe_fwd_del, 7685 .ndo_dfwd_del_station = ixgbe_fwd_del,
7692 .ndo_dfwd_start_xmit = ixgbe_fwd_xmit,
7693}; 7686};
7694 7687
7695/** 7688/**
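
ixgbe drops its private .ndo_dfwd_start_xmit hook and instead steers offloaded-macvlan traffic through .ndo_select_queue, whose prototype in this tree now carries an accel_priv argument; when accel_priv is set, the queue is the skb's mapping offset by the station's reserved base queue. A sketch of that selection logic with an invented per-station context:

#include <linux/netdevice.h>

/* Hypothetical per-station context; only tx_base_queue matters here. */
struct fwd_ctx {
    unsigned int tx_base_queue;
};

/* Matches the ndo_select_queue prototype used by this tree:
 * u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, void *). */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
                                void *accel_priv)
{
    struct fwd_ctx *fwd = accel_priv;

    if (fwd)        /* offloaded macvlan: use its reserved queue range */
        return skb->queue_mapping + fwd->tx_base_queue;

    return __netdev_pick_tx(dev, skb);  /* normal path */
}
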
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d6f0c0d8cf11..72084f70adbb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
291{ 291{
292 struct ixgbe_adapter *adapter = pci_get_drvdata(dev); 292 struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
293 int err; 293 int err;
294#ifdef CONFIG_PCI_IOV
294 u32 current_flags = adapter->flags; 295 u32 current_flags = adapter->flags;
296#endif
295 297
296 err = ixgbe_disable_sriov(adapter); 298 err = ixgbe_disable_sriov(adapter);
297 299
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6a6c1f76d8e0..ec94a20d7099 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev)
619} 619}
620 620
621static u16 621static u16
622ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) 622ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
623 void *accel_priv)
623{ 624{
624 /* we are currently only using the first queue */ 625 /* we are currently only using the first queue */
625 return 0; 626 return 0;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 7354960b583b..c4eeb69a5bee 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
92 if (time_is_before_jiffies(end)) 92 if (time_is_before_jiffies(end))
93 ++timedout; 93 ++timedout;
94 } else { 94 } else {
95 /* wait_event_timeout does not guarantee a delay of at
96 * least one whole jiffy, so timeout must be no less
97 * than two.
98 */
99 if (timeout < 2)
100 timeout = 2;
95 wait_event_timeout(dev->smi_busy_wait, 101 wait_event_timeout(dev->smi_busy_wait,
96 orion_mdio_smi_is_done(dev), 102 orion_mdio_smi_is_done(dev),
97 timeout); 103 timeout);
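
The mvmdio fix clamps the wait_event_timeout() timeout to a minimum of two jiffies, because a one-jiffy timeout can expire almost immediately after the current tick. The clamp in isolation:

#include <stdio.h>

/* wait_event_timeout() can return after less than one full tick when
 * handed a timeout of 1 jiffy, so never pass it anything below 2. */
static unsigned long mdio_wait_timeout(unsigned long timeout_jiffies)
{
    return timeout_jiffies < 2 ? 2 : timeout_jiffies;
}

int main(void)
{
    printf("%lu %lu\n", mdio_wait_timeout(1), mdio_wait_timeout(8)); /* 2 8 */
    return 0;
}
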
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f54ebd5a1702..a7fcd593b2db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
592 } 592 }
593} 593}
594 594
595u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) 595u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
596 void *accel_priv)
596{ 597{
597 struct mlx4_en_priv *priv = netdev_priv(dev); 598 struct mlx4_en_priv *priv = netdev_priv(dev);
598 u16 rings_p_up = priv->num_tx_rings_p_up; 599 u16 rings_p_up = priv->num_tx_rings_p_up;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3758de59c05..d5758adceaa2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
714int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 714int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
715 715
716void mlx4_en_tx_irq(struct mlx4_cq *mcq); 716void mlx4_en_tx_irq(struct mlx4_cq *mcq);
717u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); 717u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
718 void *accel_priv);
718netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 719netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
719 720
720int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 721int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 346a4e025c34..04b3ec1352f1 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -52,7 +52,6 @@
52#include <linux/bitrev.h> 52#include <linux/bitrev.h>
53#include <linux/slab.h> 53#include <linux/slab.h>
54 54
55#include <asm/bootinfo.h>
56#include <asm/pgtable.h> 55#include <asm/pgtable.h>
57#include <asm/io.h> 56#include <asm/io.h>
58#include <asm/hwtest.h> 57#include <asm/hwtest.h>
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 7692dfd4f262..cc68657f0536 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
1604 u32 seq_number; 1604 u32 seq_number;
1605 u8 vhdr_len = 0; 1605 u8 vhdr_len = 0;
1606 1606
1607 if (unlikely(ring > adapter->max_rds_rings)) 1607 if (unlikely(ring >= adapter->max_rds_rings))
1608 return NULL; 1608 return NULL;
1609 1609
1610 rds_ring = &recv_ctx->rds_rings[ring]; 1610 rds_ring = &recv_ctx->rds_rings[ring];
1611 1611
1612 index = netxen_get_lro_sts_refhandle(sts_data0); 1612 index = netxen_get_lro_sts_refhandle(sts_data0);
1613 if (unlikely(index > rds_ring->num_desc)) 1613 if (unlikely(index >= rds_ring->num_desc))
1614 return NULL; 1614 return NULL;
1615 1615
1616 buffer = &rds_ring->rx_buf_arr[index]; 1616 buffer = &rds_ring->rx_buf_arr[index];
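
The netxen LRO fix tightens two bounds checks from '>' to '>=': an index equal to the array size is already one past the last valid slot. The same off-by-one in miniature:

#include <stdio.h>

/* Valid indexes into an array of 'count' entries are 0 .. count-1,
 * so reject index >= count (the old '>' let 'count' itself through). */
static int index_ok(unsigned int index, unsigned int count)
{
    return index < count;
}

int main(void)
{
    printf("%d\n", index_ok(7, 8));     /* 1 */
    printf("%d\n", index_ok(8, 8));     /* 0: previously accepted */
    return 0;
}
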
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 631ea0ac1cd8..f2a7c7166e24 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -487,6 +487,7 @@ struct qlcnic_hardware_context {
487 struct qlcnic_mailbox *mailbox; 487 struct qlcnic_mailbox *mailbox;
488 u8 extend_lb_time; 488 u8 extend_lb_time;
489 u8 phys_port_id[ETH_ALEN]; 489 u8 phys_port_id[ETH_ALEN];
490 u8 lb_mode;
490}; 491};
491 492
492struct qlcnic_adapter_stats { 493struct qlcnic_adapter_stats {
@@ -578,6 +579,8 @@ struct qlcnic_host_tx_ring {
578 dma_addr_t phys_addr; 579 dma_addr_t phys_addr;
579 dma_addr_t hw_cons_phys_addr; 580 dma_addr_t hw_cons_phys_addr;
580 struct netdev_queue *txq; 581 struct netdev_queue *txq;
582 /* Lock to protect Tx descriptors cleanup */
583 spinlock_t tx_clean_lock;
581} ____cacheline_internodealigned_in_smp; 584} ____cacheline_internodealigned_in_smp;
582 585
583/* 586/*
@@ -808,6 +811,7 @@ struct qlcnic_mac_list_s {
808 811
809#define QLCNIC_ILB_MODE 0x1 812#define QLCNIC_ILB_MODE 0x1
810#define QLCNIC_ELB_MODE 0x2 813#define QLCNIC_ELB_MODE 0x2
814#define QLCNIC_LB_MODE_MASK 0x3
811 815
812#define QLCNIC_LINKEVENT 0x1 816#define QLCNIC_LINKEVENT 0x1
813#define QLCNIC_LB_RESPONSE 0x2 817#define QLCNIC_LB_RESPONSE 0x2
@@ -1093,7 +1097,6 @@ struct qlcnic_adapter {
1093 struct qlcnic_filter_hash rx_fhash; 1097 struct qlcnic_filter_hash rx_fhash;
1094 struct list_head vf_mc_list; 1098 struct list_head vf_mc_list;
1095 1099
1096 spinlock_t tx_clean_lock;
1097 spinlock_t mac_learn_lock; 1100 spinlock_t mac_learn_lock;
1098 /* spinlock for catching rcv filters for eswitch traffic */ 1101 /* spinlock for catching rcv filters for eswitch traffic */
1099 spinlock_t rx_mac_learn_lock; 1102 spinlock_t rx_mac_learn_lock;
@@ -1708,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
1708void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); 1711void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
1709void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); 1712void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
1710void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); 1713void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
1714void qlcnic_update_stats(struct qlcnic_adapter *);
1711 1715
1712/* Adapter hardware abstraction */ 1716/* Adapter hardware abstraction */
1713struct qlcnic_hardware_ops { 1717struct qlcnic_hardware_ops {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 6055d397a29e..f776f99f7915 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -1684,12 +1684,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1684 } 1684 }
1685 } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); 1685 } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
1686 1686
1687 /* Make sure carrier is off and queue is stopped during loopback */
1688 if (netif_running(netdev)) {
1689 netif_carrier_off(netdev);
1690 netif_tx_stop_all_queues(netdev);
1691 }
1692
1693 ret = qlcnic_do_lb_test(adapter, mode); 1687 ret = qlcnic_do_lb_test(adapter, mode);
1694 1688
1695 qlcnic_83xx_clear_lb_mode(adapter, mode); 1689 qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2121,6 +2115,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
2121 ahw->link_autoneg = MSB(MSW(data[3])); 2115 ahw->link_autoneg = MSB(MSW(data[3]));
2122 ahw->module_type = MSB(LSW(data[3])); 2116 ahw->module_type = MSB(LSW(data[3]));
2123 ahw->has_link_events = 1; 2117 ahw->has_link_events = 1;
2118 ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
2124 qlcnic_advert_link_change(adapter, link_status); 2119 qlcnic_advert_link_change(adapter, link_status);
2125} 2120}
2126 2121
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index e3be2760665c..6b08194aa0d4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
167 167
168#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) 168#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
169 169
170static inline int qlcnic_82xx_statistics(void) 170static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
171{ 171{
172 return ARRAY_SIZE(qlcnic_device_gstrings_stats) + 172 return ARRAY_SIZE(qlcnic_gstrings_stats) +
173 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); 173 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
174 QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
174} 175}
175 176
176static inline int qlcnic_83xx_statistics(void) 177static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
177{ 178{
178 return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + 179 return ARRAY_SIZE(qlcnic_gstrings_stats) +
180 ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
179 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + 181 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
180 ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); 182 ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
183 QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
181} 184}
182 185
183static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) 186static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
184{ 187{
185 if (qlcnic_82xx_check(adapter)) 188 int len = -1;
186 return qlcnic_82xx_statistics(); 189
187 else if (qlcnic_83xx_check(adapter)) 190 if (qlcnic_82xx_check(adapter)) {
188 return qlcnic_83xx_statistics(); 191 len = qlcnic_82xx_statistics(adapter);
189 else 192 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
190 return -1; 193 len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
194 } else if (qlcnic_83xx_check(adapter)) {
195 len = qlcnic_83xx_statistics(adapter);
196 }
197
198 return len;
191} 199}
192 200
193#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 201#define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412
@@ -920,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev)
920 928
921static int qlcnic_get_sset_count(struct net_device *dev, int sset) 929static int qlcnic_get_sset_count(struct net_device *dev, int sset)
922{ 930{
923 int len;
924 931
925 struct qlcnic_adapter *adapter = netdev_priv(dev); 932 struct qlcnic_adapter *adapter = netdev_priv(dev);
926 switch (sset) { 933 switch (sset) {
927 case ETH_SS_TEST: 934 case ETH_SS_TEST:
928 return QLCNIC_TEST_LEN; 935 return QLCNIC_TEST_LEN;
929 case ETH_SS_STATS: 936 case ETH_SS_STATS:
930 len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN; 937 return qlcnic_dev_statistics_len(adapter);
931 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
932 qlcnic_83xx_check(adapter))
933 return len;
934 return qlcnic_82xx_statistics();
935 default: 938 default:
936 return -EOPNOTSUPP; 939 return -EOPNOTSUPP;
937 } 940 }
@@ -1267,7 +1270,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
1267 return data; 1270 return data;
1268} 1271}
1269 1272
1270static void qlcnic_update_stats(struct qlcnic_adapter *adapter) 1273void qlcnic_update_stats(struct qlcnic_adapter *adapter)
1271{ 1274{
1272 struct qlcnic_host_tx_ring *tx_ring; 1275 struct qlcnic_host_tx_ring *tx_ring;
1273 int ring; 1276 int ring;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index e9c21e5d0ca9..c4262c23ed7c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
134 struct qlcnic_skb_frag *buffrag; 134 struct qlcnic_skb_frag *buffrag;
135 int i, j; 135 int i, j;
136 136
137 spin_lock(&tx_ring->tx_clean_lock);
138
137 cmd_buf = tx_ring->cmd_buf_arr; 139 cmd_buf = tx_ring->cmd_buf_arr;
138 for (i = 0; i < tx_ring->num_desc; i++) { 140 for (i = 0; i < tx_ring->num_desc; i++) {
139 buffrag = cmd_buf->frag_array; 141 buffrag = cmd_buf->frag_array;
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
157 } 159 }
158 cmd_buf++; 160 cmd_buf++;
159 } 161 }
162
163 spin_unlock(&tx_ring->tx_clean_lock);
160} 164}
161 165
162void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter) 166void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index eda6c691d897..ad1531ae3aa8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -689,6 +689,10 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
689 adapter->ahw->linkup = 0; 689 adapter->ahw->linkup = 0;
690 netif_carrier_off(netdev); 690 netif_carrier_off(netdev);
691 } else if (!adapter->ahw->linkup && linkup) { 691 } else if (!adapter->ahw->linkup && linkup) {
692 /* Do not advertise Link up if the port is in loopback mode */
693 if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
694 return;
695
692 netdev_info(netdev, "NIC Link is up\n"); 696 netdev_info(netdev, "NIC Link is up\n");
693 adapter->ahw->linkup = 1; 697 adapter->ahw->linkup = 1;
694 netif_carrier_on(netdev); 698 netif_carrier_on(netdev);
@@ -778,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
778 struct net_device *netdev = adapter->netdev; 782 struct net_device *netdev = adapter->netdev;
779 struct qlcnic_skb_frag *frag; 783 struct qlcnic_skb_frag *frag;
780 784
781 if (!spin_trylock(&adapter->tx_clean_lock)) 785 if (!spin_trylock(&tx_ring->tx_clean_lock))
782 return 1; 786 return 1;
783 787
784 sw_consumer = tx_ring->sw_consumer; 788 sw_consumer = tx_ring->sw_consumer;
@@ -807,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
807 break; 811 break;
808 } 812 }
809 813
814 tx_ring->sw_consumer = sw_consumer;
815
810 if (count && netif_running(netdev)) { 816 if (count && netif_running(netdev)) {
811 tx_ring->sw_consumer = sw_consumer;
812 smp_mb(); 817 smp_mb();
813 if (netif_tx_queue_stopped(tx_ring->txq) && 818 if (netif_tx_queue_stopped(tx_ring->txq) &&
814 netif_carrier_ok(netdev)) { 819 netif_carrier_ok(netdev)) {
@@ -834,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
834 */ 839 */
835 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer)); 840 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
836 done = (sw_consumer == hw_consumer); 841 done = (sw_consumer == hw_consumer);
837 spin_unlock(&adapter->tx_clean_lock); 842
843 spin_unlock(&tx_ring->tx_clean_lock);
838 844
839 return done; 845 return done;
840} 846}
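
The qlcnic hunks replace the single adapter-wide tx_clean_lock with a per-Tx-ring spinlock, initialized when each ring is allocated and taken both while reclaiming completed descriptors and while releasing buffers on shutdown. A skeletal version of that per-ring locking, against a stripped-down ring structure:

#include <linux/spinlock.h>

struct tx_ring_min {
    spinlock_t tx_clean_lock;   /* protects Tx descriptor cleanup */
    /* ... descriptors, consumer indices, etc. ... */
};

static void tx_ring_setup(struct tx_ring_min *ring)
{
    spin_lock_init(&ring->tx_clean_lock);
}

/* Completion processing: skip this ring if someone else is cleaning it. */
static int tx_ring_try_clean(struct tx_ring_min *ring)
{
    if (!spin_trylock(&ring->tx_clean_lock))
        return 1;               /* caller treats "busy" as done */
    /* ... reclaim completed descriptors ... */
    spin_unlock(&ring->tx_clean_lock);
    return 0;
}
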
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2c8cac0c6a55..550791b8fbae 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1756,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1756 if (qlcnic_sriov_vf_check(adapter)) 1756 if (qlcnic_sriov_vf_check(adapter))
1757 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc); 1757 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
1758 smp_mb(); 1758 smp_mb();
1759 spin_lock(&adapter->tx_clean_lock);
1760 netif_carrier_off(netdev); 1759 netif_carrier_off(netdev);
1761 adapter->ahw->linkup = 0; 1760 adapter->ahw->linkup = 0;
1762 netif_tx_disable(netdev); 1761 netif_tx_disable(netdev);
@@ -1777,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1777 1776
1778 for (ring = 0; ring < adapter->drv_tx_rings; ring++) 1777 for (ring = 0; ring < adapter->drv_tx_rings; ring++)
1779 qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]); 1778 qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
1780 spin_unlock(&adapter->tx_clean_lock);
1781} 1779}
1782 1780
1783/* Usage: During suspend and firmware recovery module */ 1781/* Usage: During suspend and firmware recovery module */
@@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
2172 } 2170 }
2173 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); 2171 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
2174 tx_ring->cmd_buf_arr = cmd_buf_arr; 2172 tx_ring->cmd_buf_arr = cmd_buf_arr;
2173 spin_lock_init(&tx_ring->tx_clean_lock);
2175 } 2174 }
2176 2175
2177 if (qlcnic_83xx_check(adapter) || 2176 if (qlcnic_83xx_check(adapter) ||
@@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2299 rwlock_init(&adapter->ahw->crb_lock); 2298 rwlock_init(&adapter->ahw->crb_lock);
2300 mutex_init(&adapter->ahw->mem_lock); 2299 mutex_init(&adapter->ahw->mem_lock);
2301 2300
2302 spin_lock_init(&adapter->tx_clean_lock);
2303 INIT_LIST_HEAD(&adapter->mac_list); 2301 INIT_LIST_HEAD(&adapter->mac_list);
2304 2302
2305 qlcnic_register_dcb(adapter); 2303 qlcnic_register_dcb(adapter);
@@ -2782,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2782 struct qlcnic_adapter *adapter = netdev_priv(netdev); 2780 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2783 struct net_device_stats *stats = &netdev->stats; 2781 struct net_device_stats *stats = &netdev->stats;
2784 2782
2783 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2784 qlcnic_update_stats(adapter);
2785
2785 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; 2786 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2786 stats->tx_packets = adapter->stats.xmitfinished; 2787 stats->tx_packets = adapter->stats.xmitfinished;
2787 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; 2788 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 686f460b1502..024f8161d2fe 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -75,7 +75,6 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
75 num_vfs = sriov->num_vfs; 75 num_vfs = sriov->num_vfs;
76 max = num_vfs + 1; 76 max = num_vfs + 1;
77 info->bit_offsets = 0xffff; 77 info->bit_offsets = 0xffff;
78 info->max_tx_ques = res->num_tx_queues / max;
79 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters; 78 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
80 num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC; 79 num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
81 80
@@ -86,6 +85,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
86 info->max_tx_mac_filters = temp; 85 info->max_tx_mac_filters = temp;
87 info->min_tx_bw = 0; 86 info->min_tx_bw = 0;
88 info->max_tx_bw = MAX_BW; 87 info->max_tx_bw = MAX_BW;
88 info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
89 } else { 89 } else {
90 id = qlcnic_sriov_func_to_index(adapter, func); 90 id = qlcnic_sriov_func_to_index(adapter, func);
91 if (id < 0) 91 if (id < 0)
@@ -95,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
95 info->max_tx_bw = vp->max_tx_bw; 95 info->max_tx_bw = vp->max_tx_bw;
96 info->max_rx_ucast_mac_filters = num_vf_macs; 96 info->max_rx_ucast_mac_filters = num_vf_macs;
97 info->max_tx_mac_filters = num_vf_macs; 97 info->max_tx_mac_filters = num_vf_macs;
98 info->max_tx_ques = QLCNIC_SINGLE_RING;
98 } 99 }
99 100
100 info->max_rx_ip_addr = res->num_destip / max; 101 info->max_rx_ip_addr = res->num_destip / max;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 449f506d2e8f..f705aeeba767 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4765,6 +4765,8 @@ static int qlge_probe(struct pci_dev *pdev,
4765 NETIF_F_RXCSUM; 4765 NETIF_F_RXCSUM;
4766 ndev->features = ndev->hw_features; 4766 ndev->features = ndev->hw_features;
4767 ndev->vlan_features = ndev->hw_features; 4767 ndev->vlan_features = ndev->hw_features;
4768 /* vlan gets same features (except vlan filter) */
4769 ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4768 4770
4769 if (test_bit(QL_DMA64, &qdev->flags)) 4771 if (test_bit(QL_DMA64, &qdev->flags))
4770 ndev->features |= NETIF_F_HIGHDMA; 4772 ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8a7a23a84ac5..797b56a0efc4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
622 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 622 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
623 return -EOPNOTSUPP; 623 return -EOPNOTSUPP;
624 624
625 if (netif_msg_hw(priv)) { 625 priv->adv_ts = 0;
626 if (priv->dma_cap.time_stamp) { 626 if (priv->dma_cap.atime_stamp && priv->extend_desc)
627 pr_debug("IEEE 1588-2002 Time Stamp supported\n"); 627 priv->adv_ts = 1;
628 priv->adv_ts = 0; 628
629 } 629 if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
630 if (priv->dma_cap.atime_stamp && priv->extend_desc) { 630 pr_debug("IEEE 1588-2002 Time Stamp supported\n");
631 pr_debug 631
632 ("IEEE 1588-2008 Advanced Time Stamp supported\n"); 632 if (netif_msg_hw(priv) && priv->adv_ts)
633 priv->adv_ts = 1; 633 pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
634 }
635 }
636 634
637 priv->hw->ptp = &stmmac_ptp; 635 priv->hw->ptp = &stmmac_ptp;
638 priv->hwts_tx_en = 0; 636 priv->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b8b0eeed0f92..7680581ebe12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
56 56
57 priv->hw->ptp->config_addend(priv->ioaddr, addend); 57 priv->hw->ptp->config_addend(priv->ioaddr, addend);
58 58
59 spin_unlock_irqrestore(&priv->lock, flags); 59 spin_unlock_irqrestore(&priv->ptp_lock, flags);
60 60
61 return 0; 61 return 0;
62} 62}
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
91 91
92 priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); 92 priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
93 93
94 spin_unlock_irqrestore(&priv->lock, flags); 94 spin_unlock_irqrestore(&priv->ptp_lock, flags);
95 95
96 return 0; 96 return 0;
97} 97}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 5120d9ce1dd4..5330fd298705 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
740 /* set speed_in input in case RMII mode is used in 100Mbps */ 740 /* set speed_in input in case RMII mode is used in 100Mbps */
741 if (phy->speed == 100) 741 if (phy->speed == 100)
742 mac_control |= BIT(15); 742 mac_control |= BIT(15);
743 else if (phy->speed == 10)
744 mac_control |= BIT(18); /* In Band mode */
743 745
744 *link = true; 746 *link = true;
745 } else { 747 } else {
@@ -2106,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev)
2106 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { 2108 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
2107 for (i = res->start; i <= res->end; i++) { 2109 for (i = res->start; i <= res->end; i++) {
2108 if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, 2110 if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
2109 dev_name(priv->dev), priv)) { 2111 dev_name(&pdev->dev), priv)) {
2110 dev_err(priv->dev, "error attaching irq\n"); 2112 dev_err(priv->dev, "error attaching irq\n");
2111 goto clean_ale_ret; 2113 goto clean_ale_ret;
2112 } 2114 }
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 628b736e5ae7..0e9fb3301b11 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2080,7 +2080,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
2080} 2080}
2081 2081
2082/* Return subqueue id on this core (one per core). */ 2082/* Return subqueue id on this core (one per core). */
2083static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) 2083static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
2084 void *accel_priv)
2084{ 2085{
2085 return smp_processor_id(); 2086 return smp_processor_id();
2086} 2087}
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index cce6c4bc556a..ef312bc6b865 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1618,6 +1618,7 @@ static void rhine_reset_task(struct work_struct *work)
1618 goto out_unlock; 1618 goto out_unlock;
1619 1619
1620 napi_disable(&rp->napi); 1620 napi_disable(&rp->napi);
1621 netif_tx_disable(dev);
1621 spin_lock_bh(&rp->lock); 1622 spin_lock_bh(&rp->lock);
1622 1623
1623 /* clear all descriptors */ 1624 /* clear all descriptors */
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 3169252613fa..5d78c1d08abd 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
571 case HDLCDRVCTL_CALIBRATE: 571 case HDLCDRVCTL_CALIBRATE:
572 if(!capable(CAP_SYS_RAWIO)) 572 if(!capable(CAP_SYS_RAWIO))
573 return -EPERM; 573 return -EPERM;
574 if (bi.data.calibrate > INT_MAX / s->par.bitrate)
575 return -EINVAL;
574 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; 576 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
575 return 0; 577 return 0;
576 578
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1971411574db..61dd2447e1bb 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1057 break; 1057 break;
1058 1058
1059 case SIOCYAMGCFG: 1059 case SIOCYAMGCFG:
1060 memset(&yi, 0, sizeof(yi));
1060 yi.cfg.mask = 0xffffffff; 1061 yi.cfg.mask = 0xffffffff;
1061 yi.cfg.iobase = yp->iobase; 1062 yi.cfg.iobase = yp->iobase;
1062 yi.cfg.irq = yp->irq; 1063 yi.cfg.irq = yp->irq;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f8135725bcf6..71baeb3ed905 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -261,9 +261,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
261 struct sk_buff *skb; 261 struct sk_buff *skb;
262 262
263 net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev; 263 net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
264 if (!net) { 264 if (!net || net->reg_state != NETREG_REGISTERED) {
265 netdev_err(net, "got receive callback but net device"
266 " not initialized yet\n");
267 packet->status = NVSP_STAT_FAIL; 265 packet->status = NVSP_STAT_FAIL;
268 return 0; 266 return 0;
269 } 267 }
@@ -435,19 +433,11 @@ static int netvsc_probe(struct hv_device *dev,
435 SET_ETHTOOL_OPS(net, &ethtool_ops); 433 SET_ETHTOOL_OPS(net, &ethtool_ops);
436 SET_NETDEV_DEV(net, &dev->device); 434 SET_NETDEV_DEV(net, &dev->device);
437 435
438 ret = register_netdev(net);
439 if (ret != 0) {
440 pr_err("Unable to register netdev.\n");
441 free_netdev(net);
442 goto out;
443 }
444
445 /* Notify the netvsc driver of the new device */ 436 /* Notify the netvsc driver of the new device */
446 device_info.ring_size = ring_size; 437 device_info.ring_size = ring_size;
447 ret = rndis_filter_device_add(dev, &device_info); 438 ret = rndis_filter_device_add(dev, &device_info);
448 if (ret != 0) { 439 if (ret != 0) {
449 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); 440 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
450 unregister_netdev(net);
451 free_netdev(net); 441 free_netdev(net);
452 hv_set_drvdata(dev, NULL); 442 hv_set_drvdata(dev, NULL);
453 return ret; 443 return ret;
@@ -456,7 +446,13 @@ static int netvsc_probe(struct hv_device *dev,
456 446
457 netif_carrier_on(net); 447 netif_carrier_on(net);
458 448
459out: 449 ret = register_netdev(net);
450 if (ret != 0) {
451 pr_err("Unable to register netdev.\n");
452 rndis_filter_device_remove(dev);
453 free_netdev(net);
454 }
455
460 return ret; 456 return ret;
461} 457}
462 458
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index acf93798dc67..bc8faaec33f5 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
299 299
300 if (vlan->fwd_priv) { 300 if (vlan->fwd_priv) {
301 skb->dev = vlan->lowerdev; 301 skb->dev = vlan->lowerdev;
302 ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv); 302 ret = dev_queue_xmit_accel(skb, vlan->fwd_priv);
303 } else { 303 } else {
304 ret = macvlan_queue_xmit(skb, dev); 304 ret = macvlan_queue_xmit(skb, dev);
305 } 305 }
@@ -338,6 +338,8 @@ static const struct header_ops macvlan_hard_header_ops = {
338 .cache_update = eth_header_cache_update, 338 .cache_update = eth_header_cache_update,
339}; 339};
340 340
341static struct rtnl_link_ops macvlan_link_ops;
342
341static int macvlan_open(struct net_device *dev) 343static int macvlan_open(struct net_device *dev)
342{ 344{
343 struct macvlan_dev *vlan = netdev_priv(dev); 345 struct macvlan_dev *vlan = netdev_priv(dev);
@@ -353,7 +355,8 @@ static int macvlan_open(struct net_device *dev)
353 goto hash_add; 355 goto hash_add;
354 } 356 }
355 357
356 if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) { 358 if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD &&
359 dev->rtnl_link_ops == &macvlan_link_ops) {
357 vlan->fwd_priv = 360 vlan->fwd_priv =
358 lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); 361 lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
359 362
@@ -362,10 +365,8 @@ static int macvlan_open(struct net_device *dev)
362 */ 365 */
363 if (IS_ERR_OR_NULL(vlan->fwd_priv)) { 366 if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
364 vlan->fwd_priv = NULL; 367 vlan->fwd_priv = NULL;
365 } else { 368 } else
366 dev->features &= ~NETIF_F_LLTX;
367 return 0; 369 return 0;
368 }
369 } 370 }
370 371
371 err = -EBUSY; 372 err = -EBUSY;
@@ -690,8 +691,18 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
690 netdev_features_t features) 691 netdev_features_t features)
691{ 692{
692 struct macvlan_dev *vlan = netdev_priv(dev); 693 struct macvlan_dev *vlan = netdev_priv(dev);
694 netdev_features_t mask;
695
696 features |= NETIF_F_ALL_FOR_ALL;
697 features &= (vlan->set_features | ~MACVLAN_FEATURES);
698 mask = features;
699
700 features = netdev_increment_features(vlan->lowerdev->features,
701 features,
702 mask);
703 features |= NETIF_F_LLTX;
693 704
694 return features & (vlan->set_features | ~MACVLAN_FEATURES); 705 return features;
695} 706}
696 707
697static const struct ethtool_ops macvlan_ethtool_ops = { 708static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -1019,9 +1030,8 @@ static int macvlan_device_event(struct notifier_block *unused,
1019 break; 1030 break;
1020 case NETDEV_FEAT_CHANGE: 1031 case NETDEV_FEAT_CHANGE:
1021 list_for_each_entry(vlan, &port->vlans, list) { 1032 list_for_each_entry(vlan, &port->vlans, list) {
1022 vlan->dev->features = dev->features & MACVLAN_FEATURES;
1023 vlan->dev->gso_max_size = dev->gso_max_size; 1033 vlan->dev->gso_max_size = dev->gso_max_size;
1024 netdev_features_change(vlan->dev); 1034 netdev_update_features(vlan->dev);
1025 } 1035 }
1026 break; 1036 break;
1027 case NETDEV_UNREGISTER: 1037 case NETDEV_UNREGISTER:
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 36c6994436b7..98434b84f041 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -565,10 +565,8 @@ int phy_start_interrupts(struct phy_device *phydev)
565 int err = 0; 565 int err = 0;
566 566
567 atomic_set(&phydev->irq_disable, 0); 567 atomic_set(&phydev->irq_disable, 0);
568 if (request_irq(phydev->irq, phy_interrupt, 568 if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
569 IRQF_SHARED, 569 phydev) < 0) {
570 "phy_interrupt",
571 phydev) < 0) {
572 pr_warn("%s: Can't get IRQ %d (PHY)\n", 570 pr_warn("%s: Can't get IRQ %d (PHY)\n",
573 phydev->bus->name, phydev->irq); 571 phydev->bus->name, phydev->irq);
574 phydev->irq = PHY_POLL; 572 phydev->irq = PHY_POLL;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 736050d6b451..b75ae5bde673 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1647 return NETDEV_TX_OK; 1647 return NETDEV_TX_OK;
1648} 1648}
1649 1649
1650static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb) 1650static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1651 void *accel_priv)
1651{ 1652{
1652 /* 1653 /*
1653 * This helper function exists to help dev_pick_tx get the correct 1654 * This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7c8343a4f918..ecec8029c5e8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -348,7 +348,8 @@ unlock:
348 * different rxq no. here. If we could not get rxhash, then we would 348 * different rxq no. here. If we could not get rxhash, then we would
349 * hope the rxq no. may help here. 349 * hope the rxq no. may help here.
350 */ 350 */
351static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb) 351static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
352 void *accel_priv)
352{ 353{
353 struct tun_struct *tun = netdev_priv(dev); 354 struct tun_struct *tun = netdev_priv(dev);
354 struct tun_flow_entry *e; 355 struct tun_flow_entry *e;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 85e4a01670f0..47b0f732b0b1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM
276 module will be called cdc_mbim. 276 module will be called cdc_mbim.
277 277
278config USB_NET_DM9601 278config USB_NET_DM9601
279 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" 279 tristate "Davicom DM96xx based USB 10/100 ethernet devices"
280 depends on USB_USBNET 280 depends on USB_USBNET
281 select CRC32 281 select CRC32
282 help 282 help
283 This option adds support for Davicom DM9601 based USB 1.1 283 This option adds support for Davicom DM9601/DM9620/DM9621A
284 10/100 Ethernet adapters. 284 based USB 10/100 Ethernet adapters.
285 285
286config USB_NET_SR9700 286config USB_NET_SR9700
287 tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices" 287 tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index c6867f926cff..e80219877730 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices 2 * Davicom DM96xx USB 10/100Mbps ethernet devices
3 * 3 *
4 * Peter Korsgaard <jacmet@sunsite.dk> 4 * Peter Korsgaard <jacmet@sunsite.dk>
5 * 5 *
@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
364 dev->net->ethtool_ops = &dm9601_ethtool_ops; 364 dev->net->ethtool_ops = &dm9601_ethtool_ops;
365 dev->net->hard_header_len += DM_TX_OVERHEAD; 365 dev->net->hard_header_len += DM_TX_OVERHEAD;
366 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; 366 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
367 dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; 367
368 /* dm9620/21a require room for 4 byte padding, even in dm9601
369 * mode, so we need +1 to be able to receive full size
370 * ethernet frames.
371 */
372 dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
368 373
369 dev->mii.dev = dev->net; 374 dev->mii.dev = dev->net;
370 dev->mii.mdio_read = dm9601_mdio_read; 375 dev->mii.mdio_read = dm9601_mdio_read;
@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
468static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 473static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
469 gfp_t flags) 474 gfp_t flags)
470{ 475{
471 int len; 476 int len, pad;
472 477
473 /* format: 478 /* format:
474 b1: packet length low 479 b1: packet length low
@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
476 b3..n: packet data 481 b3..n: packet data
477 */ 482 */
478 483
479 len = skb->len; 484 len = skb->len + DM_TX_OVERHEAD;
480 485
481 if (skb_headroom(skb) < DM_TX_OVERHEAD) { 486 /* workaround for dm962x errata with tx fifo getting out of
487 * sync if a USB bulk transfer retry happens right after a
488 * packet with odd / maxpacket length by adding up to 3 bytes
489 * padding.
490 */
491 while ((len & 1) || !(len % dev->maxpacket))
492 len++;
493
494 len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
495 pad = len - skb->len;
496
497 if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
482 struct sk_buff *skb2; 498 struct sk_buff *skb2;
483 499
484 skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags); 500 skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
485 dev_kfree_skb_any(skb); 501 dev_kfree_skb_any(skb);
486 skb = skb2; 502 skb = skb2;
487 if (!skb) 503 if (!skb)
@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
490 506
491 __skb_push(skb, DM_TX_OVERHEAD); 507 __skb_push(skb, DM_TX_OVERHEAD);
492 508
493 /* usbnet adds padding if length is a multiple of packet size 509 if (pad) {
494 if so, adjust length value in header */ 510 memset(skb->data + skb->len, 0, pad);
495 if ((skb->len % dev->maxpacket) == 0) 511 __skb_put(skb, pad);
496 len++; 512 }
497 513
498 skb->data[0] = len; 514 skb->data[0] = len;
499 skb->data[1] = len >> 8; 515 skb->data[1] = len >> 8;
@@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev)
543} 559}
544 560
545static const struct driver_info dm9601_info = { 561static const struct driver_info dm9601_info = {
546 .description = "Davicom DM9601 USB Ethernet", 562 .description = "Davicom DM96xx USB 10/100 Ethernet",
547 .flags = FLAG_ETHER | FLAG_LINK_INTR, 563 .flags = FLAG_ETHER | FLAG_LINK_INTR,
548 .bind = dm9601_bind, 564 .bind = dm9601_bind,
549 .rx_fixup = dm9601_rx_fixup, 565 .rx_fixup = dm9601_rx_fixup,
@@ -594,6 +610,22 @@ static const struct usb_device_id products[] = {
594 USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */ 610 USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
595 .driver_info = (unsigned long)&dm9601_info, 611 .driver_info = (unsigned long)&dm9601_info,
596 }, 612 },
613 {
614 USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */
615 .driver_info = (unsigned long)&dm9601_info,
616 },
617 {
618 USB_DEVICE(0x0a46, 0x9622), /* DM9622 USB to Fast Ethernet Adapter */
619 .driver_info = (unsigned long)&dm9601_info,
620 },
621 {
622 USB_DEVICE(0x0a46, 0x0269), /* DM962OA USB to Fast Ethernet Adapter */
623 .driver_info = (unsigned long)&dm9601_info,
624 },
625 {
626 USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */
627 .driver_info = (unsigned long)&dm9601_info,
628 },
597 {}, // END 629 {}, // END
598}; 630};
599 631
@@ -612,5 +644,5 @@ static struct usb_driver dm9601_driver = {
612module_usb_driver(dm9601_driver); 644module_usb_driver(dm9601_driver);
613 645
614MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>"); 646MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
615MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices"); 647MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices");
616MODULE_LICENSE("GPL"); 648MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 86292e6aaf49..1a482344b3f5 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -185,7 +185,6 @@ enum rx_ctrl_state{
185#define BM_REQUEST_TYPE (0xa1) 185#define BM_REQUEST_TYPE (0xa1)
186#define B_NOTIFICATION (0x20) 186#define B_NOTIFICATION (0x20)
187#define W_VALUE (0x0) 187#define W_VALUE (0x0)
188#define W_INDEX (0x2)
189#define W_LENGTH (0x2) 188#define W_LENGTH (0x2)
190 189
191#define B_OVERRUN (0x1<<6) 190#define B_OVERRUN (0x1<<6)
@@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb)
1487 struct uart_icount *icount; 1486 struct uart_icount *icount;
1488 struct hso_serial_state_notification *serial_state_notification; 1487 struct hso_serial_state_notification *serial_state_notification;
1489 struct usb_device *usb; 1488 struct usb_device *usb;
1489 int if_num;
1490 1490
1491 /* Sanity checks */ 1491 /* Sanity checks */
1492 if (!serial) 1492 if (!serial)
@@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb)
1495 handle_usb_error(status, __func__, serial->parent); 1495 handle_usb_error(status, __func__, serial->parent);
1496 return; 1496 return;
1497 } 1497 }
1498
1499 /* tiocmget is only supported on HSO_PORT_MODEM */
1498 tiocmget = serial->tiocmget; 1500 tiocmget = serial->tiocmget;
1499 if (!tiocmget) 1501 if (!tiocmget)
1500 return; 1502 return;
1503 BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
1504
1501 usb = serial->parent->usb; 1505 usb = serial->parent->usb;
1506 if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
1507
1508 /* wIndex should be the USB interface number of the port to which the
1509 * notification applies, which should always be the Modem port.
1510 */
1502 serial_state_notification = &tiocmget->serial_state_notification; 1511 serial_state_notification = &tiocmget->serial_state_notification;
1503 if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || 1512 if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
1504 serial_state_notification->bNotification != B_NOTIFICATION || 1513 serial_state_notification->bNotification != B_NOTIFICATION ||
1505 le16_to_cpu(serial_state_notification->wValue) != W_VALUE || 1514 le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
1506 le16_to_cpu(serial_state_notification->wIndex) != W_INDEX || 1515 le16_to_cpu(serial_state_notification->wIndex) != if_num ||
1507 le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { 1516 le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) {
1508 dev_warn(&usb->dev, 1517 dev_warn(&usb->dev,
1509 "hso received invalid serial state notification\n"); 1518 "hso received invalid serial state notification\n");
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 03832d3780aa..f54637828574 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -117,7 +117,6 @@ enum {
117struct mcs7830_data { 117struct mcs7830_data {
118 u8 multi_filter[8]; 118 u8 multi_filter[8];
119 u8 config; 119 u8 config;
120 u8 link_counter;
121}; 120};
122 121
123static const char driver_name[] = "MOSCHIP usb-ethernet driver"; 122static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@ -561,26 +560,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
561{ 560{
562 u8 *buf = urb->transfer_buffer; 561 u8 *buf = urb->transfer_buffer;
563 bool link, link_changed; 562 bool link, link_changed;
564 struct mcs7830_data *data = mcs7830_get_data(dev);
565 563
566 if (urb->actual_length < 16) 564 if (urb->actual_length < 16)
567 return; 565 return;
568 566
569 link = !(buf[1] & 0x20); 567 link = !(buf[1] == 0x20);
570 link_changed = netif_carrier_ok(dev->net) != link; 568 link_changed = netif_carrier_ok(dev->net) != link;
571 if (link_changed) { 569 if (link_changed) {
572 data->link_counter++; 570 usbnet_link_change(dev, link, 0);
573 /* 571 netdev_dbg(dev->net, "Link Status is: %d\n", link);
574 track link state 20 times to guard against erroneous 572 }
575 link state changes reported sometimes by the chip
576 */
577 if (data->link_counter > 20) {
578 data->link_counter = 0;
579 usbnet_link_change(dev, link, 0);
580 netdev_dbg(dev->net, "Link Status is: %d\n", link);
581 }
582 } else
583 data->link_counter = 0;
584} 573}
585 574
586static const struct driver_info moschip_info = { 575static const struct driver_info moschip_info = {
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8494bb53ebdc..aba04f561760 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1245,7 +1245,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
1245 return -ENOMEM; 1245 return -ENOMEM;
1246 1246
1247 urb->num_sgs = num_sgs; 1247 urb->num_sgs = num_sgs;
1248 sg_init_table(urb->sg, urb->num_sgs); 1248 sg_init_table(urb->sg, urb->num_sgs + 1);
1249 1249
1250 sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb)); 1250 sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
1251 total_len += skb_headlen(skb); 1251 total_len += skb_headlen(skb);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d208f8604981..5d776447d9c3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1797,16 +1797,17 @@ static int virtnet_restore(struct virtio_device *vdev)
1797 if (err) 1797 if (err)
1798 return err; 1798 return err;
1799 1799
1800 if (netif_running(vi->dev)) 1800 if (netif_running(vi->dev)) {
1801 for (i = 0; i < vi->curr_queue_pairs; i++)
1802 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1803 schedule_delayed_work(&vi->refill, 0);
1804
1801 for (i = 0; i < vi->max_queue_pairs; i++) 1805 for (i = 0; i < vi->max_queue_pairs; i++)
1802 virtnet_napi_enable(&vi->rq[i]); 1806 virtnet_napi_enable(&vi->rq[i]);
1807 }
1803 1808
1804 netif_device_attach(vi->dev); 1809 netif_device_attach(vi->dev);
1805 1810
1806 for (i = 0; i < vi->curr_queue_pairs; i++)
1807 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1808 schedule_delayed_work(&vi->refill, 0);
1809
1810 mutex_lock(&vi->config_lock); 1811 mutex_lock(&vi->config_lock);
1811 vi->config_enable = true; 1812 vi->config_enable = true;
1812 mutex_unlock(&vi->config_lock); 1813 mutex_unlock(&vi->config_lock);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 249e01c5600c..ed384fee76ac 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2440,7 +2440,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2440 /* update header length based on lower device */ 2440 /* update header length based on lower device */
2441 dev->hard_header_len = lowerdev->hard_header_len + 2441 dev->hard_header_len = lowerdev->hard_header_len +
2442 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); 2442 (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
2443 } 2443 } else if (use_ipv6)
2444 vxlan->flags |= VXLAN_F_IPV6;
2444 2445
2445 if (data[IFLA_VXLAN_TOS]) 2446 if (data[IFLA_VXLAN_TOS])
2446 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); 2447 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 8d78253c26ce..a366d6b4626f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
76 mask2 |= ATH9K_INT_CST; 76 mask2 |= ATH9K_INT_CST;
77 if (isr2 & AR_ISR_S2_TSFOOR) 77 if (isr2 & AR_ISR_S2_TSFOOR)
78 mask2 |= ATH9K_INT_TSFOOR; 78 mask2 |= ATH9K_INT_TSFOOR;
79
80 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
81 REG_WRITE(ah, AR_ISR_S2, isr2);
82 isr &= ~AR_ISR_BCNMISC;
83 }
79 } 84 }
80 85
81 isr = REG_READ(ah, AR_ISR_RAC); 86 if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
87 isr = REG_READ(ah, AR_ISR_RAC);
88
82 if (isr == 0xffffffff) { 89 if (isr == 0xffffffff) {
83 *masked = 0; 90 *masked = 0;
84 return false; 91 return false;
@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
97 104
98 *masked |= ATH9K_INT_TX; 105 *masked |= ATH9K_INT_TX;
99 106
100 s0_s = REG_READ(ah, AR_ISR_S0_S); 107 if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
108 s0_s = REG_READ(ah, AR_ISR_S0_S);
109 s1_s = REG_READ(ah, AR_ISR_S1_S);
110 } else {
111 s0_s = REG_READ(ah, AR_ISR_S0);
112 REG_WRITE(ah, AR_ISR_S0, s0_s);
113 s1_s = REG_READ(ah, AR_ISR_S1);
114 REG_WRITE(ah, AR_ISR_S1, s1_s);
115
116 isr &= ~(AR_ISR_TXOK |
117 AR_ISR_TXDESC |
118 AR_ISR_TXERR |
119 AR_ISR_TXEOL);
120 }
121
101 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); 122 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
102 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); 123 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
103
104 s1_s = REG_READ(ah, AR_ISR_S1_S);
105 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); 124 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
106 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); 125 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
107 } 126 }
@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
114 *masked |= mask2; 133 *masked |= mask2;
115 } 134 }
116 135
117 if (AR_SREV_9100(ah)) 136 if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
118 return true;
119
120 if (isr & AR_ISR_GENTMR) {
121 u32 s5_s; 137 u32 s5_s;
122 138
123 s5_s = REG_READ(ah, AR_ISR_S5_S); 139 if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
140 s5_s = REG_READ(ah, AR_ISR_S5_S);
141 } else {
142 s5_s = REG_READ(ah, AR_ISR_S5);
143 }
144
124 ah->intr_gen_timer_trigger = 145 ah->intr_gen_timer_trigger =
125 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); 146 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
126 147
@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
133 if ((s5_s & AR_ISR_S5_TIM_TIMER) && 154 if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
134 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 155 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
135 *masked |= ATH9K_INT_TIM_TIMER; 156 *masked |= ATH9K_INT_TIM_TIMER;
157
158 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
159 REG_WRITE(ah, AR_ISR_S5, s5_s);
160 isr &= ~AR_ISR_GENTMR;
161 }
136 } 162 }
137 163
164 if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
165 REG_WRITE(ah, AR_ISR, isr);
166 REG_READ(ah, AR_ISR);
167 }
168
169 if (AR_SREV_9100(ah))
170 return true;
171
138 if (sync_cause) { 172 if (sync_cause) {
139 ath9k_debug_sync_cause(common, sync_cause); 173 ath9k_debug_sync_cause(common, sync_cause);
140 fatal_int = 174 fatal_int =
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a2657fdd9cc..608d739d1378 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
127 struct ath9k_vif_iter_data *iter_data = data; 127 struct ath9k_vif_iter_data *iter_data = data;
128 int i; 128 int i;
129 129
130 for (i = 0; i < ETH_ALEN; i++) 130 if (iter_data->hw_macaddr != NULL) {
131 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); 131 for (i = 0; i < ETH_ALEN; i++)
132 iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
133 } else {
134 iter_data->hw_macaddr = mac;
135 }
132} 136}
133 137
134static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, 138static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
135 struct ieee80211_vif *vif) 139 struct ieee80211_vif *vif)
136{ 140{
137 struct ath_common *common = ath9k_hw_common(priv->ah); 141 struct ath_common *common = ath9k_hw_common(priv->ah);
138 struct ath9k_vif_iter_data iter_data; 142 struct ath9k_vif_iter_data iter_data;
139 143
140 /* 144 /*
141 * Use the hardware MAC address as reference, the hardware uses it 145 * Pick the MAC address of the first interface as the new hardware
142 * together with the BSSID mask when matching addresses. 146 * MAC address. The hardware will use it together with the BSSID mask
147 * when matching addresses.
143 */ 148 */
144 iter_data.hw_macaddr = common->macaddr; 149 iter_data.hw_macaddr = NULL;
145 memset(&iter_data.mask, 0xff, ETH_ALEN); 150 memset(&iter_data.mask, 0xff, ETH_ALEN);
146 151
147 if (vif) 152 if (vif)
@@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
153 ath9k_htc_bssid_iter, &iter_data); 158 ath9k_htc_bssid_iter, &iter_data);
154 159
155 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); 160 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
161
162 if (iter_data.hw_macaddr)
163 memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
164
156 ath_hw_setbssidmask(common); 165 ath_hw_setbssidmask(common);
157} 166}
158 167
@@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1063 goto out; 1072 goto out;
1064 } 1073 }
1065 1074
1066 ath9k_htc_set_bssid_mask(priv, vif); 1075 ath9k_htc_set_mac_bssid_mask(priv, vif);
1067 1076
1068 priv->vif_slot |= (1 << avp->index); 1077 priv->vif_slot |= (1 << avp->index);
1069 priv->nvifs++; 1078 priv->nvifs++;
@@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1128 1137
1129 ath9k_htc_set_opmode(priv); 1138 ath9k_htc_set_opmode(priv);
1130 1139
1131 ath9k_htc_set_bssid_mask(priv, vif); 1140 ath9k_htc_set_mac_bssid_mask(priv, vif);
1132 1141
1133 /* 1142 /*
1134 * Stop ANI only if there are no associated station interfaces. 1143 * Stop ANI only if there are no associated station interfaces.
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 74f452c7b166..21aa09e0e825 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -965,8 +965,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
965 struct ath_common *common = ath9k_hw_common(ah); 965 struct ath_common *common = ath9k_hw_common(ah);
966 966
967 /* 967 /*
968 * Use the hardware MAC address as reference, the hardware uses it 968 * Pick the MAC address of the first interface as the new hardware
969 * together with the BSSID mask when matching addresses. 969 * MAC address. The hardware will use it together with the BSSID mask
970 * when matching addresses.
970 */ 971 */
971 memset(iter_data, 0, sizeof(*iter_data)); 972 memset(iter_data, 0, sizeof(*iter_data));
972 memset(&iter_data->mask, 0xff, ETH_ALEN); 973 memset(&iter_data->mask, 0xff, ETH_ALEN);
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 86605027c41d..e6272546395a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -357,21 +357,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
357 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, 357 {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
358 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, 358 {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
359 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, 359 {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
360 {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)}, 360 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
361 {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)}, 361 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
362 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, 362 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
363 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
363 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, 364 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
364 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, 365 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
365 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, 366 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
366 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, 367 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
367 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, 368 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
368 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
369 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, 371 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
373 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, 374 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, 375 {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
372 {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, 376 {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
373 {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, 377 {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
374 {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, 378 {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
379 {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
380 {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
376 {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, 382 {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
377#endif /* CONFIG_IWLMVM */ 383#endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c72438bb2faf..a1b32ee9594a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2011,7 +2011,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
2011 (hwsim_flags & HWSIM_TX_STAT_ACK)) { 2011 (hwsim_flags & HWSIM_TX_STAT_ACK)) {
2012 if (skb->len >= 16) { 2012 if (skb->len >= 16) {
2013 hdr = (struct ieee80211_hdr *) skb->data; 2013 hdr = (struct ieee80211_hdr *) skb->data;
2014 mac80211_hwsim_monitor_ack(txi->rate_driver_data[0], 2014 mac80211_hwsim_monitor_ack(data2->channel,
2015 hdr->addr2); 2015 hdr->addr2);
2016 } 2016 }
2017 txi->flags |= IEEE80211_TX_STAT_ACK; 2017 txi->flags |= IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 78e8a6666cc6..8bb8988c435c 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
746} 746}
747 747
748static u16 748static u16
749mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb) 749mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
750 void *accel_priv)
750{ 751{
751 skb->priority = cfg80211_classify8021d(skb); 752 skb->priority = cfg80211_classify8021d(skb);
752 return mwifiex_1d_to_wmm_queue[skb->priority]; 753 return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 0f494444bcd1..5a53195d016b 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -740,6 +740,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
740 }; 740 };
741 int index = rtlpci->rx_ring[rx_queue_idx].idx; 741 int index = rtlpci->rx_ring[rx_queue_idx].idx;
742 742
743 if (rtlpci->driver_is_goingto_unload)
744 return;
743 /*RX NORMAL PKT */ 745 /*RX NORMAL PKT */
744 while (count--) { 746 while (count--) {
745 /*rx descriptor */ 747 /*rx descriptor */
@@ -1636,6 +1638,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1636 */ 1638 */
1637 set_hal_stop(rtlhal); 1639 set_hal_stop(rtlhal);
1638 1640
1641 rtlpci->driver_is_goingto_unload = true;
1639 rtlpriv->cfg->ops->disable_interrupt(hw); 1642 rtlpriv->cfg->ops->disable_interrupt(hw);
1640 cancel_work_sync(&rtlpriv->works.lps_change_work); 1643 cancel_work_sync(&rtlpriv->works.lps_change_work);
1641 1644
@@ -1653,7 +1656,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1653 ppsc->rfchange_inprogress = true; 1656 ppsc->rfchange_inprogress = true;
1654 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); 1657 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1655 1658
1656 rtlpci->driver_is_goingto_unload = true;
1657 rtlpriv->cfg->ops->hw_disable(hw); 1659 rtlpriv->cfg->ops->hw_disable(hw);
1658 /* some things are not needed if firmware not available */ 1660 /* some things are not needed if firmware not available */
1659 if (!rtlpriv->max_fw_size) 1661 if (!rtlpriv->max_fw_size)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 08ae01b41c83..c47794b9d42f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
101 101
102#define MAX_PENDING_REQS 256 102#define MAX_PENDING_REQS 256
103 103
104/* It's possible for an skb to have a maximal number of frags
105 * but still be less than MAX_BUFFER_OFFSET in size. Thus the
106 * worst-case number of copy operations is MAX_SKB_FRAGS per
107 * ring slot.
108 */
109#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
110
104struct xenvif { 111struct xenvif {
105 /* Unique identifier for this interface. */ 112 /* Unique identifier for this interface. */
106 domid_t domid; 113 domid_t domid;
@@ -143,13 +150,13 @@ struct xenvif {
143 */ 150 */
144 RING_IDX rx_req_cons_peek; 151 RING_IDX rx_req_cons_peek;
145 152
146 /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each 153 /* This array is allocated seperately as it is large */
147 * head/fragment page uses 2 copy operations because it 154 struct gnttab_copy *grant_copy_op;
148 * straddles two buffers in the frontend.
149 */
150 struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
151 struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
152 155
156 /* We create one meta structure per ring request we consume, so
157 * the maximum number is the same as the ring size.
158 */
159 struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
153 160
154 u8 fe_dev_addr[6]; 161 u8 fe_dev_addr[6];
155 162
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 870f1fa58370..fff8cddfed81 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,6 +34,7 @@
34#include <linux/ethtool.h> 34#include <linux/ethtool.h>
35#include <linux/rtnetlink.h> 35#include <linux/rtnetlink.h>
36#include <linux/if_vlan.h> 36#include <linux/if_vlan.h>
37#include <linux/vmalloc.h>
37 38
38#include <xen/events.h> 39#include <xen/events.h>
39#include <asm/xen/hypercall.h> 40#include <asm/xen/hypercall.h>
@@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
307 SET_NETDEV_DEV(dev, parent); 308 SET_NETDEV_DEV(dev, parent);
308 309
309 vif = netdev_priv(dev); 310 vif = netdev_priv(dev);
311
312 vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
313 MAX_GRANT_COPY_OPS);
314 if (vif->grant_copy_op == NULL) {
315 pr_warn("Could not allocate grant copy space for %s\n", name);
316 free_netdev(dev);
317 return ERR_PTR(-ENOMEM);
318 }
319
310 vif->domid = domid; 320 vif->domid = domid;
311 vif->handle = handle; 321 vif->handle = handle;
312 vif->can_sg = 1; 322 vif->can_sg = 1;
@@ -487,6 +497,7 @@ void xenvif_free(struct xenvif *vif)
487 497
488 unregister_netdev(vif->dev); 498 unregister_netdev(vif->dev);
489 499
500 vfree(vif->grant_copy_op);
490 free_netdev(vif->dev); 501 free_netdev(vif->dev);
491 502
492 module_put(THIS_MODULE); 503 module_put(THIS_MODULE);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 27bbe58dcbe7..78425554a537 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -608,7 +608,7 @@ void xenvif_rx_action(struct xenvif *vif)
608 if (!npo.copy_prod) 608 if (!npo.copy_prod)
609 return; 609 return;
610 610
611 BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op)); 611 BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
612 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); 612 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
613 613
614 while ((skb = __skb_dequeue(&rxq)) != NULL) { 614 while ((skb = __skb_dequeue(&rxq)) != NULL) {
@@ -1209,8 +1209,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
1209 goto out; 1209 goto out;
1210 1210
1211 if (!skb_partial_csum_set(skb, off, 1211 if (!skb_partial_csum_set(skb, off,
1212 offsetof(struct tcphdr, check))) 1212 offsetof(struct tcphdr, check))) {
1213 err = -EPROTO;
1213 goto out; 1214 goto out;
1215 }
1214 1216
1215 if (recalculate_partial_csum) 1217 if (recalculate_partial_csum)
1216 tcp_hdr(skb)->check = 1218 tcp_hdr(skb)->check =
@@ -1227,8 +1229,10 @@ static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
1227 goto out; 1229 goto out;
1228 1230
1229 if (!skb_partial_csum_set(skb, off, 1231 if (!skb_partial_csum_set(skb, off,
1230 offsetof(struct udphdr, check))) 1232 offsetof(struct udphdr, check))) {
1233 err = -EPROTO;
1231 goto out; 1234 goto out;
1235 }
1232 1236
1233 if (recalculate_partial_csum) 1237 if (recalculate_partial_csum)
1234 udp_hdr(skb)->check = 1238 udp_hdr(skb)->check =
@@ -1350,8 +1354,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
1350 goto out; 1354 goto out;
1351 1355
1352 if (!skb_partial_csum_set(skb, off, 1356 if (!skb_partial_csum_set(skb, off,
1353 offsetof(struct tcphdr, check))) 1357 offsetof(struct tcphdr, check))) {
1358 err = -EPROTO;
1354 goto out; 1359 goto out;
1360 }
1355 1361
1356 if (recalculate_partial_csum) 1362 if (recalculate_partial_csum)
1357 tcp_hdr(skb)->check = 1363 tcp_hdr(skb)->check =
@@ -1368,8 +1374,10 @@ static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
1368 goto out; 1374 goto out;
1369 1375
1370 if (!skb_partial_csum_set(skb, off, 1376 if (!skb_partial_csum_set(skb, off,
1371 offsetof(struct udphdr, check))) 1377 offsetof(struct udphdr, check))) {
1378 err = -EPROTO;
1372 goto out; 1379 goto out;
1380 }
1373 1381
1374 if (recalculate_partial_csum) 1382 if (recalculate_partial_csum)
1375 udp_hdr(skb)->check = 1383 udp_hdr(skb)->check =
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index de6f8990246f..c6973f101a3e 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -20,7 +20,7 @@ config OF_SELFTEST
20 depends on OF_IRQ 20 depends on OF_IRQ
21 help 21 help
22 This option builds in test cases for the device tree infrastructure 22 This option builds in test cases for the device tree infrastructure
23 that are executed one at boot time, and the results dumped to the 23 that are executed once at boot time, and the results dumped to the
24 console. 24 console.
25 25
26 If unsure, say N here, but this option is safe to enable. 26 If unsure, say N here, but this option is safe to enable.
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 4b9317bdb81c..d3dd41c840f1 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
69 (unsigned long long)cp, (unsigned long long)s, 69 (unsigned long long)cp, (unsigned long long)s,
70 (unsigned long long)da); 70 (unsigned long long)da);
71 71
72 /*
73 * If the number of address cells is larger than 2 we assume the
74 * mapping doesn't specify a physical address. Rather, the address
75 * specifies an identifier that must match exactly.
76 */
77 if (na > 2 && memcmp(range, addr, na * 4) != 0)
78 return OF_BAD_ADDR;
79
80 if (da < cp || da >= (cp + s)) 72 if (da < cp || da >= (cp + s))
81 return OF_BAD_ADDR; 73 return OF_BAD_ADDR;
82 return da - cp; 74 return da - cp;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 2fa024b97c43..758b4f8b30b7 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -922,8 +922,16 @@ void __init unflatten_device_tree(void)
922 */ 922 */
923void __init unflatten_and_copy_device_tree(void) 923void __init unflatten_and_copy_device_tree(void)
924{ 924{
925 int size = __be32_to_cpu(initial_boot_params->totalsize); 925 int size;
926 void *dt = early_init_dt_alloc_memory_arch(size, 926 void *dt;
927
928 if (!initial_boot_params) {
929 pr_warn("No valid device tree found, continuing without\n");
930 return;
931 }
932
933 size = __be32_to_cpu(initial_boot_params->totalsize);
934 dt = early_init_dt_alloc_memory_arch(size,
927 __alignof__(struct boot_param_header)); 935 __alignof__(struct boot_param_header));
928 936
929 if (dt) { 937 if (dt) {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 786b0b47fae4..27212402c532 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
165 if (of_get_property(ipar, "interrupt-controller", NULL) != 165 if (of_get_property(ipar, "interrupt-controller", NULL) !=
166 NULL) { 166 NULL) {
167 pr_debug(" -> got it !\n"); 167 pr_debug(" -> got it !\n");
168 of_node_put(old);
169 return 0; 168 return 0;
170 } 169 }
171 170
@@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
250 * Successfully parsed an interrrupt-map translation; copy new 249 * Successfully parsed an interrrupt-map translation; copy new
251 * interrupt specifier into the out_irq structure 250 * interrupt specifier into the out_irq structure
252 */ 251 */
253 of_node_put(out_irq->np); 252 out_irq->np = newpar;
254 out_irq->np = of_node_get(newpar);
255 253
256 match_array = imap - newaddrsize - newintsize; 254 match_array = imap - newaddrsize - newintsize;
257 for (i = 0; i < newintsize; i++) 255 for (i = 0; i < newintsize; i++)
@@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
268 } 266 }
269 fail: 267 fail:
270 of_node_put(ipar); 268 of_node_put(ipar);
271 of_node_put(out_irq->np);
272 of_node_put(newpar); 269 of_node_put(newpar);
273 270
274 return -EINVAL; 271 return -EINVAL;
diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c
index 7578d79b3688..2f650f68af14 100644
--- a/drivers/parport/parport_mfc3.c
+++ b/drivers/parport/parport_mfc3.c
@@ -300,7 +300,7 @@ static int __init parport_mfc3_init(void)
300 if (!request_mem_region(piabase, sizeof(struct pia), "PIA")) 300 if (!request_mem_region(piabase, sizeof(struct pia), "PIA"))
301 continue; 301 continue;
302 302
303 pp = (struct pia *)ZTWO_VADDR(piabase); 303 pp = ZTWO_VADDR(piabase);
304 pp->crb = 0; 304 pp->crb = 0;
305 pp->pddrb = 255; /* all data pins output */ 305 pp->pddrb = 255; /* all data pins output */
306 pp->crb = PIA_DDR|32|8; 306 pp->crb = PIA_DDR|32|8;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 1cf605f67673..e86439283a5d 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
279 279
280 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); 280 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
281 if (ACPI_FAILURE(status)) { 281 if (ACPI_FAILURE(status)) {
282 acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status); 282 if (status != AE_NOT_FOUND)
283 acpi_handle_warn(handle,
284 "can't evaluate _ADR (%#x)\n", status);
283 return AE_OK; 285 return AE_OK;
284 } 286 }
285 287
@@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot)
643 slot->flags &= (~SLOT_ENABLED); 645 slot->flags &= (~SLOT_ENABLED);
644} 646}
645 647
648static bool acpiphp_no_hotplug(acpi_handle handle)
649{
650 struct acpi_device *adev = NULL;
651
652 acpi_bus_get_device(handle, &adev);
653 return adev && adev->flags.no_hotplug;
654}
655
656static bool slot_no_hotplug(struct acpiphp_slot *slot)
657{
658 struct acpiphp_func *func;
659
660 list_for_each_entry(func, &slot->funcs, sibling)
661 if (acpiphp_no_hotplug(func_to_handle(func)))
662 return true;
663
664 return false;
665}
646 666
647/** 667/**
648 * get_slot_status - get ACPI slot status 668 * get_slot_status - get ACPI slot status
@@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev)
701 unsigned long long sta; 721 unsigned long long sta;
702 722
703 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); 723 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
704 alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL; 724 alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
725 || acpiphp_no_hotplug(handle);
705 } 726 }
706 if (!alive) { 727 if (!alive) {
707 u32 v; 728 u32 v;
@@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
741 struct pci_dev *dev, *tmp; 762 struct pci_dev *dev, *tmp;
742 763
743 mutex_lock(&slot->crit_sect); 764 mutex_lock(&slot->crit_sect);
744 /* wake up all functions */ 765 if (slot_no_hotplug(slot)) {
745 if (get_slot_status(slot) == ACPI_STA_ALL) { 766 ; /* do nothing */
767 } else if (get_slot_status(slot) == ACPI_STA_ALL) {
746 /* remove stale devices if any */ 768 /* remove stale devices if any */
747 list_for_each_entry_safe(dev, tmp, &bus->devices, 769 list_for_each_entry_safe(dev, tmp, &bus->devices,
748 bus_list) 770 bus_list)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 577074efbe62..f7ebdba14bde 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -330,29 +330,32 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
330static void pci_acpi_setup(struct device *dev) 330static void pci_acpi_setup(struct device *dev)
331{ 331{
332 struct pci_dev *pci_dev = to_pci_dev(dev); 332 struct pci_dev *pci_dev = to_pci_dev(dev);
333 acpi_handle handle = ACPI_HANDLE(dev); 333 struct acpi_device *adev = ACPI_COMPANION(dev);
334 struct acpi_device *adev;
335 334
336 if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid) 335 if (!adev)
336 return;
337
338 pci_acpi_add_pm_notifier(adev, pci_dev);
339 if (!adev->wakeup.flags.valid)
337 return; 340 return;
338 341
339 device_set_wakeup_capable(dev, true); 342 device_set_wakeup_capable(dev, true);
340 acpi_pci_sleep_wake(pci_dev, false); 343 acpi_pci_sleep_wake(pci_dev, false);
341
342 pci_acpi_add_pm_notifier(adev, pci_dev);
343 if (adev->wakeup.flags.run_wake) 344 if (adev->wakeup.flags.run_wake)
344 device_set_run_wake(dev, true); 345 device_set_run_wake(dev, true);
345} 346}
346 347
347static void pci_acpi_cleanup(struct device *dev) 348static void pci_acpi_cleanup(struct device *dev)
348{ 349{
349 acpi_handle handle = ACPI_HANDLE(dev); 350 struct acpi_device *adev = ACPI_COMPANION(dev);
350 struct acpi_device *adev; 351
352 if (!adev)
353 return;
351 354
352 if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) { 355 pci_acpi_remove_pm_notifier(adev);
356 if (adev->wakeup.flags.valid) {
353 device_set_wakeup_capable(dev, false); 357 device_set_wakeup_capable(dev, false);
354 device_set_run_wake(dev, false); 358 device_set_run_wake(dev, false);
355 pci_acpi_remove_pm_notifier(adev);
356 } 359 }
357} 360}
358 361
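
The hunk above replaces the ACPI_HANDLE()/acpi_bus_get_device() lookup with ACPI_COMPANION() and bails out early when no companion exists. A minimal sketch of that pattern in a hypothetical driver follows; my_setup() and its wiring are illustrative, not part of the patch.

#include <linux/acpi.h>
#include <linux/pm_wakeup.h>

/* Sketch only: fetch the ACPI companion once, return early if there is
 * none, then use it directly instead of re-resolving the handle.
 */
static void my_setup(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (!adev)
		return;		/* no ACPI companion for this device */

	if (adev->wakeup.flags.valid)
		device_set_wakeup_capable(dev, true);
}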
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 114f5ef4b73a..2832576d8b12 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -512,7 +512,6 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
512 512
513static const struct acpi_device_id byt_gpio_acpi_match[] = { 513static const struct acpi_device_id byt_gpio_acpi_match[] = {
514 { "INT33B2", 0 }, 514 { "INT33B2", 0 },
515 { "INT33FC", 0 },
516 { } 515 { }
517}; 516};
518MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match); 517MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 5e2054afe840..85ad58c6da17 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -196,6 +196,7 @@ config BATTERY_MAX17040
196config BATTERY_MAX17042 196config BATTERY_MAX17042
197 tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge" 197 tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
198 depends on I2C 198 depends on I2C
199 select REGMAP_I2C
199 help 200 help
200 MAX17042 is a fuel-gauge system for lithium-ion (Li+) batteries 201 MAX17042 is a fuel-gauge system for lithium-ion (Li+) batteries
201 in handheld and portable equipment. The MAX17042 is configured 202 in handheld and portable equipment. The MAX17042 is configured
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 00e667296360..557af943b2f5 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
511 dev_set_drvdata(dev, psy); 511 dev_set_drvdata(dev, psy);
512 psy->dev = dev; 512 psy->dev = dev;
513 513
514 rc = dev_set_name(dev, "%s", psy->name);
515 if (rc)
516 goto dev_set_name_failed;
517
514 INIT_WORK(&psy->changed_work, power_supply_changed_work); 518 INIT_WORK(&psy->changed_work, power_supply_changed_work);
515 519
516 rc = power_supply_check_supplies(psy); 520 rc = power_supply_check_supplies(psy);
@@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
524 if (rc) 528 if (rc)
525 goto wakeup_init_failed; 529 goto wakeup_init_failed;
526 530
527 rc = kobject_set_name(&dev->kobj, "%s", psy->name);
528 if (rc)
529 goto kobject_set_name_failed;
530
531 rc = device_add(dev); 531 rc = device_add(dev);
532 if (rc) 532 if (rc)
533 goto device_add_failed; 533 goto device_add_failed;
@@ -553,11 +553,11 @@ create_triggers_failed:
553register_cooler_failed: 553register_cooler_failed:
554 psy_unregister_thermal(psy); 554 psy_unregister_thermal(psy);
555register_thermal_failed: 555register_thermal_failed:
556wakeup_init_failed:
557 device_del(dev); 556 device_del(dev);
558kobject_set_name_failed:
559device_add_failed: 557device_add_failed:
558wakeup_init_failed:
560check_supplies_failed: 559check_supplies_failed:
560dev_set_name_failed:
561 put_device(dev); 561 put_device(dev);
562success: 562success:
563 return rc; 563 return rc;
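
The fix above names the device with dev_set_name() before device_add() and retires the separate kobject_set_name() call, so a naming failure unwinds through put_device() like every later failure. A hedged sketch of that ordering, assuming the device has already gone through device_initialize(); example_register() is illustrative only.

#include <linux/device.h>

static int example_register(struct device *dev, const char *name)
{
	int rc;

	rc = dev_set_name(dev, "%s", name);	/* name it before device_add() */
	if (rc)
		goto out_put;

	rc = device_add(dev);
	if (rc)
		goto out_put;

	return 0;

out_put:
	put_device(dev);	/* also releases the name set above */
	return rc;
}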
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index f14876256a4a..a2325bc5e497 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -34,11 +34,11 @@
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/spinlock.h> 35#include <linux/spinlock.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/mod_devicetable.h>
38#include <linux/log2.h> 37#include <linux/log2.h>
39#include <linux/pm.h> 38#include <linux/pm.h>
40#include <linux/of.h> 39#include <linux/of.h>
41#include <linux/of_platform.h> 40#include <linux/of_platform.h>
41#include <linux/dmi.h>
42 42
43/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ 43/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
44#include <asm-generic/rtc.h> 44#include <asm-generic/rtc.h>
@@ -377,6 +377,51 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
377 return 0; 377 return 0;
378} 378}
379 379
380/*
381 * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes.
382 */
383static bool alarm_disable_quirk;
384
385static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
386{
387 alarm_disable_quirk = true;
388 pr_info("rtc-cmos: BIOS has alarm-disable quirk; "
389 "RTC alarms disabled\n");
390 return 0;
391}
392
393static const struct dmi_system_id rtc_quirks[] __initconst = {
394 /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */
395 {
396 .callback = set_alarm_disable_quirk,
397 .ident = "IBM Truman",
398 .matches = {
399 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
400 DMI_MATCH(DMI_PRODUCT_NAME, "4852570"),
401 },
402 },
403 /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */
404 {
405 .callback = set_alarm_disable_quirk,
406 .ident = "Gigabyte GA-990XA-UD3",
407 .matches = {
408 DMI_MATCH(DMI_SYS_VENDOR,
409 "Gigabyte Technology Co., Ltd."),
410 DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"),
411 },
412 },
413 /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */
414 {
415 .callback = set_alarm_disable_quirk,
416 .ident = "Toshiba Satellite L300",
417 .matches = {
418 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
419 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
420 },
421 },
422 {}
423};
424
380static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled) 425static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
381{ 426{
382 struct cmos_rtc *cmos = dev_get_drvdata(dev); 427 struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -385,6 +430,9 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
385 if (!is_valid_irq(cmos->irq)) 430 if (!is_valid_irq(cmos->irq))
386 return -EINVAL; 431 return -EINVAL;
387 432
433 if (alarm_disable_quirk)
434 return 0;
435
388 spin_lock_irqsave(&rtc_lock, flags); 436 spin_lock_irqsave(&rtc_lock, flags);
389 437
390 if (enabled) 438 if (enabled)
@@ -1157,6 +1205,8 @@ static int __init cmos_init(void)
1157 platform_driver_registered = true; 1205 platform_driver_registered = true;
1158 } 1206 }
1159 1207
1208 dmi_check_system(rtc_quirks);
1209
1160 if (retval == 0) 1210 if (retval == 0)
1161 return 0; 1211 return 0;
1162 1212
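
The quirk table added above follows the usual dmi_check_system() pattern: a callback flips a flag when the running board matches an entry, and the driver consults the flag later. A self-contained sketch of the same pattern; the vendor and product strings are placeholders, not additional affected systems.

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/printk.h>

static bool example_quirk;

static int __init set_example_quirk(const struct dmi_system_id *id)
{
	example_quirk = true;
	pr_info("%s: applying example quirk\n", id->ident);
	return 0;
}

static const struct dmi_system_id example_quirks[] __initconst = {
	{
		.callback = set_example_quirk,
		.ident = "Example Board",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Board"),
		},
	},
	{ }
};

/* Typically invoked once from the driver's init path:
 *	dmi_check_system(example_quirks);
 */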
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index f302efa937ef..1eef0f586950 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3386,7 +3386,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
3386 3386
3387 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3387 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3388 /* 3388 /*
3389 * safe offline allready running 3389 * safe offline already running
3390 * could only be called by normal offline so safe_offline flag 3390 * could only be called by normal offline so safe_offline flag
3391 * needs to be removed to run normal offline and kill all I/O 3391 * needs to be removed to run normal offline and kill all I/O
3392 */ 3392 */
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 6fbe09686d18..fea76aed9eea 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -183,7 +183,6 @@ extern unsigned long sclp_console_full;
183extern u8 sclp_fac84; 183extern u8 sclp_fac84;
184extern unsigned long long sclp_rzm; 184extern unsigned long long sclp_rzm;
185extern unsigned long long sclp_rnmax; 185extern unsigned long long sclp_rnmax;
186extern __initdata int sclp_early_read_info_sccb_valid;
187 186
188/* useful inlines */ 187/* useful inlines */
189 188
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index eaa21d542c5c..cb3c4e05a385 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -455,8 +455,6 @@ static int __init sclp_detect_standby_memory(void)
455 455
456 if (OLDMEM_BASE) /* No standby memory in kdump mode */ 456 if (OLDMEM_BASE) /* No standby memory in kdump mode */
457 return 0; 457 return 0;
458 if (!sclp_early_read_info_sccb_valid)
459 return 0;
460 if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) 458 if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
461 return 0; 459 return 0;
462 rc = -ENOMEM; 460 rc = -ENOMEM;
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 1465e9563101..82f2c389b4d1 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -35,11 +35,12 @@ struct read_info_sccb {
35 u8 _reserved5[4096 - 112]; /* 112-4095 */ 35 u8 _reserved5[4096 - 112]; /* 112-4095 */
36} __packed __aligned(PAGE_SIZE); 36} __packed __aligned(PAGE_SIZE);
37 37
38static __initdata struct read_info_sccb early_read_info_sccb; 38static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
39static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE); 39static unsigned int sclp_con_has_vt220 __initdata;
40static unsigned int sclp_con_has_linemode __initdata;
40static unsigned long sclp_hsa_size; 41static unsigned long sclp_hsa_size;
42static struct sclp_ipl_info sclp_ipl_info;
41 43
42__initdata int sclp_early_read_info_sccb_valid;
43u64 sclp_facilities; 44u64 sclp_facilities;
44u8 sclp_fac84; 45u8 sclp_fac84;
45unsigned long long sclp_rzm; 46unsigned long long sclp_rzm;
@@ -63,15 +64,12 @@ out:
63 return rc; 64 return rc;
64} 65}
65 66
66static void __init sclp_read_info_early(void) 67static int __init sclp_read_info_early(struct read_info_sccb *sccb)
67{ 68{
68 int rc; 69 int rc, i;
69 int i;
70 struct read_info_sccb *sccb;
71 sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED, 70 sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
72 SCLP_CMDW_READ_SCP_INFO}; 71 SCLP_CMDW_READ_SCP_INFO};
73 72
74 sccb = &early_read_info_sccb;
75 for (i = 0; i < ARRAY_SIZE(commands); i++) { 73 for (i = 0; i < ARRAY_SIZE(commands); i++) {
76 do { 74 do {
77 memset(sccb, 0, sizeof(*sccb)); 75 memset(sccb, 0, sizeof(*sccb));
@@ -83,24 +81,19 @@ static void __init sclp_read_info_early(void)
83 81
84 if (rc) 82 if (rc)
85 break; 83 break;
86 if (sccb->header.response_code == 0x10) { 84 if (sccb->header.response_code == 0x10)
87 sclp_early_read_info_sccb_valid = 1; 85 return 0;
88 break;
89 }
90 if (sccb->header.response_code != 0x1f0) 86 if (sccb->header.response_code != 0x1f0)
91 break; 87 break;
92 } 88 }
89 return -EIO;
93} 90}
94 91
95static void __init sclp_facilities_detect(void) 92static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
96{ 93{
97 struct read_info_sccb *sccb; 94 if (sclp_read_info_early(sccb))
98
99 sclp_read_info_early();
100 if (!sclp_early_read_info_sccb_valid)
101 return; 95 return;
102 96
103 sccb = &early_read_info_sccb;
104 sclp_facilities = sccb->facilities; 97 sclp_facilities = sccb->facilities;
105 sclp_fac84 = sccb->fac84; 98 sclp_fac84 = sccb->fac84;
106 if (sccb->fac85 & 0x02) 99 if (sccb->fac85 & 0x02)
@@ -108,30 +101,22 @@ static void __init sclp_facilities_detect(void)
108 sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; 101 sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
109 sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; 102 sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
110 sclp_rzm <<= 20; 103 sclp_rzm <<= 20;
104
105 /* Save IPL information */
106 sclp_ipl_info.is_valid = 1;
107 if (sccb->flags & 0x2)
108 sclp_ipl_info.has_dump = 1;
109 memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
111} 110}
112 111
113bool __init sclp_has_linemode(void) 112bool __init sclp_has_linemode(void)
114{ 113{
115 struct init_sccb *sccb = (void *) &sccb_early; 114 return !!sclp_con_has_linemode;
116
117 if (sccb->header.response_code != 0x20)
118 return 0;
119 if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
120 return 0;
121 if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
122 return 0;
123 return 1;
124} 115}
125 116
126bool __init sclp_has_vt220(void) 117bool __init sclp_has_vt220(void)
127{ 118{
128 struct init_sccb *sccb = (void *) &sccb_early; 119 return !!sclp_con_has_vt220;
129
130 if (sccb->header.response_code != 0x20)
131 return 0;
132 if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
133 return 1;
134 return 0;
135} 120}
136 121
137unsigned long long sclp_get_rnmax(void) 122unsigned long long sclp_get_rnmax(void)
@@ -146,19 +131,12 @@ unsigned long long sclp_get_rzm(void)
146 131
147/* 132/*
148 * This function will be called after sclp_facilities_detect(), which gets 133 * This function will be called after sclp_facilities_detect(), which gets
149 * called from early.c code. Therefore the sccb should have valid contents. 134 * called from early.c code. The sclp_facilities_detect() function retrieves
135 * and saves the IPL information.
150 */ 136 */
151void __init sclp_get_ipl_info(struct sclp_ipl_info *info) 137void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
152{ 138{
153 struct read_info_sccb *sccb; 139 *info = sclp_ipl_info;
154
155 if (!sclp_early_read_info_sccb_valid)
156 return;
157 sccb = &early_read_info_sccb;
158 info->is_valid = 1;
159 if (sccb->flags & 0x2)
160 info->has_dump = 1;
161 memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
162} 140}
163 141
164static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb) 142static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
@@ -189,11 +167,10 @@ static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
189 sccb->evbuf.dbs = 1; 167 sccb->evbuf.dbs = 1;
190} 168}
191 169
192static int __init sclp_set_event_mask(unsigned long receive_mask, 170static int __init sclp_set_event_mask(struct init_sccb *sccb,
171 unsigned long receive_mask,
193 unsigned long send_mask) 172 unsigned long send_mask)
194{ 173{
195 struct init_sccb *sccb = (void *) &sccb_early;
196
197 memset(sccb, 0, sizeof(*sccb)); 174 memset(sccb, 0, sizeof(*sccb));
198 sccb->header.length = sizeof(*sccb); 175 sccb->header.length = sizeof(*sccb);
199 sccb->mask_length = sizeof(sccb_mask_t); 176 sccb->mask_length = sizeof(sccb_mask_t);
@@ -202,10 +179,8 @@ static int __init sclp_set_event_mask(unsigned long receive_mask,
202 return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb); 179 return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
203} 180}
204 181
205static long __init sclp_hsa_size_init(void) 182static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
206{ 183{
207 struct sdias_sccb *sccb = (void *) &sccb_early;
208
209 sccb_init_eq_size(sccb); 184 sccb_init_eq_size(sccb);
210 if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb)) 185 if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
211 return -EIO; 186 return -EIO;
@@ -214,10 +189,8 @@ static long __init sclp_hsa_size_init(void)
214 return 0; 189 return 0;
215} 190}
216 191
217static long __init sclp_hsa_copy_wait(void) 192static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
218{ 193{
219 struct sccb_header *sccb = (void *) &sccb_early;
220
221 memset(sccb, 0, PAGE_SIZE); 194 memset(sccb, 0, PAGE_SIZE);
222 sccb->length = PAGE_SIZE; 195 sccb->length = PAGE_SIZE;
223 if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb)) 196 if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
@@ -230,34 +203,62 @@ unsigned long sclp_get_hsa_size(void)
230 return sclp_hsa_size; 203 return sclp_hsa_size;
231} 204}
232 205
233static void __init sclp_hsa_size_detect(void) 206static void __init sclp_hsa_size_detect(void *sccb)
234{ 207{
235 long size; 208 long size;
236 209
237 /* First try synchronous interface (LPAR) */ 210 /* First try synchronous interface (LPAR) */
238 if (sclp_set_event_mask(0, 0x40000010)) 211 if (sclp_set_event_mask(sccb, 0, 0x40000010))
239 return; 212 return;
240 size = sclp_hsa_size_init(); 213 size = sclp_hsa_size_init(sccb);
241 if (size < 0) 214 if (size < 0)
242 return; 215 return;
243 if (size != 0) 216 if (size != 0)
244 goto out; 217 goto out;
245 /* Then try asynchronous interface (z/VM) */ 218 /* Then try asynchronous interface (z/VM) */
246 if (sclp_set_event_mask(0x00000010, 0x40000010)) 219 if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010))
247 return; 220 return;
248 size = sclp_hsa_size_init(); 221 size = sclp_hsa_size_init(sccb);
249 if (size < 0) 222 if (size < 0)
250 return; 223 return;
251 size = sclp_hsa_copy_wait(); 224 size = sclp_hsa_copy_wait(sccb);
252 if (size < 0) 225 if (size < 0)
253 return; 226 return;
254out: 227out:
255 sclp_hsa_size = size; 228 sclp_hsa_size = size;
256} 229}
257 230
231static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
232{
233 if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
234 return 0;
235 if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
236 return 0;
237 return 1;
238}
239
240static void __init sclp_console_detect(struct init_sccb *sccb)
241{
242 if (sccb->header.response_code != 0x20)
243 return;
244
245 if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
246 sclp_con_has_vt220 = 1;
247
248 if (sclp_con_check_linemode(sccb))
249 sclp_con_has_linemode = 1;
250}
251
258void __init sclp_early_detect(void) 252void __init sclp_early_detect(void)
259{ 253{
260 sclp_facilities_detect(); 254 void *sccb = &sccb_early;
261 sclp_hsa_size_detect(); 255
262 sclp_set_event_mask(0, 0); 256 sclp_facilities_detect(sccb);
257 sclp_hsa_size_detect(sccb);
258
259 /* Turn off SCLP event notifications. Also save remote masks in the
260 * sccb. These are sufficient to detect sclp console capabilities.
261 */
262 sclp_set_event_mask(sccb, 0, 0);
263 sclp_console_detect(sccb);
263} 264}
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 3f4ca4e09a4c..e91b89dc6d1f 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -125,10 +125,7 @@ static void tty3270_resize_work(struct work_struct *work);
125 */ 125 */
126static void tty3270_set_timer(struct tty3270 *tp, int expires) 126static void tty3270_set_timer(struct tty3270 *tp, int expires)
127{ 127{
128 if (expires == 0) 128 mod_timer(&tp->timer, jiffies + expires);
129 del_timer(&tp->timer);
130 else
131 mod_timer(&tp->timer, jiffies + expires);
132} 129}
133 130
134/* 131/*
@@ -744,7 +741,6 @@ tty3270_free_view(struct tty3270 *tp)
744{ 741{
745 int pages; 742 int pages;
746 743
747 del_timer_sync(&tp->timer);
748 kbd_free(tp->kbd); 744 kbd_free(tp->kbd);
749 raw3270_request_free(tp->kreset); 745 raw3270_request_free(tp->kreset);
750 raw3270_request_free(tp->read); 746 raw3270_request_free(tp->read);
@@ -877,6 +873,7 @@ tty3270_free(struct raw3270_view *view)
877{ 873{
878 struct tty3270 *tp = container_of(view, struct tty3270, view); 874 struct tty3270 *tp = container_of(view, struct tty3270, view);
879 875
876 del_timer_sync(&tp->timer);
880 tty3270_free_screen(tp->screen, tp->view.rows); 877 tty3270_free_screen(tp->screen, tp->view.rows);
881 tty3270_free_view(tp); 878 tty3270_free_view(tp);
882} 879}
@@ -942,7 +939,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
942 return rc; 939 return rc;
943 } 940 }
944 941
945 tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows); 942 tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
946 if (IS_ERR(tp->screen)) { 943 if (IS_ERR(tp->screen)) {
947 rc = PTR_ERR(tp->screen); 944 rc = PTR_ERR(tp->screen);
948 raw3270_put_view(&tp->view); 945 raw3270_put_view(&tp->view);
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index a9fe3de2dec1..b3f791b2c1f8 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -260,16 +260,16 @@ static int blacklist_parse_proc_parameters(char *buf)
260 260
261 parm = strsep(&buf, " "); 261 parm = strsep(&buf, " ");
262 262
263 if (strcmp("free", parm) == 0) 263 if (strcmp("free", parm) == 0) {
264 rc = blacklist_parse_parameters(buf, free, 0); 264 rc = blacklist_parse_parameters(buf, free, 0);
265 else if (strcmp("add", parm) == 0) 265 css_schedule_eval_all_unreg(0);
266 } else if (strcmp("add", parm) == 0)
266 rc = blacklist_parse_parameters(buf, add, 0); 267 rc = blacklist_parse_parameters(buf, add, 0);
267 else if (strcmp("purge", parm) == 0) 268 else if (strcmp("purge", parm) == 0)
268 return ccw_purge_blacklisted(); 269 return ccw_purge_blacklisted();
269 else 270 else
270 return -EINVAL; 271 return -EINVAL;
271 272
272 css_schedule_reprobe();
273 273
274 return rc; 274 return rc;
275} 275}
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 959135a01847..fd3367a1dc7a 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -128,14 +128,14 @@ static ssize_t ccwgroup_online_store(struct device *dev,
128 const char *buf, size_t count) 128 const char *buf, size_t count)
129{ 129{
130 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 130 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
131 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
132 unsigned long value; 131 unsigned long value;
133 int ret; 132 int ret;
134 133
135 if (!dev->driver) 134 device_lock(dev);
136 return -EINVAL; 135 if (!dev->driver) {
137 if (!try_module_get(gdrv->driver.owner)) 136 ret = -EINVAL;
138 return -EINVAL; 137 goto out;
138 }
139 139
140 ret = kstrtoul(buf, 0, &value); 140 ret = kstrtoul(buf, 0, &value);
141 if (ret) 141 if (ret)
@@ -148,7 +148,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
148 else 148 else
149 ret = -EINVAL; 149 ret = -EINVAL;
150out: 150out:
151 module_put(gdrv->driver.owner); 151 device_unlock(dev);
152 return (ret == 0) ? count : ret; 152 return (ret == 0) ? count : ret;
153} 153}
154 154
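
The online_store() change above replaces the try_module_get() dance with device_lock(), which is enough to keep dev->driver from being unbound while the store routine runs. A minimal sketch of that locking pattern; example_store() and its attribute wiring are hypothetical.

#include <linux/device.h>

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	device_lock(dev);
	if (!dev->driver) {	/* no driver bound, nothing to act on */
		ret = -EINVAL;
		goto out;
	}
	/* ... dev->driver is stable while the lock is held ... */
out:
	device_unlock(dev);
	return ret;
}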
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 13299f902676..f6b9188c5af5 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -55,6 +55,7 @@ int chsc_error_from_response(int response)
55 case 0x0004: 55 case 0x0004:
56 return -EOPNOTSUPP; 56 return -EOPNOTSUPP;
57 case 0x000b: 57 case 0x000b:
58 case 0x0107: /* "Channel busy" for the op 0x003d */
58 return -EBUSY; 59 return -EBUSY;
59 case 0x0100: 60 case 0x0100:
60 case 0x0102: 61 case 0x0102:
@@ -237,26 +238,6 @@ void chsc_chp_offline(struct chp_id chpid)
237 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); 238 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
238} 239}
239 240
240static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
241{
242 struct schib schib;
243 /*
244 * We don't know the device yet, but since a path
245 * may be available now to the device we'll have
246 * to do recognition again.
247 * Since we don't have any idea about which chpid
248 * that beast may be on we'll have to do a stsch
249 * on all devices, grr...
250 */
251 if (stsch_err(schid, &schib))
252 /* We're through */
253 return -ENXIO;
254
255 /* Put it on the slow path. */
256 css_schedule_eval(schid);
257 return 0;
258}
259
260static int __s390_process_res_acc(struct subchannel *sch, void *data) 241static int __s390_process_res_acc(struct subchannel *sch, void *data)
261{ 242{
262 spin_lock_irq(sch->lock); 243 spin_lock_irq(sch->lock);
@@ -287,8 +268,8 @@ static void s390_process_res_acc(struct chp_link *link)
287 * The more information we have (info), the less scanning 268 * The more information we have (info), the less scanning
288 * will we have to do. 269 * will we have to do.
289 */ 270 */
290 for_each_subchannel_staged(__s390_process_res_acc, 271 for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
291 s390_process_res_acc_new_sch, link); 272 css_schedule_reprobe();
292} 273}
293 274
294static int 275static int
@@ -663,19 +644,6 @@ static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
663 return 0; 644 return 0;
664} 645}
665 646
666static int
667__s390_vary_chpid_on(struct subchannel_id schid, void *data)
668{
669 struct schib schib;
670
671 if (stsch_err(schid, &schib))
672 /* We're through */
673 return -ENXIO;
674 /* Put it on the slow path. */
675 css_schedule_eval(schid);
676 return 0;
677}
678
679/** 647/**
680 * chsc_chp_vary - propagate channel-path vary operation to subchannels 648 * chsc_chp_vary - propagate channel-path vary operation to subchannels
681 * @chpid: channl-path ID 649 * @chpid: channl-path ID
@@ -694,7 +662,8 @@ int chsc_chp_vary(struct chp_id chpid, int on)
694 /* Try to update the channel path description. */ 662 /* Try to update the channel path description. */
695 chp_update_desc(chp); 663 chp_update_desc(chp);
696 for_each_subchannel_staged(s390_subchannel_vary_chpid_on, 664 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
697 __s390_vary_chpid_on, &chpid); 665 NULL, &chpid);
666 css_schedule_reprobe();
698 } else 667 } else
699 for_each_subchannel_staged(s390_subchannel_vary_chpid_off, 668 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
700 NULL, &chpid); 669 NULL, &chpid);
@@ -1234,3 +1203,35 @@ out:
1234 return ret; 1203 return ret;
1235} 1204}
1236EXPORT_SYMBOL_GPL(chsc_scm_info); 1205EXPORT_SYMBOL_GPL(chsc_scm_info);
1206
1207/**
1208 * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
1209 * @schid: id of the subchannel on which PNSO is performed
1210 * @brinfo_area: request and response block for the operation
1211 * @resume_token: resume token for multiblock response
1212 * @cnc: Boolean change-notification control
1213 *
1214 * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
1215 *
1216 * Returns 0 on success.
1217 */
1218int chsc_pnso_brinfo(struct subchannel_id schid,
1219 struct chsc_pnso_area *brinfo_area,
1220 struct chsc_brinfo_resume_token resume_token,
1221 int cnc)
1222{
1223 memset(brinfo_area, 0, sizeof(*brinfo_area));
1224 brinfo_area->request.length = 0x0030;
1225 brinfo_area->request.code = 0x003d; /* network-subchannel operation */
1226 brinfo_area->m = schid.m;
1227 brinfo_area->ssid = schid.ssid;
1228 brinfo_area->sch = schid.sch_no;
1229 brinfo_area->cssid = schid.cssid;
1230 brinfo_area->oc = 0; /* Store-network-bridging-information list */
1231 brinfo_area->resume_token = resume_token;
1232 brinfo_area->n = (cnc != 0);
1233 if (chsc(brinfo_area))
1234 return -EIO;
1235 return chsc_error_from_response(brinfo_area->response.code);
1236}
1237EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 23d072e70eb2..7e53a9c8b0b9 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -61,7 +61,9 @@ struct css_chsc_char {
61 u32 : 20; 61 u32 : 20;
62 u32 scssc : 1; /* bit 107 */ 62 u32 scssc : 1; /* bit 107 */
63 u32 scsscf : 1; /* bit 108 */ 63 u32 scsscf : 1; /* bit 108 */
64 u32 : 19; 64 u32:7;
65 u32 pnso:1; /* bit 116 */
66 u32:11;
65}__attribute__((packed)); 67}__attribute__((packed));
66 68
67extern struct css_chsc_char css_chsc_characteristics; 69extern struct css_chsc_char css_chsc_characteristics;
@@ -188,6 +190,53 @@ struct chsc_scm_info {
188 190
189int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token); 191int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
190 192
193struct chsc_brinfo_resume_token {
194 u64 t1;
195 u64 t2;
196} __packed;
197
198struct chsc_brinfo_naihdr {
199 struct chsc_brinfo_resume_token resume_token;
200 u32:32;
201 u32 instance;
202 u32:24;
203 u8 naids;
204 u32 reserved[3];
205} __packed;
206
207struct chsc_pnso_area {
208 struct chsc_header request;
209 u8:2;
210 u8 m:1;
211 u8:5;
212 u8:2;
213 u8 ssid:2;
214 u8 fmt:4;
215 u16 sch;
216 u8:8;
217 u8 cssid;
218 u16:16;
219 u8 oc;
220 u32:24;
221 struct chsc_brinfo_resume_token resume_token;
222 u32 n:1;
223 u32:31;
224 u32 reserved[3];
225 struct chsc_header response;
226 u32:32;
227 struct chsc_brinfo_naihdr naihdr;
228 union {
229 struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0];
230 struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
231 struct qdio_brinfo_entry_l2 l2[0];
232 } entries;
233} __packed;
234
235int chsc_pnso_brinfo(struct subchannel_id schid,
236 struct chsc_pnso_area *brinfo_area,
237 struct chsc_brinfo_resume_token resume_token,
238 int cnc);
239
191#ifdef CONFIG_SCM_BUS 240#ifdef CONFIG_SCM_BUS
192int scm_update_information(void); 241int scm_update_information(void);
193int scm_process_availability_information(void); 242int scm_process_availability_information(void);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 8c2cb87bccc5..0268e5fd59b5 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -69,7 +69,8 @@ static int call_fn_known_sch(struct device *dev, void *data)
69 struct cb_data *cb = data; 69 struct cb_data *cb = data;
70 int rc = 0; 70 int rc = 0;
71 71
72 idset_sch_del(cb->set, sch->schid); 72 if (cb->set)
73 idset_sch_del(cb->set, sch->schid);
73 if (cb->fn_known_sch) 74 if (cb->fn_known_sch)
74 rc = cb->fn_known_sch(sch, cb->data); 75 rc = cb->fn_known_sch(sch, cb->data);
75 return rc; 76 return rc;
@@ -115,6 +116,13 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
115 cb.fn_known_sch = fn_known; 116 cb.fn_known_sch = fn_known;
116 cb.fn_unknown_sch = fn_unknown; 117 cb.fn_unknown_sch = fn_unknown;
117 118
119 if (fn_known && !fn_unknown) {
120 /* Skip idset allocation in case of known-only loop. */
121 cb.set = NULL;
122 return bus_for_each_dev(&css_bus_type, NULL, &cb,
123 call_fn_known_sch);
124 }
125
118 cb.set = idset_sch_new(); 126 cb.set = idset_sch_new();
119 if (!cb.set) 127 if (!cb.set)
120 /* fall back to brute force scanning in case of oom */ 128 /* fall back to brute force scanning in case of oom */
@@ -553,6 +561,9 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
553 default: 561 default:
554 rc = 0; 562 rc = 0;
555 } 563 }
564 /* Allow scheduling here since the containing loop might
565 * take a while. */
566 cond_resched();
556 } 567 }
557 return rc; 568 return rc;
558} 569}
@@ -572,7 +583,7 @@ static void css_slow_path_func(struct work_struct *unused)
572 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 583 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
573} 584}
574 585
575static DECLARE_WORK(slow_path_work, css_slow_path_func); 586static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
576struct workqueue_struct *cio_work_q; 587struct workqueue_struct *cio_work_q;
577 588
578void css_schedule_eval(struct subchannel_id schid) 589void css_schedule_eval(struct subchannel_id schid)
@@ -582,7 +593,7 @@ void css_schedule_eval(struct subchannel_id schid)
582 spin_lock_irqsave(&slow_subchannel_lock, flags); 593 spin_lock_irqsave(&slow_subchannel_lock, flags);
583 idset_sch_add(slow_subchannel_set, schid); 594 idset_sch_add(slow_subchannel_set, schid);
584 atomic_set(&css_eval_scheduled, 1); 595 atomic_set(&css_eval_scheduled, 1);
585 queue_work(cio_work_q, &slow_path_work); 596 queue_delayed_work(cio_work_q, &slow_path_work, 0);
586 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 597 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
587} 598}
588 599
@@ -593,7 +604,7 @@ void css_schedule_eval_all(void)
593 spin_lock_irqsave(&slow_subchannel_lock, flags); 604 spin_lock_irqsave(&slow_subchannel_lock, flags);
594 idset_fill(slow_subchannel_set); 605 idset_fill(slow_subchannel_set);
595 atomic_set(&css_eval_scheduled, 1); 606 atomic_set(&css_eval_scheduled, 1);
596 queue_work(cio_work_q, &slow_path_work); 607 queue_delayed_work(cio_work_q, &slow_path_work, 0);
597 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 608 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
598} 609}
599 610
@@ -606,7 +617,7 @@ static int __unset_registered(struct device *dev, void *data)
606 return 0; 617 return 0;
607} 618}
608 619
609static void css_schedule_eval_all_unreg(void) 620void css_schedule_eval_all_unreg(unsigned long delay)
610{ 621{
611 unsigned long flags; 622 unsigned long flags;
612 struct idset *unreg_set; 623 struct idset *unreg_set;
@@ -624,7 +635,7 @@ static void css_schedule_eval_all_unreg(void)
624 spin_lock_irqsave(&slow_subchannel_lock, flags); 635 spin_lock_irqsave(&slow_subchannel_lock, flags);
625 idset_add_set(slow_subchannel_set, unreg_set); 636 idset_add_set(slow_subchannel_set, unreg_set);
626 atomic_set(&css_eval_scheduled, 1); 637 atomic_set(&css_eval_scheduled, 1);
627 queue_work(cio_work_q, &slow_path_work); 638 queue_delayed_work(cio_work_q, &slow_path_work, delay);
628 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 639 spin_unlock_irqrestore(&slow_subchannel_lock, flags);
629 idset_free(unreg_set); 640 idset_free(unreg_set);
630} 641}
@@ -637,7 +648,8 @@ void css_wait_for_slow_path(void)
637/* Schedule reprobing of all unregistered subchannels. */ 648/* Schedule reprobing of all unregistered subchannels. */
638void css_schedule_reprobe(void) 649void css_schedule_reprobe(void)
639{ 650{
640 css_schedule_eval_all_unreg(); 651 /* Schedule with a delay to allow merging of subsequent calls. */
652 css_schedule_eval_all_unreg(1 * HZ);
641} 653}
642EXPORT_SYMBOL_GPL(css_schedule_reprobe); 654EXPORT_SYMBOL_GPL(css_schedule_reprobe);
643 655
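
Turning slow_path_work into a delayed work item lets css_schedule_reprobe() batch bursts of calls: a trigger that arrives while the work is still pending is simply absorbed. A generic sketch of that pattern with a placeholder work function and delay.

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void example_scan(struct work_struct *work)
{
	/* rescan whatever needs rescanning */
}

static DECLARE_DELAYED_WORK(example_scan_work, example_scan);

static void example_schedule_scan(void)
{
	/*
	 * If the work is already pending, queue_delayed_work() is a no-op,
	 * so a burst of triggers collapses into a single scan.
	 */
	queue_delayed_work(system_wq, &example_scan_work, 1 * HZ);
}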
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 29351321bad6..2c9107e20251 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -133,6 +133,7 @@ extern struct channel_subsystem *channel_subsystems[];
133/* Helper functions to build lists for the slow path. */ 133/* Helper functions to build lists for the slow path. */
134void css_schedule_eval(struct subchannel_id schid); 134void css_schedule_eval(struct subchannel_id schid);
135void css_schedule_eval_all(void); 135void css_schedule_eval_all(void);
136void css_schedule_eval_all_unreg(unsigned long delay);
136int css_complete_work(void); 137int css_complete_work(void);
137 138
138int sch_is_pseudo_sch(struct subchannel *); 139int sch_is_pseudo_sch(struct subchannel *);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e4a7ab2bb629..e9d783563cbb 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -333,9 +333,9 @@ int ccw_device_set_offline(struct ccw_device *cdev)
333 if (ret != 0) 333 if (ret != 0)
334 return ret; 334 return ret;
335 } 335 }
336 cdev->online = 0;
337 spin_lock_irq(cdev->ccwlock); 336 spin_lock_irq(cdev->ccwlock);
338 sch = to_subchannel(cdev->dev.parent); 337 sch = to_subchannel(cdev->dev.parent);
338 cdev->online = 0;
339 /* Wait until a final state or DISCONNECTED is reached */ 339 /* Wait until a final state or DISCONNECTED is reached */
340 while (!dev_fsm_final_state(cdev) && 340 while (!dev_fsm_final_state(cdev) &&
341 cdev->private->state != DEV_STATE_DISCONNECTED) { 341 cdev->private->state != DEV_STATE_DISCONNECTED) {
@@ -446,7 +446,10 @@ int ccw_device_set_online(struct ccw_device *cdev)
446 ret = cdev->drv->set_online(cdev); 446 ret = cdev->drv->set_online(cdev);
447 if (ret) 447 if (ret)
448 goto rollback; 448 goto rollback;
449
450 spin_lock_irq(cdev->ccwlock);
449 cdev->online = 1; 451 cdev->online = 1;
452 spin_unlock_irq(cdev->ccwlock);
450 return 0; 453 return 0;
451 454
452rollback: 455rollback:
@@ -546,17 +549,12 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
546 if (!dev_fsm_final_state(cdev) && 549 if (!dev_fsm_final_state(cdev) &&
547 cdev->private->state != DEV_STATE_DISCONNECTED) { 550 cdev->private->state != DEV_STATE_DISCONNECTED) {
548 ret = -EAGAIN; 551 ret = -EAGAIN;
549 goto out_onoff; 552 goto out;
550 } 553 }
551 /* Prevent conflict between pending work and on-/offline processing.*/ 554 /* Prevent conflict between pending work and on-/offline processing.*/
552 if (work_pending(&cdev->private->todo_work)) { 555 if (work_pending(&cdev->private->todo_work)) {
553 ret = -EAGAIN; 556 ret = -EAGAIN;
554 goto out_onoff; 557 goto out;
555 }
556
557 if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
558 ret = -EINVAL;
559 goto out_onoff;
560 } 558 }
561 if (!strncmp(buf, "force\n", count)) { 559 if (!strncmp(buf, "force\n", count)) {
562 force = 1; 560 force = 1;
@@ -568,6 +566,8 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
568 } 566 }
569 if (ret) 567 if (ret)
570 goto out; 568 goto out;
569
570 device_lock(dev);
571 switch (i) { 571 switch (i) {
572 case 0: 572 case 0:
573 ret = online_store_handle_offline(cdev); 573 ret = online_store_handle_offline(cdev);
@@ -578,10 +578,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
578 default: 578 default:
579 ret = -EINVAL; 579 ret = -EINVAL;
580 } 580 }
581 device_unlock(dev);
582
581out: 583out:
582 if (cdev->drv)
583 module_put(cdev->drv->driver.owner);
584out_onoff:
585 atomic_set(&cdev->private->onoff, 0); 584 atomic_set(&cdev->private->onoff, 0);
586 return (ret < 0) ? ret : count; 585 return (ret < 0) ? ret : count;
587} 586}
@@ -1745,8 +1744,7 @@ ccw_device_probe (struct device *dev)
1745 return 0; 1744 return 0;
1746} 1745}
1747 1746
1748static int 1747static int ccw_device_remove(struct device *dev)
1749ccw_device_remove (struct device *dev)
1750{ 1748{
1751 struct ccw_device *cdev = to_ccwdev(dev); 1749 struct ccw_device *cdev = to_ccwdev(dev);
1752 struct ccw_driver *cdrv = cdev->drv; 1750 struct ccw_driver *cdrv = cdev->drv;
@@ -1754,9 +1752,10 @@ ccw_device_remove (struct device *dev)
1754 1752
1755 if (cdrv->remove) 1753 if (cdrv->remove)
1756 cdrv->remove(cdev); 1754 cdrv->remove(cdev);
1755
1756 spin_lock_irq(cdev->ccwlock);
1757 if (cdev->online) { 1757 if (cdev->online) {
1758 cdev->online = 0; 1758 cdev->online = 0;
1759 spin_lock_irq(cdev->ccwlock);
1760 ret = ccw_device_offline(cdev); 1759 ret = ccw_device_offline(cdev);
1761 spin_unlock_irq(cdev->ccwlock); 1760 spin_unlock_irq(cdev->ccwlock);
1762 if (ret == 0) 1761 if (ret == 0)
@@ -1769,10 +1768,12 @@ ccw_device_remove (struct device *dev)
1769 cdev->private->dev_id.devno); 1768 cdev->private->dev_id.devno);
1770 /* Give up reference obtained in ccw_device_set_online(). */ 1769 /* Give up reference obtained in ccw_device_set_online(). */
1771 put_device(&cdev->dev); 1770 put_device(&cdev->dev);
1771 spin_lock_irq(cdev->ccwlock);
1772 } 1772 }
1773 ccw_device_set_timeout(cdev, 0); 1773 ccw_device_set_timeout(cdev, 0);
1774 cdev->drv = NULL; 1774 cdev->drv = NULL;
1775 cdev->private->int_class = IRQIO_CIO; 1775 cdev->private->int_class = IRQIO_CIO;
1776 spin_unlock_irq(cdev->ccwlock);
1776 return 0; 1777 return 0;
1777} 1778}
1778 1779
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 3e602e8affa7..c883a085c059 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1752,6 +1752,97 @@ int qdio_stop_irq(struct ccw_device *cdev, int nr)
1752} 1752}
1753EXPORT_SYMBOL(qdio_stop_irq); 1753EXPORT_SYMBOL(qdio_stop_irq);
1754 1754
1755/**
1756 * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
1757 * @schid: Subchannel ID.
1758 * @cnc: Boolean Change-Notification Control
1759 * @response: Response code will be stored at this address
1760 * @cb: Callback function will be executed for each element
1761 * of the address list
1762 * @priv: Pointer passed from the caller to qdio_pnso_brinfo()
1763 * @type: Type of the address entry passed to the callback
1764 * @entry: Entry containing the address of the specified type
1765 * @priv: Pointer to pass to the callback function.
1766 *
1767 * Performs "Store-network-bridging-information list" operation and calls
1768 * the callback function for every entry in the list. If "change-
1769 * notification-control" is set, further changes in the address list
1770 * will be reported via the IPA command.
1771 */
1772int qdio_pnso_brinfo(struct subchannel_id schid,
1773 int cnc, u16 *response,
1774 void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
1775 void *entry),
1776 void *priv)
1777{
1778 struct chsc_pnso_area *rr;
1779 int rc;
1780 u32 prev_instance = 0;
1781 int isfirstblock = 1;
1782 int i, size, elems;
1783
1784 rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
1785 if (rr == NULL)
1786 return -ENOMEM;
1787 do {
1788 /* on the first iteration, naihdr.resume_token will be zero */
1789 rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
1790 if (rc != 0 && rc != -EBUSY)
1791 goto out;
1792 if (rr->response.code != 1) {
1793 rc = -EIO;
1794 continue;
1795 } else
1796 rc = 0;
1797
1798 if (cb == NULL)
1799 continue;
1800
1801 size = rr->naihdr.naids;
1802 elems = (rr->response.length -
1803 sizeof(struct chsc_header) -
1804 sizeof(struct chsc_brinfo_naihdr)) /
1805 size;
1806
1807 if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
1808 /* Inform the caller that they need to scrap */
1809 /* the data that was already reported via cb */
1810 rc = -EAGAIN;
1811 break;
1812 }
1813 isfirstblock = 0;
1814 prev_instance = rr->naihdr.instance;
1815 for (i = 0; i < elems; i++)
1816 switch (size) {
1817 case sizeof(struct qdio_brinfo_entry_l3_ipv6):
1818 (*cb)(priv, l3_ipv6_addr,
1819 &rr->entries.l3_ipv6[i]);
1820 break;
1821 case sizeof(struct qdio_brinfo_entry_l3_ipv4):
1822 (*cb)(priv, l3_ipv4_addr,
1823 &rr->entries.l3_ipv4[i]);
1824 break;
1825 case sizeof(struct qdio_brinfo_entry_l2):
1826 (*cb)(priv, l2_addr_lnid,
1827 &rr->entries.l2[i]);
1828 break;
1829 default:
1830 WARN_ON_ONCE(1);
1831 rc = -EIO;
1832 goto out;
1833 }
1834 } while (rr->response.code == 0x0107 || /* channel busy */
1835 (rr->response.code == 1 && /* list stored */
1836 /* resume token is non-zero => list incomplete */
1837 (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
1838 (*response) = rr->response.code;
1839
1840out:
1841 free_page((unsigned long)rr);
1842 return rc;
1843}
1844EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
1845
1755static int __init init_QDIO(void) 1846static int __init init_QDIO(void)
1756{ 1847{
1757 int rc; 1848 int rc;
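
A hedged sketch of a qdio_pnso_brinfo() caller, assuming the entry-type enum (l2_addr_lnid and friends) and the prototype are exported through asm/qdio.h as the rest of this series suggests; the counting callback and its wrapper are illustrative, not part of the patch.

#include <asm/qdio.h>	/* assumed home of qdio_pnso_brinfo() and the entry types */

static void count_l2_entry(void *priv, enum qdio_brinfo_entry_type type,
			   void *entry)
{
	unsigned long *count = priv;

	if (type == l2_addr_lnid)
		(*count)++;
}

static int count_bridge_l2_addrs(struct subchannel_id schid,
				 unsigned long *count)
{
	u16 response;

	*count = 0;
	/* cnc=0: one-shot query, no change notification requested */
	return qdio_pnso_brinfo(schid, 0, &response, count_l2_entry, count);
}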
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 02300dcfac91..ab3baa7f9508 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -591,7 +591,13 @@ static int ap_init_queue(ap_qid_t qid)
591 if (rc != -ENODEV && rc != -EBUSY) 591 if (rc != -ENODEV && rc != -EBUSY)
592 break; 592 break;
593 if (i < AP_MAX_RESET - 1) { 593 if (i < AP_MAX_RESET - 1) {
594 udelay(5); 594 /* Time we are waiting until we give up (0.7sec * 90).
595 * Since the actual request (in progress) will not be
596 * interrupted immediately by the reset command,
597 * we have to be patient. In the worst case we have to
598 * wait 60sec + reset time (some msec).
599 */
600 schedule_timeout(AP_RESET_TIMEOUT);
595 status = ap_test_queue(qid, &dummy, &dummy); 601 status = ap_test_queue(qid, &dummy, &dummy);
596 } 602 }
597 } 603 }
@@ -992,6 +998,28 @@ static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
992 998
993static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL); 999static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
994 1000
1001static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
1002{
1003 if (ap_configuration != NULL) { /* QCI not supported */
1004 if (test_facility(76)) { /* format 1 - 256 bit domain field */
1005 return snprintf(buf, PAGE_SIZE,
1006 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
1007 ap_configuration->adm[0], ap_configuration->adm[1],
1008 ap_configuration->adm[2], ap_configuration->adm[3],
1009 ap_configuration->adm[4], ap_configuration->adm[5],
1010 ap_configuration->adm[6], ap_configuration->adm[7]);
1011 } else { /* format 0 - 16 bit domain field */
1012 return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
1013 ap_configuration->adm[0], ap_configuration->adm[1]);
1014 }
1015 } else {
1016 return snprintf(buf, PAGE_SIZE, "not supported\n");
1017 }
1018}
1019
1020static BUS_ATTR(ap_control_domain_mask, 0444,
1021 ap_control_domain_mask_show, NULL);
1022
995static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) 1023static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
996{ 1024{
997 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); 1025 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
@@ -1077,6 +1105,7 @@ static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
1077 1105
1078static struct bus_attribute *const ap_bus_attrs[] = { 1106static struct bus_attribute *const ap_bus_attrs[] = {
1079 &bus_attr_ap_domain, 1107 &bus_attr_ap_domain,
1108 &bus_attr_ap_control_domain_mask,
1080 &bus_attr_config_time, 1109 &bus_attr_config_time,
1081 &bus_attr_poll_thread, 1110 &bus_attr_poll_thread,
1082 &bus_attr_ap_interrupts, 1111 &bus_attr_ap_interrupts,
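
Because ap_control_domain_mask is registered as a bus attribute, it should surface under the AP bus directory in sysfs. A small user-space sketch that reads it; the /sys/bus/ap/ path is assumed from the driver core's usual layout for bus attributes.

#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/bus/ap/ap_control_domain_mask", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("control domain mask: %s", buf);
	fclose(f);
	return 0;
}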
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 685f6cc022f9..6405ae24a7a6 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -33,7 +33,7 @@
33#define AP_DEVICES 64 /* Number of AP devices. */ 33#define AP_DEVICES 64 /* Number of AP devices. */
34#define AP_DOMAINS 16 /* Number of AP domains. */ 34#define AP_DOMAINS 16 /* Number of AP domains. */
35#define AP_MAX_RESET 90 /* Maximum number of resets. */ 35#define AP_MAX_RESET 90 /* Maximum number of resets. */
36#define AP_RESET_TIMEOUT (HZ/2) /* Time in ticks for reset timeouts. */ 36#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
37#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */ 37#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
38#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */ 38#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
39 39
@@ -125,6 +125,8 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
125#define AP_FUNC_CRT4K 2 125#define AP_FUNC_CRT4K 2
126#define AP_FUNC_COPRO 3 126#define AP_FUNC_COPRO 3
127#define AP_FUNC_ACCEL 4 127#define AP_FUNC_ACCEL 4
128#define AP_FUNC_EP11 5
129#define AP_FUNC_APXA 6
128 130
129/* 131/*
130 * AP reset flag states 132 * AP reset flag states
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 31cfaa556072..4b824b15194f 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -44,6 +44,8 @@
44#include "zcrypt_debug.h" 44#include "zcrypt_debug.h"
45#include "zcrypt_api.h" 45#include "zcrypt_api.h"
46 46
47#include "zcrypt_msgtype6.h"
48
47/* 49/*
48 * Module description. 50 * Module description.
49 */ 51 */
@@ -554,9 +556,9 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
554 spin_lock_bh(&zcrypt_device_lock); 556 spin_lock_bh(&zcrypt_device_lock);
555 list_for_each_entry(zdev, &zcrypt_device_list, list) { 557 list_for_each_entry(zdev, &zcrypt_device_list, list) {
556 if (!zdev->online || !zdev->ops->send_cprb || 558 if (!zdev->online || !zdev->ops->send_cprb ||
557 (xcRB->user_defined != AUTOSELECT && 559 (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) ||
558 AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined) 560 (xcRB->user_defined != AUTOSELECT &&
559 ) 561 AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined))
560 continue; 562 continue;
561 zcrypt_device_get(zdev); 563 zcrypt_device_get(zdev);
562 get_device(&zdev->ap_dev->device); 564 get_device(&zdev->ap_dev->device);
@@ -581,6 +583,90 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
581 return -ENODEV; 583 return -ENODEV;
582} 584}
583 585
586struct ep11_target_dev_list {
587 unsigned short targets_num;
588 struct ep11_target_dev *targets;
589};
590
591static bool is_desired_ep11dev(unsigned int dev_qid,
592 struct ep11_target_dev_list dev_list)
593{
594 int n;
595
596 for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) {
597 if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) &&
598 (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) {
599 return true;
600 }
601 }
602 return false;
603}
604
605static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
606{
607 struct zcrypt_device *zdev;
608 bool autoselect = false;
609 int rc;
610 struct ep11_target_dev_list ep11_dev_list = {
611 .targets_num = 0x00,
612 .targets = NULL,
613 };
614
615 ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num;
616
617 /* empty list indicates autoselect (all available targets) */
618 if (ep11_dev_list.targets_num == 0)
619 autoselect = true;
620 else {
621 ep11_dev_list.targets = kcalloc((unsigned short)
622 xcrb->targets_num,
623 sizeof(struct ep11_target_dev),
624 GFP_KERNEL);
625 if (!ep11_dev_list.targets)
626 return -ENOMEM;
627
628 if (copy_from_user(ep11_dev_list.targets,
629 (struct ep11_target_dev *)xcrb->targets,
630 xcrb->targets_num *
631 sizeof(struct ep11_target_dev)))
632 return -EFAULT;
633 }
634
635 spin_lock_bh(&zcrypt_device_lock);
636 list_for_each_entry(zdev, &zcrypt_device_list, list) {
637 /* check if device is eligible */
638 if (!zdev->online ||
639 zdev->ops->variant != MSGTYPE06_VARIANT_EP11)
640 continue;
641
642 /* check if device is selected as valid target */
643 if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) &&
644 !autoselect)
645 continue;
646
647 zcrypt_device_get(zdev);
648 get_device(&zdev->ap_dev->device);
649 zdev->request_count++;
650 __zcrypt_decrease_preference(zdev);
651 if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
652 spin_unlock_bh(&zcrypt_device_lock);
653 rc = zdev->ops->send_ep11_cprb(zdev, xcrb);
654 spin_lock_bh(&zcrypt_device_lock);
655 module_put(zdev->ap_dev->drv->driver.owner);
656 } else {
657 rc = -EAGAIN;
658 }
659 zdev->request_count--;
660 __zcrypt_increase_preference(zdev);
661 put_device(&zdev->ap_dev->device);
662 zcrypt_device_put(zdev);
663 spin_unlock_bh(&zcrypt_device_lock);
664 return rc;
665 }
666 spin_unlock_bh(&zcrypt_device_lock);
667 return -ENODEV;
668}
669
584static long zcrypt_rng(char *buffer) 670static long zcrypt_rng(char *buffer)
585{ 671{
586 struct zcrypt_device *zdev; 672 struct zcrypt_device *zdev;
@@ -784,6 +870,23 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
784 return -EFAULT; 870 return -EFAULT;
785 return rc; 871 return rc;
786 } 872 }
873 case ZSENDEP11CPRB: {
874 struct ep11_urb __user *uxcrb = (void __user *)arg;
875 struct ep11_urb xcrb;
876 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
877 return -EFAULT;
878 do {
879 rc = zcrypt_send_ep11_cprb(&xcrb);
880 } while (rc == -EAGAIN);
881 /* on failure: retry once again after a requested rescan */
882 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
883 do {
884 rc = zcrypt_send_ep11_cprb(&xcrb);
885 } while (rc == -EAGAIN);
886 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
887 return -EFAULT;
888 return rc;
889 }
787 case Z90STAT_STATUS_MASK: { 890 case Z90STAT_STATUS_MASK: {
788 char status[AP_DEVICES]; 891 char status[AP_DEVICES];
789 zcrypt_status_mask(status); 892 zcrypt_status_mask(status);
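
A hedged user-space sketch of the new ZSENDEP11CPRB path. Only targets_num and targets are taken from the driver code above (an empty target list means autoselect); the request/response buffer fields of struct ep11_urb still have to be filled in from the zcrypt uapi header, which this sketch omits, and the /dev/z90crypt node name is an assumption.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/zcrypt.h>	/* assumed to provide ZSENDEP11CPRB and struct ep11_urb */

int main(void)
{
	struct ep11_urb urb;
	int fd, rc;

	memset(&urb, 0, sizeof(urb));
	urb.targets_num = 0;	/* empty target list: let the driver autoselect */

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	rc = ioctl(fd, ZSENDEP11CPRB, &urb);
	if (rc)
		perror("ZSENDEP11CPRB");	/* expected without a real request buffer */
	close(fd);
	return rc ? 1 : 0;
}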
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 89632919c993..b3d496bfaa7e 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -74,6 +74,7 @@ struct ica_z90_status {
74#define ZCRYPT_CEX2A 6 74#define ZCRYPT_CEX2A 6
75#define ZCRYPT_CEX3C 7 75#define ZCRYPT_CEX3C 7
76#define ZCRYPT_CEX3A 8 76#define ZCRYPT_CEX3A 8
77#define ZCRYPT_CEX4 10
77 78
78/** 79/**
79 * Large random numbers are pulled in 4096 byte chunks from the crypto cards 80 * Large random numbers are pulled in 4096 byte chunks from the crypto cards
@@ -89,6 +90,7 @@ struct zcrypt_ops {
89 long (*rsa_modexpo_crt)(struct zcrypt_device *, 90 long (*rsa_modexpo_crt)(struct zcrypt_device *,
90 struct ica_rsa_modexpo_crt *); 91 struct ica_rsa_modexpo_crt *);
91 long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *); 92 long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
93 long (*send_ep11_cprb)(struct zcrypt_device *, struct ep11_urb *);
92 long (*rng)(struct zcrypt_device *, char *); 94 long (*rng)(struct zcrypt_device *, char *);
93 struct list_head list; /* zcrypt ops list. */ 95 struct list_head list; /* zcrypt ops list. */
94 struct module *owner; 96 struct module *owner;
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index ce1226398ac9..569f8b1d86c0 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -30,7 +30,12 @@
30#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE 30#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
31#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE 31#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
32 32
33#define CEX4_CLEANUP_TIME (15*HZ) 33/* Waiting time for requests to be processed.
34 * Currently there are some types of request which are not deterministic.
35 * But the maximum time limit managed by the stomper code is set to 60sec.
36 * Hence we have to wait at least that time period.
37 */
38#define CEX4_CLEANUP_TIME (61*HZ)
34 39
35static struct ap_device_id zcrypt_cex4_ids[] = { 40static struct ap_device_id zcrypt_cex4_ids[] = {
36 { AP_DEVICE(AP_DEVICE_TYPE_CEX4) }, 41 { AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
@@ -101,6 +106,19 @@ static int zcrypt_cex4_probe(struct ap_device *ap_dev)
101 zdev->speed_rating = CEX4C_SPEED_RATING; 106 zdev->speed_rating = CEX4C_SPEED_RATING;
102 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, 107 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
103 MSGTYPE06_VARIANT_DEFAULT); 108 MSGTYPE06_VARIANT_DEFAULT);
109 } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_EP11)) {
110 zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
111 if (!zdev)
112 return -ENOMEM;
113 zdev->type_string = "CEX4P";
114 zdev->user_space_type = ZCRYPT_CEX4;
115 zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
116 zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
117 zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
118 zdev->short_crt = 0;
119 zdev->speed_rating = CEX4C_SPEED_RATING;
120 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
121 MSGTYPE06_VARIANT_EP11);
104 } 122 }
105 break; 123 break;
106 } 124 }
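
The probe now distinguishes three personalities of the same CEX4 hardware by its AP function facilities: accelerator ("CEX4A", message type 50), CCA coprocessor ("CEX4C", message type 6 default variant) and the new EP11 coprocessor ("CEX4P", message type 6 EP11 variant). The helper below is a compressed, purely illustrative restatement of that decision; its name and boolean arguments are made up, and the check order in the real probe differs.

/* Illustrative only: which identity zcrypt_cex4_probe() gives the device. */
static const char *cex4_type_string(int has_cca_copro, int has_ep11)
{
	if (has_cca_copro)
		return "CEX4C";		/* msgtype 6, default variant */
	if (has_ep11)
		return "CEX4P";		/* msgtype 6, EP11 variant    */
	return "CEX4A";			/* accelerator, msgtype 50    */
}
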
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 0079b6617211..7b23f43c7b08 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -106,15 +106,15 @@ static inline int convert_error(struct zcrypt_device *zdev,
106 // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A 106 // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
107 /* 107 /*
108 * To sent a message of the wrong type is a bug in the 108 * To sent a message of the wrong type is a bug in the
109 * device driver. Warn about it, disable the device
109 * device driver. Send error msg, disable the device
110 * and then repeat the request. 110 * and then repeat the request.
111 */ 111 */
112 WARN_ON(1);
113 atomic_set(&zcrypt_rescan_req, 1); 112 atomic_set(&zcrypt_rescan_req, 1);
114 zdev->online = 0; 113 zdev->online = 0;
114 pr_err("Cryptographic device %x failed and was set offline\n",
115 zdev->ap_dev->qid);
115 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 116 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
116 zdev->ap_dev->qid,
117 zdev->online, ehdr->reply_code);
117 zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
118 return -EAGAIN; 118 return -EAGAIN;
119 case REP82_ERROR_TRANSPORT_FAIL: 119 case REP82_ERROR_TRANSPORT_FAIL:
120 case REP82_ERROR_MACHINE_FAILURE: 120 case REP82_ERROR_MACHINE_FAILURE:
@@ -122,15 +122,17 @@ static inline int convert_error(struct zcrypt_device *zdev,
122 /* If a card fails disable it and repeat the request. */ 122 /* If a card fails disable it and repeat the request. */
123 atomic_set(&zcrypt_rescan_req, 1); 123 atomic_set(&zcrypt_rescan_req, 1);
124 zdev->online = 0; 124 zdev->online = 0;
125 pr_err("Cryptographic device %x failed and was set offline\n",
126 zdev->ap_dev->qid);
125 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 127 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
126 zdev->ap_dev->qid,
127 zdev->online, ehdr->reply_code);
128 zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
128 return -EAGAIN; 129 return -EAGAIN;
129 default: 130 default:
130 zdev->online = 0; 131 zdev->online = 0;
132 pr_err("Cryptographic device %x failed and was set offline\n",
133 zdev->ap_dev->qid);
131 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", 134 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
132 zdev->ap_dev->qid,
133 zdev->online, ehdr->reply_code);
135 zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
134 return -EAGAIN; /* repeat the request on a different device. */ 136 return -EAGAIN; /* repeat the request on a different device. */
135 } 137 }
136} 138}
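
Every failure path touched by this patch now does the same three things: mark the device offline, emit a pr_err naming the AP queue, and record the reply code in the zcrypt debug feature. Purely as an illustration, that pattern could be captured in one helper; zdev_set_offline is a hypothetical name, and the driver deliberately open-codes the statements at each site instead.

/* Hypothetical helper; mirrors the statements repeated in the hunks above. */
static void zdev_set_offline(struct zcrypt_device *zdev, int reply_code)
{
	zdev->online = 0;
	pr_err("Cryptographic device %x failed and was set offline\n",
	       zdev->ap_dev->qid);
	ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
		       zdev->ap_dev->qid, zdev->online, reply_code);
}
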
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 7c522f338bda..334e282f255b 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -25,6 +25,9 @@
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */ 26 */
27 27
28#define KMSG_COMPONENT "zcrypt"
29#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
30
28#include <linux/module.h> 31#include <linux/module.h>
29#include <linux/slab.h> 32#include <linux/slab.h>
30#include <linux/init.h> 33#include <linux/init.h>
@@ -332,6 +335,11 @@ static int convert_type80(struct zcrypt_device *zdev,
332 if (t80h->len < sizeof(*t80h) + outputdatalength) { 335 if (t80h->len < sizeof(*t80h) + outputdatalength) {
333 /* The result is too short, the CEX2A card may not do that.. */ 336 /* The result is too short, the CEX2A card may not do that.. */
334 zdev->online = 0; 337 zdev->online = 0;
338 pr_err("Cryptographic device %x failed and was set offline\n",
339 zdev->ap_dev->qid);
340 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
341 zdev->ap_dev->qid, zdev->online, t80h->code);
342
335 return -EAGAIN; /* repeat the request on a different device. */ 343 return -EAGAIN; /* repeat the request on a different device. */
336 } 344 }
337 if (zdev->user_space_type == ZCRYPT_CEX2A) 345 if (zdev->user_space_type == ZCRYPT_CEX2A)
@@ -359,6 +367,10 @@ static int convert_response(struct zcrypt_device *zdev,
359 outputdata, outputdatalength); 367 outputdata, outputdatalength);
360 default: /* Unknown response type, this should NEVER EVER happen */ 368 default: /* Unknown response type, this should NEVER EVER happen */
361 zdev->online = 0; 369 zdev->online = 0;
370 pr_err("Cryptographic device %x failed and was set offline\n",
371 zdev->ap_dev->qid);
372 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
373 zdev->ap_dev->qid, zdev->online);
362 return -EAGAIN; /* repeat the request on a different device. */ 374 return -EAGAIN; /* repeat the request on a different device. */
363 } 375 }
364} 376}
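
The KMSG_COMPONENT/pr_fmt pair added at the top of the file is what gives the new pr_err() calls their "zcrypt: " prefix: in the kernel, pr_err(fmt, ...) expands to printk(KERN_ERR pr_fmt(fmt), ...), so the prefix is pasted in at compile time. A small user-space analogue of the mechanism (fprintf stands in for printk):

#include <stdio.h>

#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("Cryptographic device %x failed and was set offline\n", 0x1a);
	/* prints: zcrypt: Cryptographic device 1a failed and was set offline */
	return 0;
}
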
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 7d97fa5a26d0..dc542e0a3055 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -25,6 +25,9 @@
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */ 26 */
27 27
28#define KMSG_COMPONENT "zcrypt"
29#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
30
28#include <linux/module.h> 31#include <linux/module.h>
29#include <linux/init.h> 32#include <linux/init.h>
30#include <linux/err.h> 33#include <linux/err.h>
@@ -50,6 +53,7 @@ struct response_type {
50}; 53};
51#define PCIXCC_RESPONSE_TYPE_ICA 0 54#define PCIXCC_RESPONSE_TYPE_ICA 0
52#define PCIXCC_RESPONSE_TYPE_XCRB 1 55#define PCIXCC_RESPONSE_TYPE_XCRB 1
56#define PCIXCC_RESPONSE_TYPE_EP11 2
53 57
54MODULE_AUTHOR("IBM Corporation"); 58MODULE_AUTHOR("IBM Corporation");
55MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \ 59MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
@@ -358,6 +362,91 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
358 return 0; 362 return 0;
359} 363}
360 364
365static int xcrb_msg_to_type6_ep11cprb_msgx(struct zcrypt_device *zdev,
366 struct ap_message *ap_msg,
367 struct ep11_urb *xcRB)
368{
369 unsigned int lfmt;
370
371 static struct type6_hdr static_type6_ep11_hdr = {
372 .type = 0x06,
373 .rqid = {0x00, 0x01},
374 .function_code = {0x00, 0x00},
375 .agent_id[0] = 0x58, /* {'X'} */
376 .agent_id[1] = 0x43, /* {'C'} */
377 .offset1 = 0x00000058,
378 };
379
380 struct {
381 struct type6_hdr hdr;
382 struct ep11_cprb cprbx;
383 unsigned char pld_tag; /* fixed value 0x30 */
384 unsigned char pld_lenfmt; /* payload length format */
385 } __packed * msg = ap_msg->message;
386
387 struct pld_hdr {
388 unsigned char func_tag; /* fixed value 0x4 */
389 unsigned char func_len; /* fixed value 0x4 */
390 unsigned int func_val; /* function ID */
391 unsigned char dom_tag; /* fixed value 0x4 */
392 unsigned char dom_len; /* fixed value 0x4 */
393 unsigned int dom_val; /* domain id */
394 } __packed * payload_hdr;
395
396 /* length checks */
397 ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len;
398 if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE -
399 (sizeof(struct type6_hdr)))
400 return -EINVAL;
401
402 if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE -
403 (sizeof(struct type86_fmt2_msg)))
404 return -EINVAL;
405
406 /* prepare type6 header */
407 msg->hdr = static_type6_ep11_hdr;
408 msg->hdr.ToCardLen1 = xcRB->req_len;
409 msg->hdr.FromCardLen1 = xcRB->resp_len;
410
411 /* Import CPRB data from the ioctl input parameter */
412 if (copy_from_user(&(msg->cprbx.cprb_len),
413 (char *)xcRB->req, xcRB->req_len)) {
414 return -EFAULT;
415 }
416
417 /*
418 The target domain field within the cprb body/payload block will be
419 replaced by the usage domain for non-management commands only.
420 Therefore we check the first bit of the 'flags' parameter for
421 management command indication.
422 0 - non management command
423 1 - management command
424 */
425 if (!((msg->cprbx.flags & 0x80) == 0x80)) {
426 msg->cprbx.target_id = (unsigned int)
427 AP_QID_QUEUE(zdev->ap_dev->qid);
428
429 if ((msg->pld_lenfmt & 0x80) == 0x80) { /*ext.len.fmt 2 or 3*/
430 switch (msg->pld_lenfmt & 0x03) {
431 case 1:
432 lfmt = 2;
433 break;
434 case 2:
435 lfmt = 3;
436 break;
437 default:
438 return -EINVAL;
439 }
440 } else {
441 lfmt = 1; /* length format #1 */
442 }
443 payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
444 payload_hdr->dom_val = (unsigned int)
445 AP_QID_QUEUE(zdev->ap_dev->qid);
446 }
447 return 0;
448}
449
361/** 450/**
362 * Copy results from a type 86 ICA reply message back to user space. 451 * Copy results from a type 86 ICA reply message back to user space.
363 * 452 *
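
The pld_lenfmt byte parsed above appears to follow the ASN.1/BER length encoding: the payload starts with a 0x30 tag, and the following length field is either a single short-form byte or 0x81/0x82 followed by one or two length bytes. The switch works out how many bytes that field spans so the function/domain header can be located behind it. A self-contained restatement under that assumption (the helper name is made up; length forms other than the two handled above are rejected, as in the driver):

#include <stdio.h>

static int ep11_lenfmt_span(unsigned char lenfmt)
{
	if (!(lenfmt & 0x80))
		return 1;		/* short form: length in this byte */
	switch (lenfmt & 0x03) {
	case 1:
		return 2;		/* 0x81: one extra length byte     */
	case 2:
		return 3;		/* 0x82: two extra length bytes    */
	default:
		return -1;		/* anything longer is rejected     */
	}
}

int main(void)
{
	printf("%d %d %d %d\n",
	       ep11_lenfmt_span(0x45),	/* 1  */
	       ep11_lenfmt_span(0x81),	/* 2  */
	       ep11_lenfmt_span(0x82),	/* 3  */
	       ep11_lenfmt_span(0x83));	/* -1 */
	return 0;
}
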
@@ -377,6 +466,12 @@ struct type86x_reply {
377 char text[0]; 466 char text[0];
378} __packed; 467} __packed;
379 468
469struct type86_ep11_reply {
470 struct type86_hdr hdr;
471 struct type86_fmt2_ext fmt2;
472 struct ep11_cprb cprbx;
473} __packed;
474
380static int convert_type86_ica(struct zcrypt_device *zdev, 475static int convert_type86_ica(struct zcrypt_device *zdev,
381 struct ap_message *reply, 476 struct ap_message *reply,
382 char __user *outputdata, 477 char __user *outputdata,
@@ -440,6 +535,11 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
440 if (service_rc == 8 && service_rs == 72) 535 if (service_rc == 8 && service_rs == 72)
441 return -EINVAL; 536 return -EINVAL;
442 zdev->online = 0; 537 zdev->online = 0;
538 pr_err("Cryptographic device %x failed and was set offline\n",
539 zdev->ap_dev->qid);
540 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
541 zdev->ap_dev->qid, zdev->online,
542 msg->hdr.reply_code);
443 return -EAGAIN; /* repeat the request on a different device. */ 543 return -EAGAIN; /* repeat the request on a different device. */
444 } 544 }
445 data = msg->text; 545 data = msg->text;
@@ -503,6 +603,33 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev,
503 return 0; 603 return 0;
504} 604}
505 605
606/**
607 * Copy results from a type 86 EP11 XCRB reply message back to user space.
608 *
609 * @zdev: crypto device pointer
610 * @reply: reply AP message.
611 * @xcRB: pointer to EP11 user request block
612 *
613 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
614 */
615static int convert_type86_ep11_xcrb(struct zcrypt_device *zdev,
616 struct ap_message *reply,
617 struct ep11_urb *xcRB)
618{
619 struct type86_fmt2_msg *msg = reply->message;
620 char *data = reply->message;
621
622 if (xcRB->resp_len < msg->fmt2.count1)
623 return -EINVAL;
624
625 /* Copy response CPRB to user */
626 if (copy_to_user((char *)xcRB->resp,
627 data + msg->fmt2.offset1, msg->fmt2.count1))
628 return -EFAULT;
629 xcRB->resp_len = msg->fmt2.count1;
630 return 0;
631}
632
506static int convert_type86_rng(struct zcrypt_device *zdev, 633static int convert_type86_rng(struct zcrypt_device *zdev,
507 struct ap_message *reply, 634 struct ap_message *reply,
508 char *buffer) 635 char *buffer)
@@ -551,6 +678,10 @@ static int convert_response_ica(struct zcrypt_device *zdev,
551 * response */ 678 * response */
552 default: /* Unknown response type, this should NEVER EVER happen */ 679 default: /* Unknown response type, this should NEVER EVER happen */
553 zdev->online = 0; 680 zdev->online = 0;
681 pr_err("Cryptographic device %x failed and was set offline\n",
682 zdev->ap_dev->qid);
683 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
684 zdev->ap_dev->qid, zdev->online);
554 return -EAGAIN; /* repeat the request on a different device. */ 685 return -EAGAIN; /* repeat the request on a different device. */
555 } 686 }
556} 687}
@@ -579,10 +710,40 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
579 default: /* Unknown response type, this should NEVER EVER happen */ 710 default: /* Unknown response type, this should NEVER EVER happen */
580 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ 711 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
581 zdev->online = 0; 712 zdev->online = 0;
713 pr_err("Cryptographic device %x failed and was set offline\n",
714 zdev->ap_dev->qid);
715 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
716 zdev->ap_dev->qid, zdev->online);
582 return -EAGAIN; /* repeat the request on a different device. */ 717 return -EAGAIN; /* repeat the request on a different device. */
583 } 718 }
584} 719}
585 720
721static int convert_response_ep11_xcrb(struct zcrypt_device *zdev,
722 struct ap_message *reply, struct ep11_urb *xcRB)
723{
724 struct type86_ep11_reply *msg = reply->message;
725
726 /* Response type byte is the second byte in the response. */
727 switch (((unsigned char *)reply->message)[1]) {
728 case TYPE82_RSP_CODE:
729 case TYPE87_RSP_CODE:
730 return convert_error(zdev, reply);
731 case TYPE86_RSP_CODE:
732 if (msg->hdr.reply_code)
733 return convert_error(zdev, reply);
734 if (msg->cprbx.cprb_ver_id == 0x04)
735 return convert_type86_ep11_xcrb(zdev, reply, xcRB);
736 /* Fall through, no break, incorrect cprb version is an unknown resp.*/
737 default: /* Unknown response type, this should NEVER EVER happen */
738 zdev->online = 0;
739 pr_err("Cryptographic device %x failed and was set offline\n",
740 zdev->ap_dev->qid);
741 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
742 zdev->ap_dev->qid, zdev->online);
743 return -EAGAIN; /* repeat the request on a different device. */
744 }
745}
746
586static int convert_response_rng(struct zcrypt_device *zdev, 747static int convert_response_rng(struct zcrypt_device *zdev,
587 struct ap_message *reply, 748 struct ap_message *reply,
588 char *data) 749 char *data)
@@ -602,6 +763,10 @@ static int convert_response_rng(struct zcrypt_device *zdev,
602 * response */ 763 * response */
603 default: /* Unknown response type, this should NEVER EVER happen */ 764 default: /* Unknown response type, this should NEVER EVER happen */
604 zdev->online = 0; 765 zdev->online = 0;
766 pr_err("Cryptographic device %x failed and was set offline\n",
767 zdev->ap_dev->qid);
768 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
769 zdev->ap_dev->qid, zdev->online);
605 return -EAGAIN; /* repeat the request on a different device. */ 770 return -EAGAIN; /* repeat the request on a different device. */
606 } 771 }
607} 772}
@@ -657,6 +822,51 @@ out:
657 complete(&(resp_type->work)); 822 complete(&(resp_type->work));
658} 823}
659 824
825/**
826 * This function is called from the AP bus code after a crypto request
827 * "msg" has finished with the reply message "reply".
828 * It is called from tasklet context.
829 * @ap_dev: pointer to the AP device
830 * @msg: pointer to the AP message
831 * @reply: pointer to the AP reply message
832 */
833static void zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev,
834 struct ap_message *msg,
835 struct ap_message *reply)
836{
837 static struct error_hdr error_reply = {
838 .type = TYPE82_RSP_CODE,
839 .reply_code = REP82_ERROR_MACHINE_FAILURE,
840 };
841 struct response_type *resp_type =
842 (struct response_type *)msg->private;
843 struct type86_ep11_reply *t86r;
844 int length;
845
846 /* Copy the reply message to the request message buffer. */
847 if (IS_ERR(reply)) {
848 memcpy(msg->message, &error_reply, sizeof(error_reply));
849 goto out;
850 }
851 t86r = reply->message;
852 if (t86r->hdr.type == TYPE86_RSP_CODE &&
853 t86r->cprbx.cprb_ver_id == 0x04) {
854 switch (resp_type->type) {
855 case PCIXCC_RESPONSE_TYPE_EP11:
856 length = t86r->fmt2.offset1 + t86r->fmt2.count1;
857 length = min(MSGTYPE06_MAX_MSG_SIZE, length);
858 memcpy(msg->message, reply->message, length);
859 break;
860 default:
861 memcpy(msg->message, &error_reply, sizeof(error_reply));
862 }
863 } else {
864 memcpy(msg->message, reply->message, sizeof(error_reply));
865 }
866out:
867 complete(&(resp_type->work));
868}
869
660static atomic_t zcrypt_step = ATOMIC_INIT(0); 870static atomic_t zcrypt_step = ATOMIC_INIT(0);
661 871
662/** 872/**
@@ -782,6 +992,46 @@ out_free:
782} 992}
783 993
784/** 994/**
995 * The request distributor calls this function if it picked the CEX4P
996 * device to handle a send_ep11_cprb request.
997 * @zdev: pointer to zcrypt_device structure that identifies the
998 * CEX4P device to the request distributor
999 * @xcRB: pointer to the ep11 user request block
1000 */
1001static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_device *zdev,
1002 struct ep11_urb *xcrb)
1003{
1004 struct ap_message ap_msg;
1005 struct response_type resp_type = {
1006 .type = PCIXCC_RESPONSE_TYPE_EP11,
1007 };
1008 int rc;
1009
1010 ap_init_message(&ap_msg);
1011 ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
1012 if (!ap_msg.message)
1013 return -ENOMEM;
1014 ap_msg.receive = zcrypt_msgtype6_receive_ep11;
1015 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
1016 atomic_inc_return(&zcrypt_step);
1017 ap_msg.private = &resp_type;
1018 rc = xcrb_msg_to_type6_ep11cprb_msgx(zdev, &ap_msg, xcrb);
1019 if (rc)
1020 goto out_free;
1021 init_completion(&resp_type.work);
1022 ap_queue_message(zdev->ap_dev, &ap_msg);
1023 rc = wait_for_completion_interruptible(&resp_type.work);
1024 if (rc == 0)
1025 rc = convert_response_ep11_xcrb(zdev, &ap_msg, xcrb);
1026 else /* Signal pending. */
1027 ap_cancel_message(zdev->ap_dev, &ap_msg);
1028
1029out_free:
1030 kzfree(ap_msg.message);
1031 return rc;
1032}
1033
1034/**
785 * The request distributor calls this function if it picked the PCIXCC/CEX2C 1035 * The request distributor calls this function if it picked the PCIXCC/CEX2C
786 * device to generate random data. 1036 * device to generate random data.
787 * @zdev: pointer to zcrypt_device structure that identifies the 1037 * @zdev: pointer to zcrypt_device structure that identifies the
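
Each request submitted above is tagged with a 64-bit psmid built from the caller's PID in the upper half and a global step counter in the lower half, which is how the AP bus matches the asynchronous reply delivered to zcrypt_msgtype6_receive_ep11() back to the waiting completion. A tiny user-space sketch of that id construction (make_psmid is a made-up name; the kernel increments a shared atomic_t instead of a plain counter):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t zcrypt_step;			/* atomic_t in the kernel */

static uint64_t make_psmid(uint32_t pid)
{
	/* PID in bits 63..32, per-request step counter in bits 31..0. */
	return ((uint64_t)pid << 32) + ++zcrypt_step;
}

int main(void)
{
	printf("0x%016" PRIx64 "\n", make_psmid(1234));
	printf("0x%016" PRIx64 "\n", make_psmid(1234));	/* low word differs */
	return 0;
}
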
@@ -839,10 +1089,19 @@ static struct zcrypt_ops zcrypt_msgtype6_ops = {
839 .rng = zcrypt_msgtype6_rng, 1089 .rng = zcrypt_msgtype6_rng,
840}; 1090};
841 1091
1092static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
1093 .owner = THIS_MODULE,
1094 .variant = MSGTYPE06_VARIANT_EP11,
1095 .rsa_modexpo = NULL,
1096 .rsa_modexpo_crt = NULL,
1097 .send_ep11_cprb = zcrypt_msgtype6_send_ep11_cprb,
1098};
1099
842int __init zcrypt_msgtype6_init(void) 1100int __init zcrypt_msgtype6_init(void)
843{ 1101{
844 zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops); 1102 zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
845 zcrypt_msgtype_register(&zcrypt_msgtype6_ops); 1103 zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
1104 zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
846 return 0; 1105 return 0;
847} 1106}
848 1107
@@ -850,6 +1109,7 @@ void __exit zcrypt_msgtype6_exit(void)
850{ 1109{
851 zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops); 1110 zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops);
852 zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops); 1111 zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
1112 zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
853} 1113}
854 1114
855module_init(zcrypt_msgtype6_init); 1115module_init(zcrypt_msgtype6_init);
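
With the EP11 ops table registered here, the CEX4 probe's zcrypt_msgtype_request(MSGTYPE06_NAME, MSGTYPE06_VARIANT_EP11) call can hand out EP11-capable ops alongside the default and no-RNG variants. The lookup itself lives elsewhere in the zcrypt code; the sketch below only illustrates the idea of keying on a (name, variant) pair, and its struct fields and list handling are stand-ins rather than the real struct zcrypt_ops.

#include <stddef.h>
#include <string.h>

struct msgtype_ops {			/* stand-in for struct zcrypt_ops */
	const char *name;		/* e.g. MSGTYPE06_NAME            */
	int variant;			/* e.g. MSGTYPE06_VARIANT_EP11    */
	struct msgtype_ops *next;	/* stand-in for the list_head     */
};

static struct msgtype_ops *msgtype_list;

static void msgtype_register(struct msgtype_ops *ops)
{
	ops->next = msgtype_list;
	msgtype_list = ops;
}

static struct msgtype_ops *msgtype_request(const char *name, int variant)
{
	struct msgtype_ops *ops;

	for (ops = msgtype_list; ops; ops = ops->next)
		if (ops->variant == variant && strcmp(ops->name, name) == 0)
			return ops;
	return NULL;			/* probe has nothing to bind to */
}
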
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 1e500d3c0735..207247570623 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -32,6 +32,7 @@
32#define MSGTYPE06_NAME "zcrypt_msgtype6" 32#define MSGTYPE06_NAME "zcrypt_msgtype6"
33#define MSGTYPE06_VARIANT_DEFAULT 0 33#define MSGTYPE06_VARIANT_DEFAULT 0
34#define MSGTYPE06_VARIANT_NORNG 1 34#define MSGTYPE06_VARIANT_NORNG 1
35#define MSGTYPE06_VARIANT_EP11 2
35 36
36#define MSGTYPE06_MAX_MSG_SIZE (12*1024) 37#define MSGTYPE06_MAX_MSG_SIZE (12*1024)
37 38
@@ -99,6 +100,7 @@ struct type86_hdr {
99} __packed; 100} __packed;
100 101
101#define TYPE86_RSP_CODE 0x86 102#define TYPE86_RSP_CODE 0x86
103#define TYPE87_RSP_CODE 0x87
102#define TYPE86_FMT2 0x02 104#define TYPE86_FMT2 0x02
103 105
104struct type86_fmt2_ext { 106struct type86_fmt2_ext {
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index f2b71d8df01f..7a743f4c646c 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -24,6 +24,9 @@
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */ 25 */
26 26
27#define KMSG_COMPONENT "zcrypt"
28#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
27#include <linux/module.h> 30#include <linux/module.h>
28#include <linux/slab.h> 31#include <linux/slab.h>
29#include <linux/init.h> 32#include <linux/init.h>
@@ -199,6 +202,10 @@ static int convert_type84(struct zcrypt_device *zdev,
199 if (t84h->len < sizeof(*t84h) + outputdatalength) { 202 if (t84h->len < sizeof(*t84h) + outputdatalength) {
200 /* The result is too short, the PCICA card may not do that.. */ 203 /* The result is too short, the PCICA card may not do that.. */
201 zdev->online = 0; 204 zdev->online = 0;
205 pr_err("Cryptographic device %x failed and was set offline\n",
206 zdev->ap_dev->qid);
207 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
208 zdev->ap_dev->qid, zdev->online, t84h->code);
202 return -EAGAIN; /* repeat the request on a different device. */ 209 return -EAGAIN; /* repeat the request on a different device. */
203 } 210 }
204 BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE); 211 BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE);
@@ -223,6 +230,10 @@ static int convert_response(struct zcrypt_device *zdev,
223 outputdata, outputdatalength); 230 outputdata, outputdatalength);
224 default: /* Unknown response type, this should NEVER EVER happen */ 231 default: /* Unknown response type, this should NEVER EVER happen */
225 zdev->online = 0; 232 zdev->online = 0;
233 pr_err("Cryptographic device %x failed and was set offline\n",
234 zdev->ap_dev->qid);
235 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
236 zdev->ap_dev->qid, zdev->online);
226 return -EAGAIN; /* repeat the request on a different device. */ 237 return -EAGAIN; /* repeat the request on a different device. */
227 } 238 }
228} 239}
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index 0d90a4334055..4d14c04b746e 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -24,6 +24,9 @@
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */ 25 */
26 26
27#define KMSG_COMPONENT "zcrypt"
28#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29
27#include <linux/module.h> 30#include <linux/module.h>
28#include <linux/init.h> 31#include <linux/init.h>
29#include <linux/gfp.h> 32#include <linux/gfp.h>
@@ -372,6 +375,11 @@ static int convert_type86(struct zcrypt_device *zdev,
372 if (service_rc == 8 && service_rs == 72) 375 if (service_rc == 8 && service_rs == 72)
373 return -EINVAL; 376 return -EINVAL;
374 zdev->online = 0; 377 zdev->online = 0;
378 pr_err("Cryptographic device %x failed and was set offline\n",
379 zdev->ap_dev->qid);
380 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
381 zdev->ap_dev->qid, zdev->online,
382 msg->hdr.reply_code);
375 return -EAGAIN; /* repeat the request on a different device. */ 383 return -EAGAIN; /* repeat the request on a different device. */
376 } 384 }
377 data = msg->text; 385 data = msg->text;
@@ -425,6 +433,10 @@ static int convert_response(struct zcrypt_device *zdev,
425 /* no break, incorrect cprb version is an unknown response */ 433 /* no break, incorrect cprb version is an unknown response */
426 default: /* Unknown response type, this should NEVER EVER happen */ 434 default: /* Unknown response type, this should NEVER EVER happen */
427 zdev->online = 0; 435 zdev->online = 0;
436 pr_err("Cryptographic device %x failed and was set offline\n",
437 zdev->ap_dev->qid);
438 ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
439 zdev->ap_dev->qid, zdev->online);
428 return -EAGAIN; /* repeat the request on a different device. */ 440 return -EAGAIN; /* repeat the request on a different device. */
429 } 441 }
430} 442}
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 30fa38a0ad39..9176bfbd5745 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -201,7 +201,7 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
201 instance->irq = IRQ_AMIGA_PORTS; 201 instance->irq = IRQ_AMIGA_PORTS;
202 instance->unique_id = z->slotaddr; 202 instance->unique_id = z->slotaddr;
203 203
204 regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start);
204 regs = ZTWO_VADDR(z->resource.start);
205 regs->DAWR = DAWR_A2091; 205 regs->DAWR = DAWR_A2091;
206 206
207 wdregs.SASR = &regs->SASR; 207 wdregs.SASR = &regs->SASR;
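
The cast goes away here (and in the other Amiga drivers below) because ZTWO_VADDR() itself was changed in this series to hand back a void __iomem * for the Zorro II address translation. The helper sketched below is the assumed shape of the update, inferred from these call sites rather than quoted from <asm/amigahw.h>; zTwoBase is the existing m68k offset between Zorro II bus addresses and their kernel mapping.

/* Assumed definition, for illustration only. */
extern unsigned long zTwoBase;		/* set up by the Amiga platform code */

static inline void __iomem *ZTWO_VADDR(unsigned long paddr)
{
	return (void __iomem *)(paddr + zTwoBase);
}
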
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index c0f4f4290dd6..dd5b64726ddc 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -220,7 +220,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
220 220
221 instance->irq = IRQ_AMIGA_PORTS; 221 instance->irq = IRQ_AMIGA_PORTS;
222 222
223 regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start);
223 regs = ZTWO_VADDR(res->start);
224 regs->DAWR = DAWR_A3000; 224 regs->DAWR = DAWR_A3000;
225 225
226 wdregs.SASR = &regs->SASR; 226 wdregs.SASR = &regs->SASR;
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
index 70c521f79f7c..f5a2ab41543b 100644
--- a/drivers/scsi/a4000t.c
+++ b/drivers/scsi/a4000t.c
@@ -56,7 +56,7 @@ static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev)
56 scsi_addr = res->start + A4000T_SCSI_OFFSET; 56 scsi_addr = res->start + A4000T_SCSI_OFFSET;
57 57
58 /* Fill in the required pieces of hostdata */ 58 /* Fill in the required pieces of hostdata */
59 hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr);
59 hostdata->base = ZTWO_VADDR(scsi_addr);
60 hostdata->clock = 50; 60 hostdata->clock = 50;
61 hostdata->chip710 = 1; 61 hostdata->chip710 = 1;
62 hostdata->dmode_extra = DMODE_FC2; 62 hostdata->dmode_extra = DMODE_FC2;
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 2203ac281103..3b6f83ffddc4 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -310,7 +310,7 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
310 if (!request_mem_region(address, 256, "wd33c93")) 310 if (!request_mem_region(address, 256, "wd33c93"))
311 return -EBUSY; 311 return -EBUSY;
312 312
313 regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address));
313 regs = ZTWO_VADDR(address);
314 314
315 error = check_wd33c93(regs); 315 error = check_wd33c93(regs);
316 if (error) 316 if (error)
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index cbf3476c68cd..aff31991aea9 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -104,7 +104,7 @@ static int zorro7xx_init_one(struct zorro_dev *z,
104 if (ioaddr > 0x01000000) 104 if (ioaddr > 0x01000000)
105 hostdata->base = ioremap(ioaddr, zorro_resource_len(z)); 105 hostdata->base = ioremap(ioaddr, zorro_resource_len(z));
106 else 106 else
107 hostdata->base = (void __iomem *)ZTWO_VADDR(ioaddr);
107 hostdata->base = ZTWO_VADDR(ioaddr);
108 108
109 hostdata->clock = 50; 109 hostdata->clock = 50;
110 hostdata->chip710 = 1; 110 hostdata->chip710 = 1;
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 53fee2f9a498..8dfdd2732bdc 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev)
39 return 0; 39 return 0;
40} 40}
41 41
42static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb)
42static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
43 void *accel_priv)
43{ 44{
44 return ClassifyPacket(netdev_priv(dev), skb); 45 return ClassifyPacket(netdev_priv(dev), skb);
45} 46}
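
The extra void *accel_priv parameter in these staging drivers tracks a tree-wide change to the netdev core's queue-selection hook, which now passes accelerated-forwarding private data down to the driver. The excerpt below is the hook signature these drivers are assumed to be adjusted to; drivers that do not care about the argument, like the three here, simply accept and ignore it.

/* Assumed ndo_select_queue prototype in struct net_device_ops at this time. */
struct net_device;
struct sk_buff;
typedef unsigned short u16;	/* kernel fixed-width type */

struct net_device_ops_excerpt {
	u16 (*ndo_select_queue)(struct net_device *dev,
				struct sk_buff *skb,
				void *accel_priv);
	/* other callbacks elided */
};
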
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index 235d2b1ec593..eedffed17e39 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
306 return NETDEV_TX_OK; 306 return NETDEV_TX_OK;
307} 307}
308 308
309static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb)
309static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
310 void *accel_priv)
310{ 311{
311 return (u16)smp_processor_id(); 312 return (u16)smp_processor_id();
312} 313}
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 17659bb04bef..dd69e344e409 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
652 return dscp >> 5; 652 return dscp >> 5;
653} 653}
654 654
655static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb)
655static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
656 void *accel_priv)
656{ 657{
657 struct adapter *padapter = rtw_netdev_priv(dev); 658 struct adapter *padapter = rtw_netdev_priv(dev);
658 struct mlme_priv *pmlmepriv = &padapter->mlmepriv; 659 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 8f181b3f842b..d833c8f5b465 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -438,14 +438,12 @@ static int clamp_thread(void *arg)
438 */ 438 */
439 local_touch_nmi(); 439 local_touch_nmi();
440 stop_critical_timings(); 440 stop_critical_timings();
441 __monitor((void *)&current_thread_info()->flags, 0, 0);
441 mwait_idle_with_hints(eax, ecx);
442 cpu_relax(); /* allow HT sibling to run */
443 __mwait(eax, ecx);
444 start_critical_timings(); 442 start_critical_timings();
445 atomic_inc(&idle_wakeup_counter); 443 atomic_inc(&idle_wakeup_counter);
446 } 444 }
447 tick_nohz_idle_exit(); 445 tick_nohz_idle_exit();
448 preempt_enable_no_resched();
446 preempt_enable();
449 } 447 }
450 del_timer_sync(&wakeup_timer); 448 del_timer_sync(&wakeup_timer);
451 clear_bit(cpunr, cpu_clamping_mask); 449 clear_bit(cpunr, cpu_clamping_mask);
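
Replacing the open-coded MONITOR/MWAIT pair with mwait_idle_with_hints() lets powerclamp share the common x86 idle entry, including its errata workarounds and its need_resched() check. Roughly, the helper does the following; this is a simplified restatement, not the exact implementation in arch/x86/include/asm/mwait.h.

/* Simplified sketch of mwait_idle_with_hints(); the real helper also
 * handles the CLFLUSH-before-MONITOR erratum and the polling flag. */
static inline void mwait_idle_with_hints_sketch(unsigned long eax,
						unsigned long ecx)
{
	__monitor(&current_thread_info()->flags, 0, 0);	/* arm the monitor */
	if (!need_resched())
		__mwait(eax, ecx);	/* enter the requested mwait state */
}
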
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index 0dac36ce09d6..518f790ef88a 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -3710,7 +3710,7 @@ default_chipset:
3710 if (!videomemory) { 3710 if (!videomemory) {
3711 dev_warn(&pdev->dev, 3711 dev_warn(&pdev->dev,
3712 "Unable to map videomem cached writethrough\n"); 3712 "Unable to map videomem cached writethrough\n");
3713 info->screen_base = (char *)ZTWO_VADDR(info->fix.smem_start);
3713 info->screen_base = ZTWO_VADDR(info->fix.smem_start);
3714 } else 3714 } else
3715 info->screen_base = (char *)videomemory; 3715 info->screen_base = (char *)videomemory;
3716 3716
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 5aab9b9dc210..d992aa5eb3f0 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -2256,7 +2256,7 @@ static int cirrusfb_zorro_register(struct zorro_dev *z,
2256 2256
2257 info->fix.mmio_start = regbase; 2257 info->fix.mmio_start = regbase;
2258 cinfo->regbase = regbase > 16 * MB_ ? ioremap(regbase, 64 * 1024) 2258 cinfo->regbase = regbase > 16 * MB_ ? ioremap(regbase, 64 * 1024)
2259 : (caddr_t)ZTWO_VADDR(regbase);
2259 : ZTWO_VADDR(regbase);
2260 if (!cinfo->regbase) { 2260 if (!cinfo->regbase) {
2261 dev_err(info->device, "Cannot map registers\n"); 2261 dev_err(info->device, "Cannot map registers\n");
2262 error = -EIO; 2262 error = -EIO;
@@ -2266,7 +2266,7 @@ static int cirrusfb_zorro_register(struct zorro_dev *z,
2266 info->fix.smem_start = rambase; 2266 info->fix.smem_start = rambase;
2267 info->screen_size = ramsize; 2267 info->screen_size = ramsize;
2268 info->screen_base = rambase > 16 * MB_ ? ioremap(rambase, ramsize) 2268 info->screen_base = rambase > 16 * MB_ ? ioremap(rambase, ramsize)
2269 : (caddr_t)ZTWO_VADDR(rambase);
2269 : ZTWO_VADDR(rambase);
2270 if (!info->screen_base) { 2270 if (!info->screen_base) {
2271 dev_err(info->device, "Cannot map video RAM\n"); 2271 dev_err(info->device, "Cannot map video RAM\n");
2272 error = -EIO; 2272 error = -EIO;
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index 5bd2eb8d4f39..cda7587cbc86 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -34,7 +34,6 @@
34#include <linux/fb.h> 34#include <linux/fb.h>
35 35
36#include <asm/setup.h> 36#include <asm/setup.h>
37#include <asm/bootinfo.h>
38#include <asm/macintosh.h> 37#include <asm/macintosh.h>
39#include <asm/io.h> 38#include <asm/io.h>
40 39
diff --git a/drivers/video/valkyriefb.c b/drivers/video/valkyriefb.c
index e287ebc47817..97cb9bd1d1dd 100644
--- a/drivers/video/valkyriefb.c
+++ b/drivers/video/valkyriefb.c
@@ -56,7 +56,6 @@
56#include <linux/cuda.h> 56#include <linux/cuda.h>
57#include <asm/io.h> 57#include <asm/io.h>
58#ifdef CONFIG_MAC 58#ifdef CONFIG_MAC
59#include <asm/bootinfo.h>
60#include <asm/macintosh.h> 59#include <asm/macintosh.h>
61#else 60#else
62#include <asm/prom.h> 61#include <asm/prom.h>
diff --git a/drivers/zorro/Makefile b/drivers/zorro/Makefile
index f62172603215..7dc5332ff984 100644
--- a/drivers/zorro/Makefile
+++ b/drivers/zorro/Makefile
@@ -2,8 +2,9 @@
2# Makefile for the Zorro bus specific drivers. 2# Makefile for the Zorro bus specific drivers.
3# 3#
4 4
5obj-$(CONFIG_ZORRO) += zorro.o zorro-driver.o zorro-sysfs.o names.o
5obj-$(CONFIG_ZORRO) += zorro.o zorro-driver.o zorro-sysfs.o
6obj-$(CONFIG_PROC_FS) += proc.o 6obj-$(CONFIG_PROC_FS) += proc.o
7obj-$(CONFIG_ZORRO_NAMES) += names.o
7 8
8hostprogs-y := gen-devlist 9hostprogs-y := gen-devlist
9 10
diff --git a/drivers/zorro/names.c b/drivers/zorro/names.c
index e8517c3d8e82..6f3fd9903ac3 100644
--- a/drivers/zorro/names.c
+++ b/drivers/zorro/names.c
@@ -15,8 +15,6 @@
15#include <linux/zorro.h> 15#include <linux/zorro.h>
16 16
17 17
18#ifdef CONFIG_ZORRO_NAMES
19
20struct zorro_prod_info { 18struct zorro_prod_info {
21 __u16 prod; 19 __u16 prod;
22 unsigned short seen; 20 unsigned short seen;
@@ -69,7 +67,6 @@ void __init zorro_name_device(struct zorro_dev *dev)
69 } while (--i); 67 } while (--i);
70 68
71 /* Couldn't find either the manufacturer nor the product */ 69 /* Couldn't find either the manufacturer nor the product */
72 sprintf(name, "Zorro device %08x", dev->id);
73 return; 70 return;
74 71
75 match_manuf: { 72 match_manuf: {
@@ -98,11 +95,3 @@ void __init zorro_name_device(struct zorro_dev *dev)
98 } 95 }
99 } 96 }
100} 97}
101
102#else
103
104void __init zorro_name_device(struct zorro_dev *dev)
105{
106}
107
108#endif
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index ea1ce822a8e0..6ac2579da0eb 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -14,6 +14,8 @@
14#include <linux/seq_file.h> 14#include <linux/seq_file.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/export.h> 16#include <linux/export.h>
17
18#include <asm/byteorder.h>
17#include <asm/uaccess.h> 19#include <asm/uaccess.h>
18#include <asm/amigahw.h> 20#include <asm/amigahw.h>
19#include <asm/setup.h> 21#include <asm/setup.h>
@@ -41,10 +43,10 @@ proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *
41 /* Construct a ConfigDev */ 43 /* Construct a ConfigDev */
42 memset(&cd, 0, sizeof(cd)); 44 memset(&cd, 0, sizeof(cd));
43 cd.cd_Rom = z->rom; 45 cd.cd_Rom = z->rom;
44 cd.cd_SlotAddr = z->slotaddr;
45 cd.cd_SlotSize = z->slotsize;
46 cd.cd_BoardAddr = (void *)zorro_resource_start(z);
47 cd.cd_BoardSize = zorro_resource_len(z);
46 cd.cd_SlotAddr = cpu_to_be16(z->slotaddr);
47 cd.cd_SlotSize = cpu_to_be16(z->slotsize);
48 cd.cd_BoardAddr = cpu_to_be32(zorro_resource_start(z));
49 cd.cd_BoardSize = cpu_to_be32(zorro_resource_len(z));
48 50
49 if (copy_to_user(buf, (void *)&cd + pos, nbytes)) 51 if (copy_to_user(buf, (void *)&cd + pos, nbytes))
50 return -EFAULT; 52 return -EFAULT;
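
struct ConfigDev is the AmigaOS expansion-library structure exported to user space byte-for-byte, so its multi-byte fields are expected in big-endian order; the explicit cpu_to_be16()/cpu_to_be32() calls make that visible (and sparse-clean) while compiling to a no-op on big-endian m68k. A user-space analogue of the conversion, using htons()/htonl() as stand-ins for the kernel helpers:

#include <arpa/inet.h>	/* htons()/htonl(): host to big-endian, like cpu_to_be16/32 */
#include <stdio.h>

int main(void)
{
	unsigned short slotaddr = 0x00e9;
	unsigned int   board    = 0x00ea0000u;

	printf("slotaddr cpu 0x%04x -> be 0x%04x\n", slotaddr, htons(slotaddr));
	printf("board    cpu 0x%08x -> be 0x%08x\n", board, htonl(board));
	return 0;
}
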
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index ac1db7f1bcab..eacae1434b73 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -161,11 +161,12 @@ static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
161} 161}
162 162
163struct bus_type zorro_bus_type = { 163struct bus_type zorro_bus_type = {
164 .name = "zorro", 164 .name = "zorro",
165 .match = zorro_bus_match,
166 .uevent = zorro_uevent,
167 .probe = zorro_device_probe,
168 .remove = zorro_device_remove,
165 .dev_name = "zorro",
166 .match = zorro_bus_match,
167 .uevent = zorro_uevent,
168 .probe = zorro_device_probe,
169 .remove = zorro_device_remove,
169}; 170};
170EXPORT_SYMBOL(zorro_bus_type); 171EXPORT_SYMBOL(zorro_bus_type);
171 172
diff --git a/drivers/zorro/zorro-sysfs.c b/drivers/zorro/zorro-sysfs.c
index 26f7184ef9e1..36b210f9b6b2 100644
--- a/drivers/zorro/zorro-sysfs.c
+++ b/drivers/zorro/zorro-sysfs.c
@@ -16,6 +16,8 @@
16#include <linux/stat.h> 16#include <linux/stat.h>
17#include <linux/string.h> 17#include <linux/string.h>
18 18
19#include <asm/byteorder.h>
20
19#include "zorro.h" 21#include "zorro.h"
20 22
21 23
@@ -33,10 +35,20 @@ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
33 35
34zorro_config_attr(id, id, "0x%08x\n"); 36zorro_config_attr(id, id, "0x%08x\n");
35zorro_config_attr(type, rom.er_Type, "0x%02x\n"); 37zorro_config_attr(type, rom.er_Type, "0x%02x\n");
36zorro_config_attr(serial, rom.er_SerialNumber, "0x%08x\n");
37zorro_config_attr(slotaddr, slotaddr, "0x%04x\n"); 38zorro_config_attr(slotaddr, slotaddr, "0x%04x\n");
38zorro_config_attr(slotsize, slotsize, "0x%04x\n"); 39zorro_config_attr(slotsize, slotsize, "0x%04x\n");
39 40
41static ssize_t
42show_serial(struct device *dev, struct device_attribute *attr, char *buf)
43{
44 struct zorro_dev *z;
45
46 z = to_zorro_dev(dev);
47 return sprintf(buf, "0x%08x\n", be32_to_cpu(z->rom.er_SerialNumber));
48}
49
50static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
51
40static ssize_t zorro_show_resource(struct device *dev, struct device_attribute *attr, char *buf) 52static ssize_t zorro_show_resource(struct device *dev, struct device_attribute *attr, char *buf)
41{ 53{
42 struct zorro_dev *z = to_zorro_dev(dev); 54 struct zorro_dev *z = to_zorro_dev(dev);
@@ -60,10 +72,10 @@ static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
60 /* Construct a ConfigDev */ 72 /* Construct a ConfigDev */
61 memset(&cd, 0, sizeof(cd)); 73 memset(&cd, 0, sizeof(cd));
62 cd.cd_Rom = z->rom; 74 cd.cd_Rom = z->rom;
63 cd.cd_SlotAddr = z->slotaddr;
64 cd.cd_SlotSize = z->slotsize;
65 cd.cd_BoardAddr = (void *)zorro_resource_start(z);
66 cd.cd_BoardSize = zorro_resource_len(z);
75 cd.cd_SlotAddr = cpu_to_be16(z->slotaddr);
76 cd.cd_SlotSize = cpu_to_be16(z->slotsize);
77 cd.cd_BoardAddr = cpu_to_be32(zorro_resource_start(z));
78 cd.cd_BoardSize = cpu_to_be32(zorro_resource_len(z));
67 79
68 return memory_read_from_buffer(buf, count, &off, &cd, sizeof(cd)); 80 return memory_read_from_buffer(buf, count, &off, &cd, sizeof(cd));
69} 81}
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 858c9714b2f3..707c1a5a0317 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -18,6 +18,7 @@
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20 20
21#include <asm/byteorder.h>
21#include <asm/setup.h> 22#include <asm/setup.h>
22#include <asm/amigahw.h> 23#include <asm/amigahw.h>
23 24
@@ -29,7 +30,8 @@
29 */ 30 */
30 31
31unsigned int zorro_num_autocon; 32unsigned int zorro_num_autocon;
32struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
33struct zorro_dev_init zorro_autocon_init[ZORRO_NUM_AUTO] __initdata;
34struct zorro_dev *zorro_autocon;
33 35
34 36
35 /* 37 /*
@@ -38,6 +40,7 @@ struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
38 40
39struct zorro_bus { 41struct zorro_bus {
40 struct device dev; 42 struct device dev;
43 struct zorro_dev devices[0];
41}; 44};
42 45
43 46
@@ -125,18 +128,22 @@ static struct resource __init *zorro_find_parent_resource(
125static int __init amiga_zorro_probe(struct platform_device *pdev) 128static int __init amiga_zorro_probe(struct platform_device *pdev)
126{ 129{
127 struct zorro_bus *bus; 130 struct zorro_bus *bus;
131 struct zorro_dev_init *zi;
128 struct zorro_dev *z; 132 struct zorro_dev *z;
129 struct resource *r; 133 struct resource *r;
130 unsigned int i; 134 unsigned int i;
131 int error; 135 int error;
132 136
133 /* Initialize the Zorro bus */ 137 /* Initialize the Zorro bus */
134 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
138 bus = kzalloc(sizeof(*bus) +
139 zorro_num_autocon * sizeof(bus->devices[0]),
140 GFP_KERNEL);
135 if (!bus) 141 if (!bus)
136 return -ENOMEM; 142 return -ENOMEM;
137 143
144 zorro_autocon = bus->devices;
138 bus->dev.parent = &pdev->dev; 145 bus->dev.parent = &pdev->dev;
139 dev_set_name(&bus->dev, "zorro");
146 dev_set_name(&bus->dev, zorro_bus_type.name);
140 error = device_register(&bus->dev); 147 error = device_register(&bus->dev);
141 if (error) { 148 if (error) {
142 pr_err("Zorro: Error registering zorro_bus\n"); 149 pr_err("Zorro: Error registering zorro_bus\n");
@@ -151,15 +158,23 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
151 158
152 /* First identify all devices ... */ 159 /* First identify all devices ... */
153 for (i = 0; i < zorro_num_autocon; i++) { 160 for (i = 0; i < zorro_num_autocon; i++) {
161 zi = &zorro_autocon_init[i];
154 z = &zorro_autocon[i]; 162 z = &zorro_autocon[i];
155 z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8);
163
164 z->rom = zi->rom;
165 z->id = (be16_to_cpu(z->rom.er_Manufacturer) << 16) |
166 (z->rom.er_Product << 8);
156 if (z->id == ZORRO_PROD_GVP_EPC_BASE) { 167 if (z->id == ZORRO_PROD_GVP_EPC_BASE) {
157 /* GVP quirk */ 168 /* GVP quirk */
158 unsigned long magic = zorro_resource_start(z)+0x8000;
169 unsigned long magic = zi->boardaddr + 0x8000;
159 z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK; 170 z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK;
160 } 171 }
172 z->slotaddr = zi->slotaddr;
173 z->slotsize = zi->slotsize;
161 sprintf(z->name, "Zorro device %08x", z->id); 174 sprintf(z->name, "Zorro device %08x", z->id);
162 zorro_name_device(z); 175 zorro_name_device(z);
176 z->resource.start = zi->boardaddr;
177 z->resource.end = zi->boardaddr + zi->boardsize - 1;
163 z->resource.name = z->name; 178 z->resource.name = z->name;
164 r = zorro_find_parent_resource(pdev, z); 179 r = zorro_find_parent_resource(pdev, z);
165 error = request_resource(r, &z->resource); 180 error = request_resource(r, &z->resource);
@@ -167,9 +182,9 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
167 dev_err(&bus->dev, 182 dev_err(&bus->dev,
168 "Address space collision on device %s %pR\n", 183 "Address space collision on device %s %pR\n",
169 z->name, &z->resource); 184 z->name, &z->resource);
170 dev_set_name(&z->dev, "%02x", i);
171 z->dev.parent = &bus->dev; 185 z->dev.parent = &bus->dev;
172 z->dev.bus = &zorro_bus_type; 186 z->dev.bus = &zorro_bus_type;
187 z->dev.id = i;
173 } 188 }
174 189
175 /* ... then register them */ 190 /* ... then register them */
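
The statically sized zorro_autocon[] array is gone; the zorro_dev slots now trail struct zorro_bus as a zero-length array and are allocated in one go, sized from the number of boards the bootstrap actually found. The allocation idiom is the usual "header plus N trailing elements"; below is a self-contained user-space illustration with calloc standing in for kzalloc and a C99 flexible array member in place of the kernel's devices[0].

#include <stdio.h>
#include <stdlib.h>

struct device_stub { int id; };

struct bus {
	char name[16];
	struct device_stub devices[];	/* trailing per-device slots */
};

int main(void)
{
	unsigned int n = 4;	/* stands in for zorro_num_autocon */
	struct bus *bus = calloc(1, sizeof(*bus) + n * sizeof(bus->devices[0]));

	if (!bus)
		return 1;
	for (unsigned int i = 0; i < n; i++)
		bus->devices[i].id = i;
	printf("allocated %zu bytes for %u devices\n",
	       sizeof(*bus) + n * sizeof(bus->devices[0]), n);
	free(bus);
	return 0;
}
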
diff --git a/drivers/zorro/zorro.h b/drivers/zorro/zorro.h
index b682d5ccd63f..34119fb4e560 100644
--- a/drivers/zorro/zorro.h
+++ b/drivers/zorro/zorro.h
@@ -1,4 +1,9 @@
1 1
2#ifdef CONFIG_ZORRO_NAMES
2extern void zorro_name_device(struct zorro_dev *z); 3extern void zorro_name_device(struct zorro_dev *z);
4#else
5static inline void zorro_name_device(struct zorro_dev *dev) { }
6#endif
7
3extern int zorro_create_sysfs_dev_files(struct zorro_dev *z); 8extern int zorro_create_sysfs_dev_files(struct zorro_dev *z);
4 9