Diffstat (limited to 'drivers') -- each row lists the file mode, file path, and number of lines changed; '(renamed from ...)' marks renamed files:
-rw-r--r--drivers/Makefile6
-rw-r--r--drivers/acpi/Kconfig17
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/ac.c117
-rw-r--r--drivers/acpi/acpi_platform.c1
-rw-r--r--drivers/acpi/acpi_processor.c8
-rw-r--r--drivers/acpi/acpica/acglobal.h4
-rw-r--r--drivers/acpi/acpica/exfield.c104
-rw-r--r--drivers/acpi/acpica/tbutils.c7
-rw-r--r--drivers/acpi/battery.c329
-rw-r--r--drivers/acpi/blacklist.c21
-rw-r--r--drivers/acpi/bus.c5
-rw-r--r--drivers/acpi/cm_sbs.c105
-rw-r--r--drivers/acpi/ec.c21
-rw-r--r--drivers/acpi/thermal.c2
-rw-r--r--drivers/acpi/video.c16
-rw-r--r--drivers/ata/Kconfig7
-rw-r--r--drivers/ata/ahci.c50
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_imx.c179
-rw-r--r--drivers/ata/libahci.c7
-rw-r--r--drivers/ata/libata-core.c36
-rw-r--r--drivers/ata/pata_arasan_cf.c7
-rw-r--r--drivers/ata/pata_at91.c11
-rw-r--r--drivers/ata/pata_samsung_cf.c10
-rw-r--r--drivers/base/core.c33
-rw-r--r--drivers/base/dd.c21
-rw-r--r--drivers/base/platform.c7
-rw-r--r--drivers/base/topology.c3
-rw-r--r--drivers/block/floppy.c11
-rw-r--r--drivers/block/virtio_blk.c4
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/bus/mvebu-mbus.c22
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/agp/frontend.c1
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c10
-rw-r--r--drivers/char/ipmi/Kconfig12
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c2
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c5
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c239
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c145
-rw-r--r--drivers/char/pcmcia/Kconfig2
-rw-r--r--drivers/char/random.c7
-rw-r--r--drivers/char/tpm/tpm_ppi.c8
-rw-r--r--drivers/char/ttyprintk.c15
-rw-r--r--drivers/clk/bcm/clk-kona-setup.c33
-rw-r--r--drivers/clk/bcm/clk-kona.c64
-rw-r--r--drivers/clk/bcm/clk-kona.h28
-rw-r--r--drivers/clk/clk-divider.c37
-rw-r--r--drivers/clk/clk.c74
-rw-r--r--drivers/clk/shmobile/clk-mstp.c9
-rw-r--r--drivers/clk/socfpga/clk-pll.c7
-rw-r--r--drivers/clk/socfpga/clk.c23
-rw-r--r--drivers/clk/st/clkgen-pll.c4
-rw-r--r--drivers/clk/tegra/clk-pll.c66
-rw-r--r--drivers/clk/tegra/clk-tegra124.c3
-rw-r--r--drivers/clk/versatile/clk-vexpress-osc.c4
-rw-r--r--drivers/clocksource/arm_arch_timer.c6
-rw-r--r--drivers/clocksource/exynos_mct.c12
-rw-r--r--drivers/clocksource/tcb_clksrc.c8
-rw-r--r--drivers/clocksource/timer-marco.c2
-rw-r--r--drivers/clocksource/zevio-timer.c7
-rw-r--r--drivers/connector/cn_proc.c2
-rw-r--r--drivers/cpufreq/Kconfig.arm6
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c16
-rw-r--r--drivers/cpufreq/cpufreq_governor.c6
-rw-r--r--drivers/cpufreq/intel_pstate.c34
-rw-r--r--drivers/cpufreq/longhaul.c36
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c4
-rw-r--r--drivers/cpufreq/powernow-k6.c23
-rw-r--r--drivers/cpufreq/powernow-k7.c4
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c1
-rw-r--r--drivers/cpufreq/ppc-corenet-cpufreq.c5
-rw-r--r--drivers/cpufreq/unicore2-cpufreq.c4
-rw-r--r--drivers/crypto/caam/error.c10
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/dmaengine.c2
-rw-r--r--drivers/dma/dw/core.c11
-rw-r--r--drivers/dma/edma.c6
-rw-r--r--drivers/dma/fsl-edma.c12
-rw-r--r--drivers/dma/mv_xor.c8
-rw-r--r--drivers/dma/sa11x0-dma.c4
-rw-r--r--drivers/dma/sirf-dma.c2
-rw-r--r--drivers/firewire/core.h4
-rw-r--r--drivers/firewire/ohci.c2
-rw-r--r--drivers/firmware/iscsi_ibft.c1
-rw-r--r--drivers/gpio/gpio-ich.c4
-rw-r--r--drivers/gpio/gpio-mcp23s08.c12
-rw-r--r--drivers/gpio/gpio-spear-spics.c4
-rw-r--r--drivers/gpio/gpiolib-acpi.c12
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/ast/ast_post.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs.h3
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c44
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c1
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c42
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c3
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c370
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c55
-rw-r--r--drivers/gpu/drm/drm_mm.c2
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c33
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c426
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h42
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c367
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c218
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c132
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c36
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c18
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c62
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c60
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c76
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c19
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c9
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c12
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c13
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c50
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c54
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h1
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c4
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c9
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c21
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c4
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c100
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c32
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c14
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c16
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c54
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c175
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c33
-rw-r--r--drivers/gpu/drm/radeon/cik.c176
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c3
-rw-r--r--drivers/gpu/drm/radeon/cikd.h9
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c14
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c28
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c1
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c135
-rw-r--r--drivers/gpu/drm/radeon/r600.c14
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c1
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c35
-rw-r--r--drivers/gpu/drm/radeon/radeon.h16
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c44
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c21
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c111
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c60
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c68
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h12
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c40
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c94
-rw-r--r--drivers/gpu/drm/radeon/radeon_ucode.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c133
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c8
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c1
-rw-r--r--drivers/gpu/drm/radeon/si.c65
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c1
-rw-r--r--drivers/gpu/drm/radeon/sid.h4
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c10
-rw-r--r--drivers/gpu/drm/tegra/dc.c2
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c44
-rw-r--r--drivers/gpu/drm/tegra/dpaux.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c22
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c4
-rw-r--r--drivers/hid/hid-core.c10
-rw-r--r--drivers/hid/hid-ids.h12
-rw-r--r--drivers/hid/hid-microsoft.c4
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-sensor-hub.c10
-rw-r--r--drivers/hid/hid-sony.c2
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hv/connection.c5
-rw-r--r--drivers/hwmon/Kconfig2
-rw-r--r--drivers/hwmon/coretemp.c4
-rw-r--r--drivers/hwmon/emc1403.c10
-rw-r--r--drivers/hwmon/ltc2945.c6
-rw-r--r--drivers/hwmon/ntc_thermistor.c15
-rw-r--r--drivers/hwmon/vexpress.c83
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c3
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c2
-rw-r--r--drivers/i2c/busses/i2c-qup.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c9
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c2
-rw-r--r--drivers/idle/intel_idle.c3
-rw-r--r--drivers/iio/adc/Kconfig4
-rw-r--r--drivers/iio/adc/at91_adc.c33
-rw-r--r--drivers/iio/adc/exynos_adc.c6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c7
-rw-r--r--drivers/iio/industrialio-buffer.c6
-rw-r--r--drivers/iio/light/cm32181.c1
-rw-r--r--drivers/iio/light/cm36651.c22
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig6
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c128
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c24
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c41
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h3
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c81
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h72
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h14
-rw-r--r--drivers/infiniband/hw/mlx4/main.c67
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h3
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c8
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c55
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c38
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h2
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/atkbd.c29
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c7
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c7
-rw-r--r--drivers/input/misc/bma150.c4
-rw-r--r--drivers/input/misc/da9055_onkey.c1
-rw-r--r--drivers/input/misc/soc_button_array.c1
-rw-r--r--drivers/input/mouse/Kconfig2
-rw-r--r--drivers/input/mouse/elantech.c27
-rw-r--r--drivers/input/mouse/elantech.h1
-rw-r--r--drivers/input/mouse/synaptics.c145
-rw-r--r--drivers/input/serio/ambakmi.c3
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h15
-rw-r--r--drivers/input/serio/i8042.c6
-rw-r--r--drivers/input/serio/serio.c14
-rw-r--r--drivers/input/tablet/wacom_sys.c246
-rw-r--r--drivers/input/tablet/wacom_wac.c29
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/ads7846.c2
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/amd_iommu_init.c2
-rw-r--r--drivers/iommu/amd_iommu_v2.c2
-rw-r--r--drivers/iommu/arm-smmu.c4
-rw-r--r--drivers/iommu/dmar.c3
-rw-r--r--drivers/iommu/intel-iommu.c10
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c54
-rw-r--r--drivers/irqchip/irq-crossbar.c2
-rw-r--r--drivers/irqchip/irq-gic.c8
-rw-r--r--drivers/irqchip/irq-vic.c6
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/isdn/hisax/icc.c2
-rw-r--r--drivers/isdn/icn/icn.c11
-rw-r--r--drivers/mcb/mcb-parse.c1
-rw-r--r--drivers/md/dm-cache-target.c3
-rw-r--r--drivers/md/dm-crypt.c61
-rw-r--r--drivers/md/dm-mpath.c12
-rw-r--r--drivers/md/dm-thin.c106
-rw-r--r--drivers/md/dm-verity.c15
-rw-r--r--drivers/md/md.c3
-rw-r--r--drivers/md/raid10.c13
-rw-r--r--drivers/md/raid5.c3
-rw-r--r--drivers/media/i2c/ov7670.c2
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c2
-rw-r--r--drivers/media/media-device.c1
-rw-r--r--drivers/media/platform/Kconfig2
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c16
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c34
-rw-r--r--drivers/media/platform/davinci/vpif_display.c35
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c2
-rw-r--r--drivers/media/tuners/fc2580.c6
-rw-r--r--drivers/media/tuners/fc2580_priv.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/Makefile1
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c48
-rw-r--r--drivers/media/usb/gspca/sonixb.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c12
-rw-r--r--drivers/memory/mvebu-devbus.c15
-rw-r--r--drivers/mfd/rtsx_pcr.c132
-rw-r--r--drivers/misc/Kconfig4
-rw-r--r--drivers/misc/genwqe/card_base.h58
-rw-r--r--drivers/misc/genwqe/card_ddcb.c6
-rw-r--r--drivers/misc/genwqe/card_dev.c44
-rw-r--r--drivers/misc/genwqe/card_utils.c170
-rw-r--r--drivers/misc/genwqe/genwqe_driver.h2
-rw-r--r--drivers/misc/mei/hw-me-regs.h5
-rw-r--r--drivers/misc/mei/interrupt.c3
-rw-r--r--drivers/misc/mei/main.c3
-rw-r--r--drivers/misc/mei/pci-me.c30
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c418
-rw-r--r--drivers/mtd/devices/spear_smi.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c6
-rw-r--r--drivers/mtd/ubi/block.c2
-rw-r--r--drivers/mtd/ubi/wl.c6
-rw-r--r--drivers/net/bonding/bond_alb.c54
-rw-r--r--drivers/net/bonding/bond_main.c134
-rw-r--r--drivers/net/bonding/bond_options.c1
-rw-r--r--drivers/net/bonding/bond_sysfs.c2
-rw-r--r--drivers/net/bonding/bonding.h1
-rw-r--r--drivers/net/can/c_can/c_can.c648
-rw-r--r--drivers/net/can/c_can/c_can.h23
-rw-r--r--drivers/net/can/c_can/c_can_pci.c9
-rw-r--r--drivers/net/can/c_can/c_can_platform.c2
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/sja1000/peak_pci.c14
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c16
-rw-r--r--drivers/net/can/slcan.c6
-rw-r--r--drivers/net/ethernet/Kconfig12
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/altera/Kconfig1
-rw-r--r--drivers/net/ethernet/altera/Makefile1
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.c118
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.h3
-rw-r--r--drivers/net/ethernet/altera/altera_msgdmahw.h13
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.c338
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.h3
-rw-r--r--drivers/net/ethernet/altera/altera_sgdmahw.h26
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h53
-rw-r--r--drivers/net/ethernet/altera/altera_tse_ethtool.c116
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c206
-rw-r--r--drivers/net/ethernet/altera/altera_utils.c20
-rw-r--r--drivers/net/ethernet/altera/altera_utils.h8
-rw-r--r--drivers/net/ethernet/arc/emac.h2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c82
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c60
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c4
-rw-r--r--drivers/net/ethernet/cadence/Kconfig6
-rw-r--r--drivers/net/ethernet/cadence/macb.c35
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/ec_bhf.c706
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c23
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c223
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c71
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c13
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c40
-rw-r--r--drivers/net/ethernet/jme.c53
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c5
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c188
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c77
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h16
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c21
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c99
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c31
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c22
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c11
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h42
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c13
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c31
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c14
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c12
-rw-r--r--drivers/net/ethernet/sfc/efx.c19
-rw-r--r--drivers/net/ethernet/sfc/enum.h23
-rw-r--r--drivers/net/ethernet/sfc/falcon.c4
-rw-r--r--drivers/net/ethernet/sfc/farch.c22
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c55
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h13
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h4
-rw-r--r--drivers/net/ethernet/sfc/nic.c14
-rw-r--r--drivers/net/ethernet/sfc/nic.h1
-rw-r--r--drivers/net/ethernet/sfc/siena.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c4
-rw-r--r--drivers/net/ethernet/sun/cassini.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c17
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/ieee802154/at86rf230.c10
-rw-r--r--drivers/net/macvlan.c21
-rw-r--r--drivers/net/macvtap.c9
-rw-r--r--drivers/net/phy/mdio-gpio.c72
-rw-r--r--drivers/net/phy/micrel.c6
-rw-r--r--drivers/net/phy/phy.c27
-rw-r--r--drivers/net/phy/phy_device.c4
-rw-r--r--drivers/net/slip/slip.c6
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/usb/cdc_mbim.c57
-rw-r--r--drivers/net/usb/cdc_ncm.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c28
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vxlan.c42
-rw-r--r--drivers/net/wan/cosa.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c14
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c2
-rw-r--r--drivers/net/wireless/cw1200/debug.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/coex.c24
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h8
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c10
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c261
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c55
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sf.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c19
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c10
-rw-r--r--drivers/net/wireless/mwifiex/main.c12
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c21
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c6
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.h20
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c5
-rw-r--r--drivers/net/xen-netback/common.h2
-rw-r--r--drivers/net/xen-netback/interface.c30
-rw-r--r--drivers/net/xen-netback/netback.c102
-rw-r--r--drivers/of/base.c14
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/of/irq.c28
-rw-r--r--drivers/of/platform.c4
-rw-r--r--drivers/of/selftest.c32
-rw-r--r--drivers/of/testcase-data/tests-interrupts.dtsi13
-rw-r--r--drivers/pci/host/pci-mvebu.c92
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c8
-rw-r--r--drivers/pci/host/pci-tegra.c7
-rw-r--r--drivers/pci/host/pcie-designware.c20
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c4
-rw-r--r--drivers/pci/pci.c5
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile9
-rw-r--r--drivers/phy/phy-core.c3
-rw-r--r--drivers/pinctrl/Kconfig8
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c17
-rw-r--r--drivers/pinctrl/pinctrl-bcm281xx.c1461
-rw-r--r--drivers/pinctrl/pinctrl-capri.c1454
-rw-r--r--drivers/pinctrl/pinctrl-msm.c6
-rw-r--r--drivers/pinctrl/pinctrl-msm.h1
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c1
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c48
-rw-r--r--drivers/pinctrl/pinctrl-single.c13
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c3
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c3
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c23
-rw-r--r--drivers/pnp/pnpacpi/core.c44
-rw-r--r--drivers/pnp/pnpbios/bioscalls.c2
-rw-r--r--drivers/pnp/quirks.c79
-rw-r--r--drivers/power/reset/vexpress-poweroff.c19
-rw-r--r--drivers/ptp/Kconfig3
-rw-r--r--drivers/pwm/pwm-spear.c4
-rw-r--r--drivers/regulator/pbias-regulator.c76
-rw-r--r--drivers/rtc/rtc-hym8563.c3
-rw-r--r--drivers/rtc/rtc-pcf8523.c4
-rw-r--r--drivers/s390/char/sclp.c2
-rw-r--r--drivers/s390/char/sclp_cmd.c2
-rw-r--r--drivers/s390/char/sclp_vt220.c14
-rw-r--r--drivers/s390/cio/chsc.c22
-rw-r--r--drivers/scsi/hpsa.c8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c1
-rw-r--r--drivers/scsi/scsi_error.c12
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/scsi_netlink.c2
-rw-r--r--drivers/scsi/scsi_transport_sas.c3
-rw-r--r--drivers/scsi/virtio_scsi.c6
-rw-r--r--drivers/sh/Makefile14
-rw-r--r--drivers/sh/pm_runtime.c20
-rw-r--r--drivers/spi/Kconfig13
-rw-r--r--drivers/spi/Makefile3
-rw-r--r--drivers/spi/spi-adi-v3.c (renamed from drivers/spi/spi-bfin-v3.c)433
-rw-r--r--drivers/spi/spi-atmel.c12
-rw-r--r--drivers/spi/spi-bfin5xx.c1
-rw-r--r--drivers/spi/spi-cadence.c673
-rw-r--r--drivers/spi/spi-dw-mmio.c22
-rw-r--r--drivers/spi/spi-dw.c197
-rw-r--r--drivers/spi/spi-dw.h24
-rw-r--r--drivers/spi/spi-fsl-dspi.c2
-rw-r--r--drivers/spi/spi-fsl-espi.c40
-rw-r--r--drivers/spi/spi-fsl-lib.c6
-rw-r--r--drivers/spi/spi-fsl-lib.h1
-rw-r--r--drivers/spi/spi-fsl-spi.c2
-rw-r--r--drivers/spi/spi-gpio.c2
-rw-r--r--drivers/spi/spi-pl022.c13
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c16
-rw-r--r--drivers/spi/spi-pxa2xx.c10
-rw-r--r--drivers/spi/spi-qup.c8
-rw-r--r--drivers/spi/spi-rspi.c601
-rw-r--r--drivers/spi/spi-s3c24xx.c14
-rw-r--r--drivers/spi/spi-s3c64xx.c5
-rw-r--r--drivers/spi/spi-sh-hspi.c4
-rw-r--r--drivers/spi/spi-sh-msiof.c4
-rw-r--r--drivers/spi/spi-sirf.c321
-rw-r--r--drivers/spi/spi-tegra114.c2
-rw-r--r--drivers/spi/spi-tegra20-sflash.c2
-rw-r--r--drivers/spi/spi-tegra20-slink.c2
-rw-r--r--drivers/spi/spi-tle62x0.c4
-rw-r--r--drivers/spi/spi-topcliff-pch.c5
-rw-r--r--drivers/spi/spi.c146
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/comedi/comedi_buf.c37
-rw-r--r--drivers/staging/comedi/comedi_fops.c18
-rw-r--r--drivers/staging/comedi/comedi_internal.h2
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c9
-rw-r--r--drivers/staging/goldfish/goldfish_audio.c1
-rw-r--r--drivers/staging/gs_fpgaboot/Makefile2
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c1
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c2
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c4
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c7
-rw-r--r--drivers/staging/imx-drm/imx-tve.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c13
-rw-r--r--drivers/staging/media/sn9c102/sn9c102_devtable.h2
-rw-r--r--drivers/staging/rtl8187se/Kconfig10
-rw-r--r--drivers/staging/rtl8187se/Makefile38
-rw-r--r--drivers/staging/rtl8187se/Module.symvers0
-rw-r--r--drivers/staging/rtl8187se/TODO13
-rw-r--r--drivers/staging/rtl8187se/ieee80211/dot11d.c189
-rw-r--r--drivers/staging/rtl8187se/ieee80211/dot11d.h71
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h1496
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c240
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.h86
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c455
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c740
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c277
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_module.c203
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c1486
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c2711
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c567
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c591
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c713
-rw-r--r--drivers/staging/rtl8187se/r8180.h640
-rw-r--r--drivers/staging/rtl8187se/r8180_93cx6.h54
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c3775
-rw-r--r--drivers/staging/rtl8187se/r8180_dm.c1139
-rw-r--r--drivers/staging/rtl8187se/r8180_dm.h23
-rw-r--r--drivers/staging/rtl8187se/r8180_hw.h588
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225.h34
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225z2.c811
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.c1409
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.h21
-rw-r--r--drivers/staging/rtl8187se/r8185b_init.c1464
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c19
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c74
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c13
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ieee80211.c46
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme_ext.c2
-rw-r--r--drivers/staging/rtl8723au/core/rtw_p2p.c4
-rw-r--r--drivers/staging/rtl8723au/core/rtw_wlan_util.c4
-rw-r--r--drivers/staging/rtl8723au/os_dep/os_intfs.c2
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_ops_linux.c2
-rw-r--r--drivers/staging/rtl8821ae/base.c10
-rw-r--r--drivers/staging/speakup/main.c18
-rw-r--r--drivers/staging/unisys/uislib/uislib.c4
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset.h4
-rw-r--r--drivers/staging/unisys/visorchipset/visorchipset_main.c8
-rw-r--r--drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c7
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c2
-rw-r--r--drivers/staging/vme/devices/vme_user.c9
-rw-r--r--drivers/staging/xgifb/vb_def.h2
-rw-r--r--drivers/staging/xgifb/vb_struct.h2
-rw-r--r--drivers/staging/xgifb/vgatypes.h4
-rw-r--r--drivers/target/iscsi/iscsi_target.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c28
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c1
-rw-r--r--drivers/target/target_core_device.c12
-rw-r--r--drivers/target/target_core_transport.c2
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c8
-rw-r--r--drivers/tty/hvc/hvc_console.c2
-rw-r--r--drivers/tty/n_tty.c4
-rw-r--r--drivers/tty/serial/8250/8250_core.c4
-rw-r--r--drivers/tty/serial/8250/8250_dma.c9
-rw-r--r--drivers/tty/serial/Kconfig1
-rw-r--r--drivers/tty/serial/amba-pl011.c8
-rw-r--r--drivers/tty/serial/clps711x.c20
-rw-r--r--drivers/tty/serial/efm32-uart.c3
-rw-r--r--drivers/tty/serial/omap-serial.c30
-rw-r--r--drivers/tty/serial/samsung.c23
-rw-r--r--drivers/tty/serial/serial_core.c44
-rw-r--r--drivers/tty/serial/st-asc.c4
-rw-r--r--drivers/tty/tty_buffer.c17
-rw-r--r--drivers/tty/tty_io.c4
-rw-r--r--drivers/usb/chipidea/core.c37
-rw-r--r--drivers/usb/class/cdc-acm.c34
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/gadget.c12
-rw-r--r--drivers/usb/gadget/at91_udc.c10
-rw-r--r--drivers/usb/gadget/f_fs.c7
-rw-r--r--drivers/usb/gadget/f_rndis.c2
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c6
-rw-r--r--drivers/usb/gadget/inode.c1
-rw-r--r--drivers/usb/gadget/rndis.c1
-rw-r--r--drivers/usb/gadget/u_ether.c101
-rw-r--r--drivers/usb/gadget/zero.c2
-rw-r--r--drivers/usb/host/ehci-exynos.c2
-rw-r--r--drivers/usb/host/ehci-fsl.c3
-rw-r--r--drivers/usb/host/ehci-platform.c2
-rw-r--r--drivers/usb/host/ehci-tegra.c23
-rw-r--r--drivers/usb/host/ohci-hub.c18
-rw-r--r--drivers/usb/host/ohci-jz4740.c6
-rw-r--r--drivers/usb/host/ohci-pci.c1
-rw-r--r--drivers/usb/host/ohci.h2
-rw-r--r--drivers/usb/host/xhci-pci.c6
-rw-r--r--drivers/usb/host/xhci-ring.c67
-rw-r--r--drivers/usb/host/xhci.c7
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/musb/musb_dsps.c5
-rw-r--r--drivers/usb/musb/omap2430.c8
-rw-r--r--drivers/usb/phy/phy-am335x-control.c9
-rw-r--r--drivers/usb/phy/phy-fsm-usb.c9
-rw-r--r--drivers/usb/phy/phy.c3
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c33
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h37
-rw-r--r--drivers/usb/serial/io_ti.c50
-rw-r--r--drivers/usb/serial/option.c83
-rw-r--r--drivers/usb/serial/pl2303.c3
-rw-r--r--drivers/usb/serial/pl2303.h5
-rw-r--r--drivers/usb/serial/qcserial.c24
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/serial/usb-serial.c4
-rw-r--r--drivers/usb/serial/usb_wwan.c9
-rw-r--r--drivers/usb/storage/shuttle_usbat.c2
-rw-r--r--drivers/usb/storage/uas.c13
-rw-r--r--drivers/usb/storage/unusual_devs.h14
-rw-r--r--drivers/usb/usb-common.c2
-rw-r--r--drivers/usb/wusbcore/mmc.c2
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c4
-rw-r--r--drivers/uwb/drp.c14
-rw-r--r--drivers/video/Kconfig2478
-rw-r--r--drivers/video/Makefile166
-rw-r--r--drivers/video/console/sticon.c2
-rw-r--r--drivers/video/console/sticore.c2
-rw-r--r--drivers/video/fbdev/68328fb.c (renamed from drivers/video/68328fb.c)0
-rw-r--r--drivers/video/fbdev/Kconfig2474
-rw-r--r--drivers/video/fbdev/Makefile152
-rw-r--r--drivers/video/fbdev/acornfb.c (renamed from drivers/video/acornfb.c)0
-rw-r--r--drivers/video/fbdev/acornfb.h (renamed from drivers/video/acornfb.h)0
-rw-r--r--drivers/video/fbdev/amba-clcd.c (renamed from drivers/video/amba-clcd.c)0
-rw-r--r--drivers/video/fbdev/amifb.c (renamed from drivers/video/amifb.c)0
-rw-r--r--drivers/video/fbdev/arcfb.c (renamed from drivers/video/arcfb.c)0
-rw-r--r--drivers/video/fbdev/arkfb.c (renamed from drivers/video/arkfb.c)0
-rw-r--r--drivers/video/fbdev/asiliantfb.c (renamed from drivers/video/asiliantfb.c)0
-rw-r--r--drivers/video/fbdev/atafb.c (renamed from drivers/video/atafb.c)0
-rw-r--r--drivers/video/fbdev/atafb.h (renamed from drivers/video/atafb.h)0
-rw-r--r--drivers/video/fbdev/atafb_iplan2p2.c (renamed from drivers/video/atafb_iplan2p2.c)0
-rw-r--r--drivers/video/fbdev/atafb_iplan2p4.c (renamed from drivers/video/atafb_iplan2p4.c)0
-rw-r--r--drivers/video/fbdev/atafb_iplan2p8.c (renamed from drivers/video/atafb_iplan2p8.c)0
-rw-r--r--drivers/video/fbdev/atafb_mfb.c (renamed from drivers/video/atafb_mfb.c)0
-rw-r--r--drivers/video/fbdev/atafb_utils.h (renamed from drivers/video/atafb_utils.h)0
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c (renamed from drivers/video/atmel_lcdfb.c)0
-rw-r--r--drivers/video/fbdev/aty/Makefile (renamed from drivers/video/aty/Makefile)0
-rw-r--r--drivers/video/fbdev/aty/ati_ids.h (renamed from drivers/video/aty/ati_ids.h)0
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c (renamed from drivers/video/aty/aty128fb.c)0
-rw-r--r--drivers/video/fbdev/aty/atyfb.h (renamed from drivers/video/aty/atyfb.h)0
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c (renamed from drivers/video/aty/atyfb_base.c)0
-rw-r--r--drivers/video/fbdev/aty/mach64_accel.c (renamed from drivers/video/aty/mach64_accel.c)0
-rw-r--r--drivers/video/fbdev/aty/mach64_ct.c (renamed from drivers/video/aty/mach64_ct.c)0
-rw-r--r--drivers/video/fbdev/aty/mach64_cursor.c (renamed from drivers/video/aty/mach64_cursor.c)2
-rw-r--r--drivers/video/fbdev/aty/mach64_gx.c (renamed from drivers/video/aty/mach64_gx.c)0
-rw-r--r--drivers/video/fbdev/aty/radeon_accel.c (renamed from drivers/video/aty/radeon_accel.c)0
-rw-r--r--drivers/video/fbdev/aty/radeon_backlight.c (renamed from drivers/video/aty/radeon_backlight.c)0
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c (renamed from drivers/video/aty/radeon_base.c)0
-rw-r--r--drivers/video/fbdev/aty/radeon_i2c.c (renamed from drivers/video/aty/radeon_i2c.c)0
-rw-r--r--drivers/video/fbdev/aty/radeon_monitor.c (renamed from drivers/video/aty/radeon_monitor.c)0
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c (renamed from drivers/video/aty/radeon_pm.c)0
-rw-r--r--drivers/video/fbdev/aty/radeonfb.h (renamed from drivers/video/aty/radeonfb.h)0
-rw-r--r--drivers/video/fbdev/au1100fb.c (renamed from drivers/video/au1100fb.c)0
-rw-r--r--drivers/video/fbdev/au1100fb.h (renamed from drivers/video/au1100fb.h)0
-rw-r--r--drivers/video/fbdev/au1200fb.c (renamed from drivers/video/au1200fb.c)0
-rw-r--r--drivers/video/fbdev/au1200fb.h (renamed from drivers/video/au1200fb.h)0
-rw-r--r--drivers/video/fbdev/auo_k1900fb.c (renamed from drivers/video/auo_k1900fb.c)0
-rw-r--r--drivers/video/fbdev/auo_k1901fb.c (renamed from drivers/video/auo_k1901fb.c)0
-rw-r--r--drivers/video/fbdev/auo_k190x.c (renamed from drivers/video/auo_k190x.c)0
-rw-r--r--drivers/video/fbdev/auo_k190x.h (renamed from drivers/video/auo_k190x.h)0
-rw-r--r--drivers/video/fbdev/bf537-lq035.c (renamed from drivers/video/bf537-lq035.c)0
-rw-r--r--drivers/video/fbdev/bf54x-lq043fb.c (renamed from drivers/video/bf54x-lq043fb.c)2
-rw-r--r--drivers/video/fbdev/bfin-lq035q1-fb.c (renamed from drivers/video/bfin-lq035q1-fb.c)0
-rw-r--r--drivers/video/fbdev/bfin-t350mcqb-fb.c (renamed from drivers/video/bfin-t350mcqb-fb.c)0
-rw-r--r--drivers/video/fbdev/bfin_adv7393fb.c (renamed from drivers/video/bfin_adv7393fb.c)0
-rw-r--r--drivers/video/fbdev/bfin_adv7393fb.h (renamed from drivers/video/bfin_adv7393fb.h)0
-rw-r--r--drivers/video/fbdev/broadsheetfb.c (renamed from drivers/video/broadsheetfb.c)0
-rw-r--r--drivers/video/fbdev/bt431.h (renamed from drivers/video/bt431.h)0
-rw-r--r--drivers/video/fbdev/bt455.h (renamed from drivers/video/bt455.h)0
-rw-r--r--drivers/video/fbdev/bw2.c (renamed from drivers/video/bw2.c)0
-rw-r--r--drivers/video/fbdev/c2p.h (renamed from drivers/video/c2p.h)0
-rw-r--r--drivers/video/fbdev/c2p_core.h (renamed from drivers/video/c2p_core.h)0
-rw-r--r--drivers/video/fbdev/c2p_iplan2.c (renamed from drivers/video/c2p_iplan2.c)0
-rw-r--r--drivers/video/fbdev/c2p_planar.c (renamed from drivers/video/c2p_planar.c)0
-rw-r--r--drivers/video/fbdev/carminefb.c (renamed from drivers/video/carminefb.c)0
-rw-r--r--drivers/video/fbdev/carminefb.h (renamed from drivers/video/carminefb.h)0
-rw-r--r--drivers/video/fbdev/carminefb_regs.h (renamed from drivers/video/carminefb_regs.h)0
-rw-r--r--drivers/video/fbdev/cg14.c (renamed from drivers/video/cg14.c)0
-rw-r--r--drivers/video/fbdev/cg3.c (renamed from drivers/video/cg3.c)0
-rw-r--r--drivers/video/fbdev/cg6.c (renamed from drivers/video/cg6.c)0
-rw-r--r--drivers/video/fbdev/chipsfb.c (renamed from drivers/video/chipsfb.c)0
-rw-r--r--drivers/video/fbdev/cirrusfb.c (renamed from drivers/video/cirrusfb.c)0
-rw-r--r--drivers/video/fbdev/clps711xfb.c (renamed from drivers/video/clps711xfb.c)0
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c (renamed from drivers/video/cobalt_lcdfb.c)0
-rw-r--r--drivers/video/fbdev/controlfb.c (renamed from drivers/video/controlfb.c)0
-rw-r--r--drivers/video/fbdev/controlfb.h (renamed from drivers/video/controlfb.h)0
-rw-r--r--drivers/video/fbdev/core/Makefile16
-rw-r--r--drivers/video/fbdev/core/cfbcopyarea.c (renamed from drivers/video/cfbcopyarea.c)0
-rw-r--r--drivers/video/fbdev/core/cfbfillrect.c (renamed from drivers/video/cfbfillrect.c)0
-rw-r--r--drivers/video/fbdev/core/cfbimgblt.c (renamed from drivers/video/cfbimgblt.c)0
-rw-r--r--drivers/video/fbdev/core/fb_ddc.c (renamed from drivers/video/fb_ddc.c)2
-rw-r--r--drivers/video/fbdev/core/fb_defio.c (renamed from drivers/video/fb_defio.c)0
-rw-r--r--drivers/video/fbdev/core/fb_draw.h (renamed from drivers/video/fb_draw.h)0
-rw-r--r--drivers/video/fbdev/core/fb_notify.c (renamed from drivers/video/fb_notify.c)0
-rw-r--r--drivers/video/fbdev/core/fb_sys_fops.c (renamed from drivers/video/fb_sys_fops.c)0
-rw-r--r--drivers/video/fbdev/core/fbcmap.c (renamed from drivers/video/fbcmap.c)0
-rw-r--r--drivers/video/fbdev/core/fbcvt.c (renamed from drivers/video/fbcvt.c)0
-rw-r--r--drivers/video/fbdev/core/fbmem.c (renamed from drivers/video/fbmem.c)0
-rw-r--r--drivers/video/fbdev/core/fbmon.c (renamed from drivers/video/fbmon.c)2
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c (renamed from drivers/video/fbsysfs.c)0
-rw-r--r--drivers/video/fbdev/core/modedb.c (renamed from drivers/video/modedb.c)0
-rw-r--r--drivers/video/fbdev/core/svgalib.c (renamed from drivers/video/svgalib.c)0
-rw-r--r--drivers/video/fbdev/core/syscopyarea.c (renamed from drivers/video/syscopyarea.c)0
-rw-r--r--drivers/video/fbdev/core/sysfillrect.c (renamed from drivers/video/sysfillrect.c)0
-rw-r--r--drivers/video/fbdev/core/sysimgblt.c (renamed from drivers/video/sysimgblt.c)0
-rw-r--r--drivers/video/fbdev/cyber2000fb.c (renamed from drivers/video/cyber2000fb.c)0
-rw-r--r--drivers/video/fbdev/cyber2000fb.h (renamed from drivers/video/cyber2000fb.h)0
-rw-r--r--drivers/video/fbdev/da8xx-fb.c (renamed from drivers/video/da8xx-fb.c)10
-rw-r--r--drivers/video/fbdev/dnfb.c (renamed from drivers/video/dnfb.c)0
-rw-r--r--drivers/video/fbdev/edid.h (renamed from drivers/video/edid.h)0
-rw-r--r--drivers/video/fbdev/efifb.c (renamed from drivers/video/efifb.c)0
-rw-r--r--drivers/video/fbdev/ep93xx-fb.c (renamed from drivers/video/ep93xx-fb.c)0
-rw-r--r--drivers/video/fbdev/exynos/Kconfig (renamed from drivers/video/exynos/Kconfig)0
-rw-r--r--drivers/video/fbdev/exynos/Makefile (renamed from drivers/video/exynos/Makefile)0
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi.c (renamed from drivers/video/exynos/exynos_mipi_dsi.c)0
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c (renamed from drivers/video/exynos/exynos_mipi_dsi_common.c)0
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h (renamed from drivers/video/exynos/exynos_mipi_dsi_common.h)0
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c (renamed from drivers/video/exynos/exynos_mipi_dsi_lowlevel.c)0
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h (renamed from drivers/video/exynos/exynos_mipi_dsi_lowlevel.h)0
-rw-r--r--drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h (renamed from drivers/video/exynos/exynos_mipi_dsi_regs.h)0
-rw-r--r--drivers/video/fbdev/exynos/s6e8ax0.c (renamed from drivers/video/exynos/s6e8ax0.c)0
-rw-r--r--drivers/video/fbdev/fb-puv3.c (renamed from drivers/video/fb-puv3.c)0
-rw-r--r--drivers/video/fbdev/ffb.c (renamed from drivers/video/ffb.c)0
-rw-r--r--drivers/video/fbdev/fm2fb.c (renamed from drivers/video/fm2fb.c)0
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c (renamed from drivers/video/fsl-diu-fb.c)0
-rw-r--r--drivers/video/fbdev/g364fb.c (renamed from drivers/video/g364fb.c)0
-rw-r--r--drivers/video/fbdev/gbefb.c (renamed from drivers/video/gbefb.c)0
-rw-r--r--drivers/video/fbdev/geode/Kconfig (renamed from drivers/video/geode/Kconfig)0
-rw-r--r--drivers/video/fbdev/geode/Makefile (renamed from drivers/video/geode/Makefile)0
-rw-r--r--drivers/video/fbdev/geode/display_gx.c (renamed from drivers/video/geode/display_gx.c)0
-rw-r--r--drivers/video/fbdev/geode/display_gx1.c (renamed from drivers/video/geode/display_gx1.c)0
-rw-r--r--drivers/video/fbdev/geode/display_gx1.h (renamed from drivers/video/geode/display_gx1.h)0
-rw-r--r--drivers/video/fbdev/geode/geodefb.h (renamed from drivers/video/geode/geodefb.h)0
-rw-r--r--drivers/video/fbdev/geode/gx1fb_core.c (renamed from drivers/video/geode/gx1fb_core.c)0
-rw-r--r--drivers/video/fbdev/geode/gxfb.h (renamed from drivers/video/geode/gxfb.h)0
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c (renamed from drivers/video/geode/gxfb_core.c)0
-rw-r--r--drivers/video/fbdev/geode/lxfb.h (renamed from drivers/video/geode/lxfb.h)0
-rw-r--r--drivers/video/fbdev/geode/lxfb_core.c (renamed from drivers/video/geode/lxfb_core.c)0
-rw-r--r--drivers/video/fbdev/geode/lxfb_ops.c (renamed from drivers/video/geode/lxfb_ops.c)0
-rw-r--r--drivers/video/fbdev/geode/suspend_gx.c (renamed from drivers/video/geode/suspend_gx.c)0
-rw-r--r--drivers/video/fbdev/geode/video_cs5530.c (renamed from drivers/video/geode/video_cs5530.c)0
-rw-r--r--drivers/video/fbdev/geode/video_cs5530.h (renamed from drivers/video/geode/video_cs5530.h)0
-rw-r--r--drivers/video/fbdev/geode/video_gx.c (renamed from drivers/video/geode/video_gx.c)0
-rw-r--r--drivers/video/fbdev/goldfishfb.c (renamed from drivers/video/goldfishfb.c)0
-rw-r--r--drivers/video/fbdev/grvga.c (renamed from drivers/video/grvga.c)0
-rw-r--r--drivers/video/fbdev/gxt4500.c (renamed from drivers/video/gxt4500.c)0
-rw-r--r--drivers/video/fbdev/hecubafb.c (renamed from drivers/video/hecubafb.c)0
-rw-r--r--drivers/video/fbdev/hgafb.c (renamed from drivers/video/hgafb.c)0
-rw-r--r--drivers/video/fbdev/hitfb.c (renamed from drivers/video/hitfb.c)0
-rw-r--r--drivers/video/fbdev/hpfb.c (renamed from drivers/video/hpfb.c)0
-rw-r--r--drivers/video/fbdev/hyperv_fb.c (renamed from drivers/video/hyperv_fb.c)0
-rw-r--r--drivers/video/fbdev/i740_reg.h (renamed from drivers/video/i740_reg.h)0
-rw-r--r--drivers/video/fbdev/i740fb.c (renamed from drivers/video/i740fb.c)0
-rw-r--r--drivers/video/fbdev/i810/Makefile (renamed from drivers/video/i810/Makefile)0
-rw-r--r--drivers/video/fbdev/i810/i810-i2c.c (renamed from drivers/video/i810/i810-i2c.c)0
-rw-r--r--drivers/video/fbdev/i810/i810.h (renamed from drivers/video/i810/i810.h)0
-rw-r--r--drivers/video/fbdev/i810/i810_accel.c (renamed from drivers/video/i810/i810_accel.c)0
-rw-r--r--drivers/video/fbdev/i810/i810_dvt.c (renamed from drivers/video/i810/i810_dvt.c)0
-rw-r--r--drivers/video/fbdev/i810/i810_gtf.c (renamed from drivers/video/i810/i810_gtf.c)0
-rw-r--r--drivers/video/fbdev/i810/i810_main.c (renamed from drivers/video/i810/i810_main.c)0
-rw-r--r--drivers/video/fbdev/i810/i810_main.h (renamed from drivers/video/i810/i810_main.h)0
-rw-r--r--drivers/video/fbdev/i810/i810_regs.h (renamed from drivers/video/i810/i810_regs.h)0
-rw-r--r--drivers/video/fbdev/igafb.c (renamed from drivers/video/igafb.c)0
-rw-r--r--drivers/video/fbdev/imsttfb.c (renamed from drivers/video/imsttfb.c)0
-rw-r--r--drivers/video/fbdev/imxfb.c (renamed from drivers/video/imxfb.c)0
-rw-r--r--drivers/video/fbdev/intelfb/Makefile (renamed from drivers/video/intelfb/Makefile)0
-rw-r--r--drivers/video/fbdev/intelfb/intelfb.h (renamed from drivers/video/intelfb/intelfb.h)0
-rw-r--r--drivers/video/fbdev/intelfb/intelfb_i2c.c (renamed from drivers/video/intelfb/intelfb_i2c.c)0
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c (renamed from drivers/video/intelfb/intelfbdrv.c)0
-rw-r--r--drivers/video/fbdev/intelfb/intelfbhw.c (renamed from drivers/video/intelfb/intelfbhw.c)0
-rw-r--r--drivers/video/fbdev/intelfb/intelfbhw.h (renamed from drivers/video/intelfb/intelfbhw.h)0
-rw-r--r--drivers/video/fbdev/jz4740_fb.c (renamed from drivers/video/jz4740_fb.c)0
-rw-r--r--drivers/video/fbdev/kyro/Makefile (renamed from drivers/video/kyro/Makefile)0
-rw-r--r--drivers/video/fbdev/kyro/STG4000InitDevice.c (renamed from drivers/video/kyro/STG4000InitDevice.c)0
-rw-r--r--drivers/video/fbdev/kyro/STG4000Interface.h (renamed from drivers/video/kyro/STG4000Interface.h)0
-rw-r--r--drivers/video/fbdev/kyro/STG4000OverlayDevice.c (renamed from drivers/video/kyro/STG4000OverlayDevice.c)0
-rw-r--r--drivers/video/fbdev/kyro/STG4000Ramdac.c (renamed from drivers/video/kyro/STG4000Ramdac.c)0
-rw-r--r--drivers/video/fbdev/kyro/STG4000Reg.h (renamed from drivers/video/kyro/STG4000Reg.h)0
-rw-r--r--drivers/video/fbdev/kyro/STG4000VTG.c (renamed from drivers/video/kyro/STG4000VTG.c)0
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c (renamed from drivers/video/kyro/fbdev.c)0
-rw-r--r--drivers/video/fbdev/leo.c (renamed from drivers/video/leo.c)0
-rw-r--r--drivers/video/fbdev/macfb.c (renamed from drivers/video/macfb.c)0
-rw-r--r--drivers/video/fbdev/macmodes.c (renamed from drivers/video/macmodes.c)0
-rw-r--r--drivers/video/fbdev/macmodes.h (renamed from drivers/video/macmodes.h)0
-rw-r--r--drivers/video/fbdev/matrox/Makefile (renamed from drivers/video/matrox/Makefile)0
-rw-r--r--drivers/video/fbdev/matrox/g450_pll.c (renamed from drivers/video/matrox/g450_pll.c)0
-rw-r--r--drivers/video/fbdev/matrox/g450_pll.h (renamed from drivers/video/matrox/g450_pll.h)0
-rw-r--r--drivers/video/fbdev/matrox/i2c-matroxfb.c (renamed from drivers/video/matrox/i2c-matroxfb.c)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_DAC1064.c (renamed from drivers/video/matrox/matroxfb_DAC1064.c)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_DAC1064.h (renamed from drivers/video/matrox/matroxfb_DAC1064.h)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_Ti3026.c (renamed from drivers/video/matrox/matroxfb_Ti3026.c)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_Ti3026.h (renamed from drivers/video/matrox/matroxfb_Ti3026.h)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_accel.c (renamed from drivers/video/matrox/matroxfb_accel.c)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_accel.h (renamed from drivers/video/matrox/matroxfb_accel.h)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c (renamed from drivers/video/matrox/matroxfb_base.c)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.h (renamed from drivers/video/matrox/matroxfb_base.h)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_crtc2.c (renamed from drivers/video/matrox/matroxfb_crtc2.c)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_crtc2.h (renamed from drivers/video/matrox/matroxfb_crtc2.h)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_g450.c (renamed from drivers/video/matrox/matroxfb_g450.c)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_g450.h (renamed from drivers/video/matrox/matroxfb_g450.h)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_maven.c (renamed from drivers/video/matrox/matroxfb_maven.c)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_maven.h (renamed from drivers/video/matrox/matroxfb_maven.h)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_misc.c (renamed from drivers/video/matrox/matroxfb_misc.c)0
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_misc.h (renamed from drivers/video/matrox/matroxfb_misc.h)0
-rw-r--r--drivers/video/fbdev/maxinefb.c (renamed from drivers/video/maxinefb.c)0
-rw-r--r--drivers/video/fbdev/mb862xx/Makefile (renamed from drivers/video/mb862xx/Makefile)0
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xx-i2c.c (renamed from drivers/video/mb862xx/mb862xx-i2c.c)0
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xx_reg.h (renamed from drivers/video/mb862xx/mb862xx_reg.h)0
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfb.h (renamed from drivers/video/mb862xx/mb862xxfb.h)0
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfb_accel.c (renamed from drivers/video/mb862xx/mb862xxfb_accel.c)0
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfb_accel.h (renamed from drivers/video/mb862xx/mb862xxfb_accel.h)0
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c (renamed from drivers/video/mb862xx/mb862xxfbdrv.c)0
-rw-r--r--drivers/video/fbdev/mbx/Makefile (renamed from drivers/video/mbx/Makefile)0
-rw-r--r--drivers/video/fbdev/mbx/mbxdebugfs.c (renamed from drivers/video/mbx/mbxdebugfs.c)0
-rw-r--r--drivers/video/fbdev/mbx/mbxfb.c (renamed from drivers/video/mbx/mbxfb.c)0
-rw-r--r--drivers/video/fbdev/mbx/reg_bits.h (renamed from drivers/video/mbx/reg_bits.h)0
-rw-r--r--drivers/video/fbdev/mbx/regs.h (renamed from drivers/video/mbx/regs.h)0
-rw-r--r--drivers/video/fbdev/metronomefb.c (renamed from drivers/video/metronomefb.c)0
-rw-r--r--drivers/video/fbdev/mmp/Kconfig (renamed from drivers/video/mmp/Kconfig)6
-rw-r--r--drivers/video/fbdev/mmp/Makefile (renamed from drivers/video/mmp/Makefile)0
-rw-r--r--drivers/video/fbdev/mmp/core.c (renamed from drivers/video/mmp/core.c)0
-rw-r--r--drivers/video/fbdev/mmp/fb/Kconfig (renamed from drivers/video/mmp/fb/Kconfig)0
-rw-r--r--drivers/video/fbdev/mmp/fb/Makefile (renamed from drivers/video/mmp/fb/Makefile)0
-rw-r--r--drivers/video/fbdev/mmp/fb/mmpfb.c (renamed from drivers/video/mmp/fb/mmpfb.c)0
-rw-r--r--drivers/video/fbdev/mmp/fb/mmpfb.h (renamed from drivers/video/mmp/fb/mmpfb.h)0
-rw-r--r--drivers/video/fbdev/mmp/hw/Kconfig (renamed from drivers/video/mmp/hw/Kconfig)0
-rw-r--r--drivers/video/fbdev/mmp/hw/Makefile (renamed from drivers/video/mmp/hw/Makefile)0
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c (renamed from drivers/video/mmp/hw/mmp_ctrl.c)0
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.h (renamed from drivers/video/mmp/hw/mmp_ctrl.h)0
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_spi.c (renamed from drivers/video/mmp/hw/mmp_spi.c)0
-rw-r--r--drivers/video/fbdev/mmp/panel/Kconfig (renamed from drivers/video/mmp/panel/Kconfig)0
-rw-r--r--drivers/video/fbdev/mmp/panel/Makefile (renamed from drivers/video/mmp/panel/Makefile)0
-rw-r--r--drivers/video/fbdev/mmp/panel/tpo_tj032md01bw.c (renamed from drivers/video/mmp/panel/tpo_tj032md01bw.c)0
-rw-r--r--drivers/video/fbdev/msm/Makefile (renamed from drivers/video/msm/Makefile)0
-rw-r--r--drivers/video/fbdev/msm/mddi.c (renamed from drivers/video/msm/mddi.c)0
-rw-r--r--drivers/video/fbdev/msm/mddi_client_dummy.c (renamed from drivers/video/msm/mddi_client_dummy.c)0
-rw-r--r--drivers/video/fbdev/msm/mddi_client_nt35399.c (renamed from drivers/video/msm/mddi_client_nt35399.c)0
-rw-r--r--drivers/video/fbdev/msm/mddi_client_toshiba.c (renamed from drivers/video/msm/mddi_client_toshiba.c)0
-rw-r--r--drivers/video/fbdev/msm/mddi_hw.h (renamed from drivers/video/msm/mddi_hw.h)0
-rw-r--r--drivers/video/fbdev/msm/mdp.c (renamed from drivers/video/msm/mdp.c)0
-rw-r--r--drivers/video/fbdev/msm/mdp_csc_table.h (renamed from drivers/video/msm/mdp_csc_table.h)0
-rw-r--r--drivers/video/fbdev/msm/mdp_hw.h (renamed from drivers/video/msm/mdp_hw.h)0
-rw-r--r--drivers/video/fbdev/msm/mdp_ppp.c (renamed from drivers/video/msm/mdp_ppp.c)0
-rw-r--r--drivers/video/fbdev/msm/mdp_scale_tables.c (renamed from drivers/video/msm/mdp_scale_tables.c)0
-rw-r--r--drivers/video/fbdev/msm/mdp_scale_tables.h (renamed from drivers/video/msm/mdp_scale_tables.h)0
-rw-r--r--drivers/video/fbdev/msm/msm_fb.c (renamed from drivers/video/msm/msm_fb.c)0
-rw-r--r--drivers/video/fbdev/mx3fb.c (renamed from drivers/video/mx3fb.c)0
-rw-r--r--drivers/video/fbdev/mxsfb.c (renamed from drivers/video/mxsfb.c)0
-rw-r--r--drivers/video/fbdev/n411.c (renamed from drivers/video/n411.c)0
-rw-r--r--drivers/video/fbdev/neofb.c (renamed from drivers/video/neofb.c)0
-rw-r--r--drivers/video/fbdev/nuc900fb.c (renamed from drivers/video/nuc900fb.c)0
-rw-r--r--drivers/video/fbdev/nuc900fb.h (renamed from drivers/video/nuc900fb.h)0
-rw-r--r--drivers/video/fbdev/nvidia/Makefile (renamed from drivers/video/nvidia/Makefile)0
-rw-r--r--drivers/video/fbdev/nvidia/nv_accel.c (renamed from drivers/video/nvidia/nv_accel.c)0
-rw-r--r--drivers/video/fbdev/nvidia/nv_backlight.c (renamed from drivers/video/nvidia/nv_backlight.c)0
-rw-r--r--drivers/video/fbdev/nvidia/nv_dma.h (renamed from drivers/video/nvidia/nv_dma.h)0
-rw-r--r--drivers/video/fbdev/nvidia/nv_hw.c (renamed from drivers/video/nvidia/nv_hw.c)0
-rw-r--r--drivers/video/fbdev/nvidia/nv_i2c.c (renamed from drivers/video/nvidia/nv_i2c.c)0
-rw-r--r--drivers/video/fbdev/nvidia/nv_local.h (renamed from drivers/video/nvidia/nv_local.h)0
-rw-r--r--drivers/video/fbdev/nvidia/nv_of.c (renamed from drivers/video/nvidia/nv_of.c)0
-rw-r--r--drivers/video/fbdev/nvidia/nv_proto.h (renamed from drivers/video/nvidia/nv_proto.h)0
-rw-r--r--drivers/video/fbdev/nvidia/nv_setup.c (renamed from drivers/video/nvidia/nv_setup.c)0
-rw-r--r--drivers/video/fbdev/nvidia/nv_type.h (renamed from drivers/video/nvidia/nv_type.h)0
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c (renamed from drivers/video/nvidia/nvidia.c)0
-rw-r--r--drivers/video/fbdev/ocfb.c (renamed from drivers/video/ocfb.c)0
-rw-r--r--drivers/video/fbdev/offb.c (renamed from drivers/video/offb.c)0
-rw-r--r--drivers/video/fbdev/omap/Kconfig (renamed from drivers/video/omap/Kconfig)0
-rw-r--r--drivers/video/fbdev/omap/Makefile (renamed from drivers/video/omap/Makefile)0
-rw-r--r--drivers/video/fbdev/omap/hwa742.c (renamed from drivers/video/omap/hwa742.c)0
-rw-r--r--drivers/video/fbdev/omap/lcd_ams_delta.c (renamed from drivers/video/omap/lcd_ams_delta.c)0
-rw-r--r--drivers/video/fbdev/omap/lcd_h3.c (renamed from drivers/video/omap/lcd_h3.c)0
-rw-r--r--drivers/video/fbdev/omap/lcd_htcherald.c (renamed from drivers/video/omap/lcd_htcherald.c)0
-rw-r--r--drivers/video/fbdev/omap/lcd_inn1510.c (renamed from drivers/video/omap/lcd_inn1510.c)0
-rw-r--r--drivers/video/fbdev/omap/lcd_inn1610.c (renamed from drivers/video/omap/lcd_inn1610.c)0
-rw-r--r--drivers/video/fbdev/omap/lcd_mipid.c (renamed from drivers/video/omap/lcd_mipid.c)0
-rw-r--r--drivers/video/fbdev/omap/lcd_osk.c (renamed from drivers/video/omap/lcd_osk.c)0
-rw-r--r--drivers/video/fbdev/omap/lcd_palmte.c (renamed from drivers/video/omap/lcd_palmte.c)0
-rw-r--r--drivers/video/fbdev/omap/lcd_palmtt.c (renamed from drivers/video/omap/lcd_palmtt.c)0
-rw-r--r--drivers/video/fbdev/omap/lcd_palmz71.c (renamed from drivers/video/omap/lcd_palmz71.c)0
-rw-r--r--drivers/video/fbdev/omap/lcdc.c (renamed from drivers/video/omap/lcdc.c)0
-rw-r--r--drivers/video/fbdev/omap/lcdc.h (renamed from drivers/video/omap/lcdc.h)0
-rw-r--r--drivers/video/fbdev/omap/omapfb.h (renamed from drivers/video/omap/omapfb.h)0
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c (renamed from drivers/video/omap/omapfb_main.c)0
-rw-r--r--drivers/video/fbdev/omap/sossi.c (renamed from drivers/video/omap/sossi.c)0
-rw-r--r--drivers/video/fbdev/omap2/Kconfig10
-rw-r--r--drivers/video/fbdev/omap2/Makefile (renamed from drivers/video/omap2/Makefile)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/Kconfig (renamed from drivers/video/omap2/displays-new/Kconfig)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/Makefile (renamed from drivers/video/omap2/displays-new/Makefile)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c (renamed from drivers/video/omap2/displays-new/connector-analog-tv.c)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/connector-dvi.c (renamed from drivers/video/omap2/displays-new/connector-dvi.c)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/connector-hdmi.c (renamed from drivers/video/omap2/displays-new/connector-hdmi.c)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c (renamed from drivers/video/omap2/displays-new/encoder-tfp410.c)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c (renamed from drivers/video/omap2/displays-new/encoder-tpd12s015.c)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-dpi.c (renamed from drivers/video/omap2/displays-new/panel-dpi.c)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c (renamed from drivers/video/omap2/displays-new/panel-dsi-cm.c)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c (renamed from drivers/video/omap2/displays-new/panel-lgphilips-lb035q02.c)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c (renamed from drivers/video/omap2/displays-new/panel-nec-nl8048hl11.c)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c (renamed from drivers/video/omap2/displays-new/panel-sharp-ls037v7dw01.c)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c (renamed from drivers/video/omap2/displays-new/panel-sony-acx565akm.c)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c (renamed from drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c)0
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c (renamed from drivers/video/omap2/displays-new/panel-tpo-td043mtea1.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/Kconfig (renamed from drivers/video/omap2/dss/Kconfig)0
-rw-r--r--drivers/video/fbdev/omap2/dss/Makefile (renamed from drivers/video/omap2/dss/Makefile)0
-rw-r--r--drivers/video/fbdev/omap2/dss/apply.c (renamed from drivers/video/omap2/dss/apply.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/core.c (renamed from drivers/video/omap2/dss/core.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/dispc-compat.c (renamed from drivers/video/omap2/dss/dispc-compat.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/dispc-compat.h (renamed from drivers/video/omap2/dss/dispc-compat.h)0
-rw-r--r--drivers/video/fbdev/omap2/dss/dispc.c (renamed from drivers/video/omap2/dss/dispc.c)67
-rw-r--r--drivers/video/fbdev/omap2/dss/dispc.h (renamed from drivers/video/omap2/dss/dispc.h)0
-rw-r--r--drivers/video/fbdev/omap2/dss/dispc_coefs.c (renamed from drivers/video/omap2/dss/dispc_coefs.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/display-sysfs.c (renamed from drivers/video/omap2/dss/display-sysfs.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/display.c (renamed from drivers/video/omap2/dss/display.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/dpi.c (renamed from drivers/video/omap2/dss/dpi.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/dsi.c (renamed from drivers/video/omap2/dss/dsi.c)20
-rw-r--r--drivers/video/fbdev/omap2/dss/dss-of.c (renamed from drivers/video/omap2/dss/dss-of.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/dss.c (renamed from drivers/video/omap2/dss/dss.c)4
-rw-r--r--drivers/video/fbdev/omap2/dss/dss.h (renamed from drivers/video/omap2/dss/dss.h)6
-rw-r--r--drivers/video/fbdev/omap2/dss/dss_features.c (renamed from drivers/video/omap2/dss/dss_features.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/dss_features.h (renamed from drivers/video/omap2/dss/dss_features.h)0
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi.h (renamed from drivers/video/omap2/dss/hdmi.h)0
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi4.c (renamed from drivers/video/omap2/dss/hdmi4.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi4_core.c (renamed from drivers/video/omap2/dss/hdmi4_core.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi4_core.h (renamed from drivers/video/omap2/dss/hdmi4_core.h)0
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_common.c (renamed from drivers/video/omap2/dss/hdmi_common.c)8
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_phy.c (renamed from drivers/video/omap2/dss/hdmi_phy.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_pll.c (renamed from drivers/video/omap2/dss/hdmi_pll.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_wp.c (renamed from drivers/video/omap2/dss/hdmi_wp.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/manager-sysfs.c (renamed from drivers/video/omap2/dss/manager-sysfs.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/manager.c (renamed from drivers/video/omap2/dss/manager.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/output.c (renamed from drivers/video/omap2/dss/output.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/overlay-sysfs.c (renamed from drivers/video/omap2/dss/overlay-sysfs.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/overlay.c (renamed from drivers/video/omap2/dss/overlay.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/rfbi.c (renamed from drivers/video/omap2/dss/rfbi.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/sdi.c (renamed from drivers/video/omap2/dss/sdi.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/venc.c (renamed from drivers/video/omap2/dss/venc.c)0
-rw-r--r--drivers/video/fbdev/omap2/dss/venc_panel.c (renamed from drivers/video/omap2/dss/venc_panel.c)0
-rw-r--r--drivers/video/fbdev/omap2/omapfb/Kconfig (renamed from drivers/video/omap2/omapfb/Kconfig)0
-rw-r--r--drivers/video/fbdev/omap2/omapfb/Makefile (renamed from drivers/video/omap2/omapfb/Makefile)0
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c (renamed from drivers/video/omap2/omapfb/omapfb-ioctl.c)0
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c (renamed from drivers/video/omap2/omapfb/omapfb-main.c)0
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c (renamed from drivers/video/omap2/omapfb/omapfb-sysfs.c)0
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb.h (renamed from drivers/video/omap2/omapfb/omapfb.h)0
-rw-r--r--drivers/video/fbdev/omap2/vrfb.c (renamed from drivers/video/omap2/vrfb.c)0
-rw-r--r--drivers/video/fbdev/p9100.c (renamed from drivers/video/p9100.c)0
-rw-r--r--drivers/video/fbdev/platinumfb.c (renamed from drivers/video/platinumfb.c)0
-rw-r--r--drivers/video/fbdev/platinumfb.h (renamed from drivers/video/platinumfb.h)0
-rw-r--r--drivers/video/fbdev/pm2fb.c (renamed from drivers/video/pm2fb.c)0
-rw-r--r--drivers/video/fbdev/pm3fb.c (renamed from drivers/video/pm3fb.c)0
-rw-r--r--drivers/video/fbdev/pmag-aa-fb.c (renamed from drivers/video/pmag-aa-fb.c)0
-rw-r--r--drivers/video/fbdev/pmag-ba-fb.c (renamed from drivers/video/pmag-ba-fb.c)0
-rw-r--r--drivers/video/fbdev/pmagb-b-fb.c (renamed from drivers/video/pmagb-b-fb.c)0
-rw-r--r--drivers/video/fbdev/ps3fb.c (renamed from drivers/video/ps3fb.c)0
-rw-r--r--drivers/video/fbdev/pvr2fb.c (renamed from drivers/video/pvr2fb.c)0
-rw-r--r--drivers/video/fbdev/pxa168fb.c (renamed from drivers/video/pxa168fb.c)0
-rw-r--r--drivers/video/fbdev/pxa168fb.h (renamed from drivers/video/pxa168fb.h)0
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c (renamed from drivers/video/pxa3xx-gcu.c)0
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.h (renamed from drivers/video/pxa3xx-gcu.h)0
-rw-r--r--drivers/video/fbdev/pxafb.c (renamed from drivers/video/pxafb.c)0
-rw-r--r--drivers/video/fbdev/pxafb.h (renamed from drivers/video/pxafb.h)0
-rw-r--r--drivers/video/fbdev/q40fb.c (renamed from drivers/video/q40fb.c)0
-rw-r--r--drivers/video/fbdev/riva/Makefile (renamed from drivers/video/riva/Makefile)0
-rw-r--r--drivers/video/fbdev/riva/fbdev.c (renamed from drivers/video/riva/fbdev.c)0
-rw-r--r--drivers/video/fbdev/riva/nv_driver.c (renamed from drivers/video/riva/nv_driver.c)0
-rw-r--r--drivers/video/fbdev/riva/nv_type.h (renamed from drivers/video/riva/nv_type.h)0
-rw-r--r--drivers/video/fbdev/riva/nvreg.h (renamed from drivers/video/riva/nvreg.h)0
-rw-r--r--drivers/video/fbdev/riva/riva_hw.c (renamed from drivers/video/riva/riva_hw.c)0
-rw-r--r--drivers/video/fbdev/riva/riva_hw.h (renamed from drivers/video/riva/riva_hw.h)0
-rw-r--r--drivers/video/fbdev/riva/riva_tbl.h (renamed from drivers/video/riva/riva_tbl.h)0
-rw-r--r--drivers/video/fbdev/riva/rivafb-i2c.c (renamed from drivers/video/riva/rivafb-i2c.c)0
-rw-r--r--drivers/video/fbdev/riva/rivafb.h (renamed from drivers/video/riva/rivafb.h)0
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c (renamed from drivers/video/s1d13xxxfb.c)0
-rw-r--r--drivers/video/fbdev/s3c-fb.c (renamed from drivers/video/s3c-fb.c)0
-rw-r--r--drivers/video/fbdev/s3c2410fb.c (renamed from drivers/video/s3c2410fb.c)0
-rw-r--r--drivers/video/fbdev/s3c2410fb.h (renamed from drivers/video/s3c2410fb.h)0
-rw-r--r--drivers/video/fbdev/s3fb.c (renamed from drivers/video/s3fb.c)0
-rw-r--r--drivers/video/fbdev/sa1100fb.c (renamed from drivers/video/sa1100fb.c)0
-rw-r--r--drivers/video/fbdev/sa1100fb.h (renamed from drivers/video/sa1100fb.h)0
-rw-r--r--drivers/video/fbdev/savage/Makefile (renamed from drivers/video/savage/Makefile)0
-rw-r--r--drivers/video/fbdev/savage/savagefb-i2c.c (renamed from drivers/video/savage/savagefb-i2c.c)0
-rw-r--r--drivers/video/fbdev/savage/savagefb.h (renamed from drivers/video/savage/savagefb.h)0
-rw-r--r--drivers/video/fbdev/savage/savagefb_accel.c (renamed from drivers/video/savage/savagefb_accel.c)0
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c (renamed from drivers/video/savage/savagefb_driver.c)0
-rw-r--r--drivers/video/fbdev/sbuslib.c (renamed from drivers/video/sbuslib.c)0
-rw-r--r--drivers/video/fbdev/sbuslib.h (renamed from drivers/video/sbuslib.h)0
-rw-r--r--drivers/video/fbdev/sh7760fb.c (renamed from drivers/video/sh7760fb.c)0
-rw-r--r--drivers/video/fbdev/sh_mipi_dsi.c (renamed from drivers/video/sh_mipi_dsi.c)0
-rw-r--r--drivers/video/fbdev/sh_mobile_hdmi.c (renamed from drivers/video/sh_mobile_hdmi.c)0
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c (renamed from drivers/video/sh_mobile_lcdcfb.c)0
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.h (renamed from drivers/video/sh_mobile_lcdcfb.h)0
-rw-r--r--drivers/video/fbdev/sh_mobile_meram.c (renamed from drivers/video/sh_mobile_meram.c)0
-rw-r--r--drivers/video/fbdev/simplefb.c (renamed from drivers/video/simplefb.c)0
-rw-r--r--drivers/video/fbdev/sis/300vtbl.h (renamed from drivers/video/sis/300vtbl.h)0
-rw-r--r--drivers/video/fbdev/sis/310vtbl.h (renamed from drivers/video/sis/310vtbl.h)0
-rw-r--r--drivers/video/fbdev/sis/Makefile (renamed from drivers/video/sis/Makefile)0
-rw-r--r--drivers/video/fbdev/sis/init.c (renamed from drivers/video/sis/init.c)0
-rw-r--r--drivers/video/fbdev/sis/init.h (renamed from drivers/video/sis/init.h)0
-rw-r--r--drivers/video/fbdev/sis/init301.c (renamed from drivers/video/sis/init301.c)0
-rw-r--r--drivers/video/fbdev/sis/init301.h (renamed from drivers/video/sis/init301.h)0
-rw-r--r--drivers/video/fbdev/sis/initdef.h (renamed from drivers/video/sis/initdef.h)0
-rw-r--r--drivers/video/fbdev/sis/initextlfb.c (renamed from drivers/video/sis/initextlfb.c)0
-rw-r--r--drivers/video/fbdev/sis/oem300.h (renamed from drivers/video/sis/oem300.h)0
-rw-r--r--drivers/video/fbdev/sis/oem310.h (renamed from drivers/video/sis/oem310.h)0
-rw-r--r--drivers/video/fbdev/sis/sis.h (renamed from drivers/video/sis/sis.h)0
-rw-r--r--drivers/video/fbdev/sis/sis_accel.c (renamed from drivers/video/sis/sis_accel.c)0
-rw-r--r--drivers/video/fbdev/sis/sis_accel.h (renamed from drivers/video/sis/sis_accel.h)0
-rw-r--r--drivers/video/fbdev/sis/sis_main.c (renamed from drivers/video/sis/sis_main.c)0
-rw-r--r--drivers/video/fbdev/sis/sis_main.h (renamed from drivers/video/sis/sis_main.h)0
-rw-r--r--drivers/video/fbdev/sis/vgatypes.h (renamed from drivers/video/sis/vgatypes.h)0
-rw-r--r--drivers/video/fbdev/sis/vstruct.h (renamed from drivers/video/sis/vstruct.h)0
-rw-r--r--drivers/video/fbdev/skeletonfb.c (renamed from drivers/video/skeletonfb.c)0
-rw-r--r--drivers/video/fbdev/sm501fb.c (renamed from drivers/video/sm501fb.c)0
-rw-r--r--drivers/video/fbdev/smscufx.c (renamed from drivers/video/smscufx.c)0
-rw-r--r--drivers/video/fbdev/ssd1307fb.c (renamed from drivers/video/ssd1307fb.c)0
-rw-r--r--drivers/video/fbdev/sstfb.c (renamed from drivers/video/sstfb.c)0
-rw-r--r--drivers/video/fbdev/sticore.h (renamed from drivers/video/sticore.h)0
-rw-r--r--drivers/video/fbdev/stifb.c (renamed from drivers/video/stifb.c)0
-rw-r--r--drivers/video/fbdev/sunxvr1000.c (renamed from drivers/video/sunxvr1000.c)0
-rw-r--r--drivers/video/fbdev/sunxvr2500.c (renamed from drivers/video/sunxvr2500.c)0
-rw-r--r--drivers/video/fbdev/sunxvr500.c (renamed from drivers/video/sunxvr500.c)0
-rw-r--r--drivers/video/fbdev/tcx.c (renamed from drivers/video/tcx.c)0
-rw-r--r--drivers/video/fbdev/tdfxfb.c (renamed from drivers/video/tdfxfb.c)0
-rw-r--r--drivers/video/fbdev/tgafb.c (renamed from drivers/video/tgafb.c)0
-rw-r--r--drivers/video/fbdev/tmiofb.c (renamed from drivers/video/tmiofb.c)0
-rw-r--r--drivers/video/fbdev/tridentfb.c (renamed from drivers/video/tridentfb.c)0
-rw-r--r--drivers/video/fbdev/udlfb.c (renamed from drivers/video/udlfb.c)0
-rw-r--r--drivers/video/fbdev/uvesafb.c (renamed from drivers/video/uvesafb.c)0
-rw-r--r--drivers/video/fbdev/valkyriefb.c (renamed from drivers/video/valkyriefb.c)0
-rw-r--r--drivers/video/fbdev/valkyriefb.h (renamed from drivers/video/valkyriefb.h)0
-rw-r--r--drivers/video/fbdev/vermilion/Makefile (renamed from drivers/video/vermilion/Makefile)0
-rw-r--r--drivers/video/fbdev/vermilion/cr_pll.c (renamed from drivers/video/vermilion/cr_pll.c)0
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c (renamed from drivers/video/vermilion/vermilion.c)0
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.h (renamed from drivers/video/vermilion/vermilion.h)0
-rw-r--r--drivers/video/fbdev/vesafb.c (renamed from drivers/video/vesafb.c)0
-rw-r--r--drivers/video/fbdev/vfb.c (renamed from drivers/video/vfb.c)0
-rw-r--r--drivers/video/fbdev/vga16fb.c (renamed from drivers/video/vga16fb.c)0
-rw-r--r--drivers/video/fbdev/via/Makefile (renamed from drivers/video/via/Makefile)0
-rw-r--r--drivers/video/fbdev/via/accel.c (renamed from drivers/video/via/accel.c)0
-rw-r--r--drivers/video/fbdev/via/accel.h (renamed from drivers/video/via/accel.h)0
-rw-r--r--drivers/video/fbdev/via/chip.h (renamed from drivers/video/via/chip.h)0
-rw-r--r--drivers/video/fbdev/via/debug.h (renamed from drivers/video/via/debug.h)0
-rw-r--r--drivers/video/fbdev/via/dvi.c (renamed from drivers/video/via/dvi.c)0
-rw-r--r--drivers/video/fbdev/via/dvi.h (renamed from drivers/video/via/dvi.h)0
-rw-r--r--drivers/video/fbdev/via/global.c (renamed from drivers/video/via/global.c)0
-rw-r--r--drivers/video/fbdev/via/global.h (renamed from drivers/video/via/global.h)0
-rw-r--r--drivers/video/fbdev/via/hw.c (renamed from drivers/video/via/hw.c)0
-rw-r--r--drivers/video/fbdev/via/hw.h (renamed from drivers/video/via/hw.h)0
-rw-r--r--drivers/video/fbdev/via/ioctl.c (renamed from drivers/video/via/ioctl.c)0
-rw-r--r--drivers/video/fbdev/via/ioctl.h (renamed from drivers/video/via/ioctl.h)0
-rw-r--r--drivers/video/fbdev/via/lcd.c (renamed from drivers/video/via/lcd.c)0
-rw-r--r--drivers/video/fbdev/via/lcd.h (renamed from drivers/video/via/lcd.h)0
-rw-r--r--drivers/video/fbdev/via/share.h (renamed from drivers/video/via/share.h)0
-rw-r--r--drivers/video/fbdev/via/tblDPASetting.c (renamed from drivers/video/via/tblDPASetting.c)0
-rw-r--r--drivers/video/fbdev/via/tblDPASetting.h (renamed from drivers/video/via/tblDPASetting.h)0
-rw-r--r--drivers/video/fbdev/via/via-core.c (renamed from drivers/video/via/via-core.c)0
-rw-r--r--drivers/video/fbdev/via/via-gpio.c (renamed from drivers/video/via/via-gpio.c)0
-rw-r--r--drivers/video/fbdev/via/via_aux.c (renamed from drivers/video/via/via_aux.c)0
-rw-r--r--drivers/video/fbdev/via/via_aux.h (renamed from drivers/video/via/via_aux.h)0
-rw-r--r--drivers/video/fbdev/via/via_aux_ch7301.c (renamed from drivers/video/via/via_aux_ch7301.c)0
-rw-r--r--drivers/video/fbdev/via/via_aux_edid.c (renamed from drivers/video/via/via_aux_edid.c)0
-rw-r--r--drivers/video/fbdev/via/via_aux_sii164.c (renamed from drivers/video/via/via_aux_sii164.c)0
-rw-r--r--drivers/video/fbdev/via/via_aux_vt1621.c (renamed from drivers/video/via/via_aux_vt1621.c)0
-rw-r--r--drivers/video/fbdev/via/via_aux_vt1622.c (renamed from drivers/video/via/via_aux_vt1622.c)0
-rw-r--r--drivers/video/fbdev/via/via_aux_vt1625.c (renamed from drivers/video/via/via_aux_vt1625.c)0
-rw-r--r--drivers/video/fbdev/via/via_aux_vt1631.c (renamed from drivers/video/via/via_aux_vt1631.c)0
-rw-r--r--drivers/video/fbdev/via/via_aux_vt1632.c (renamed from drivers/video/via/via_aux_vt1632.c)0
-rw-r--r--drivers/video/fbdev/via/via_aux_vt1636.c (renamed from drivers/video/via/via_aux_vt1636.c)0
-rw-r--r--drivers/video/fbdev/via/via_clock.c (renamed from drivers/video/via/via_clock.c)0
-rw-r--r--drivers/video/fbdev/via/via_clock.h (renamed from drivers/video/via/via_clock.h)0
-rw-r--r--drivers/video/fbdev/via/via_i2c.c (renamed from drivers/video/via/via_i2c.c)0
-rw-r--r--drivers/video/fbdev/via/via_modesetting.c (renamed from drivers/video/via/via_modesetting.c)0
-rw-r--r--drivers/video/fbdev/via/via_modesetting.h (renamed from drivers/video/via/via_modesetting.h)0
-rw-r--r--drivers/video/fbdev/via/via_utility.c (renamed from drivers/video/via/via_utility.c)0
-rw-r--r--drivers/video/fbdev/via/via_utility.h (renamed from drivers/video/via/via_utility.h)0
-rw-r--r--drivers/video/fbdev/via/viafbdev.c (renamed from drivers/video/via/viafbdev.c)0
-rw-r--r--drivers/video/fbdev/via/viafbdev.h (renamed from drivers/video/via/viafbdev.h)0
-rw-r--r--drivers/video/fbdev/via/viamode.c (renamed from drivers/video/via/viamode.c)0
-rw-r--r--drivers/video/fbdev/via/viamode.h (renamed from drivers/video/via/viamode.h)0
-rw-r--r--drivers/video/fbdev/via/vt1636.c (renamed from drivers/video/via/vt1636.c)0
-rw-r--r--drivers/video/fbdev/via/vt1636.h (renamed from drivers/video/via/vt1636.h)0
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c (renamed from drivers/video/vt8500lcdfb.c)0
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.h (renamed from drivers/video/vt8500lcdfb.h)0
-rw-r--r--drivers/video/fbdev/vt8623fb.c (renamed from drivers/video/vt8623fb.c)0
-rw-r--r--drivers/video/fbdev/w100fb.c (renamed from drivers/video/w100fb.c)0
-rw-r--r--drivers/video/fbdev/w100fb.h (renamed from drivers/video/w100fb.h)0
-rw-r--r--drivers/video/fbdev/wm8505fb.c (renamed from drivers/video/wm8505fb.c)0
-rw-r--r--drivers/video/fbdev/wm8505fb_regs.h (renamed from drivers/video/wm8505fb_regs.h)0
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c (renamed from drivers/video/wmt_ge_rops.c)2
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.h (renamed from drivers/video/wmt_ge_rops.h)0
-rw-r--r--drivers/video/fbdev/xen-fbfront.c (renamed from drivers/video/xen-fbfront.c)0
-rw-r--r--drivers/video/fbdev/xilinxfb.c (renamed from drivers/video/xilinxfb.c)0
-rw-r--r--drivers/video/omap2/Kconfig10
-rw-r--r--drivers/vme/bridges/vme_tsi148.c22
-rw-r--r--drivers/w1/w1.c32
-rw-r--r--drivers/w1/w1_netlink.c44
-rw-r--r--drivers/xen/events/events_fifo.c41
-rw-r--r--drivers/xen/manage.c32
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c3
-rw-r--r--drivers/xen/xen-pciback/vpci.c2
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c44
1173 files changed, 16352 insertions, 30893 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index e3ced91b1784..7183b6af5dac 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -53,8 +53,8 @@ obj-y += gpu/
53obj-$(CONFIG_CONNECTOR) += connector/ 53obj-$(CONFIG_CONNECTOR) += connector/
54 54
55# i810fb and intelfb depend on char/agp/ 55# i810fb and intelfb depend on char/agp/
56obj-$(CONFIG_FB_I810) += video/i810/ 56obj-$(CONFIG_FB_I810) += video/fbdev/i810/
57obj-$(CONFIG_FB_INTEL) += video/intelfb/ 57obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/
58 58
59obj-$(CONFIG_PARPORT) += parport/ 59obj-$(CONFIG_PARPORT) += parport/
60obj-y += base/ block/ misc/ mfd/ nfc/ 60obj-y += base/ block/ misc/ mfd/ nfc/
@@ -119,7 +119,7 @@ obj-$(CONFIG_SGI_SN) += sn/
119obj-y += firmware/ 119obj-y += firmware/
120obj-$(CONFIG_CRYPTO) += crypto/ 120obj-$(CONFIG_CRYPTO) += crypto/
121obj-$(CONFIG_SUPERH) += sh/ 121obj-$(CONFIG_SUPERH) += sh/
122obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += sh/ 122obj-$(CONFIG_ARCH_SHMOBILE) += sh/
123ifndef CONFIG_ARCH_USES_GETTIMEOFFSET 123ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
124obj-y += clocksource/ 124obj-y += clocksource/
125endif 125endif
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ab686b310100..a34a22841002 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -47,6 +47,23 @@ config ACPI_SLEEP
47 depends on SUSPEND || HIBERNATION 47 depends on SUSPEND || HIBERNATION
48 default y 48 default y
49 49
50config ACPI_PROCFS_POWER
51 bool "Deprecated power /proc/acpi directories"
52 depends on PROC_FS
53 help
54 For backwards compatibility, this option allows
55 deprecated power /proc/acpi/ directories to exist, even when
56 they have been replaced by functions in /sys.
57 The deprecated directories (and their replacements) include:
58 /proc/acpi/battery/* (/sys/class/power_supply/*)
59 /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
60 This option has no effect on /proc/acpi/ directories
61 and functions, which do not yet exist in /sys
62 This option, together with the proc directories, will be
63 deleted in the future.
64
65 Say N to delete power /proc/acpi/ directories that have moved to /sys/
66
50config ACPI_EC_DEBUGFS 67config ACPI_EC_DEBUGFS
51 tristate "EC read/write access through /sys/kernel/debug/ec" 68 tristate "EC read/write access through /sys/kernel/debug/ec"
52 default n 69 default n
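
Note: the help text above re-exposes the legacy /proc/acpi power directories purely for backwards compatibility; new tooling is expected to read the sysfs power_supply class named in that text instead. As a rough userspace illustration (not part of the patch), the sketch below reads the sysfs replacements; the supply names "BAT0" and "ADP1" and the exact attribute files are typical examples and may differ per machine.

/* Hedged example: read the sysfs attributes that replace the deprecated
 * /proc/acpi/battery and /proc/acpi/ac_adapter files.  "BAT0" and "ADP1"
 * are common but not guaranteed supply names. */
#include <stdio.h>

static int read_attr(const char *path, char *buf, int len)
{
        FILE *f = fopen(path, "r");

        if (!f)
                return -1;
        if (!fgets(buf, len, f)) {
                fclose(f);
                return -1;
        }
        fclose(f);
        return 0;
}

int main(void)
{
        char buf[64];

        if (!read_attr("/sys/class/power_supply/BAT0/capacity", buf, sizeof(buf)))
                printf("battery capacity (%%): %s", buf);
        if (!read_attr("/sys/class/power_supply/ADP1/online", buf, sizeof(buf)))
                printf("AC adapter online: %s", buf);
        return 0;
}
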
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 0331f91d56e6..bce34afadcd0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -47,6 +47,7 @@ acpi-y += sysfs.o
47acpi-$(CONFIG_X86) += acpi_cmos_rtc.o 47acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
48acpi-$(CONFIG_DEBUG_FS) += debugfs.o 48acpi-$(CONFIG_DEBUG_FS) += debugfs.o
49acpi-$(CONFIG_ACPI_NUMA) += numa.o 49acpi-$(CONFIG_ACPI_NUMA) += numa.o
50acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
50ifdef CONFIG_ACPI_VIDEO 51ifdef CONFIG_ACPI_VIDEO
51acpi-y += video_detect.o 52acpi-y += video_detect.o
52endif 53endif
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 2c01c1da29ce..c67f6f5ad611 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -52,11 +52,39 @@ MODULE_AUTHOR("Paul Diefenbaugh");
52MODULE_DESCRIPTION("ACPI AC Adapter Driver"); 52MODULE_DESCRIPTION("ACPI AC Adapter Driver");
53MODULE_LICENSE("GPL"); 53MODULE_LICENSE("GPL");
54 54
55static int acpi_ac_add(struct acpi_device *device);
56static int acpi_ac_remove(struct acpi_device *device);
57static void acpi_ac_notify(struct acpi_device *device, u32 event);
58
59static const struct acpi_device_id ac_device_ids[] = {
60 {"ACPI0003", 0},
61 {"", 0},
62};
63MODULE_DEVICE_TABLE(acpi, ac_device_ids);
64
65#ifdef CONFIG_PM_SLEEP
66static int acpi_ac_resume(struct device *dev);
67#endif
68static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
69
55static int ac_sleep_before_get_state_ms; 70static int ac_sleep_before_get_state_ms;
56 71
72static struct acpi_driver acpi_ac_driver = {
73 .name = "ac",
74 .class = ACPI_AC_CLASS,
75 .ids = ac_device_ids,
76 .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
77 .ops = {
78 .add = acpi_ac_add,
79 .remove = acpi_ac_remove,
80 .notify = acpi_ac_notify,
81 },
82 .drv.pm = &acpi_ac_pm,
83};
84
57struct acpi_ac { 85struct acpi_ac {
58 struct power_supply charger; 86 struct power_supply charger;
59 struct platform_device *pdev; 87 struct acpi_device * device;
60 unsigned long long state; 88 unsigned long long state;
61 struct notifier_block battery_nb; 89 struct notifier_block battery_nb;
62}; 90};
@@ -69,10 +97,12 @@ struct acpi_ac {
69 97
70static int acpi_ac_get_state(struct acpi_ac *ac) 98static int acpi_ac_get_state(struct acpi_ac *ac)
71{ 99{
72 acpi_status status; 100 acpi_status status = AE_OK;
73 acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev); 101
102 if (!ac)
103 return -EINVAL;
74 104
75 status = acpi_evaluate_integer(handle, "_PSR", NULL, 105 status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
76 &ac->state); 106 &ac->state);
77 if (ACPI_FAILURE(status)) { 107 if (ACPI_FAILURE(status)) {
78 ACPI_EXCEPTION((AE_INFO, status, 108 ACPI_EXCEPTION((AE_INFO, status,
@@ -117,10 +147,9 @@ static enum power_supply_property ac_props[] = {
117 Driver Model 147 Driver Model
118 -------------------------------------------------------------------------- */ 148 -------------------------------------------------------------------------- */
119 149
120static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) 150static void acpi_ac_notify(struct acpi_device *device, u32 event)
121{ 151{
122 struct acpi_ac *ac = data; 152 struct acpi_ac *ac = acpi_driver_data(device);
123 struct acpi_device *adev;
124 153
125 if (!ac) 154 if (!ac)
126 return; 155 return;
@@ -143,11 +172,10 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
143 msleep(ac_sleep_before_get_state_ms); 172 msleep(ac_sleep_before_get_state_ms);
144 173
145 acpi_ac_get_state(ac); 174 acpi_ac_get_state(ac);
146 adev = ACPI_COMPANION(&ac->pdev->dev); 175 acpi_bus_generate_netlink_event(device->pnp.device_class,
147 acpi_bus_generate_netlink_event(adev->pnp.device_class, 176 dev_name(&device->dev), event,
148 dev_name(&ac->pdev->dev), 177 (u32) ac->state);
149 event, (u32) ac->state); 178 acpi_notifier_call_chain(device, event, (u32) ac->state);
150 acpi_notifier_call_chain(adev, event, (u32) ac->state);
151 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); 179 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
152 } 180 }
153 181
@@ -192,49 +220,39 @@ static struct dmi_system_id ac_dmi_table[] = {
192 {}, 220 {},
193}; 221};
194 222
195static int acpi_ac_probe(struct platform_device *pdev) 223static int acpi_ac_add(struct acpi_device *device)
196{ 224{
197 int result = 0; 225 int result = 0;
198 struct acpi_ac *ac = NULL; 226 struct acpi_ac *ac = NULL;
199 struct acpi_device *adev;
200 227
201 if (!pdev)
202 return -EINVAL;
203 228
204 adev = ACPI_COMPANION(&pdev->dev); 229 if (!device)
205 if (!adev) 230 return -EINVAL;
206 return -ENODEV;
207 231
208 ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); 232 ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
209 if (!ac) 233 if (!ac)
210 return -ENOMEM; 234 return -ENOMEM;
211 235
212 strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); 236 ac->device = device;
213 strcpy(acpi_device_class(adev), ACPI_AC_CLASS); 237 strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
214 ac->pdev = pdev; 238 strcpy(acpi_device_class(device), ACPI_AC_CLASS);
215 platform_set_drvdata(pdev, ac); 239 device->driver_data = ac;
216 240
217 result = acpi_ac_get_state(ac); 241 result = acpi_ac_get_state(ac);
218 if (result) 242 if (result)
219 goto end; 243 goto end;
220 244
221 ac->charger.name = acpi_device_bid(adev); 245 ac->charger.name = acpi_device_bid(device);
222 ac->charger.type = POWER_SUPPLY_TYPE_MAINS; 246 ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
223 ac->charger.properties = ac_props; 247 ac->charger.properties = ac_props;
224 ac->charger.num_properties = ARRAY_SIZE(ac_props); 248 ac->charger.num_properties = ARRAY_SIZE(ac_props);
225 ac->charger.get_property = get_ac_property; 249 ac->charger.get_property = get_ac_property;
226 result = power_supply_register(&pdev->dev, &ac->charger); 250 result = power_supply_register(&ac->device->dev, &ac->charger);
227 if (result) 251 if (result)
228 goto end; 252 goto end;
229 253
230 result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
231 ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
232 if (result) {
233 power_supply_unregister(&ac->charger);
234 goto end;
235 }
236 printk(KERN_INFO PREFIX "%s [%s] (%s)\n", 254 printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
237 acpi_device_name(adev), acpi_device_bid(adev), 255 acpi_device_name(device), acpi_device_bid(device),
238 ac->state ? "on-line" : "off-line"); 256 ac->state ? "on-line" : "off-line");
239 257
240 ac->battery_nb.notifier_call = acpi_ac_battery_notify; 258 ac->battery_nb.notifier_call = acpi_ac_battery_notify;
@@ -256,7 +274,7 @@ static int acpi_ac_resume(struct device *dev)
256 if (!dev) 274 if (!dev)
257 return -EINVAL; 275 return -EINVAL;
258 276
259 ac = platform_get_drvdata(to_platform_device(dev)); 277 ac = acpi_driver_data(to_acpi_device(dev));
260 if (!ac) 278 if (!ac)
261 return -EINVAL; 279 return -EINVAL;
262 280
@@ -270,19 +288,17 @@ static int acpi_ac_resume(struct device *dev)
270#else 288#else
271#define acpi_ac_resume NULL 289#define acpi_ac_resume NULL
272#endif 290#endif
273static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
274 291
275static int acpi_ac_remove(struct platform_device *pdev) 292static int acpi_ac_remove(struct acpi_device *device)
276{ 293{
277 struct acpi_ac *ac; 294 struct acpi_ac *ac = NULL;
295
278 296
279 if (!pdev) 297 if (!device || !acpi_driver_data(device))
280 return -EINVAL; 298 return -EINVAL;
281 299
282 acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), 300 ac = acpi_driver_data(device);
283 ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
284 301
285 ac = platform_get_drvdata(pdev);
286 if (ac->charger.dev) 302 if (ac->charger.dev)
287 power_supply_unregister(&ac->charger); 303 power_supply_unregister(&ac->charger);
288 unregister_acpi_notifier(&ac->battery_nb); 304 unregister_acpi_notifier(&ac->battery_nb);
@@ -292,23 +308,6 @@ static int acpi_ac_remove(struct platform_device *pdev)
292 return 0; 308 return 0;
293} 309}
294 310
295static const struct acpi_device_id acpi_ac_match[] = {
296 { "ACPI0003", 0 },
297 { }
298};
299MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
300
301static struct platform_driver acpi_ac_driver = {
302 .probe = acpi_ac_probe,
303 .remove = acpi_ac_remove,
304 .driver = {
305 .name = "acpi-ac",
306 .owner = THIS_MODULE,
307 .pm = &acpi_ac_pm_ops,
308 .acpi_match_table = ACPI_PTR(acpi_ac_match),
309 },
310};
311
312static int __init acpi_ac_init(void) 311static int __init acpi_ac_init(void)
313{ 312{
314 int result; 313 int result;
@@ -316,7 +315,7 @@ static int __init acpi_ac_init(void)
316 if (acpi_disabled) 315 if (acpi_disabled)
317 return -ENODEV; 316 return -ENODEV;
318 317
319 result = platform_driver_register(&acpi_ac_driver); 318 result = acpi_bus_register_driver(&acpi_ac_driver);
320 if (result < 0) 319 if (result < 0)
321 return -ENODEV; 320 return -ENODEV;
322 321
@@ -325,7 +324,7 @@ static int __init acpi_ac_init(void)
325 324
326static void __exit acpi_ac_exit(void) 325static void __exit acpi_ac_exit(void)
327{ 326{
328 platform_driver_unregister(&acpi_ac_driver); 327 acpi_bus_unregister_driver(&acpi_ac_driver);
329} 328}
330module_init(acpi_ac_init); 329module_init(acpi_ac_init);
331module_exit(acpi_ac_exit); 330module_exit(acpi_ac_exit);
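
Note: the hunks above take ac.c from a platform_device/platform_driver binding back to the older struct acpi_driver model, where the add/remove/notify callbacks receive the struct acpi_device directly and per-device state hangs off device->driver_data. For orientation, a minimal hedged skeleton of that registration pattern (as it existed in this kernel generation) follows; the "MYDRV01" ID and the my_* names are illustrative, not taken from the patch.

/* Hedged sketch of the legacy struct acpi_driver pattern that the reverted
 * ac.c uses; "MYDRV01" and the my_* names are invented for the example. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acpi.h>

struct my_data {
        unsigned long long state;
};

static const struct acpi_device_id my_ids[] = {
        {"MYDRV01", 0},
        {"", 0},
};
MODULE_DEVICE_TABLE(acpi, my_ids);

static int my_add(struct acpi_device *device)
{
        struct my_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

        if (!data)
                return -ENOMEM;
        device->driver_data = data;     /* retrieved later via acpi_driver_data() */
        return 0;
}

static int my_remove(struct acpi_device *device)
{
        kfree(acpi_driver_data(device));
        return 0;
}

static void my_notify(struct acpi_device *device, u32 event)
{
        struct my_data *data = acpi_driver_data(device);

        dev_info(&device->dev, "event 0x%x, state %llu\n", event, data->state);
}

static struct acpi_driver my_driver = {
        .name = "my_acpi_driver",
        .class = "my_class",
        .ids = my_ids,
        .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
        .ops = {
                .add = my_add,
                .remove = my_remove,
                .notify = my_notify,
        },
};

static int __init my_init(void)
{
        return acpi_bus_register_driver(&my_driver);
}

static void __exit my_exit(void)
{
        acpi_bus_unregister_driver(&my_driver);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
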
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index dbfe49e5fd63..1d4950388fa1 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -29,7 +29,6 @@ ACPI_MODULE_NAME("platform");
29static const struct acpi_device_id acpi_platform_device_ids[] = { 29static const struct acpi_device_id acpi_platform_device_ids[] = {
30 30
31 { "PNP0D40" }, 31 { "PNP0D40" },
32 { "ACPI0003" },
33 { "VPC2004" }, 32 { "VPC2004" },
34 { "BCM4752" }, 33 { "BCM4752" },
35 34
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index c29c2c3ec0ad..52c81c49cc7d 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
170 acpi_status status; 170 acpi_status status;
171 int ret; 171 int ret;
172 172
173 if (pr->apic_id == -1)
174 return -ENODEV;
175
173 status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); 176 status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
174 if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) 177 if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
175 return -ENODEV; 178 return -ENODEV;
@@ -260,10 +263,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
260 } 263 }
261 264
262 apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); 265 apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
263 if (apic_id < 0) { 266 if (apic_id < 0)
264 acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); 267 acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
265 return -ENODEV;
266 }
267 pr->apic_id = apic_id; 268 pr->apic_id = apic_id;
268 269
269 cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); 270 cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
@@ -404,7 +405,6 @@ static int acpi_processor_add(struct acpi_device *device,
404 goto err; 405 goto err;
405 406
406 pr->dev = dev; 407 pr->dev = dev;
407 dev->offline = pr->flags.need_hotplug_init;
408 408
409 /* Trigger the processor driver's .probe() if present. */ 409 /* Trigger the processor driver's .probe() if present. */
410 if (device_attach(dev) >= 0) 410 if (device_attach(dev) >= 0)
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 49bbc71fad54..a08a448068dd 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -141,9 +141,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
141 * address. Although ACPICA adheres to the ACPI specification which 141 * address. Although ACPICA adheres to the ACPI specification which
142 * requires the use of the corresponding 64-bit address if it is non-zero, 142 * requires the use of the corresponding 64-bit address if it is non-zero,
143 * some machines have been found to have a corrupted non-zero 64-bit 143 * some machines have been found to have a corrupted non-zero 64-bit
144 * address. Default is FALSE, do not favor the 32-bit addresses. 144 * address. Default is TRUE, favor the 32-bit addresses.
145 */ 145 */
146ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE); 146ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
147 147
148/* 148/*
149 * Optionally truncate I/O addresses to 16 bits. Provides compatibility 149 * Optionally truncate I/O addresses to 16 bits. Provides compatibility
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 68d97441432c..12878e1982f7 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -45,10 +45,71 @@
45#include "accommon.h" 45#include "accommon.h"
46#include "acdispat.h" 46#include "acdispat.h"
47#include "acinterp.h" 47#include "acinterp.h"
48#include "amlcode.h"
48 49
49#define _COMPONENT ACPI_EXECUTER 50#define _COMPONENT ACPI_EXECUTER
50ACPI_MODULE_NAME("exfield") 51ACPI_MODULE_NAME("exfield")
51 52
53/* Local prototypes */
54static u32
55acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length);
56
57/*******************************************************************************
58 *
59 * FUNCTION: acpi_get_serial_access_bytes
60 *
61 * PARAMETERS: accessor_type - The type of the protocol indicated by region
62 * field access attributes
63 * access_length - The access length of the region field
64 *
65 * RETURN: Decoded access length
66 *
67 * DESCRIPTION: This routine returns the length of the generic_serial_bus
68 * protocol bytes
69 *
70 ******************************************************************************/
71
72static u32
73acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length)
74{
75 u32 length;
76
77 switch (accessor_type) {
78 case AML_FIELD_ATTRIB_QUICK:
79
80 length = 0;
81 break;
82
83 case AML_FIELD_ATTRIB_SEND_RCV:
84 case AML_FIELD_ATTRIB_BYTE:
85
86 length = 1;
87 break;
88
89 case AML_FIELD_ATTRIB_WORD:
90 case AML_FIELD_ATTRIB_WORD_CALL:
91
92 length = 2;
93 break;
94
95 case AML_FIELD_ATTRIB_MULTIBYTE:
96 case AML_FIELD_ATTRIB_RAW_BYTES:
97 case AML_FIELD_ATTRIB_RAW_PROCESS:
98
99 length = access_length;
100 break;
101
102 case AML_FIELD_ATTRIB_BLOCK:
103 case AML_FIELD_ATTRIB_BLOCK_CALL:
104 default:
105
106 length = ACPI_GSBUS_BUFFER_SIZE;
107 break;
108 }
109
110 return (length);
111}
112
52/******************************************************************************* 113/*******************************************************************************
53 * 114 *
54 * FUNCTION: acpi_ex_read_data_from_field 115 * FUNCTION: acpi_ex_read_data_from_field
@@ -63,8 +124,9 @@ ACPI_MODULE_NAME("exfield")
63 * Buffer, depending on the size of the field. 124 * Buffer, depending on the size of the field.
64 * 125 *
65 ******************************************************************************/ 126 ******************************************************************************/
127
66acpi_status 128acpi_status
67acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, 129acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
68 union acpi_operand_object *obj_desc, 130 union acpi_operand_object *obj_desc,
69 union acpi_operand_object **ret_buffer_desc) 131 union acpi_operand_object **ret_buffer_desc)
70{ 132{
@@ -73,6 +135,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
73 acpi_size length; 135 acpi_size length;
74 void *buffer; 136 void *buffer;
75 u32 function; 137 u32 function;
138 u16 accessor_type;
76 139
77 ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc); 140 ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
78 141
@@ -116,9 +179,22 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
116 ACPI_READ | (obj_desc->field.attribute << 16); 179 ACPI_READ | (obj_desc->field.attribute << 16);
117 } else if (obj_desc->field.region_obj->region.space_id == 180 } else if (obj_desc->field.region_obj->region.space_id ==
118 ACPI_ADR_SPACE_GSBUS) { 181 ACPI_ADR_SPACE_GSBUS) {
119 length = ACPI_GSBUS_BUFFER_SIZE; 182 accessor_type = obj_desc->field.attribute;
120 function = 183 length = acpi_ex_get_serial_access_length(accessor_type,
121 ACPI_READ | (obj_desc->field.attribute << 16); 184 obj_desc->
185 field.
186 access_length);
187
188 /*
189 * Add additional 2 bytes for modeled generic_serial_bus data buffer:
190 * typedef struct {
191 * BYTEStatus; // Byte 0 of the data buffer
192 * BYTELength; // Byte 1 of the data buffer
193 * BYTE[x-1]Data; // Bytes 2-x of the arbitrary length data buffer,
194 * }
195 */
196 length += 2;
197 function = ACPI_READ | (accessor_type << 16);
122 } else { /* IPMI */ 198 } else { /* IPMI */
123 199
124 length = ACPI_IPMI_BUFFER_SIZE; 200 length = ACPI_IPMI_BUFFER_SIZE;
@@ -231,6 +307,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
231 void *buffer; 307 void *buffer;
232 union acpi_operand_object *buffer_desc; 308 union acpi_operand_object *buffer_desc;
233 u32 function; 309 u32 function;
310 u16 accessor_type;
234 311
235 ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc); 312 ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
236 313
@@ -284,9 +361,22 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
284 ACPI_WRITE | (obj_desc->field.attribute << 16); 361 ACPI_WRITE | (obj_desc->field.attribute << 16);
285 } else if (obj_desc->field.region_obj->region.space_id == 362 } else if (obj_desc->field.region_obj->region.space_id ==
286 ACPI_ADR_SPACE_GSBUS) { 363 ACPI_ADR_SPACE_GSBUS) {
287 length = ACPI_GSBUS_BUFFER_SIZE; 364 accessor_type = obj_desc->field.attribute;
288 function = 365 length = acpi_ex_get_serial_access_length(accessor_type,
289 ACPI_WRITE | (obj_desc->field.attribute << 16); 366 obj_desc->
367 field.
368 access_length);
369
370 /*
371 * Add additional 2 bytes for modeled generic_serial_bus data buffer:
372 * typedef struct {
373 * BYTEStatus; // Byte 0 of the data buffer
374 * BYTELength; // Byte 1 of the data buffer
375 * BYTE[x-1]Data; // Bytes 2-x of the arbitrary length data buffer,
376 * }
377 */
378 length += 2;
379 function = ACPI_WRITE | (accessor_type << 16);
290 } else { /* IPMI */ 380 } else { /* IPMI */
291 381
292 length = ACPI_IPMI_BUFFER_SIZE; 382 length = ACPI_IPMI_BUFFER_SIZE;
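
Note: the net effect of the exfield.c changes is that a GenericSerialBus field transfer no longer always uses the fixed ACPI_GSBUS_BUFFER_SIZE buffer; the data length is decoded from the field's access attribute (falling back to the field's AccessLength for the multi-byte/raw protocols) and then padded with the 2-byte Status/Length header described in the comment. The standalone sketch below re-implements that sizing rule for illustration only; the enum values and the 256-byte block-mode placeholder are local assumptions, not the real AML_FIELD_ATTRIB_* or ACPI_GSBUS_BUFFER_SIZE definitions.

/* Hedged sketch: recompute the generic_serial_bus transfer size the way the
 * patched acpi_ex_get_serial_access_length() does.  The enum below is a local
 * stand-in; the real AML_FIELD_ATTRIB_* constants live in amlcode.h. */
#include <stdio.h>

enum attrib {
        ATTRIB_QUICK, ATTRIB_SEND_RCV, ATTRIB_BYTE, ATTRIB_WORD,
        ATTRIB_WORD_CALL, ATTRIB_MULTIBYTE, ATTRIB_RAW_BYTES,
        ATTRIB_RAW_PROCESS, ATTRIB_BLOCK, ATTRIB_BLOCK_CALL,
};

#define GSBUS_MAX_BUFFER_SIZE 256       /* placeholder, not ACPI_GSBUS_BUFFER_SIZE */

static unsigned int serial_access_length(enum attrib type, unsigned int access_length)
{
        switch (type) {
        case ATTRIB_QUICK:
                return 0;                       /* no data bytes at all */
        case ATTRIB_SEND_RCV:
        case ATTRIB_BYTE:
                return 1;
        case ATTRIB_WORD:
        case ATTRIB_WORD_CALL:
                return 2;
        case ATTRIB_MULTIBYTE:
        case ATTRIB_RAW_BYTES:
        case ATTRIB_RAW_PROCESS:
                return access_length;           /* taken from the field's AccessLength */
        default:                                /* block protocols keep the old maximum */
                return GSBUS_MAX_BUFFER_SIZE;
        }
}

int main(void)
{
        /* A Word protocol field: 2 data bytes + 2-byte Status/Length header. */
        printf("word xfer buffer: %u bytes\n", serial_access_length(ATTRIB_WORD, 0) + 2);
        /* A RawBytes field with AccessLength 16: 16 + 2 = 18 bytes. */
        printf("raw xfer buffer:  %u bytes\n", serial_access_length(ATTRIB_RAW_BYTES, 16) + 2);
        return 0;
}
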
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index a4702eee91a8..9fb85f38de90 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -461,6 +461,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
461 u32 table_count; 461 u32 table_count;
462 struct acpi_table_header *table; 462 struct acpi_table_header *table;
463 acpi_physical_address address; 463 acpi_physical_address address;
464 acpi_physical_address rsdt_address;
464 u32 length; 465 u32 length;
465 u8 *table_entry; 466 u8 *table_entry;
466 acpi_status status; 467 acpi_status status;
@@ -488,11 +489,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
488 * as per the ACPI specification. 489 * as per the ACPI specification.
489 */ 490 */
490 address = (acpi_physical_address) rsdp->xsdt_physical_address; 491 address = (acpi_physical_address) rsdp->xsdt_physical_address;
492 rsdt_address =
493 (acpi_physical_address) rsdp->rsdt_physical_address;
491 table_entry_size = ACPI_XSDT_ENTRY_SIZE; 494 table_entry_size = ACPI_XSDT_ENTRY_SIZE;
492 } else { 495 } else {
493 /* Root table is an RSDT (32-bit physical addresses) */ 496 /* Root table is an RSDT (32-bit physical addresses) */
494 497
495 address = (acpi_physical_address) rsdp->rsdt_physical_address; 498 address = (acpi_physical_address) rsdp->rsdt_physical_address;
499 rsdt_address = address;
496 table_entry_size = ACPI_RSDT_ENTRY_SIZE; 500 table_entry_size = ACPI_RSDT_ENTRY_SIZE;
497 } 501 }
498 502
@@ -515,8 +519,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
515 519
516 /* Fall back to the RSDT */ 520 /* Fall back to the RSDT */
517 521
518 address = 522 address = rsdt_address;
519 (acpi_physical_address) rsdp->rsdt_physical_address;
520 table_entry_size = ACPI_RSDT_ENTRY_SIZE; 523 table_entry_size = ACPI_RSDT_ENTRY_SIZE;
521 } 524 }
522 } 525 }
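
Note: the tbutils.c hunk captures both root-table addresses while the RSDP is being examined, so the later XSDT-to-RSDT fallback reuses the cached rsdt_address instead of reaching back into the RSDP structure. The userspace-only simulation below distills that "read everything up front, fall back from a local copy" flow; the struct layout, helper names, and the failure test are all invented for the example.

/* Hedged, userspace-only simulation of the pattern in the tbutils.c hunk.
 * Nothing below is ACPICA code; all names are local stand-ins. */
#include <stdio.h>
#include <stdint.h>

struct fake_rsdp {
        uint8_t revision;
        uint32_t rsdt_physical_address;
        uint64_t xsdt_physical_address;
};

/* Pretend any address above 4 GiB points at an unusable XSDT. */
static int root_table_usable(uint64_t address)
{
        return address < 0x100000000ull;
}

static uint64_t pick_root_table(const struct fake_rsdp *rsdp)
{
        uint64_t address, rsdt_address;

        /* Read both candidates while the RSDP view is still valid. */
        if (rsdp->revision > 1 && rsdp->xsdt_physical_address) {
                address = rsdp->xsdt_physical_address;
                rsdt_address = rsdp->rsdt_physical_address;
        } else {
                address = rsdp->rsdt_physical_address;
                rsdt_address = address;
        }

        /* From here on the RSDP is not touched again. */
        if (!root_table_usable(address))
                address = rsdt_address; /* fall back to the cached RSDT address */

        return address;
}

int main(void)
{
        struct fake_rsdp rsdp = {
                .revision = 2,
                .rsdt_physical_address = 0x0fee0000u,
                .xsdt_physical_address = 0x1fee00000ull,        /* "bad" XSDT */
        };

        printf("root table at %#llx\n",
               (unsigned long long)pick_root_table(&rsdp));
        return 0;
}
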
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 9a2c63b20050..6e7b2a12860d 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -36,6 +36,12 @@
36#include <linux/suspend.h> 36#include <linux/suspend.h>
37#include <asm/unaligned.h> 37#include <asm/unaligned.h>
38 38
39#ifdef CONFIG_ACPI_PROCFS_POWER
40#include <linux/proc_fs.h>
41#include <linux/seq_file.h>
42#include <asm/uaccess.h>
43#endif
44
39#include <linux/acpi.h> 45#include <linux/acpi.h>
40#include <linux/power_supply.h> 46#include <linux/power_supply.h>
41 47
@@ -64,6 +70,19 @@ static unsigned int cache_time = 1000;
64module_param(cache_time, uint, 0644); 70module_param(cache_time, uint, 0644);
65MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); 71MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
66 72
73#ifdef CONFIG_ACPI_PROCFS_POWER
74extern struct proc_dir_entry *acpi_lock_battery_dir(void);
75extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
76
77enum acpi_battery_files {
78 info_tag = 0,
79 state_tag,
80 alarm_tag,
81 ACPI_BATTERY_NUMFILES,
82};
83
84#endif
85
67static const struct acpi_device_id battery_device_ids[] = { 86static const struct acpi_device_id battery_device_ids[] = {
68 {"PNP0C0A", 0}, 87 {"PNP0C0A", 0},
69 {"", 0}, 88 {"", 0},
@@ -299,6 +318,14 @@ static enum power_supply_property energy_battery_props[] = {
299 POWER_SUPPLY_PROP_SERIAL_NUMBER, 318 POWER_SUPPLY_PROP_SERIAL_NUMBER,
300}; 319};
301 320
321#ifdef CONFIG_ACPI_PROCFS_POWER
322inline char *acpi_battery_units(struct acpi_battery *battery)
323{
324 return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
325 "mA" : "mW";
326}
327#endif
328
302/* -------------------------------------------------------------------------- 329/* --------------------------------------------------------------------------
303 Battery Management 330 Battery Management
304 -------------------------------------------------------------------------- */ 331 -------------------------------------------------------------------------- */
@@ -717,6 +744,279 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
717} 744}
718 745
719/* -------------------------------------------------------------------------- 746/* --------------------------------------------------------------------------
747 FS Interface (/proc)
748 -------------------------------------------------------------------------- */
749
750#ifdef CONFIG_ACPI_PROCFS_POWER
751static struct proc_dir_entry *acpi_battery_dir;
752
753static int acpi_battery_print_info(struct seq_file *seq, int result)
754{
755 struct acpi_battery *battery = seq->private;
756
757 if (result)
758 goto end;
759
760 seq_printf(seq, "present: %s\n",
761 acpi_battery_present(battery) ? "yes" : "no");
762 if (!acpi_battery_present(battery))
763 goto end;
764 if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
765 seq_printf(seq, "design capacity: unknown\n");
766 else
767 seq_printf(seq, "design capacity: %d %sh\n",
768 battery->design_capacity,
769 acpi_battery_units(battery));
770
771 if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
772 seq_printf(seq, "last full capacity: unknown\n");
773 else
774 seq_printf(seq, "last full capacity: %d %sh\n",
775 battery->full_charge_capacity,
776 acpi_battery_units(battery));
777
778 seq_printf(seq, "battery technology: %srechargeable\n",
779 (!battery->technology)?"non-":"");
780
781 if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
782 seq_printf(seq, "design voltage: unknown\n");
783 else
784 seq_printf(seq, "design voltage: %d mV\n",
785 battery->design_voltage);
786 seq_printf(seq, "design capacity warning: %d %sh\n",
787 battery->design_capacity_warning,
788 acpi_battery_units(battery));
789 seq_printf(seq, "design capacity low: %d %sh\n",
790 battery->design_capacity_low,
791 acpi_battery_units(battery));
792 seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
793 seq_printf(seq, "capacity granularity 1: %d %sh\n",
794 battery->capacity_granularity_1,
795 acpi_battery_units(battery));
796 seq_printf(seq, "capacity granularity 2: %d %sh\n",
797 battery->capacity_granularity_2,
798 acpi_battery_units(battery));
799 seq_printf(seq, "model number: %s\n", battery->model_number);
800 seq_printf(seq, "serial number: %s\n", battery->serial_number);
801 seq_printf(seq, "battery type: %s\n", battery->type);
802 seq_printf(seq, "OEM info: %s\n", battery->oem_info);
803 end:
804 if (result)
805 seq_printf(seq, "ERROR: Unable to read battery info\n");
806 return result;
807}
808
809static int acpi_battery_print_state(struct seq_file *seq, int result)
810{
811 struct acpi_battery *battery = seq->private;
812
813 if (result)
814 goto end;
815
816 seq_printf(seq, "present: %s\n",
817 acpi_battery_present(battery) ? "yes" : "no");
818 if (!acpi_battery_present(battery))
819 goto end;
820
821 seq_printf(seq, "capacity state: %s\n",
822 (battery->state & 0x04) ? "critical" : "ok");
823 if ((battery->state & 0x01) && (battery->state & 0x02))
824 seq_printf(seq,
825 "charging state: charging/discharging\n");
826 else if (battery->state & 0x01)
827 seq_printf(seq, "charging state: discharging\n");
828 else if (battery->state & 0x02)
829 seq_printf(seq, "charging state: charging\n");
830 else
831 seq_printf(seq, "charging state: charged\n");
832
833 if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
834 seq_printf(seq, "present rate: unknown\n");
835 else
836 seq_printf(seq, "present rate: %d %s\n",
837 battery->rate_now, acpi_battery_units(battery));
838
839 if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
840 seq_printf(seq, "remaining capacity: unknown\n");
841 else
842 seq_printf(seq, "remaining capacity: %d %sh\n",
843 battery->capacity_now, acpi_battery_units(battery));
844 if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
845 seq_printf(seq, "present voltage: unknown\n");
846 else
847 seq_printf(seq, "present voltage: %d mV\n",
848 battery->voltage_now);
849 end:
850 if (result)
851 seq_printf(seq, "ERROR: Unable to read battery state\n");
852
853 return result;
854}
855
856static int acpi_battery_print_alarm(struct seq_file *seq, int result)
857{
858 struct acpi_battery *battery = seq->private;
859
860 if (result)
861 goto end;
862
863 if (!acpi_battery_present(battery)) {
864 seq_printf(seq, "present: no\n");
865 goto end;
866 }
867 seq_printf(seq, "alarm: ");
868 if (!battery->alarm)
869 seq_printf(seq, "unsupported\n");
870 else
871 seq_printf(seq, "%u %sh\n", battery->alarm,
872 acpi_battery_units(battery));
873 end:
874 if (result)
875 seq_printf(seq, "ERROR: Unable to read battery alarm\n");
876 return result;
877}
878
879static ssize_t acpi_battery_write_alarm(struct file *file,
880 const char __user * buffer,
881 size_t count, loff_t * ppos)
882{
883 int result = 0;
884 char alarm_string[12] = { '\0' };
885 struct seq_file *m = file->private_data;
886 struct acpi_battery *battery = m->private;
887
888 if (!battery || (count > sizeof(alarm_string) - 1))
889 return -EINVAL;
890 if (!acpi_battery_present(battery)) {
891 result = -ENODEV;
892 goto end;
893 }
894 if (copy_from_user(alarm_string, buffer, count)) {
895 result = -EFAULT;
896 goto end;
897 }
898 alarm_string[count] = '\0';
899 battery->alarm = simple_strtol(alarm_string, NULL, 0);
900 result = acpi_battery_set_alarm(battery);
901 end:
902 if (!result)
903 return count;
904 return result;
905}
906
907typedef int(*print_func)(struct seq_file *seq, int result);
908
909static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
910 acpi_battery_print_info,
911 acpi_battery_print_state,
912 acpi_battery_print_alarm,
913};
914
915static int acpi_battery_read(int fid, struct seq_file *seq)
916{
917 struct acpi_battery *battery = seq->private;
918 int result = acpi_battery_update(battery);
919 return acpi_print_funcs[fid](seq, result);
920}
921
922#define DECLARE_FILE_FUNCTIONS(_name) \
923static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
924{ \
925 return acpi_battery_read(_name##_tag, seq); \
926} \
927static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
928{ \
929 return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
930}
931
932DECLARE_FILE_FUNCTIONS(info);
933DECLARE_FILE_FUNCTIONS(state);
934DECLARE_FILE_FUNCTIONS(alarm);
935
936#undef DECLARE_FILE_FUNCTIONS
937
938#define FILE_DESCRIPTION_RO(_name) \
939 { \
940 .name = __stringify(_name), \
941 .mode = S_IRUGO, \
942 .ops = { \
943 .open = acpi_battery_##_name##_open_fs, \
944 .read = seq_read, \
945 .llseek = seq_lseek, \
946 .release = single_release, \
947 .owner = THIS_MODULE, \
948 }, \
949 }
950
951#define FILE_DESCRIPTION_RW(_name) \
952 { \
953 .name = __stringify(_name), \
954 .mode = S_IFREG | S_IRUGO | S_IWUSR, \
955 .ops = { \
956 .open = acpi_battery_##_name##_open_fs, \
957 .read = seq_read, \
958 .llseek = seq_lseek, \
959 .write = acpi_battery_write_##_name, \
960 .release = single_release, \
961 .owner = THIS_MODULE, \
962 }, \
963 }
964
965static const struct battery_file {
966 struct file_operations ops;
967 umode_t mode;
968 const char *name;
969} acpi_battery_file[] = {
970 FILE_DESCRIPTION_RO(info),
971 FILE_DESCRIPTION_RO(state),
972 FILE_DESCRIPTION_RW(alarm),
973};
974
975#undef FILE_DESCRIPTION_RO
976#undef FILE_DESCRIPTION_RW
977
978static int acpi_battery_add_fs(struct acpi_device *device)
979{
980 struct proc_dir_entry *entry = NULL;
981 int i;
982
983 printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
984 " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
985 if (!acpi_device_dir(device)) {
986 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
987 acpi_battery_dir);
988 if (!acpi_device_dir(device))
989 return -ENODEV;
990 }
991
992 for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
993 entry = proc_create_data(acpi_battery_file[i].name,
994 acpi_battery_file[i].mode,
995 acpi_device_dir(device),
996 &acpi_battery_file[i].ops,
997 acpi_driver_data(device));
998 if (!entry)
999 return -ENODEV;
1000 }
1001 return 0;
1002}
1003
1004static void acpi_battery_remove_fs(struct acpi_device *device)
1005{
1006 int i;
1007 if (!acpi_device_dir(device))
1008 return;
1009 for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
1010 remove_proc_entry(acpi_battery_file[i].name,
1011 acpi_device_dir(device));
1012
1013 remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
1014 acpi_device_dir(device) = NULL;
1015}
1016
1017#endif
1018
1019/* --------------------------------------------------------------------------
720 Driver Interface 1020 Driver Interface
721 -------------------------------------------------------------------------- */ 1021 -------------------------------------------------------------------------- */
722 1022
@@ -790,6 +1090,15 @@ static int acpi_battery_add(struct acpi_device *device)
790 result = acpi_battery_update(battery); 1090 result = acpi_battery_update(battery);
791 if (result) 1091 if (result)
792 goto fail; 1092 goto fail;
1093#ifdef CONFIG_ACPI_PROCFS_POWER
1094 result = acpi_battery_add_fs(device);
1095#endif
1096 if (result) {
1097#ifdef CONFIG_ACPI_PROCFS_POWER
1098 acpi_battery_remove_fs(device);
1099#endif
1100 goto fail;
1101 }
793 1102
794 printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", 1103 printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
795 ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), 1104 ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -816,6 +1125,9 @@ static int acpi_battery_remove(struct acpi_device *device)
816 return -EINVAL; 1125 return -EINVAL;
817 battery = acpi_driver_data(device); 1126 battery = acpi_driver_data(device);
818 unregister_pm_notifier(&battery->pm_nb); 1127 unregister_pm_notifier(&battery->pm_nb);
1128#ifdef CONFIG_ACPI_PROCFS_POWER
1129 acpi_battery_remove_fs(device);
1130#endif
819 sysfs_remove_battery(battery); 1131 sysfs_remove_battery(battery);
820 mutex_destroy(&battery->lock); 1132 mutex_destroy(&battery->lock);
821 mutex_destroy(&battery->sysfs_lock); 1133 mutex_destroy(&battery->sysfs_lock);
@@ -866,7 +1178,19 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
866 1178
867 if (dmi_check_system(bat_dmi_table)) 1179 if (dmi_check_system(bat_dmi_table))
868 battery_bix_broken_package = 1; 1180 battery_bix_broken_package = 1;
869 acpi_bus_register_driver(&acpi_battery_driver); 1181
1182#ifdef CONFIG_ACPI_PROCFS_POWER
1183 acpi_battery_dir = acpi_lock_battery_dir();
1184 if (!acpi_battery_dir)
1185 return;
1186#endif
1187 if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
1188#ifdef CONFIG_ACPI_PROCFS_POWER
1189 acpi_unlock_battery_dir(acpi_battery_dir);
1190#endif
1191 return;
1192 }
1193 return;
870} 1194}
871 1195
872static int __init acpi_battery_init(void) 1196static int __init acpi_battery_init(void)
@@ -878,6 +1202,9 @@ static int __init acpi_battery_init(void)
878static void __exit acpi_battery_exit(void) 1202static void __exit acpi_battery_exit(void)
879{ 1203{
880 acpi_bus_unregister_driver(&acpi_battery_driver); 1204 acpi_bus_unregister_driver(&acpi_battery_driver);
1205#ifdef CONFIG_ACPI_PROCFS_POWER
1206 acpi_unlock_battery_dir(acpi_battery_dir);
1207#endif
881} 1208}
882 1209
883module_init(acpi_battery_init); 1210module_init(acpi_battery_init);
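
Note: the procfs plumbing restored above hides its boilerplate behind DECLARE_FILE_FUNCTIONS() and FILE_DESCRIPTION_RO()/_RW(). Expanded by hand for the read-only "info" file, the macros reduce to the usual seq_file pattern shown below; this is only an illustration of what the preprocessor generates from the patch, not additional code, and it is a fragment that relies on the surrounding battery.c definitions.

/* Hand expansion of DECLARE_FILE_FUNCTIONS(info): */
static int acpi_battery_read_info(struct seq_file *seq, void *offset)
{
        return acpi_battery_read(info_tag, seq);
}

static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_battery_read_info, PDE_DATA(inode));
}

/* ...and of FILE_DESCRIPTION_RO(info), as used in the acpi_battery_file[] table: */
        {
                .name = "info",
                .mode = S_IRUGO,
                .ops = {
                        .open = acpi_battery_info_open_fs,
                        .read = seq_read,
                        .llseek = seq_lseek,
                        .release = single_release,
                        .owner = THIS_MODULE,
                },
        },
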
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index afec4526c48a..3d8413d02a97 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -314,6 +314,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
314 DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), 314 DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
315 }, 315 },
316 }, 316 },
317 {
318 .callback = dmi_disable_osi_win8,
319 .ident = "Dell Inspiron 7737",
320 .matches = {
321 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
322 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
323 },
324 },
317 325
318 /* 326 /*
319 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. 327 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
@@ -374,6 +382,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
374 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"), 382 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
375 }, 383 },
376 }, 384 },
385 /*
386 * Without this this EEEpc exports a non working WMI interface, with
387 * this it exports a working "good old" eeepc_laptop interface, fixing
388 * both brightness control, and rfkill not working.
389 */
390 {
391 .callback = dmi_enable_osi_linux,
392 .ident = "Asus EEE PC 1015PX",
393 .matches = {
394 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
395 DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
396 },
397 },
377 {} 398 {}
378}; 399};
379 400
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index e7e5844c87d0..cf925c4f36b7 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -380,9 +380,8 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
380 break; 380 break;
381 381
382 default: 382 default:
383 acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type); 383 acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
384 ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY; 384 break;
385 goto err;
386 } 385 }
387 386
388 adev = acpi_bus_get_acpi_device(handle); 387 adev = acpi_bus_get_acpi_device(handle);
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
new file mode 100644
index 000000000000..6c9ee68e46fb
--- /dev/null
+++ b/drivers/acpi/cm_sbs.c
@@ -0,0 +1,105 @@
1/*
2 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or (at
7 * your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
17 *
18 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/acpi.h>
25#include <linux/types.h>
26#include <linux/proc_fs.h>
27#include <linux/seq_file.h>
28#include <acpi/acpi_bus.h>
29#include <acpi/acpi_drivers.h>
30
31#define PREFIX "ACPI: "
32
33ACPI_MODULE_NAME("cm_sbs");
34#define ACPI_AC_CLASS "ac_adapter"
35#define ACPI_BATTERY_CLASS "battery"
36#define _COMPONENT ACPI_SBS_COMPONENT
37static struct proc_dir_entry *acpi_ac_dir;
38static struct proc_dir_entry *acpi_battery_dir;
39
40static DEFINE_MUTEX(cm_sbs_mutex);
41
42static int lock_ac_dir_cnt;
43static int lock_battery_dir_cnt;
44
45struct proc_dir_entry *acpi_lock_ac_dir(void)
46{
47 mutex_lock(&cm_sbs_mutex);
48 if (!acpi_ac_dir)
49 acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
50 if (acpi_ac_dir) {
51 lock_ac_dir_cnt++;
52 } else {
53 printk(KERN_ERR PREFIX
54 "Cannot create %s\n", ACPI_AC_CLASS);
55 }
56 mutex_unlock(&cm_sbs_mutex);
57 return acpi_ac_dir;
58}
59EXPORT_SYMBOL(acpi_lock_ac_dir);
60
61void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
62{
63 mutex_lock(&cm_sbs_mutex);
64 if (acpi_ac_dir_param)
65 lock_ac_dir_cnt--;
66 if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
67 remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
68 acpi_ac_dir = NULL;
69 }
70 mutex_unlock(&cm_sbs_mutex);
71}
72EXPORT_SYMBOL(acpi_unlock_ac_dir);
73
74struct proc_dir_entry *acpi_lock_battery_dir(void)
75{
76 mutex_lock(&cm_sbs_mutex);
77 if (!acpi_battery_dir) {
78 acpi_battery_dir =
79 proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
80 }
81 if (acpi_battery_dir) {
82 lock_battery_dir_cnt++;
83 } else {
84 printk(KERN_ERR PREFIX
85 "Cannot create %s\n", ACPI_BATTERY_CLASS);
86 }
87 mutex_unlock(&cm_sbs_mutex);
88 return acpi_battery_dir;
89}
90EXPORT_SYMBOL(acpi_lock_battery_dir);
91
92void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
93{
94 mutex_lock(&cm_sbs_mutex);
95 if (acpi_battery_dir_param)
96 lock_battery_dir_cnt--;
97 if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
98 && acpi_battery_dir) {
99 remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
100 acpi_battery_dir = NULL;
101 }
102 mutex_unlock(&cm_sbs_mutex);
103 return;
104}
105EXPORT_SYMBOL(acpi_unlock_battery_dir);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d7d32c28829b..ad11ba4a412d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -206,13 +206,13 @@ unlock:
206 spin_unlock_irqrestore(&ec->lock, flags); 206 spin_unlock_irqrestore(&ec->lock, flags);
207} 207}
208 208
209static int acpi_ec_sync_query(struct acpi_ec *ec); 209static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
210 210
211static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) 211static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
212{ 212{
213 if (state & ACPI_EC_FLAG_SCI) { 213 if (state & ACPI_EC_FLAG_SCI) {
214 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) 214 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
215 return acpi_ec_sync_query(ec); 215 return acpi_ec_sync_query(ec, NULL);
216 } 216 }
217 return 0; 217 return 0;
218} 218}
@@ -443,10 +443,8 @@ acpi_handle ec_get_handle(void)
443 443
444EXPORT_SYMBOL(ec_get_handle); 444EXPORT_SYMBOL(ec_get_handle);
445 445
446static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
447
448/* 446/*
449 * Clears stale _Q events that might have accumulated in the EC. 447 * Process _Q events that might have accumulated in the EC.
450 * Run with locked ec mutex. 448 * Run with locked ec mutex.
451 */ 449 */
452static void acpi_ec_clear(struct acpi_ec *ec) 450static void acpi_ec_clear(struct acpi_ec *ec)
@@ -455,7 +453,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
455 u8 value = 0; 453 u8 value = 0;
456 454
457 for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { 455 for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
458 status = acpi_ec_query_unlocked(ec, &value); 456 status = acpi_ec_sync_query(ec, &value);
459 if (status || !value) 457 if (status || !value)
460 break; 458 break;
461 } 459 }
@@ -582,13 +580,18 @@ static void acpi_ec_run(void *cxt)
582 kfree(handler); 580 kfree(handler);
583} 581}
584 582
585static int acpi_ec_sync_query(struct acpi_ec *ec) 583static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
586{ 584{
587 u8 value = 0; 585 u8 value = 0;
588 int status; 586 int status;
589 struct acpi_ec_query_handler *handler, *copy; 587 struct acpi_ec_query_handler *handler, *copy;
590 if ((status = acpi_ec_query_unlocked(ec, &value))) 588
589 status = acpi_ec_query_unlocked(ec, &value);
590 if (data)
591 *data = value;
592 if (status)
591 return status; 593 return status;
594
592 list_for_each_entry(handler, &ec->list, node) { 595 list_for_each_entry(handler, &ec->list, node) {
593 if (value == handler->query_bit) { 596 if (value == handler->query_bit) {
594 /* have custom handler for this bit */ 597 /* have custom handler for this bit */
@@ -612,7 +615,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
612 if (!ec) 615 if (!ec)
613 return; 616 return;
614 mutex_lock(&ec->mutex); 617 mutex_lock(&ec->mutex);
615 acpi_ec_sync_query(ec); 618 acpi_ec_sync_query(ec, NULL);
616 mutex_unlock(&ec->mutex); 619 mutex_unlock(&ec->mutex);
617} 620}
618 621
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index c1e31a41f949..25bbc55dca89 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1278,8 +1278,8 @@ static int __init acpi_thermal_init(void)
1278 1278
1279static void __exit acpi_thermal_exit(void) 1279static void __exit acpi_thermal_exit(void)
1280{ 1280{
1281 destroy_workqueue(acpi_thermal_pm_queue);
1282 acpi_bus_unregister_driver(&acpi_thermal_driver); 1281 acpi_bus_unregister_driver(&acpi_thermal_driver);
1282 destroy_workqueue(acpi_thermal_pm_queue);
1283 1283
1284 return; 1284 return;
1285} 1285}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 8b6990e417ec..f8bc5a755dda 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -457,10 +457,10 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
457 }, 457 },
458 { 458 {
459 .callback = video_set_use_native_backlight, 459 .callback = video_set_use_native_backlight,
460 .ident = "ThinkPad T430s", 460 .ident = "ThinkPad T430 and T430s",
461 .matches = { 461 .matches = {
462 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 462 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
463 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"), 463 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"),
464 }, 464 },
465 }, 465 },
466 { 466 {
@@ -472,7 +472,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
472 }, 472 },
473 }, 473 },
474 { 474 {
475 .callback = video_set_use_native_backlight, 475 .callback = video_set_use_native_backlight,
476 .ident = "ThinkPad X1 Carbon", 476 .ident = "ThinkPad X1 Carbon",
477 .matches = { 477 .matches = {
478 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 478 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -500,7 +500,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
500 .ident = "Dell Inspiron 7520", 500 .ident = "Dell Inspiron 7520",
501 .matches = { 501 .matches = {
502 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 502 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
503 DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"), 503 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
504 }, 504 },
505 }, 505 },
506 { 506 {
@@ -513,6 +513,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
513 }, 513 },
514 { 514 {
515 .callback = video_set_use_native_backlight, 515 .callback = video_set_use_native_backlight,
516 .ident = "Acer Aspire 5742G",
517 .matches = {
518 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
519 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"),
520 },
521 },
522 {
523 .callback = video_set_use_native_backlight,
516 .ident = "Acer Aspire V5-431", 524 .ident = "Acer Aspire V5-431",
517 .matches = { 525 .matches = {
518 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 526 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 20e03a7eb8b4..0033fafc470b 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -116,7 +116,7 @@ config AHCI_ST
116 116
117config AHCI_IMX 117config AHCI_IMX
118 tristate "Freescale i.MX AHCI SATA support" 118 tristate "Freescale i.MX AHCI SATA support"
119 depends on MFD_SYSCON 119 depends on MFD_SYSCON && (ARCH_MXC || COMPILE_TEST)
120 help 120 help
121 This option enables support for the Freescale i.MX SoC's 121 This option enables support for the Freescale i.MX SoC's
122 onboard AHCI SATA. 122 onboard AHCI SATA.
@@ -134,8 +134,7 @@ config AHCI_SUNXI
134 134
135config AHCI_XGENE 135config AHCI_XGENE
136 tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support" 136 tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
137 depends on ARM64 || COMPILE_TEST 137 depends on PHY_XGENE
138 select PHY_XGENE
139 help 138 help
140 This option enables support for APM X-Gene SoC SATA host controller. 139 This option enables support for APM X-Gene SoC SATA host controller.
141 140
@@ -816,7 +815,7 @@ config PATA_AT32
816 815
817config PATA_AT91 816config PATA_AT91
818 tristate "PATA support for AT91SAM9260" 817 tristate "PATA support for AT91SAM9260"
819 depends on ARM && ARCH_AT91 818 depends on ARM && SOC_AT91SAM9
820 help 819 help
821 This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. 820 This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
822 821
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5a0bf8ed649b..60707814a84b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1115,6 +1115,17 @@ static bool ahci_broken_online(struct pci_dev *pdev)
1115 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); 1115 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
1116} 1116}
1117 1117
1118static bool ahci_broken_devslp(struct pci_dev *pdev)
1119{
1120 /* device with broken DEVSLP but still showing SDS capability */
1121 static const struct pci_device_id ids[] = {
1122 { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
1123 {}
1124 };
1125
1126 return pci_match_id(ids, pdev);
1127}
1128
1118#ifdef CONFIG_ATA_ACPI 1129#ifdef CONFIG_ATA_ACPI
1119static void ahci_gtf_filter_workaround(struct ata_host *host) 1130static void ahci_gtf_filter_workaround(struct ata_host *host)
1120{ 1131{
@@ -1164,9 +1175,9 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
1164#endif 1175#endif
1165 1176
1166static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports, 1177static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
1167 struct ahci_host_priv *hpriv) 1178 struct ahci_host_priv *hpriv)
1168{ 1179{
1169 int nvec; 1180 int rc, nvec;
1170 1181
1171 if (hpriv->flags & AHCI_HFLAG_NO_MSI) 1182 if (hpriv->flags & AHCI_HFLAG_NO_MSI)
1172 goto intx; 1183 goto intx;
@@ -1183,12 +1194,19 @@ static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
1183 if (nvec < n_ports) 1194 if (nvec < n_ports)
1184 goto single_msi; 1195 goto single_msi;
1185 1196
1186 nvec = pci_enable_msi_range(pdev, nvec, nvec); 1197 rc = pci_enable_msi_exact(pdev, nvec);
1187 if (nvec == -ENOSPC) 1198 if (rc == -ENOSPC)
1188 goto single_msi; 1199 goto single_msi;
1189 else if (nvec < 0) 1200 else if (rc < 0)
1190 goto intx; 1201 goto intx;
1191 1202
 1203 /* fall back to single MSI mode if the controller enforced MRSM mode */
1204 if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) {
1205 pci_disable_msi(pdev);
1206 printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
1207 goto single_msi;
1208 }
1209
1192 return nvec; 1210 return nvec;
1193 1211
1194single_msi: 1212single_msi:
@@ -1232,18 +1250,18 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
1232 return rc; 1250 return rc;
1233 1251
1234 for (i = 0; i < host->n_ports; i++) { 1252 for (i = 0; i < host->n_ports; i++) {
1235 const char* desc;
1236 struct ahci_port_priv *pp = host->ports[i]->private_data; 1253 struct ahci_port_priv *pp = host->ports[i]->private_data;
1237 1254
1238 /* pp is NULL for dummy ports */ 1255 /* Do not receive interrupts sent by dummy ports */
1239 if (pp) 1256 if (!pp) {
1240 desc = pp->irq_desc; 1257 disable_irq(irq + i);
1241 else 1258 continue;
1242 desc = dev_driver_string(host->dev); 1259 }
1243 1260
1244 rc = devm_request_threaded_irq(host->dev, 1261 rc = devm_request_threaded_irq(host->dev, irq + i,
1245 irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED, 1262 ahci_hw_interrupt,
1246 desc, host->ports[i]); 1263 ahci_thread_fn, IRQF_SHARED,
1264 pp->irq_desc, host->ports[i]);
1247 if (rc) 1265 if (rc)
1248 goto out_free_irqs; 1266 goto out_free_irqs;
1249 } 1267 }
@@ -1357,6 +1375,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1357 1375
1358 hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; 1376 hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
1359 1377
 1378 /* must set the flag prior to saving the config for it to take effect */
1379 if (ahci_broken_devslp(pdev))
1380 hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
1381
1360 /* save initial config */ 1382 /* save initial config */
1361 ahci_pci_save_initial_config(pdev, hpriv); 1383 ahci_pci_save_initial_config(pdev, hpriv);
1362 1384
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 51af275b3388..af63c75c2001 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -94,6 +94,7 @@ enum {
94 /* HOST_CTL bits */ 94 /* HOST_CTL bits */
95 HOST_RESET = (1 << 0), /* reset controller; self-clear */ 95 HOST_RESET = (1 << 0), /* reset controller; self-clear */
96 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */ 96 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
97 HOST_MRSM = (1 << 2), /* MSI Revert to Single Message */
97 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */ 98 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
98 99
99 /* HOST_CAP bits */ 100 /* HOST_CAP bits */
@@ -235,6 +236,7 @@ enum {
235 port start (wait until 236 port start (wait until
236 error-handling stage) */ 237 error-handling stage) */
237 AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */ 238 AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */
239 AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
238 240
239 /* ap->flags bits */ 241 /* ap->flags bits */
240 242
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 497c7abe1c7d..8befeb69eeb1 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -29,9 +29,25 @@
29#include "ahci.h" 29#include "ahci.h"
30 30
31enum { 31enum {
32 PORT_PHY_CTL = 0x178, /* Port0 PHY Control */ 32 /* Timer 1-ms Register */
33 PORT_PHY_CTL_PDDQ_LOC = 0x100000, /* PORT_PHY_CTL bits */ 33 IMX_TIMER1MS = 0x00e0,
34 HOST_TIMER1MS = 0xe0, /* Timer 1-ms */ 34 /* Port0 PHY Control Register */
35 IMX_P0PHYCR = 0x0178,
36 IMX_P0PHYCR_TEST_PDDQ = 1 << 20,
37 IMX_P0PHYCR_CR_READ = 1 << 19,
38 IMX_P0PHYCR_CR_WRITE = 1 << 18,
39 IMX_P0PHYCR_CR_CAP_DATA = 1 << 17,
40 IMX_P0PHYCR_CR_CAP_ADDR = 1 << 16,
41 /* Port0 PHY Status Register */
42 IMX_P0PHYSR = 0x017c,
43 IMX_P0PHYSR_CR_ACK = 1 << 18,
44 IMX_P0PHYSR_CR_DATA_OUT = 0xffff << 0,
45 /* Lane0 Output Status Register */
46 IMX_LANE0_OUT_STAT = 0x2003,
47 IMX_LANE0_OUT_STAT_RX_PLL_STATE = 1 << 1,
48 /* Clock Reset Register */
49 IMX_CLOCK_RESET = 0x7f3f,
50 IMX_CLOCK_RESET_RESET = 1 << 0,
35}; 51};
36 52
37enum ahci_imx_type { 53enum ahci_imx_type {
@@ -54,9 +70,149 @@ MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support
54 70
55static void ahci_imx_host_stop(struct ata_host *host); 71static void ahci_imx_host_stop(struct ata_host *host);
56 72
73static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert)
74{
75 int timeout = 10;
76 u32 crval;
77 u32 srval;
78
79 /* Assert or deassert the bit */
80 crval = readl(mmio + IMX_P0PHYCR);
81 if (assert)
82 crval |= bit;
83 else
84 crval &= ~bit;
85 writel(crval, mmio + IMX_P0PHYCR);
86
87 /* Wait for the cr_ack signal */
88 do {
89 srval = readl(mmio + IMX_P0PHYSR);
90 if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK)
91 break;
92 usleep_range(100, 200);
93 } while (--timeout);
94
95 return timeout ? 0 : -ETIMEDOUT;
96}
97
98static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio)
99{
100 u32 crval = addr;
101 int ret;
102
103 /* Supply the address on cr_data_in */
104 writel(crval, mmio + IMX_P0PHYCR);
105
106 /* Assert the cr_cap_addr signal */
107 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true);
108 if (ret)
109 return ret;
110
111 /* Deassert cr_cap_addr */
112 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false);
113 if (ret)
114 return ret;
115
116 return 0;
117}
118
119static int imx_phy_reg_write(u16 val, void __iomem *mmio)
120{
121 u32 crval = val;
122 int ret;
123
124 /* Supply the data on cr_data_in */
125 writel(crval, mmio + IMX_P0PHYCR);
126
127 /* Assert the cr_cap_data signal */
128 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true);
129 if (ret)
130 return ret;
131
132 /* Deassert cr_cap_data */
133 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false);
134 if (ret)
135 return ret;
136
137 if (val & IMX_CLOCK_RESET_RESET) {
138 /*
139 * In case we're resetting the phy, it's unable to acknowledge,
140 * so we return immediately here.
141 */
142 crval |= IMX_P0PHYCR_CR_WRITE;
143 writel(crval, mmio + IMX_P0PHYCR);
144 goto out;
145 }
146
147 /* Assert the cr_write signal */
148 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true);
149 if (ret)
150 return ret;
151
152 /* Deassert cr_write */
153 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false);
154 if (ret)
155 return ret;
156
157out:
158 return 0;
159}
160
161static int imx_phy_reg_read(u16 *val, void __iomem *mmio)
162{
163 int ret;
164
165 /* Assert the cr_read signal */
166 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true);
167 if (ret)
168 return ret;
169
170 /* Capture the data from cr_data_out[] */
171 *val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT;
172
173 /* Deassert cr_read */
174 ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false);
175 if (ret)
176 return ret;
177
178 return 0;
179}
180
181static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
182{
183 void __iomem *mmio = hpriv->mmio;
184 int timeout = 10;
185 u16 val;
186 int ret;
187
188 /* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */
189 ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio);
190 if (ret)
191 return ret;
192 ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio);
193 if (ret)
194 return ret;
195
196 /* Wait for PHY RX_PLL to be stable */
197 do {
198 usleep_range(100, 200);
199 ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio);
200 if (ret)
201 return ret;
202 ret = imx_phy_reg_read(&val, mmio);
203 if (ret)
204 return ret;
205 if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE)
206 break;
207 } while (--timeout);
208
209 return timeout ? 0 : -ETIMEDOUT;
210}
211
57static int imx_sata_enable(struct ahci_host_priv *hpriv) 212static int imx_sata_enable(struct ahci_host_priv *hpriv)
58{ 213{
59 struct imx_ahci_priv *imxpriv = hpriv->plat_data; 214 struct imx_ahci_priv *imxpriv = hpriv->plat_data;
215 struct device *dev = &imxpriv->ahci_pdev->dev;
60 int ret; 216 int ret;
61 217
62 if (imxpriv->no_device) 218 if (imxpriv->no_device)
@@ -101,6 +257,14 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
101 regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13, 257 regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
102 IMX6Q_GPR13_SATA_MPLL_CLK_EN, 258 IMX6Q_GPR13_SATA_MPLL_CLK_EN,
103 IMX6Q_GPR13_SATA_MPLL_CLK_EN); 259 IMX6Q_GPR13_SATA_MPLL_CLK_EN);
260
261 usleep_range(100, 200);
262
263 ret = imx_sata_phy_reset(hpriv);
264 if (ret) {
265 dev_err(dev, "failed to reset phy: %d\n", ret);
266 goto disable_regulator;
267 }
104 } 268 }
105 269
106 usleep_range(1000, 2000); 270 usleep_range(1000, 2000);
@@ -156,8 +320,8 @@ static void ahci_imx_error_handler(struct ata_port *ap)
156 * without full reset once the pddq mode is enabled making it 320 * without full reset once the pddq mode is enabled making it
157 * impossible to use as part of libata LPM. 321 * impossible to use as part of libata LPM.
158 */ 322 */
159 reg_val = readl(mmio + PORT_PHY_CTL); 323 reg_val = readl(mmio + IMX_P0PHYCR);
160 writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL); 324 writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
161 imx_sata_disable(hpriv); 325 imx_sata_disable(hpriv);
162 imxpriv->no_device = true; 326 imxpriv->no_device = true;
163} 327}
@@ -217,6 +381,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
217 if (!imxpriv) 381 if (!imxpriv)
218 return -ENOMEM; 382 return -ENOMEM;
219 383
384 imxpriv->ahci_pdev = pdev;
220 imxpriv->no_device = false; 385 imxpriv->no_device = false;
221 imxpriv->first_time = true; 386 imxpriv->first_time = true;
222 imxpriv->type = (enum ahci_imx_type)of_id->data; 387 imxpriv->type = (enum ahci_imx_type)of_id->data;
@@ -248,7 +413,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
248 413
249 /* 414 /*
250 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL, 415 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
251 * and IP vendor specific register HOST_TIMER1MS. 416 * and IP vendor specific register IMX_TIMER1MS.
252 * Configure CAP_SSS (support stagered spin up). 417 * Configure CAP_SSS (support stagered spin up).
253 * Implement the port0. 418 * Implement the port0.
254 * Get the ahb clock rate, and configure the TIMER1MS register. 419 * Get the ahb clock rate, and configure the TIMER1MS register.
@@ -265,7 +430,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
265 } 430 }
266 431
267 reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; 432 reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
268 writel(reg_val, hpriv->mmio + HOST_TIMER1MS); 433 writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
269 434
270 ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0); 435 ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0);
271 if (ret) 436 if (ret)
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 6bd4f660b4e1..b9861453fc81 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -452,6 +452,13 @@ void ahci_save_initial_config(struct device *dev,
452 cap &= ~HOST_CAP_SNTF; 452 cap &= ~HOST_CAP_SNTF;
453 } 453 }
454 454
455 if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
456 dev_info(dev,
457 "controller can't do DEVSLP, turning off\n");
458 cap2 &= ~HOST_CAP2_SDS;
459 cap2 &= ~HOST_CAP2_SADM;
460 }
461
455 if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) { 462 if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
456 dev_info(dev, "controller can do FBS, turning on CAP_FBS\n"); 463 dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
457 cap |= HOST_CAP_FBS; 464 cap |= HOST_CAP_FBS;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c19734d96d7e..ea83828bfea9 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4224,8 +4224,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4224 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4224 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4225 4225
4226 /* devices that don't properly handle queued TRIM commands */ 4226 /* devices that don't properly handle queued TRIM commands */
4227 { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4227 { "Micron_M500*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, },
4228 { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, 4228 { "Crucial_CT???M500SSD*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, },
4229 { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4230 { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
4229 4231
4230 /* 4232 /*
4231 * Some WD SATA-I drives spin up and down erratically when the link 4233 * Some WD SATA-I drives spin up and down erratically when the link
@@ -4792,21 +4794,26 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4792static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4794static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4793{ 4795{
4794 struct ata_queued_cmd *qc = NULL; 4796 struct ata_queued_cmd *qc = NULL;
4795 unsigned int i; 4797 unsigned int i, tag;
4796 4798
4797 /* no command while frozen */ 4799 /* no command while frozen */
4798 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4800 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4799 return NULL; 4801 return NULL;
4800 4802
4801 /* the last tag is reserved for internal command. */ 4803 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4802 for (i = 0; i < ATA_MAX_QUEUE - 1; i++) 4804 tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
4803 if (!test_and_set_bit(i, &ap->qc_allocated)) { 4805
4804 qc = __ata_qc_from_tag(ap, i); 4806 /* the last tag is reserved for internal command. */
4807 if (tag == ATA_TAG_INTERNAL)
4808 continue;
4809
4810 if (!test_and_set_bit(tag, &ap->qc_allocated)) {
4811 qc = __ata_qc_from_tag(ap, tag);
4812 qc->tag = tag;
4813 ap->last_tag = tag;
4805 break; 4814 break;
4806 } 4815 }
4807 4816 }
4808 if (qc)
4809 qc->tag = i;
4810 4817
4811 return qc; 4818 return qc;
4812} 4819}
@@ -6307,6 +6314,8 @@ int ata_host_activate(struct ata_host *host, int irq,
6307static void ata_port_detach(struct ata_port *ap) 6314static void ata_port_detach(struct ata_port *ap)
6308{ 6315{
6309 unsigned long flags; 6316 unsigned long flags;
6317 struct ata_link *link;
6318 struct ata_device *dev;
6310 6319
6311 if (!ap->ops->error_handler) 6320 if (!ap->ops->error_handler)
6312 goto skip_eh; 6321 goto skip_eh;
@@ -6326,6 +6335,13 @@ static void ata_port_detach(struct ata_port *ap)
6326 cancel_delayed_work_sync(&ap->hotplug_task); 6335 cancel_delayed_work_sync(&ap->hotplug_task);
6327 6336
6328 skip_eh: 6337 skip_eh:
6338 /* clean up zpodd on port removal */
6339 ata_for_each_link(link, ap, HOST_FIRST) {
6340 ata_for_each_dev(dev, link, ALL) {
6341 if (zpodd_dev_enabled(dev))
6342 zpodd_exit(dev);
6343 }
6344 }
6329 if (ap->pmp_link) { 6345 if (ap->pmp_link) {
6330 int i; 6346 int i;
6331 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 6347 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 6fac524c2f50..4edb1a81f63f 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -898,9 +898,12 @@ static int arasan_cf_probe(struct platform_device *pdev)
898 898
899 cf_card_detect(acdev, 0); 899 cf_card_detect(acdev, 0);
900 900
901 return ata_host_activate(host, acdev->irq, irq_handler, 0, 901 ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
902 &arasan_cf_sht); 902 &arasan_cf_sht);
903 if (!ret)
904 return 0;
903 905
906 cf_exit(acdev);
904free_clk: 907free_clk:
905 clk_put(acdev->clk); 908 clk_put(acdev->clk);
906 return ret; 909 return ret;
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index e9c87274a781..8a66f23af4c4 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -407,12 +407,13 @@ static int pata_at91_probe(struct platform_device *pdev)
407 407
408 host->private_data = info; 408 host->private_data = info;
409 409
410 return ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0, 410 ret = ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
411 gpio_is_valid(irq) ? ata_sff_interrupt : NULL, 411 gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
412 irq_flags, &pata_at91_sht); 412 irq_flags, &pata_at91_sht);
413 if (ret)
414 goto err_put;
413 415
414 if (!ret) 416 return 0;
415 return 0;
416 417
417err_put: 418err_put:
418 clk_put(info->mck); 419 clk_put(info->mck);
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index a79566d05666..0610e78c8a2a 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -594,9 +594,13 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
594 594
595 platform_set_drvdata(pdev, host); 595 platform_set_drvdata(pdev, host);
596 596
597 return ata_host_activate(host, info->irq, 597 ret = ata_host_activate(host, info->irq,
598 info->irq ? pata_s3c_irq : NULL, 598 info->irq ? pata_s3c_irq : NULL,
599 0, &pata_s3c_sht); 599 0, &pata_s3c_sht);
600 if (ret)
601 goto stop_clk;
602
603 return 0;
600 604
601stop_clk: 605stop_clk:
602 clk_disable(info->clk); 606 clk_disable(info->clk);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 0dd65281cc65..20da3ad1696b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -614,39 +614,6 @@ void device_remove_bin_file(struct device *dev,
614} 614}
615EXPORT_SYMBOL_GPL(device_remove_bin_file); 615EXPORT_SYMBOL_GPL(device_remove_bin_file);
616 616
617/**
618 * device_schedule_callback_owner - helper to schedule a callback for a device
619 * @dev: device.
620 * @func: callback function to invoke later.
621 * @owner: module owning the callback routine
622 *
623 * Attribute methods must not unregister themselves or their parent device
624 * (which would amount to the same thing). Attempts to do so will deadlock,
625 * since unregistration is mutually exclusive with driver callbacks.
626 *
627 * Instead methods can call this routine, which will attempt to allocate
628 * and schedule a workqueue request to call back @func with @dev as its
629 * argument in the workqueue's process context. @dev will be pinned until
630 * @func returns.
631 *
632 * This routine is usually called via the inline device_schedule_callback(),
633 * which automatically sets @owner to THIS_MODULE.
634 *
635 * Returns 0 if the request was submitted, -ENOMEM if storage could not
636 * be allocated, -ENODEV if a reference to @owner isn't available.
637 *
638 * NOTE: This routine won't work if CONFIG_SYSFS isn't set! It uses an
639 * underlying sysfs routine (since it is intended for use by attribute
640 * methods), and if sysfs isn't available you'll get nothing but -ENOSYS.
641 */
642int device_schedule_callback_owner(struct device *dev,
643 void (*func)(struct device *), struct module *owner)
644{
645 return sysfs_schedule_callback(&dev->kobj,
646 (void (*)(void *)) func, dev, owner);
647}
648EXPORT_SYMBOL_GPL(device_schedule_callback_owner);
649
650static void klist_children_get(struct klist_node *n) 617static void klist_children_get(struct klist_node *n)
651{ 618{
652 struct device_private *p = to_device_private_parent(n); 619 struct device_private *p = to_device_private_parent(n);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 06051767393f..62ec61e8f84a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
52static LIST_HEAD(deferred_probe_pending_list); 52static LIST_HEAD(deferred_probe_pending_list);
53static LIST_HEAD(deferred_probe_active_list); 53static LIST_HEAD(deferred_probe_active_list);
54static struct workqueue_struct *deferred_wq; 54static struct workqueue_struct *deferred_wq;
55static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
55 56
56/** 57/**
57 * deferred_probe_work_func() - Retry probing devices in the active list. 58 * deferred_probe_work_func() - Retry probing devices in the active list.
@@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
135 * This functions moves all devices from the pending list to the active 136 * This functions moves all devices from the pending list to the active
136 * list and schedules the deferred probe workqueue to process them. It 137 * list and schedules the deferred probe workqueue to process them. It
137 * should be called anytime a driver is successfully bound to a device. 138 * should be called anytime a driver is successfully bound to a device.
139 *
140 * Note, there is a race condition in multi-threaded probe. In the case where
141 * more than one device is probing at the same time, it is possible for one
142 * probe to complete successfully while another is about to defer. If the second
143 * depends on the first, then it will get put on the pending list after the
 144 * trigger event has already occurred and will be stuck there.
145 *
146 * The atomic 'deferred_trigger_count' is used to determine if a successful
147 * trigger has occurred in the midst of probing a driver. If the trigger count
148 * changes in the midst of a probe, then deferred processing should be triggered
149 * again.
138 */ 150 */
139static void driver_deferred_probe_trigger(void) 151static void driver_deferred_probe_trigger(void)
140{ 152{
@@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
147 * into the active list so they can be retried by the workqueue 159 * into the active list so they can be retried by the workqueue
148 */ 160 */
149 mutex_lock(&deferred_probe_mutex); 161 mutex_lock(&deferred_probe_mutex);
162 atomic_inc(&deferred_trigger_count);
150 list_splice_tail_init(&deferred_probe_pending_list, 163 list_splice_tail_init(&deferred_probe_pending_list,
151 &deferred_probe_active_list); 164 &deferred_probe_active_list);
152 mutex_unlock(&deferred_probe_mutex); 165 mutex_unlock(&deferred_probe_mutex);
@@ -187,8 +200,8 @@ static void driver_bound(struct device *dev)
187 return; 200 return;
188 } 201 }
189 202
190 pr_debug("driver: '%s': %s: bound to device '%s'\n", dev_name(dev), 203 pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
191 __func__, dev->driver->name); 204 __func__, dev_name(dev));
192 205
193 klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices); 206 klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
194 207
@@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
265static int really_probe(struct device *dev, struct device_driver *drv) 278static int really_probe(struct device *dev, struct device_driver *drv)
266{ 279{
267 int ret = 0; 280 int ret = 0;
281 int local_trigger_count = atomic_read(&deferred_trigger_count);
268 282
269 atomic_inc(&probe_count); 283 atomic_inc(&probe_count);
270 pr_debug("bus: '%s': %s: probing driver %s with device %s\n", 284 pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
@@ -310,6 +324,9 @@ probe_failed:
310 /* Driver requested deferred probing */ 324 /* Driver requested deferred probing */
311 dev_info(dev, "Driver %s requests probe deferral\n", drv->name); 325 dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
312 driver_deferred_probe_add(dev); 326 driver_deferred_probe_add(dev);
327 /* Did a trigger occur while probing? Need to re-trigger if yes */
328 if (local_trigger_count != atomic_read(&deferred_trigger_count))
329 driver_deferred_probe_trigger();
313 } else if (ret != -ENODEV && ret != -ENXIO) { 330 } else if (ret != -ENODEV && ret != -ENXIO) {
314 /* driver matched but the probe failed */ 331 /* driver matched but the probe failed */
315 printk(KERN_WARNING 332 printk(KERN_WARNING
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index e714709704e4..5b47210889e0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -13,6 +13,7 @@
13#include <linux/string.h> 13#include <linux/string.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/of_device.h> 15#include <linux/of_device.h>
16#include <linux/of_irq.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
@@ -87,7 +88,11 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
87 return -ENXIO; 88 return -ENXIO;
88 return dev->archdata.irqs[num]; 89 return dev->archdata.irqs[num];
89#else 90#else
90 struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); 91 struct resource *r;
92 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
93 return of_irq_get(dev->dev.of_node, num);
94
95 r = platform_get_resource(dev, IORESOURCE_IRQ, num);
91 96
92 return r ? r->start : -ENXIO; 97 return r ? r->start : -ENXIO;
93#endif 98#endif
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index bbcbd3c43926..be7c1fb7c0c9 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -39,8 +39,7 @@
39static ssize_t show_##name(struct device *dev, \ 39static ssize_t show_##name(struct device *dev, \
40 struct device_attribute *attr, char *buf) \ 40 struct device_attribute *attr, char *buf) \
41{ \ 41{ \
42 unsigned int cpu = dev->id; \ 42 return sprintf(buf, "%d\n", topology_##name(dev->id)); \
43 return sprintf(buf, "%d\n", topology_##name(cpu)); \
44} 43}
45 44
46#if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \ 45#if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 8f5565bf34cd..fa9bb742df6e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3067,7 +3067,10 @@ static int raw_cmd_copyout(int cmd, void __user *param,
3067 int ret; 3067 int ret;
3068 3068
3069 while (ptr) { 3069 while (ptr) {
3070 ret = copy_to_user(param, ptr, sizeof(*ptr)); 3070 struct floppy_raw_cmd cmd = *ptr;
3071 cmd.next = NULL;
3072 cmd.kernel_data = NULL;
3073 ret = copy_to_user(param, &cmd, sizeof(cmd));
3071 if (ret) 3074 if (ret)
3072 return -EFAULT; 3075 return -EFAULT;
3073 param += sizeof(struct floppy_raw_cmd); 3076 param += sizeof(struct floppy_raw_cmd);
@@ -3121,10 +3124,11 @@ loop:
3121 return -ENOMEM; 3124 return -ENOMEM;
3122 *rcmd = ptr; 3125 *rcmd = ptr;
3123 ret = copy_from_user(ptr, param, sizeof(*ptr)); 3126 ret = copy_from_user(ptr, param, sizeof(*ptr));
3124 if (ret)
3125 return -EFAULT;
3126 ptr->next = NULL; 3127 ptr->next = NULL;
3127 ptr->buffer_length = 0; 3128 ptr->buffer_length = 0;
3129 ptr->kernel_data = NULL;
3130 if (ret)
3131 return -EFAULT;
3128 param += sizeof(struct floppy_raw_cmd); 3132 param += sizeof(struct floppy_raw_cmd);
3129 if (ptr->cmd_count > 33) 3133 if (ptr->cmd_count > 33)
3130 /* the command may now also take up the space 3134 /* the command may now also take up the space
@@ -3140,7 +3144,6 @@ loop:
3140 for (i = 0; i < 16; i++) 3144 for (i = 0; i < 16; i++)
3141 ptr->reply[i] = 0; 3145 ptr->reply[i] = 0;
3142 ptr->resultcode = 0; 3146 ptr->resultcode = 0;
3143 ptr->kernel_data = NULL;
3144 3147
3145 if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { 3148 if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
3146 if (ptr->length <= 0) 3149 if (ptr->length <= 0)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6d8a87f252de..cb9b1f8326c3 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,11 +144,11 @@ static void virtblk_done(struct virtqueue *vq)
144 if (unlikely(virtqueue_is_broken(vq))) 144 if (unlikely(virtqueue_is_broken(vq)))
145 break; 145 break;
146 } while (!virtqueue_enable_cb(vq)); 146 } while (!virtqueue_enable_cb(vq));
147 spin_unlock_irqrestore(&vblk->vq_lock, flags);
148 147
149 /* In case queue is stopped waiting for more buffers. */ 148 /* In case queue is stopped waiting for more buffers. */
150 if (req_done) 149 if (req_done)
151 blk_mq_start_stopped_hw_queues(vblk->disk->queue); 150 blk_mq_start_stopped_hw_queues(vblk->disk->queue);
151 spin_unlock_irqrestore(&vblk->vq_lock, flags);
152} 152}
153 153
154static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) 154static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
@@ -202,8 +202,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
202 err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num); 202 err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
203 if (err) { 203 if (err) {
204 virtqueue_kick(vblk->vq); 204 virtqueue_kick(vblk->vq);
205 spin_unlock_irqrestore(&vblk->vq_lock, flags);
206 blk_mq_stop_hw_queue(hctx); 205 blk_mq_stop_hw_queue(hctx);
206 spin_unlock_irqrestore(&vblk->vq_lock, flags);
207 /* Out of mem doesn't actually happen, since we fall back 207 /* Out of mem doesn't actually happen, since we fall back
208 * to direct descriptors */ 208 * to direct descriptors */
209 if (err == -ENOMEM || err == -ENOSPC) 209 if (err == -ENOMEM || err == -ENOSPC)
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index be571fef185d..a83b57e57b63 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
82 { USB_DEVICE(0x04CA, 0x3004) }, 82 { USB_DEVICE(0x04CA, 0x3004) },
83 { USB_DEVICE(0x04CA, 0x3005) }, 83 { USB_DEVICE(0x04CA, 0x3005) },
84 { USB_DEVICE(0x04CA, 0x3006) }, 84 { USB_DEVICE(0x04CA, 0x3006) },
85 { USB_DEVICE(0x04CA, 0x3007) },
85 { USB_DEVICE(0x04CA, 0x3008) }, 86 { USB_DEVICE(0x04CA, 0x3008) },
86 { USB_DEVICE(0x04CA, 0x300b) }, 87 { USB_DEVICE(0x04CA, 0x300b) },
87 { USB_DEVICE(0x0930, 0x0219) }, 88 { USB_DEVICE(0x0930, 0x0219) },
@@ -131,6 +132,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
131 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, 132 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
132 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 133 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
133 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, 134 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
135 { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
134 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 136 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
135 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, 137 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
136 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 138 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f338b0c5a8de..a7dfbf9a3afb 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -152,6 +152,7 @@ static const struct usb_device_id blacklist_table[] = {
152 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, 152 { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
153 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, 153 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
154 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, 154 { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
155 { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
155 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, 156 { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
156 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, 157 { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
157 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, 158 { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
@@ -1485,10 +1486,8 @@ static int btusb_probe(struct usb_interface *intf,
1485 if (id->driver_info & BTUSB_BCM92035) 1486 if (id->driver_info & BTUSB_BCM92035)
1486 hdev->setup = btusb_setup_bcm92035; 1487 hdev->setup = btusb_setup_bcm92035;
1487 1488
1488 if (id->driver_info & BTUSB_INTEL) { 1489 if (id->driver_info & BTUSB_INTEL)
1489 usb_enable_autosuspend(data->udev);
1490 hdev->setup = btusb_setup_intel; 1490 hdev->setup = btusb_setup_intel;
1491 }
1492 1491
1493 /* Interface numbers are hardcoded in the specification */ 1492 /* Interface numbers are hardcoded in the specification */
1494 data->isoc = usb_ifnum_to_if(data->udev, 1); 1493 data->isoc = usb_ifnum_to_if(data->udev, 1);
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 293e2e0a0a87..00b73448b22e 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -56,6 +56,7 @@
56#include <linux/of.h> 56#include <linux/of.h>
57#include <linux/of_address.h> 57#include <linux/of_address.h>
58#include <linux/debugfs.h> 58#include <linux/debugfs.h>
59#include <linux/log2.h>
59 60
60/* 61/*
61 * DDR target is the same on all platforms. 62 * DDR target is the same on all platforms.
@@ -222,12 +223,6 @@ static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
222 */ 223 */
223 if ((u64)base < wend && end > wbase) 224 if ((u64)base < wend && end > wbase)
224 return 0; 225 return 0;
225
226 /*
227 * Check if target/attribute conflicts
228 */
229 if (target == wtarget && attr == wattr)
230 return 0;
231 } 226 }
232 227
233 return 1; 228 return 1;
@@ -266,6 +261,17 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
266 mbus->soc->win_cfg_offset(win); 261 mbus->soc->win_cfg_offset(win);
267 u32 ctrl, remap_addr; 262 u32 ctrl, remap_addr;
268 263
264 if (!is_power_of_2(size)) {
265 WARN(true, "Invalid MBus window size: 0x%zx\n", size);
266 return -EINVAL;
267 }
268
269 if ((base & (phys_addr_t)(size - 1)) != 0) {
270 WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base,
271 size);
272 return -EINVAL;
273 }
274
269 ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) | 275 ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
270 (attr << WIN_CTRL_ATTR_SHIFT) | 276 (attr << WIN_CTRL_ATTR_SHIFT) |
271 (target << WIN_CTRL_TGT_SHIFT) | 277 (target << WIN_CTRL_TGT_SHIFT) |
@@ -413,6 +419,10 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
413 win, (unsigned long long)wbase, 419 win, (unsigned long long)wbase,
414 (unsigned long long)(wbase + wsize), wtarget, wattr); 420 (unsigned long long)(wbase + wsize), wtarget, wattr);
415 421
422 if (!is_power_of_2(wsize) ||
423 ((wbase & (u64)(wsize - 1)) != 0))
424 seq_puts(seq, " (Invalid base/size!!)");
425
416 if (win < mbus->soc->num_remappable_wins) { 426 if (win < mbus->soc->num_remappable_wins) {
417 seq_printf(seq, " (remap %016llx)\n", 427 seq_printf(seq, " (remap %016llx)\n",
418 (unsigned long long)wremap); 428 (unsigned long long)wremap);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fbae63e3d304..6e9f74a5c095 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -40,7 +40,7 @@ config SGI_MBCS
40source "drivers/tty/serial/Kconfig" 40source "drivers/tty/serial/Kconfig"
41 41
42config TTY_PRINTK 42config TTY_PRINTK
43 bool "TTY driver to output user messages via printk" 43 tristate "TTY driver to output user messages via printk"
44 depends on EXPERT && TTY 44 depends on EXPERT && TTY
45 default n 45 default n
46 ---help--- 46 ---help---
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 8121b4c70ede..b29703324e94 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -730,6 +730,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
730 730
731 agp_copy_info(agp_bridge, &kerninfo); 731 agp_copy_info(agp_bridge, &kerninfo);
732 732
733 memset(&userinfo, 0, sizeof(userinfo));
733 userinfo.version.major = kerninfo.version.major; 734 userinfo.version.major = kerninfo.version.major;
734 userinfo.version.minor = kerninfo.version.minor; 735 userinfo.version.minor = kerninfo.version.minor;
735 userinfo.bridge_id = kerninfo.device->vendor | 736 userinfo.bridge_id = kerninfo.device->vendor |
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 8c3b255e629a..e900961cdd2e 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -61,18 +61,18 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
61 } 61 }
62 bcm2835_rng_ops.priv = (unsigned long)rng_base; 62 bcm2835_rng_ops.priv = (unsigned long)rng_base;
63 63
64 /* set warm-up count & enable */
65 __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
66 __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
67
64 /* register driver */ 68 /* register driver */
65 err = hwrng_register(&bcm2835_rng_ops); 69 err = hwrng_register(&bcm2835_rng_ops);
66 if (err) { 70 if (err) {
67 dev_err(dev, "hwrng registration failed\n"); 71 dev_err(dev, "hwrng registration failed\n");
68 iounmap(rng_base); 72 iounmap(rng_base);
69 } else { 73 } else
70 dev_info(dev, "hwrng registered\n"); 74 dev_info(dev, "hwrng registered\n");
71 75
72 /* set warm-up count & enable */
73 __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
74 __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
75 }
76 return err; 76 return err;
77} 77}
78 78
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 0baa8fab4ea7..db1c9b7adaa6 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -50,6 +50,18 @@ config IPMI_SI
50 Currently, only KCS and SMIC are supported. If 50 Currently, only KCS and SMIC are supported. If
51 you are using IPMI, you should probably say "y" here. 51 you are using IPMI, you should probably say "y" here.
52 52
53config IPMI_SI_PROBE_DEFAULTS
54 bool 'Probe for all possible IPMI system interfaces by default'
55 default n
56 depends on IPMI_SI
57 help
58 Modern systems will usually expose IPMI interfaces via a discoverable
59 firmware mechanism such as ACPI or DMI. Older systems do not, and so
60 the driver is forced to probe hardware manually. This may cause boot
61 delays. Say "n" here to disable this manual probing. IPMI will then
62 only be available on older systems if the "ipmi_si_intf.trydefaults=1"
63 boot argument is passed.
64
53config IPMI_WATCHDOG 65config IPMI_WATCHDOG
54 tristate 'IPMI Watchdog Timer' 66 tristate 'IPMI Watchdog Timer'
55 help 67 help
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index f5e4cd7617f6..61e71616689b 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
352 352
353static inline int read_all_bytes(struct si_sm_data *bt) 353static inline int read_all_bytes(struct si_sm_data *bt)
354{ 354{
355 unsigned char i; 355 unsigned int i;
356 356
357 /* 357 /*
358 * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. 358 * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 6a4bdc18955a..8c25f596808a 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -251,8 +251,9 @@ static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
251 if (!GET_STATUS_OBF(status)) { 251 if (!GET_STATUS_OBF(status)) {
252 kcs->obf_timeout -= time; 252 kcs->obf_timeout -= time;
253 if (kcs->obf_timeout < 0) { 253 if (kcs->obf_timeout < 0) {
254 start_error_recovery(kcs, "OBF not ready in time"); 254 kcs->obf_timeout = OBF_RETRY_TIMEOUT;
255 return 1; 255 start_error_recovery(kcs, "OBF not ready in time");
256 return 1;
256 } 257 }
257 return 0; 258 return 0;
258 } 259 }
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index ec4e10fcf1a5..e6db9381b2c7 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -55,6 +55,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
55static int ipmi_init_msghandler(void); 55static int ipmi_init_msghandler(void);
56static void smi_recv_tasklet(unsigned long); 56static void smi_recv_tasklet(unsigned long);
57static void handle_new_recv_msgs(ipmi_smi_t intf); 57static void handle_new_recv_msgs(ipmi_smi_t intf);
58static void need_waiter(ipmi_smi_t intf);
58 59
59static int initialized; 60static int initialized;
60 61
@@ -73,14 +74,28 @@ static struct proc_dir_entry *proc_ipmi_root;
73 */ 74 */
74#define MAX_MSG_TIMEOUT 60000 75#define MAX_MSG_TIMEOUT 60000
75 76
77/* Call every ~1000 ms. */
78#define IPMI_TIMEOUT_TIME 1000
79
80/* How many jiffies does it take to get to the timeout time. */
81#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
82
83/*
84 * Request events from the queue every second (this is the number of
85 * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
86 * future, IPMI will add a way to know immediately if an event is in
87 * the queue and this silliness can go away.
88 */
89#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
90
76/* 91/*
77 * The main "user" data structure. 92 * The main "user" data structure.
78 */ 93 */
79struct ipmi_user { 94struct ipmi_user {
80 struct list_head link; 95 struct list_head link;
81 96
82 /* Set to "0" when the user is destroyed. */ 97 /* Set to false when the user is destroyed. */
83 int valid; 98 bool valid;
84 99
85 struct kref refcount; 100 struct kref refcount;
86 101
@@ -92,7 +107,7 @@ struct ipmi_user {
92 ipmi_smi_t intf; 107 ipmi_smi_t intf;
93 108
94 /* Does this interface receive IPMI events? */ 109 /* Does this interface receive IPMI events? */
95 int gets_events; 110 bool gets_events;
96}; 111};
97 112
98struct cmd_rcvr { 113struct cmd_rcvr {
@@ -383,6 +398,9 @@ struct ipmi_smi {
383 unsigned int waiting_events_count; /* How many events in queue? */ 398 unsigned int waiting_events_count; /* How many events in queue? */
384 char delivering_events; 399 char delivering_events;
385 char event_msg_printed; 400 char event_msg_printed;
401 atomic_t event_waiters;
402 unsigned int ticks_to_req_ev;
403 int last_needs_timer;
386 404
387 /* 405 /*
388 * The event receiver for my BMC, only really used at panic 406 * The event receiver for my BMC, only really used at panic
@@ -395,7 +413,7 @@ struct ipmi_smi {
395 413
396 /* For handling of maintenance mode. */ 414 /* For handling of maintenance mode. */
397 int maintenance_mode; 415 int maintenance_mode;
398 int maintenance_mode_enable; 416 bool maintenance_mode_enable;
399 int auto_maintenance_timeout; 417 int auto_maintenance_timeout;
400 spinlock_t maintenance_mode_lock; /* Used in a timer... */ 418 spinlock_t maintenance_mode_lock; /* Used in a timer... */
401 419
@@ -451,7 +469,6 @@ static DEFINE_MUTEX(ipmi_interfaces_mutex);
451static LIST_HEAD(smi_watchers); 469static LIST_HEAD(smi_watchers);
452static DEFINE_MUTEX(smi_watchers_mutex); 470static DEFINE_MUTEX(smi_watchers_mutex);
453 471
454
455#define ipmi_inc_stat(intf, stat) \ 472#define ipmi_inc_stat(intf, stat) \
456 atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) 473 atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
457#define ipmi_get_stat(intf, stat) \ 474#define ipmi_get_stat(intf, stat) \
@@ -772,6 +789,7 @@ static int intf_next_seq(ipmi_smi_t intf,
772 *seq = i; 789 *seq = i;
773 *seqid = intf->seq_table[i].seqid; 790 *seqid = intf->seq_table[i].seqid;
774 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ; 791 intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
792 need_waiter(intf);
775 } else { 793 } else {
776 rv = -EAGAIN; 794 rv = -EAGAIN;
777 } 795 }
@@ -941,7 +959,7 @@ int ipmi_create_user(unsigned int if_num,
941 new_user->handler = handler; 959 new_user->handler = handler;
942 new_user->handler_data = handler_data; 960 new_user->handler_data = handler_data;
943 new_user->intf = intf; 961 new_user->intf = intf;
944 new_user->gets_events = 0; 962 new_user->gets_events = false;
945 963
946 if (!try_module_get(intf->handlers->owner)) { 964 if (!try_module_get(intf->handlers->owner)) {
947 rv = -ENODEV; 965 rv = -ENODEV;
@@ -962,10 +980,15 @@ int ipmi_create_user(unsigned int if_num,
962 */ 980 */
963 mutex_unlock(&ipmi_interfaces_mutex); 981 mutex_unlock(&ipmi_interfaces_mutex);
964 982
965 new_user->valid = 1; 983 new_user->valid = true;
966 spin_lock_irqsave(&intf->seq_lock, flags); 984 spin_lock_irqsave(&intf->seq_lock, flags);
967 list_add_rcu(&new_user->link, &intf->users); 985 list_add_rcu(&new_user->link, &intf->users);
968 spin_unlock_irqrestore(&intf->seq_lock, flags); 986 spin_unlock_irqrestore(&intf->seq_lock, flags);
987 if (handler->ipmi_watchdog_pretimeout) {
988 /* User wants pretimeouts, so make sure to watch for them. */
989 if (atomic_inc_return(&intf->event_waiters) == 1)
990 need_waiter(intf);
991 }
969 *user = new_user; 992 *user = new_user;
970 return 0; 993 return 0;
971 994
@@ -1019,7 +1042,13 @@ int ipmi_destroy_user(ipmi_user_t user)
1019 struct cmd_rcvr *rcvr; 1042 struct cmd_rcvr *rcvr;
1020 struct cmd_rcvr *rcvrs = NULL; 1043 struct cmd_rcvr *rcvrs = NULL;
1021 1044
1022 user->valid = 0; 1045 user->valid = false;
1046
1047 if (user->handler->ipmi_watchdog_pretimeout)
1048 atomic_dec(&intf->event_waiters);
1049
1050 if (user->gets_events)
1051 atomic_dec(&intf->event_waiters);
1023 1052
1024 /* Remove the user from the interface's sequence table. */ 1053 /* Remove the user from the interface's sequence table. */
1025 spin_lock_irqsave(&intf->seq_lock, flags); 1054 spin_lock_irqsave(&intf->seq_lock, flags);
@@ -1155,25 +1184,23 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
1155 if (intf->maintenance_mode != mode) { 1184 if (intf->maintenance_mode != mode) {
1156 switch (mode) { 1185 switch (mode) {
1157 case IPMI_MAINTENANCE_MODE_AUTO: 1186 case IPMI_MAINTENANCE_MODE_AUTO:
1158 intf->maintenance_mode = mode;
1159 intf->maintenance_mode_enable 1187 intf->maintenance_mode_enable
1160 = (intf->auto_maintenance_timeout > 0); 1188 = (intf->auto_maintenance_timeout > 0);
1161 break; 1189 break;
1162 1190
1163 case IPMI_MAINTENANCE_MODE_OFF: 1191 case IPMI_MAINTENANCE_MODE_OFF:
1164 intf->maintenance_mode = mode; 1192 intf->maintenance_mode_enable = false;
1165 intf->maintenance_mode_enable = 0;
1166 break; 1193 break;
1167 1194
1168 case IPMI_MAINTENANCE_MODE_ON: 1195 case IPMI_MAINTENANCE_MODE_ON:
1169 intf->maintenance_mode = mode; 1196 intf->maintenance_mode_enable = true;
1170 intf->maintenance_mode_enable = 1;
1171 break; 1197 break;
1172 1198
1173 default: 1199 default:
1174 rv = -EINVAL; 1200 rv = -EINVAL;
1175 goto out_unlock; 1201 goto out_unlock;
1176 } 1202 }
1203 intf->maintenance_mode = mode;
1177 1204
1178 maintenance_mode_update(intf); 1205 maintenance_mode_update(intf);
1179 } 1206 }
@@ -1184,7 +1211,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
1184} 1211}
1185EXPORT_SYMBOL(ipmi_set_maintenance_mode); 1212EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1186 1213
1187int ipmi_set_gets_events(ipmi_user_t user, int val) 1214int ipmi_set_gets_events(ipmi_user_t user, bool val)
1188{ 1215{
1189 unsigned long flags; 1216 unsigned long flags;
1190 ipmi_smi_t intf = user->intf; 1217 ipmi_smi_t intf = user->intf;
@@ -1194,8 +1221,18 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
1194 INIT_LIST_HEAD(&msgs); 1221 INIT_LIST_HEAD(&msgs);
1195 1222
1196 spin_lock_irqsave(&intf->events_lock, flags); 1223 spin_lock_irqsave(&intf->events_lock, flags);
1224 if (user->gets_events == val)
1225 goto out;
1226
1197 user->gets_events = val; 1227 user->gets_events = val;
1198 1228
1229 if (val) {
1230 if (atomic_inc_return(&intf->event_waiters) == 1)
1231 need_waiter(intf);
1232 } else {
1233 atomic_dec(&intf->event_waiters);
1234 }
1235
1199 if (intf->delivering_events) 1236 if (intf->delivering_events)
1200 /* 1237 /*
1201 * Another thread is delivering events for this, so 1238 * Another thread is delivering events for this, so
@@ -1289,6 +1326,9 @@ int ipmi_register_for_cmd(ipmi_user_t user,
1289 goto out_unlock; 1326 goto out_unlock;
1290 } 1327 }
1291 1328
1329 if (atomic_inc_return(&intf->event_waiters) == 1)
1330 need_waiter(intf);
1331
1292 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); 1332 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1293 1333
1294 out_unlock: 1334 out_unlock:
@@ -1330,6 +1370,7 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
1330 mutex_unlock(&intf->cmd_rcvrs_mutex); 1370 mutex_unlock(&intf->cmd_rcvrs_mutex);
1331 synchronize_rcu(); 1371 synchronize_rcu();
1332 while (rcvrs) { 1372 while (rcvrs) {
1373 atomic_dec(&intf->event_waiters);
1333 rcvr = rcvrs; 1374 rcvr = rcvrs;
1334 rcvrs = rcvr->next; 1375 rcvrs = rcvr->next;
1335 kfree(rcvr); 1376 kfree(rcvr);
@@ -1535,7 +1576,7 @@ static int i_ipmi_request(ipmi_user_t user,
1535 = IPMI_MAINTENANCE_MODE_TIMEOUT; 1576 = IPMI_MAINTENANCE_MODE_TIMEOUT;
1536 if (!intf->maintenance_mode 1577 if (!intf->maintenance_mode
1537 && !intf->maintenance_mode_enable) { 1578 && !intf->maintenance_mode_enable) {
1538 intf->maintenance_mode_enable = 1; 1579 intf->maintenance_mode_enable = true;
1539 maintenance_mode_update(intf); 1580 maintenance_mode_update(intf);
1540 } 1581 }
1541 spin_unlock_irqrestore(&intf->maintenance_mode_lock, 1582 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
@@ -2876,6 +2917,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2876 (unsigned long) intf); 2917 (unsigned long) intf);
2877 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 2918 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
2878 spin_lock_init(&intf->events_lock); 2919 spin_lock_init(&intf->events_lock);
2920 atomic_set(&intf->event_waiters, 0);
2921 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
2879 INIT_LIST_HEAD(&intf->waiting_events); 2922 INIT_LIST_HEAD(&intf->waiting_events);
2880 intf->waiting_events_count = 0; 2923 intf->waiting_events_count = 0;
2881 mutex_init(&intf->cmd_rcvrs_mutex); 2924 mutex_init(&intf->cmd_rcvrs_mutex);
@@ -3965,7 +4008,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3965 4008
3966static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, 4009static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3967 struct list_head *timeouts, long timeout_period, 4010 struct list_head *timeouts, long timeout_period,
3968 int slot, unsigned long *flags) 4011 int slot, unsigned long *flags,
4012 unsigned int *waiting_msgs)
3969{ 4013{
3970 struct ipmi_recv_msg *msg; 4014 struct ipmi_recv_msg *msg;
3971 struct ipmi_smi_handlers *handlers; 4015 struct ipmi_smi_handlers *handlers;
@@ -3977,8 +4021,10 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3977 return; 4021 return;
3978 4022
3979 ent->timeout -= timeout_period; 4023 ent->timeout -= timeout_period;
3980 if (ent->timeout > 0) 4024 if (ent->timeout > 0) {
4025 (*waiting_msgs)++;
3981 return; 4026 return;
4027 }
3982 4028
3983 if (ent->retries_left == 0) { 4029 if (ent->retries_left == 0) {
3984 /* The message has used all its retries. */ 4030 /* The message has used all its retries. */
@@ -3995,6 +4041,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3995 struct ipmi_smi_msg *smi_msg; 4041 struct ipmi_smi_msg *smi_msg;
3996 /* More retries, send again. */ 4042 /* More retries, send again. */
3997 4043
4044 (*waiting_msgs)++;
4045
3998 /* 4046 /*
3999 * Start with the max timer, set to normal timer after 4047 * Start with the max timer, set to normal timer after
4000 * the message is sent. 4048 * the message is sent.
@@ -4040,117 +4088,118 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
4040 } 4088 }
4041} 4089}
4042 4090
4043static void ipmi_timeout_handler(long timeout_period) 4091static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
4044{ 4092{
4045 ipmi_smi_t intf;
4046 struct list_head timeouts; 4093 struct list_head timeouts;
4047 struct ipmi_recv_msg *msg, *msg2; 4094 struct ipmi_recv_msg *msg, *msg2;
4048 unsigned long flags; 4095 unsigned long flags;
4049 int i; 4096 int i;
4097 unsigned int waiting_msgs = 0;
4050 4098
4051 rcu_read_lock(); 4099 /*
4052 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 4100 * Go through the seq table and find any messages that
4053 tasklet_schedule(&intf->recv_tasklet); 4101 * have timed out, putting them in the timeouts
4054 4102 * list.
4055 /* 4103 */
4056 * Go through the seq table and find any messages that 4104 INIT_LIST_HEAD(&timeouts);
4057 * have timed out, putting them in the timeouts 4105 spin_lock_irqsave(&intf->seq_lock, flags);
4058 * list. 4106 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4059 */ 4107 check_msg_timeout(intf, &(intf->seq_table[i]),
4060 INIT_LIST_HEAD(&timeouts); 4108 &timeouts, timeout_period, i,
4061 spin_lock_irqsave(&intf->seq_lock, flags); 4109 &flags, &waiting_msgs);
4062 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) 4110 spin_unlock_irqrestore(&intf->seq_lock, flags);
4063 check_msg_timeout(intf, &(intf->seq_table[i]),
4064 &timeouts, timeout_period, i,
4065 &flags);
4066 spin_unlock_irqrestore(&intf->seq_lock, flags);
4067 4111
4068 list_for_each_entry_safe(msg, msg2, &timeouts, link) 4112 list_for_each_entry_safe(msg, msg2, &timeouts, link)
4069 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); 4113 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
4070 4114
4071 /* 4115 /*
4072 * Maintenance mode handling. Check the timeout 4116 * Maintenance mode handling. Check the timeout
4073 * optimistically before we claim the lock. It may 4117 * optimistically before we claim the lock. It may
4074 * mean a timeout gets missed occasionally, but that 4118 * mean a timeout gets missed occasionally, but that
4075 * only means the timeout gets extended by one period 4119 * only means the timeout gets extended by one period
4076 * in that case. No big deal, and it avoids the lock 4120 * in that case. No big deal, and it avoids the lock
4077 * most of the time. 4121 * most of the time.
4078 */ 4122 */
4123 if (intf->auto_maintenance_timeout > 0) {
4124 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4079 if (intf->auto_maintenance_timeout > 0) { 4125 if (intf->auto_maintenance_timeout > 0) {
4080 spin_lock_irqsave(&intf->maintenance_mode_lock, flags); 4126 intf->auto_maintenance_timeout
4081 if (intf->auto_maintenance_timeout > 0) { 4127 -= timeout_period;
4082 intf->auto_maintenance_timeout 4128 if (!intf->maintenance_mode
4083 -= timeout_period; 4129 && (intf->auto_maintenance_timeout <= 0)) {
4084 if (!intf->maintenance_mode 4130 intf->maintenance_mode_enable = false;
4085 && (intf->auto_maintenance_timeout <= 0)) { 4131 maintenance_mode_update(intf);
4086 intf->maintenance_mode_enable = 0;
4087 maintenance_mode_update(intf);
4088 }
4089 } 4132 }
4090 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4091 flags);
4092 } 4133 }
4134 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4135 flags);
4093 } 4136 }
4094 rcu_read_unlock(); 4137
4138 tasklet_schedule(&intf->recv_tasklet);
4139
4140 return waiting_msgs;
4095} 4141}
4096 4142
4097static void ipmi_request_event(void) 4143static void ipmi_request_event(ipmi_smi_t intf)
4098{ 4144{
4099 ipmi_smi_t intf;
4100 struct ipmi_smi_handlers *handlers; 4145 struct ipmi_smi_handlers *handlers;
4101 4146
4102 rcu_read_lock(); 4147 /* No event requests when in maintenance mode. */
4103 /* 4148 if (intf->maintenance_mode_enable)
4104 * Called from the timer, no need to check if handlers is 4149 return;
4105 * valid.
4106 */
4107 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4108 /* No event requests when in maintenance mode. */
4109 if (intf->maintenance_mode_enable)
4110 continue;
4111 4150
4112 handlers = intf->handlers; 4151 handlers = intf->handlers;
4113 if (handlers) 4152 if (handlers)
4114 handlers->request_events(intf->send_info); 4153 handlers->request_events(intf->send_info);
4115 }
4116 rcu_read_unlock();
4117} 4154}
4118 4155
4119static struct timer_list ipmi_timer; 4156static struct timer_list ipmi_timer;
4120 4157
4121/* Call every ~1000 ms. */
4122#define IPMI_TIMEOUT_TIME 1000
4123
4124/* How many jiffies does it take to get to the timeout time. */
4125#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
4126
4127/*
4128 * Request events from the queue every second (this is the number of
4129 * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
4130 * future, IPMI will add a way to know immediately if an event is in
4131 * the queue and this silliness can go away.
4132 */
4133#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
4134
4135static atomic_t stop_operation; 4158static atomic_t stop_operation;
4136static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4137 4159
4138static void ipmi_timeout(unsigned long data) 4160static void ipmi_timeout(unsigned long data)
4139{ 4161{
4162 ipmi_smi_t intf;
4163 int nt = 0;
4164
4140 if (atomic_read(&stop_operation)) 4165 if (atomic_read(&stop_operation))
4141 return; 4166 return;
4142 4167
4143 ticks_to_req_ev--; 4168 rcu_read_lock();
4144 if (ticks_to_req_ev == 0) { 4169 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4145 ipmi_request_event(); 4170 int lnt = 0;
4146 ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 4171
4147 } 4172 if (atomic_read(&intf->event_waiters)) {
4173 intf->ticks_to_req_ev--;
4174 if (intf->ticks_to_req_ev == 0) {
4175 ipmi_request_event(intf);
4176 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4177 }
4178 lnt++;
4179 }
4148 4180
4149 ipmi_timeout_handler(IPMI_TIMEOUT_TIME); 4181 lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4150 4182
4151 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); 4183 lnt = !!lnt;
4184 if (lnt != intf->last_needs_timer &&
4185 intf->handlers->set_need_watch)
4186 intf->handlers->set_need_watch(intf->send_info, lnt);
4187 intf->last_needs_timer = lnt;
4188
4189 nt += lnt;
4190 }
4191 rcu_read_unlock();
4192
4193 if (nt)
4194 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4152} 4195}
4153 4196
4197static void need_waiter(ipmi_smi_t intf)
4198{
4199 /* Racy, but worst case we start the timer twice. */
4200 if (!timer_pending(&ipmi_timer))
4201 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4202}
4154 4203
4155static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); 4204static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
4156static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); 4205static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
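The ipmi_msghandler.c changes above replace an always-running periodic timer with a per-interface event_waiters count: the timer (and the lower layer's need_watch state) stays armed only while some user is waiting for events, watchdog pretimeouts, registered commands or outstanding responses. Below is a minimal, hedged sketch of that reference-counted "only tick while needed" pattern; struct demo_intf, start_timer() and the printed output are invented for illustration and are not the kernel's API.

/* Sketch: keep periodic work armed only while somebody needs it. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_intf {
        atomic_int waiters;     /* users waiting for events or responses */
        bool timer_running;     /* guarded by a lock in real code */
};

/* Stand-in for mod_timer(): just record that the timer is armed. */
static void start_timer(struct demo_intf *intf)
{
        intf->timer_running = true;
}

/* First waiter arms the timer (mirrors atomic_inc_return(...) == 1). */
static void add_waiter(struct demo_intf *intf)
{
        if (atomic_fetch_add(&intf->waiters, 1) == 0)
                start_timer(intf);
}

static void remove_waiter(struct demo_intf *intf)
{
        atomic_fetch_sub(&intf->waiters, 1);
}

/* Periodic tick: tell the caller whether to re-arm the timer. */
static bool timer_tick(struct demo_intf *intf, unsigned int pending_msgs)
{
        bool need_timer = atomic_load(&intf->waiters) > 0 || pending_msgs > 0;

        intf->timer_running = need_timer;
        return need_timer;
}

int main(void)
{
        struct demo_intf intf = { .waiters = 0, .timer_running = false };

        add_waiter(&intf);
        printf("re-arm? %d\n", timer_tick(&intf, 0));   /* 1: still waiting */
        remove_waiter(&intf);
        printf("re-arm? %d\n", timer_tick(&intf, 0));   /* 0: goes idle */
        return 0;
}

The same counting is visible in ipmi_create_user()/ipmi_destroy_user(), ipmi_set_gets_events() and ipmi_register_for_cmd() above: whoever takes the count from 0 to 1 calls need_waiter() to kick the timer back to life.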
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index b7efd3c1a882..1c4bb4f6ce93 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -217,7 +217,7 @@ struct smi_info {
217 unsigned char msg_flags; 217 unsigned char msg_flags;
218 218
219 /* Does the BMC have an event buffer? */ 219 /* Does the BMC have an event buffer? */
220 char has_event_buffer; 220 bool has_event_buffer;
221 221
222 /* 222 /*
223 * If set to true, this will request events the next time the 223 * If set to true, this will request events the next time the
@@ -230,7 +230,7 @@ struct smi_info {
230 * call. Generally used after a panic to make sure stuff goes 230 * call. Generally used after a panic to make sure stuff goes
231 * out. 231 * out.
232 */ 232 */
233 int run_to_completion; 233 bool run_to_completion;
234 234
235 /* The I/O port of an SI interface. */ 235 /* The I/O port of an SI interface. */
236 int port; 236 int port;
@@ -248,19 +248,25 @@ struct smi_info {
248 /* The timer for this si. */ 248 /* The timer for this si. */
249 struct timer_list si_timer; 249 struct timer_list si_timer;
250 250
251 /* This flag is set if the timer is running (timer_pending() isn't enough) */
252 bool timer_running;
253
251 /* The time (in jiffies) the last timeout occurred at. */ 254 /* The time (in jiffies) the last timeout occurred at. */
252 unsigned long last_timeout_jiffies; 255 unsigned long last_timeout_jiffies;
253 256
254 /* Used to gracefully stop the timer without race conditions. */ 257 /* Used to gracefully stop the timer without race conditions. */
255 atomic_t stop_operation; 258 atomic_t stop_operation;
256 259
260 /* Are we waiting for the events, pretimeouts, received msgs? */
261 atomic_t need_watch;
262
257 /* 263 /*
258 * The driver will disable interrupts when it gets into a 264 * The driver will disable interrupts when it gets into a
259 * situation where it cannot handle messages due to lack of 265 * situation where it cannot handle messages due to lack of
260 * memory. Once that situation clears up, it will re-enable 266 * memory. Once that situation clears up, it will re-enable
261 * interrupts. 267 * interrupts.
262 */ 268 */
263 int interrupt_disabled; 269 bool interrupt_disabled;
264 270
265 /* From the get device id response... */ 271 /* From the get device id response... */
266 struct ipmi_device_id device_id; 272 struct ipmi_device_id device_id;
@@ -273,7 +279,7 @@ struct smi_info {
273 * True if we allocated the device, false if it came from 279 * True if we allocated the device, false if it came from
274 * someplace else (like PCI). 280 * someplace else (like PCI).
275 */ 281 */
276 int dev_registered; 282 bool dev_registered;
277 283
278 /* Slave address, could be reported from DMI. */ 284 /* Slave address, could be reported from DMI. */
279 unsigned char slave_addr; 285 unsigned char slave_addr;
@@ -297,19 +303,19 @@ struct smi_info {
297static int force_kipmid[SI_MAX_PARMS]; 303static int force_kipmid[SI_MAX_PARMS];
298static int num_force_kipmid; 304static int num_force_kipmid;
299#ifdef CONFIG_PCI 305#ifdef CONFIG_PCI
300static int pci_registered; 306static bool pci_registered;
301#endif 307#endif
302#ifdef CONFIG_ACPI 308#ifdef CONFIG_ACPI
303static int pnp_registered; 309static bool pnp_registered;
304#endif 310#endif
305#ifdef CONFIG_PARISC 311#ifdef CONFIG_PARISC
306static int parisc_registered; 312static bool parisc_registered;
307#endif 313#endif
308 314
309static unsigned int kipmid_max_busy_us[SI_MAX_PARMS]; 315static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
310static int num_max_busy_us; 316static int num_max_busy_us;
311 317
312static int unload_when_empty = 1; 318static bool unload_when_empty = true;
313 319
314static int add_smi(struct smi_info *smi); 320static int add_smi(struct smi_info *smi);
315static int try_smi_init(struct smi_info *smi); 321static int try_smi_init(struct smi_info *smi);
@@ -434,6 +440,13 @@ static void start_clear_flags(struct smi_info *smi_info)
434 smi_info->si_state = SI_CLEARING_FLAGS; 440 smi_info->si_state = SI_CLEARING_FLAGS;
435} 441}
436 442
443static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
444{
445 smi_info->last_timeout_jiffies = jiffies;
446 mod_timer(&smi_info->si_timer, new_val);
447 smi_info->timer_running = true;
448}
449
437/* 450/*
438 * When we have a situation where we run out of memory and cannot 451 * When we have a situation where we run out of memory and cannot
439 * allocate messages, we just leave them in the BMC and run the system 452 * allocate messages, we just leave them in the BMC and run the system
@@ -444,10 +457,9 @@ static inline void disable_si_irq(struct smi_info *smi_info)
444{ 457{
445 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 458 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
446 start_disable_irq(smi_info); 459 start_disable_irq(smi_info);
447 smi_info->interrupt_disabled = 1; 460 smi_info->interrupt_disabled = true;
448 if (!atomic_read(&smi_info->stop_operation)) 461 if (!atomic_read(&smi_info->stop_operation))
449 mod_timer(&smi_info->si_timer, 462 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
450 jiffies + SI_TIMEOUT_JIFFIES);
451 } 463 }
452} 464}
453 465
@@ -455,7 +467,7 @@ static inline void enable_si_irq(struct smi_info *smi_info)
455{ 467{
456 if ((smi_info->irq) && (smi_info->interrupt_disabled)) { 468 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
457 start_enable_irq(smi_info); 469 start_enable_irq(smi_info);
458 smi_info->interrupt_disabled = 0; 470 smi_info->interrupt_disabled = false;
459 } 471 }
460} 472}
461 473
@@ -700,7 +712,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
700 dev_warn(smi_info->dev, 712 dev_warn(smi_info->dev,
701 "Maybe ok, but ipmi might run very slowly.\n"); 713 "Maybe ok, but ipmi might run very slowly.\n");
702 } else 714 } else
703 smi_info->interrupt_disabled = 0; 715 smi_info->interrupt_disabled = false;
704 smi_info->si_state = SI_NORMAL; 716 smi_info->si_state = SI_NORMAL;
705 break; 717 break;
706 } 718 }
@@ -853,6 +865,19 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
853 return si_sm_result; 865 return si_sm_result;
854} 866}
855 867
868static void check_start_timer_thread(struct smi_info *smi_info)
869{
870 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
871 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
872
873 if (smi_info->thread)
874 wake_up_process(smi_info->thread);
875
876 start_next_msg(smi_info);
877 smi_event_handler(smi_info, 0);
878 }
879}
880
856static void sender(void *send_info, 881static void sender(void *send_info,
857 struct ipmi_smi_msg *msg, 882 struct ipmi_smi_msg *msg,
858 int priority) 883 int priority)
@@ -906,27 +931,11 @@ static void sender(void *send_info,
906 else 931 else
907 list_add_tail(&msg->link, &smi_info->xmit_msgs); 932 list_add_tail(&msg->link, &smi_info->xmit_msgs);
908 933
909 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) { 934 check_start_timer_thread(smi_info);
910 /*
911 * last_timeout_jiffies is updated here to avoid
912 * smi_timeout() handler passing very large time_diff
913 * value to smi_event_handler() that causes
914 * the send command to abort.
915 */
916 smi_info->last_timeout_jiffies = jiffies;
917
918 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
919
920 if (smi_info->thread)
921 wake_up_process(smi_info->thread);
922
923 start_next_msg(smi_info);
924 smi_event_handler(smi_info, 0);
925 }
926 spin_unlock_irqrestore(&smi_info->si_lock, flags); 935 spin_unlock_irqrestore(&smi_info->si_lock, flags);
927} 936}
928 937
929static void set_run_to_completion(void *send_info, int i_run_to_completion) 938static void set_run_to_completion(void *send_info, bool i_run_to_completion)
930{ 939{
931 struct smi_info *smi_info = send_info; 940 struct smi_info *smi_info = send_info;
932 enum si_sm_result result; 941 enum si_sm_result result;
@@ -1004,6 +1013,17 @@ static int ipmi_thread(void *data)
1004 1013
1005 spin_lock_irqsave(&(smi_info->si_lock), flags); 1014 spin_lock_irqsave(&(smi_info->si_lock), flags);
1006 smi_result = smi_event_handler(smi_info, 0); 1015 smi_result = smi_event_handler(smi_info, 0);
1016
1017 /*
1018 * If the driver is doing something, there is a possible
1019 * race with the timer. If the timer handler sees idle,
1020 * and the thread here sees something else, the timer
1021 * handler won't restart the timer even though it is
1022 * required. So start it here if necessary.
1023 */
1024 if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
1025 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
1026
1007 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 1027 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1008 busy_wait = ipmi_thread_busy_wait(smi_result, smi_info, 1028 busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
1009 &busy_until); 1029 &busy_until);
@@ -1011,9 +1031,15 @@ static int ipmi_thread(void *data)
1011 ; /* do nothing */ 1031 ; /* do nothing */
1012 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) 1032 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
1013 schedule(); 1033 schedule();
1014 else if (smi_result == SI_SM_IDLE) 1034 else if (smi_result == SI_SM_IDLE) {
1015 schedule_timeout_interruptible(100); 1035 if (atomic_read(&smi_info->need_watch)) {
1016 else 1036 schedule_timeout_interruptible(100);
1037 } else {
1038 /* Wait to be woken up when we are needed. */
1039 __set_current_state(TASK_INTERRUPTIBLE);
1040 schedule();
1041 }
1042 } else
1017 schedule_timeout_interruptible(1); 1043 schedule_timeout_interruptible(1);
1018 } 1044 }
1019 return 0; 1045 return 0;
@@ -1024,7 +1050,7 @@ static void poll(void *send_info)
1024{ 1050{
1025 struct smi_info *smi_info = send_info; 1051 struct smi_info *smi_info = send_info;
1026 unsigned long flags = 0; 1052 unsigned long flags = 0;
1027 int run_to_completion = smi_info->run_to_completion; 1053 bool run_to_completion = smi_info->run_to_completion;
1028 1054
1029 /* 1055 /*
1030 * Make sure there is some delay in the poll loop so we can 1056 * Make sure there is some delay in the poll loop so we can
@@ -1049,6 +1075,17 @@ static void request_events(void *send_info)
1049 atomic_set(&smi_info->req_events, 1); 1075 atomic_set(&smi_info->req_events, 1);
1050} 1076}
1051 1077
1078static void set_need_watch(void *send_info, bool enable)
1079{
1080 struct smi_info *smi_info = send_info;
1081 unsigned long flags;
1082
1083 atomic_set(&smi_info->need_watch, enable);
1084 spin_lock_irqsave(&smi_info->si_lock, flags);
1085 check_start_timer_thread(smi_info);
1086 spin_unlock_irqrestore(&smi_info->si_lock, flags);
1087}
1088
1052static int initialized; 1089static int initialized;
1053 1090
1054static void smi_timeout(unsigned long data) 1091static void smi_timeout(unsigned long data)
@@ -1073,10 +1110,6 @@ static void smi_timeout(unsigned long data)
1073 * SI_USEC_PER_JIFFY); 1110 * SI_USEC_PER_JIFFY);
1074 smi_result = smi_event_handler(smi_info, time_diff); 1111 smi_result = smi_event_handler(smi_info, time_diff);
1075 1112
1076 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1077
1078 smi_info->last_timeout_jiffies = jiffies_now;
1079
1080 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 1113 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1081 /* Running with interrupts, only do long timeouts. */ 1114 /* Running with interrupts, only do long timeouts. */
1082 timeout = jiffies + SI_TIMEOUT_JIFFIES; 1115 timeout = jiffies + SI_TIMEOUT_JIFFIES;
@@ -1098,7 +1131,10 @@ static void smi_timeout(unsigned long data)
1098 1131
1099 do_mod_timer: 1132 do_mod_timer:
1100 if (smi_result != SI_SM_IDLE) 1133 if (smi_result != SI_SM_IDLE)
1101 mod_timer(&(smi_info->si_timer), timeout); 1134 smi_mod_timer(smi_info, timeout);
1135 else
1136 smi_info->timer_running = false;
1137 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1102} 1138}
1103 1139
1104static irqreturn_t si_irq_handler(int irq, void *data) 1140static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1146,8 +1182,7 @@ static int smi_start_processing(void *send_info,
1146 1182
1147 /* Set up the timer that drives the interface. */ 1183 /* Set up the timer that drives the interface. */
1148 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi); 1184 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1149 new_smi->last_timeout_jiffies = jiffies; 1185 smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
1150 mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
1151 1186
1152 /* 1187 /*
1153 * Check if the user forcefully enabled the daemon. 1188 * Check if the user forcefully enabled the daemon.
@@ -1188,7 +1223,7 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
1188 return 0; 1223 return 0;
1189} 1224}
1190 1225
1191static void set_maintenance_mode(void *send_info, int enable) 1226static void set_maintenance_mode(void *send_info, bool enable)
1192{ 1227{
1193 struct smi_info *smi_info = send_info; 1228 struct smi_info *smi_info = send_info;
1194 1229
@@ -1202,6 +1237,7 @@ static struct ipmi_smi_handlers handlers = {
1202 .get_smi_info = get_smi_info, 1237 .get_smi_info = get_smi_info,
1203 .sender = sender, 1238 .sender = sender,
1204 .request_events = request_events, 1239 .request_events = request_events,
1240 .set_need_watch = set_need_watch,
1205 .set_maintenance_mode = set_maintenance_mode, 1241 .set_maintenance_mode = set_maintenance_mode,
1206 .set_run_to_completion = set_run_to_completion, 1242 .set_run_to_completion = set_run_to_completion,
1207 .poll = poll, 1243 .poll = poll,
@@ -1229,7 +1265,7 @@ static bool si_tryplatform = 1;
1229#ifdef CONFIG_PCI 1265#ifdef CONFIG_PCI
1230static bool si_trypci = 1; 1266static bool si_trypci = 1;
1231#endif 1267#endif
1232static bool si_trydefaults = 1; 1268static bool si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS);
1233static char *si_type[SI_MAX_PARMS]; 1269static char *si_type[SI_MAX_PARMS];
1234#define MAX_SI_TYPE_STR 30 1270#define MAX_SI_TYPE_STR 30
1235static char si_type_str[MAX_SI_TYPE_STR]; 1271static char si_type_str[MAX_SI_TYPE_STR];
@@ -1328,7 +1364,7 @@ module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1328MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or" 1364MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1329 " disabled(0). Normally the IPMI driver auto-detects" 1365 " disabled(0). Normally the IPMI driver auto-detects"
1330 " this, but the value may be overridden by this parm."); 1366 " this, but the value may be overridden by this parm.");
1331module_param(unload_when_empty, int, 0); 1367module_param(unload_when_empty, bool, 0);
1332MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are" 1368MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1333 " specified or found, default is 1. Setting to 0" 1369 " specified or found, default is 1. Setting to 0"
1334 " is useful for hot add of devices using hotmod."); 1370 " is useful for hot add of devices using hotmod.");
@@ -3336,18 +3372,19 @@ static int try_smi_init(struct smi_info *new_smi)
3336 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); 3372 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
3337 new_smi->curr_msg = NULL; 3373 new_smi->curr_msg = NULL;
3338 atomic_set(&new_smi->req_events, 0); 3374 atomic_set(&new_smi->req_events, 0);
3339 new_smi->run_to_completion = 0; 3375 new_smi->run_to_completion = false;
3340 for (i = 0; i < SI_NUM_STATS; i++) 3376 for (i = 0; i < SI_NUM_STATS; i++)
3341 atomic_set(&new_smi->stats[i], 0); 3377 atomic_set(&new_smi->stats[i], 0);
3342 3378
3343 new_smi->interrupt_disabled = 1; 3379 new_smi->interrupt_disabled = true;
3344 atomic_set(&new_smi->stop_operation, 0); 3380 atomic_set(&new_smi->stop_operation, 0);
3381 atomic_set(&new_smi->need_watch, 0);
3345 new_smi->intf_num = smi_num; 3382 new_smi->intf_num = smi_num;
3346 smi_num++; 3383 smi_num++;
3347 3384
3348 rv = try_enable_event_buffer(new_smi); 3385 rv = try_enable_event_buffer(new_smi);
3349 if (rv == 0) 3386 if (rv == 0)
3350 new_smi->has_event_buffer = 1; 3387 new_smi->has_event_buffer = true;
3351 3388
3352 /* 3389 /*
3353 * Start clearing the flags before we enable interrupts or the 3390 * Start clearing the flags before we enable interrupts or the
@@ -3381,7 +3418,7 @@ static int try_smi_init(struct smi_info *new_smi)
3381 rv); 3418 rv);
3382 goto out_err; 3419 goto out_err;
3383 } 3420 }
3384 new_smi->dev_registered = 1; 3421 new_smi->dev_registered = true;
3385 } 3422 }
3386 3423
3387 rv = ipmi_register_smi(&handlers, 3424 rv = ipmi_register_smi(&handlers,
@@ -3430,7 +3467,7 @@ static int try_smi_init(struct smi_info *new_smi)
3430 wait_for_timer_and_thread(new_smi); 3467 wait_for_timer_and_thread(new_smi);
3431 3468
3432 out_err: 3469 out_err:
3433 new_smi->interrupt_disabled = 1; 3470 new_smi->interrupt_disabled = true;
3434 3471
3435 if (new_smi->intf) { 3472 if (new_smi->intf) {
3436 ipmi_unregister_smi(new_smi->intf); 3473 ipmi_unregister_smi(new_smi->intf);
@@ -3466,7 +3503,7 @@ static int try_smi_init(struct smi_info *new_smi)
3466 3503
3467 if (new_smi->dev_registered) { 3504 if (new_smi->dev_registered) {
3468 platform_device_unregister(new_smi->pdev); 3505 platform_device_unregister(new_smi->pdev);
3469 new_smi->dev_registered = 0; 3506 new_smi->dev_registered = false;
3470 } 3507 }
3471 3508
3472 return rv; 3509 return rv;
@@ -3521,14 +3558,14 @@ static int init_ipmi_si(void)
3521 printk(KERN_ERR PFX "Unable to register " 3558 printk(KERN_ERR PFX "Unable to register "
3522 "PCI driver: %d\n", rv); 3559 "PCI driver: %d\n", rv);
3523 else 3560 else
3524 pci_registered = 1; 3561 pci_registered = true;
3525 } 3562 }
3526#endif 3563#endif
3527 3564
3528#ifdef CONFIG_ACPI 3565#ifdef CONFIG_ACPI
3529 if (si_tryacpi) { 3566 if (si_tryacpi) {
3530 pnp_register_driver(&ipmi_pnp_driver); 3567 pnp_register_driver(&ipmi_pnp_driver);
3531 pnp_registered = 1; 3568 pnp_registered = true;
3532 } 3569 }
3533#endif 3570#endif
3534 3571
@@ -3544,7 +3581,7 @@ static int init_ipmi_si(void)
3544 3581
3545#ifdef CONFIG_PARISC 3582#ifdef CONFIG_PARISC
3546 register_parisc_driver(&ipmi_parisc_driver); 3583 register_parisc_driver(&ipmi_parisc_driver);
3547 parisc_registered = 1; 3584 parisc_registered = true;
3548 /* poking PC IO addresses will crash machine, don't do it */ 3585 /* poking PC IO addresses will crash machine, don't do it */
3549 si_trydefaults = 0; 3586 si_trydefaults = 0;
3550#endif 3587#endif
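The ipmi_si_intf.c side of the series pairs every mod_timer() with the bookkeeping it needs: smi_mod_timer() updates last_timeout_jiffies and a timer_running flag in one place, and the kthread restarts the timer itself when it sees a busy state machine while the timer is idle. A small sketch of that wrapper idea follows; demo_timer, arm_hw_timer() and worker_step() are invented names, not the driver's.

/* Sketch: one helper owns all the timer bookkeeping so callers can
 * never update half of it. */
#include <stdbool.h>

struct demo_timer {
        unsigned long last_armed;       /* like last_timeout_jiffies */
        bool running;                   /* like timer_running */
};

static unsigned long fake_jiffies;      /* stand-in for jiffies */

static void arm_hw_timer(unsigned long expires)
{
        (void)expires;                  /* mod_timer() stand-in */
}

static void demo_mod_timer(struct demo_timer *t, unsigned long expires)
{
        t->last_armed = fake_jiffies;   /* bookkeeping stays in sync */
        arm_hw_timer(expires);
        t->running = true;
}

/* Worker-thread fragment: the timer handler lets itself expire when
 * the state machine is idle, so a busy worker must re-arm it here. */
static void worker_step(struct demo_timer *t, bool state_machine_busy)
{
        if (state_machine_busy && !t->running)
                demo_mod_timer(t, fake_jiffies + 100);
}

This closes the race described in the ipmi_thread() comment above: in the patched smi_timeout() the decision to re-arm or to clear timer_running happens before the spinlock is dropped, so the thread and the timer never disagree for long about who owns the next tick.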
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index b27f5342fe76..8d3dfb0c8a26 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -15,7 +15,7 @@ config SYNCLINK_CS
15 15
16 This driver may be built as a module ( = code which can be 16 This driver may be built as a module ( = code which can be
17 inserted in and removed from the running kernel whenever you want). 17 inserted in and removed from the running kernel whenever you want).
18 The module will be called synclinkmp. If you want to do that, say M 18 The module will be called synclink_cs. If you want to do that, say M
19 here. 19 here.
20 20
21config CARDMAN_4000 21config CARDMAN_4000
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6b75713d953a..102c50d38902 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -995,8 +995,11 @@ retry:
995 ibytes = min_t(size_t, ibytes, have_bytes - reserved); 995 ibytes = min_t(size_t, ibytes, have_bytes - reserved);
996 if (ibytes < min) 996 if (ibytes < min)
997 ibytes = 0; 997 ibytes = 0;
998 entropy_count = max_t(int, 0, 998 if (have_bytes >= ibytes + reserved)
999 entropy_count - (ibytes << (ENTROPY_SHIFT + 3))); 999 entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
1000 else
1001 entropy_count = reserved << (ENTROPY_SHIFT + 3);
1002
1000 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) 1003 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
1001 goto retry; 1004 goto retry;
1002 1005
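The random.c hunk replaces the old max_t() clamp with explicit accounting: debit only the bytes actually handed out, and never let the pool drop below its reserve; the surrounding cmpxchg() retry loop kept in the diff handles the case where another extractor raced with this one. A hedged arithmetic sketch, outside any locking and with invented names:

/* Sketch: entropy_count is kept in 1/8-bit units shifted left by
 * ENTROPY_SHIFT, hence the "<< (SHIFT + 3)" when converting bytes. */
#include <stddef.h>

#define DEMO_ENTROPY_SHIFT 3            /* mirrors ENTROPY_SHIFT */

static int debit_entropy(int entropy_count, size_t have_bytes,
                         size_t ibytes, size_t reserved)
{
        if (have_bytes >= ibytes + reserved)
                entropy_count -= (int)(ibytes << (DEMO_ENTROPY_SHIFT + 3));
        else
                entropy_count = (int)(reserved << (DEMO_ENTROPY_SHIFT + 3));

        return entropy_count;
}

Extracting 8 bytes from a pool that can spare them therefore debits 8 << 6 = 512 counter units (64 bits of credited entropy); when the pool cannot spare them, the counter is pinned at the reserve instead of being driven toward zero.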
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index b3ea223585bd..61dcc8011ec7 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -328,13 +328,11 @@ int tpm_add_ppi(struct kobject *parent)
328 /* Cache TPM ACPI handle and version string */ 328 /* Cache TPM ACPI handle and version string */
329 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, 329 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
330 ppi_callback, NULL, NULL, &tpm_ppi_handle); 330 ppi_callback, NULL, NULL, &tpm_ppi_handle);
331 if (tpm_ppi_handle == NULL) 331 return tpm_ppi_handle ? sysfs_create_group(parent, &ppi_attr_grp) : 0;
332 return -ENODEV;
333
334 return sysfs_create_group(parent, &ppi_attr_grp);
335} 332}
336 333
337void tpm_remove_ppi(struct kobject *parent) 334void tpm_remove_ppi(struct kobject *parent)
338{ 335{
339 sysfs_remove_group(parent, &ppi_attr_grp); 336 if (tpm_ppi_handle)
337 sysfs_remove_group(parent, &ppi_attr_grp);
340} 338}
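The tpm_ppi.c change makes the PPI sysfs group strictly conditional on a discovered ACPI handle, and makes removal mirror the same test instead of unconditionally tearing down a group that may never have been created. A tiny sketch of that symmetric optional-feature idiom, with invented names:

/* Sketch: register an optional feature only when its backing object
 * was found, and make teardown mirror the same check. */
static void *demo_handle;               /* set by probing; may stay NULL */

static int register_feature(void)       { return 0; }   /* stand-ins */
static void unregister_feature(void)    { }

int demo_add_feature(void)
{
        /* A missing object is not an error for the caller. */
        return demo_handle ? register_feature() : 0;
}

void demo_remove_feature(void)
{
        if (demo_handle)                /* undo only what setup did */
                unregister_feature();
}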
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index daea84c41743..a15ce4ef39cd 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -17,7 +17,7 @@
17#include <linux/device.h> 17#include <linux/device.h>
18#include <linux/serial.h> 18#include <linux/serial.h>
19#include <linux/tty.h> 19#include <linux/tty.h>
20#include <linux/export.h> 20#include <linux/module.h>
21 21
22struct ttyprintk_port { 22struct ttyprintk_port {
23 struct tty_port port; 23 struct tty_port port;
@@ -210,10 +210,19 @@ static int __init ttyprintk_init(void)
210 return 0; 210 return 0;
211 211
212error: 212error:
213 tty_unregister_driver(ttyprintk_driver);
214 put_tty_driver(ttyprintk_driver); 213 put_tty_driver(ttyprintk_driver);
215 tty_port_destroy(&tpk_port.port); 214 tty_port_destroy(&tpk_port.port);
216 ttyprintk_driver = NULL;
217 return ret; 215 return ret;
218} 216}
217
218static void __exit ttyprintk_exit(void)
219{
220 tty_unregister_driver(ttyprintk_driver);
221 put_tty_driver(ttyprintk_driver);
222 tty_port_destroy(&tpk_port.port);
223}
224
219device_initcall(ttyprintk_init); 225device_initcall(ttyprintk_init);
226module_exit(ttyprintk_exit);
227
228MODULE_LICENSE("GPL");
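The ttyprintk.c change fixes an error path that unregistered a tty driver which had never been registered, and adds a module_exit() that mirrors a successful init. The general idiom, sketched here with invented step names rather than the driver's calls, is to unwind only the steps that completed, in reverse order:

/* Sketch: each failure label undoes only what already succeeded, and
 * the exit path mirrors a fully successful init. */
static int do_step1(void)       { return 0; }
static int do_step2(void)       { return -1; }  /* pretend step2 fails */
static void undo_step1(void)    { }
static void undo_step2(void)    { }

int demo_init(void)
{
        int ret;

        ret = do_step1();
        if (ret)
                return ret;             /* nothing to unwind yet */

        ret = do_step2();
        if (ret)
                goto err_step1;         /* unwind step1 only */

        return 0;

err_step1:
        undo_step1();
        return ret;
}

void demo_exit(void)
{
        undo_step2();                   /* reverse order of init */
        undo_step1();
}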
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
index c7607feb18dd..54a06526f64f 100644
--- a/drivers/clk/bcm/clk-kona-setup.c
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -27,7 +27,7 @@ LIST_HEAD(ccu_list); /* The list of set up CCUs */
27 27
28static bool clk_requires_trigger(struct kona_clk *bcm_clk) 28static bool clk_requires_trigger(struct kona_clk *bcm_clk)
29{ 29{
30 struct peri_clk_data *peri = bcm_clk->peri; 30 struct peri_clk_data *peri = bcm_clk->u.peri;
31 struct bcm_clk_sel *sel; 31 struct bcm_clk_sel *sel;
32 struct bcm_clk_div *div; 32 struct bcm_clk_div *div;
33 33
@@ -63,7 +63,7 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
63 u32 limit; 63 u32 limit;
64 64
65 BUG_ON(bcm_clk->type != bcm_clk_peri); 65 BUG_ON(bcm_clk->type != bcm_clk_peri);
66 peri = bcm_clk->peri; 66 peri = bcm_clk->u.peri;
67 name = bcm_clk->name; 67 name = bcm_clk->name;
68 range = bcm_clk->ccu->range; 68 range = bcm_clk->ccu->range;
69 69
@@ -81,19 +81,19 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
81 81
82 div = &peri->div; 82 div = &peri->div;
83 if (divider_exists(div)) { 83 if (divider_exists(div)) {
84 if (div->offset > limit) { 84 if (div->u.s.offset > limit) {
85 pr_err("%s: bad divider offset for %s (%u > %u)\n", 85 pr_err("%s: bad divider offset for %s (%u > %u)\n",
86 __func__, name, div->offset, limit); 86 __func__, name, div->u.s.offset, limit);
87 return false; 87 return false;
88 } 88 }
89 } 89 }
90 90
91 div = &peri->pre_div; 91 div = &peri->pre_div;
92 if (divider_exists(div)) { 92 if (divider_exists(div)) {
93 if (div->offset > limit) { 93 if (div->u.s.offset > limit) {
94 pr_err("%s: bad pre-divider offset for %s " 94 pr_err("%s: bad pre-divider offset for %s "
95 "(%u > %u)\n", 95 "(%u > %u)\n",
96 __func__, name, div->offset, limit); 96 __func__, name, div->u.s.offset, limit);
97 return false; 97 return false;
98 } 98 }
99 } 99 }
@@ -249,21 +249,22 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
249{ 249{
250 if (divider_is_fixed(div)) { 250 if (divider_is_fixed(div)) {
251 /* Any fixed divider value but 0 is OK */ 251 /* Any fixed divider value but 0 is OK */
252 if (div->fixed == 0) { 252 if (div->u.fixed == 0) {
253 pr_err("%s: bad %s fixed value 0 for %s\n", __func__, 253 pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
254 field_name, clock_name); 254 field_name, clock_name);
255 return false; 255 return false;
256 } 256 }
257 return true; 257 return true;
258 } 258 }
259 if (!bitfield_valid(div->shift, div->width, field_name, clock_name)) 259 if (!bitfield_valid(div->u.s.shift, div->u.s.width,
260 field_name, clock_name))
260 return false; 261 return false;
261 262
262 if (divider_has_fraction(div)) 263 if (divider_has_fraction(div))
263 if (div->frac_width > div->width) { 264 if (div->u.s.frac_width > div->u.s.width) {
264 pr_warn("%s: bad %s fraction width for %s (%u > %u)\n", 265 pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
265 __func__, field_name, clock_name, 266 __func__, field_name, clock_name,
266 div->frac_width, div->width); 267 div->u.s.frac_width, div->u.s.width);
267 return false; 268 return false;
268 } 269 }
269 270
@@ -278,7 +279,7 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
278 */ 279 */
279static bool kona_dividers_valid(struct kona_clk *bcm_clk) 280static bool kona_dividers_valid(struct kona_clk *bcm_clk)
280{ 281{
281 struct peri_clk_data *peri = bcm_clk->peri; 282 struct peri_clk_data *peri = bcm_clk->u.peri;
282 struct bcm_clk_div *div; 283 struct bcm_clk_div *div;
283 struct bcm_clk_div *pre_div; 284 struct bcm_clk_div *pre_div;
284 u32 limit; 285 u32 limit;
@@ -295,7 +296,7 @@ static bool kona_dividers_valid(struct kona_clk *bcm_clk)
295 296
296 limit = BITS_PER_BYTE * sizeof(u32); 297 limit = BITS_PER_BYTE * sizeof(u32);
297 298
298 return div->frac_width + pre_div->frac_width <= limit; 299 return div->u.s.frac_width + pre_div->u.s.frac_width <= limit;
299} 300}
300 301
301 302
@@ -328,7 +329,7 @@ peri_clk_data_valid(struct kona_clk *bcm_clk)
328 if (!peri_clk_data_offsets_valid(bcm_clk)) 329 if (!peri_clk_data_offsets_valid(bcm_clk))
329 return false; 330 return false;
330 331
331 peri = bcm_clk->peri; 332 peri = bcm_clk->u.peri;
332 name = bcm_clk->name; 333 name = bcm_clk->name;
333 gate = &peri->gate; 334 gate = &peri->gate;
334 if (gate_exists(gate) && !gate_valid(gate, "gate", name)) 335 if (gate_exists(gate) && !gate_valid(gate, "gate", name))
@@ -588,12 +589,12 @@ static void bcm_clk_teardown(struct kona_clk *bcm_clk)
588{ 589{
589 switch (bcm_clk->type) { 590 switch (bcm_clk->type) {
590 case bcm_clk_peri: 591 case bcm_clk_peri:
591 peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data); 592 peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data);
592 break; 593 break;
593 default: 594 default:
594 break; 595 break;
595 } 596 }
596 bcm_clk->data = NULL; 597 bcm_clk->u.data = NULL;
597 bcm_clk->type = bcm_clk_none; 598 bcm_clk->type = bcm_clk_none;
598} 599}
599 600
@@ -644,7 +645,7 @@ struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
644 break; 645 break;
645 } 646 }
646 bcm_clk->type = type; 647 bcm_clk->type = type;
647 bcm_clk->data = data; 648 bcm_clk->u.data = data;
648 649
649 /* Make sure everything makes sense before we set it up */ 650 /* Make sure everything makes sense before we set it up */
650 if (!kona_clk_valid(bcm_clk)) { 651 if (!kona_clk_valid(bcm_clk)) {
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index e3d339e08309..db11a87449f2 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -61,7 +61,7 @@ u64 do_div_round_closest(u64 dividend, unsigned long divisor)
61/* Convert a divider into the scaled divisor value it represents. */ 61/* Convert a divider into the scaled divisor value it represents. */
62static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div) 62static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
63{ 63{
64 return (u64)reg_div + ((u64)1 << div->frac_width); 64 return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
65} 65}
66 66
67/* 67/*
@@ -77,7 +77,7 @@ u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
77 BUG_ON(billionths >= BILLION); 77 BUG_ON(billionths >= BILLION);
78 78
79 combined = (u64)div_value * BILLION + billionths; 79 combined = (u64)div_value * BILLION + billionths;
80 combined <<= div->frac_width; 80 combined <<= div->u.s.frac_width;
81 81
82 return do_div_round_closest(combined, BILLION); 82 return do_div_round_closest(combined, BILLION);
83} 83}
@@ -87,7 +87,7 @@ static inline u64
87scaled_div_min(struct bcm_clk_div *div) 87scaled_div_min(struct bcm_clk_div *div)
88{ 88{
89 if (divider_is_fixed(div)) 89 if (divider_is_fixed(div))
90 return (u64)div->fixed; 90 return (u64)div->u.fixed;
91 91
92 return scaled_div_value(div, 0); 92 return scaled_div_value(div, 0);
93} 93}
@@ -98,9 +98,9 @@ u64 scaled_div_max(struct bcm_clk_div *div)
98 u32 reg_div; 98 u32 reg_div;
99 99
100 if (divider_is_fixed(div)) 100 if (divider_is_fixed(div))
101 return (u64)div->fixed; 101 return (u64)div->u.fixed;
102 102
103 reg_div = ((u32)1 << div->width) - 1; 103 reg_div = ((u32)1 << div->u.s.width) - 1;
104 104
105 return scaled_div_value(div, reg_div); 105 return scaled_div_value(div, reg_div);
106} 106}
@@ -115,7 +115,7 @@ divider(struct bcm_clk_div *div, u64 scaled_div)
115 BUG_ON(scaled_div < scaled_div_min(div)); 115 BUG_ON(scaled_div < scaled_div_min(div));
116 BUG_ON(scaled_div > scaled_div_max(div)); 116 BUG_ON(scaled_div > scaled_div_max(div));
117 117
118 return (u32)(scaled_div - ((u64)1 << div->frac_width)); 118 return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
119} 119}
120 120
121/* Return a rate scaled for use when dividing by a scaled divisor. */ 121/* Return a rate scaled for use when dividing by a scaled divisor. */
@@ -125,7 +125,7 @@ scale_rate(struct bcm_clk_div *div, u32 rate)
125 if (divider_is_fixed(div)) 125 if (divider_is_fixed(div))
126 return (u64)rate; 126 return (u64)rate;
127 127
128 return (u64)rate << div->frac_width; 128 return (u64)rate << div->u.s.frac_width;
129} 129}
130 130
131/* CCU access */ 131/* CCU access */
@@ -398,14 +398,14 @@ static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
398 u32 reg_div; 398 u32 reg_div;
399 399
400 if (divider_is_fixed(div)) 400 if (divider_is_fixed(div))
401 return (u64)div->fixed; 401 return (u64)div->u.fixed;
402 402
403 flags = ccu_lock(ccu); 403 flags = ccu_lock(ccu);
404 reg_val = __ccu_read(ccu, div->offset); 404 reg_val = __ccu_read(ccu, div->u.s.offset);
405 ccu_unlock(ccu, flags); 405 ccu_unlock(ccu, flags);
406 406
407 /* Extract the full divider field from the register value */ 407 /* Extract the full divider field from the register value */
408 reg_div = bitfield_extract(reg_val, div->shift, div->width); 408 reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);
409 409
410 /* Return the scaled divisor value it represents */ 410 /* Return the scaled divisor value it represents */
411 return scaled_div_value(div, reg_div); 411 return scaled_div_value(div, reg_div);
@@ -433,16 +433,17 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
433 * state was defined in the device tree, we just find out 433 * state was defined in the device tree, we just find out
434 * what its current value is rather than updating it. 434 * what its current value is rather than updating it.
435 */ 435 */
436 if (div->scaled_div == BAD_SCALED_DIV_VALUE) { 436 if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
437 reg_val = __ccu_read(ccu, div->offset); 437 reg_val = __ccu_read(ccu, div->u.s.offset);
438 reg_div = bitfield_extract(reg_val, div->shift, div->width); 438 reg_div = bitfield_extract(reg_val, div->u.s.shift,
439 div->scaled_div = scaled_div_value(div, reg_div); 439 div->u.s.width);
440 div->u.s.scaled_div = scaled_div_value(div, reg_div);
440 441
441 return 0; 442 return 0;
442 } 443 }
443 444
444 /* Convert the scaled divisor to the value we need to record */ 445 /* Convert the scaled divisor to the value we need to record */
445 reg_div = divider(div, div->scaled_div); 446 reg_div = divider(div, div->u.s.scaled_div);
446 447
447 /* Clock needs to be enabled before changing the rate */ 448 /* Clock needs to be enabled before changing the rate */
448 enabled = __is_clk_gate_enabled(ccu, gate); 449 enabled = __is_clk_gate_enabled(ccu, gate);
@@ -452,9 +453,10 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
452 } 453 }
453 454
454 /* Replace the divider value and record the result */ 455 /* Replace the divider value and record the result */
455 reg_val = __ccu_read(ccu, div->offset); 456 reg_val = __ccu_read(ccu, div->u.s.offset);
456 reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div); 457 reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
457 __ccu_write(ccu, div->offset, reg_val); 458 reg_div);
459 __ccu_write(ccu, div->u.s.offset, reg_val);
458 460
459 /* If the trigger fails we still want to disable the gate */ 461 /* If the trigger fails we still want to disable the gate */
460 if (!__clk_trigger(ccu, trig)) 462 if (!__clk_trigger(ccu, trig))
@@ -490,11 +492,11 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
490 492
491 BUG_ON(divider_is_fixed(div)); 493 BUG_ON(divider_is_fixed(div));
492 494
493 previous = div->scaled_div; 495 previous = div->u.s.scaled_div;
494 if (previous == scaled_div) 496 if (previous == scaled_div)
495 return 0; /* No change */ 497 return 0; /* No change */
496 498
497 div->scaled_div = scaled_div; 499 div->u.s.scaled_div = scaled_div;
498 500
499 flags = ccu_lock(ccu); 501 flags = ccu_lock(ccu);
500 __ccu_write_enable(ccu); 502 __ccu_write_enable(ccu);
@@ -505,7 +507,7 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
505 ccu_unlock(ccu, flags); 507 ccu_unlock(ccu, flags);
506 508
507 if (ret) 509 if (ret)
508 div->scaled_div = previous; /* Revert the change */ 510 div->u.s.scaled_div = previous; /* Revert the change */
509 511
510 return ret; 512 return ret;
511 513
@@ -802,7 +804,7 @@ static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
802static int kona_peri_clk_enable(struct clk_hw *hw) 804static int kona_peri_clk_enable(struct clk_hw *hw)
803{ 805{
804 struct kona_clk *bcm_clk = to_kona_clk(hw); 806 struct kona_clk *bcm_clk = to_kona_clk(hw);
805 struct bcm_clk_gate *gate = &bcm_clk->peri->gate; 807 struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
806 808
807 return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true); 809 return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true);
808} 810}
@@ -810,7 +812,7 @@ static int kona_peri_clk_enable(struct clk_hw *hw)
810static void kona_peri_clk_disable(struct clk_hw *hw) 812static void kona_peri_clk_disable(struct clk_hw *hw)
811{ 813{
812 struct kona_clk *bcm_clk = to_kona_clk(hw); 814 struct kona_clk *bcm_clk = to_kona_clk(hw);
813 struct bcm_clk_gate *gate = &bcm_clk->peri->gate; 815 struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
814 816
815 (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false); 817 (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false);
816} 818}
@@ -818,7 +820,7 @@ static void kona_peri_clk_disable(struct clk_hw *hw)
818static int kona_peri_clk_is_enabled(struct clk_hw *hw) 820static int kona_peri_clk_is_enabled(struct clk_hw *hw)
819{ 821{
820 struct kona_clk *bcm_clk = to_kona_clk(hw); 822 struct kona_clk *bcm_clk = to_kona_clk(hw);
821 struct bcm_clk_gate *gate = &bcm_clk->peri->gate; 823 struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
822 824
823 return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0; 825 return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
824} 826}
@@ -827,7 +829,7 @@ static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
827 unsigned long parent_rate) 829 unsigned long parent_rate)
828{ 830{
829 struct kona_clk *bcm_clk = to_kona_clk(hw); 831 struct kona_clk *bcm_clk = to_kona_clk(hw);
830 struct peri_clk_data *data = bcm_clk->peri; 832 struct peri_clk_data *data = bcm_clk->u.peri;
831 833
832 return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div, 834 return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
833 parent_rate); 835 parent_rate);
@@ -837,20 +839,20 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
837 unsigned long *parent_rate) 839 unsigned long *parent_rate)
838{ 840{
839 struct kona_clk *bcm_clk = to_kona_clk(hw); 841 struct kona_clk *bcm_clk = to_kona_clk(hw);
840 struct bcm_clk_div *div = &bcm_clk->peri->div; 842 struct bcm_clk_div *div = &bcm_clk->u.peri->div;
841 843
842 if (!divider_exists(div)) 844 if (!divider_exists(div))
843 return __clk_get_rate(hw->clk); 845 return __clk_get_rate(hw->clk);
844 846
845 /* Quietly avoid a zero rate */ 847 /* Quietly avoid a zero rate */
846 return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div, 848 return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
847 rate ? rate : 1, *parent_rate, NULL); 849 rate ? rate : 1, *parent_rate, NULL);
848} 850}
849 851
850static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) 852static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
851{ 853{
852 struct kona_clk *bcm_clk = to_kona_clk(hw); 854 struct kona_clk *bcm_clk = to_kona_clk(hw);
853 struct peri_clk_data *data = bcm_clk->peri; 855 struct peri_clk_data *data = bcm_clk->u.peri;
854 struct bcm_clk_sel *sel = &data->sel; 856 struct bcm_clk_sel *sel = &data->sel;
855 struct bcm_clk_trig *trig; 857 struct bcm_clk_trig *trig;
856 int ret; 858 int ret;
@@ -884,7 +886,7 @@ static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
884static u8 kona_peri_clk_get_parent(struct clk_hw *hw) 886static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
885{ 887{
886 struct kona_clk *bcm_clk = to_kona_clk(hw); 888 struct kona_clk *bcm_clk = to_kona_clk(hw);
887 struct peri_clk_data *data = bcm_clk->peri; 889 struct peri_clk_data *data = bcm_clk->u.peri;
888 u8 index; 890 u8 index;
889 891
890 index = selector_read_index(bcm_clk->ccu, &data->sel); 892 index = selector_read_index(bcm_clk->ccu, &data->sel);
@@ -897,7 +899,7 @@ static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
897 unsigned long parent_rate) 899 unsigned long parent_rate)
898{ 900{
899 struct kona_clk *bcm_clk = to_kona_clk(hw); 901 struct kona_clk *bcm_clk = to_kona_clk(hw);
900 struct peri_clk_data *data = bcm_clk->peri; 902 struct peri_clk_data *data = bcm_clk->u.peri;
901 struct bcm_clk_div *div = &data->div; 903 struct bcm_clk_div *div = &data->div;
902 u64 scaled_div = 0; 904 u64 scaled_div = 0;
903 int ret; 905 int ret;
@@ -958,7 +960,7 @@ struct clk_ops kona_peri_clk_ops = {
958static bool __peri_clk_init(struct kona_clk *bcm_clk) 960static bool __peri_clk_init(struct kona_clk *bcm_clk)
959{ 961{
960 struct ccu_data *ccu = bcm_clk->ccu; 962 struct ccu_data *ccu = bcm_clk->ccu;
961 struct peri_clk_data *peri = bcm_clk->peri; 963 struct peri_clk_data *peri = bcm_clk->u.peri;
962 const char *name = bcm_clk->name; 964 const char *name = bcm_clk->name;
963 struct bcm_clk_trig *trig; 965 struct bcm_clk_trig *trig;
964 966
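The clk-kona.c arithmetic above treats a divider register as fixed point: a register value reg_div stands for a divisor of 1 + reg_div / 2^frac_width, so the code scales divisors and rates by the same 2^frac_width and divides with rounding at the end. A standalone sketch of that math, with invented names rather than the driver's helpers:

/* Sketch of the fractional-divider math. */
#include <stdint.h>
#include <stdio.h>

static uint64_t div_round_closest(uint64_t dividend, uint64_t divisor)
{
        return (dividend + divisor / 2) / divisor;
}

static uint64_t scaled_divisor(uint32_t reg_div, uint32_t frac_width)
{
        return (uint64_t)reg_div + ((uint64_t)1 << frac_width);
}

static uint64_t divided_rate(uint64_t parent_rate, uint32_t reg_div,
                             uint32_t frac_width)
{
        uint64_t scaled_parent = parent_rate << frac_width;

        return div_round_closest(scaled_parent,
                                 scaled_divisor(reg_div, frac_width));
}

int main(void)
{
        /* frac_width = 3, reg_div = 12: divisor = 1 + 12/8 = 2.5,
         * so a 100 MHz parent yields 40 MHz. */
        printf("%llu\n",
               (unsigned long long)divided_rate(100000000, 12, 3));
        return 0;
}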
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index 5e139adc3dc5..dee690951bb6 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -57,7 +57,7 @@
57#define divider_exists(div) FLAG_TEST(div, DIV, EXISTS) 57#define divider_exists(div) FLAG_TEST(div, DIV, EXISTS)
58#define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED) 58#define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED)
59#define divider_has_fraction(div) (!divider_is_fixed(div) && \ 59#define divider_has_fraction(div) (!divider_is_fixed(div) && \
60 (div)->frac_width > 0) 60 (div)->u.s.frac_width > 0)
61 61
62#define selector_exists(sel) ((sel)->width != 0) 62#define selector_exists(sel) ((sel)->width != 0)
63#define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS) 63#define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS)
@@ -244,9 +244,9 @@ struct bcm_clk_div {
244 u32 frac_width; /* field fraction width */ 244 u32 frac_width; /* field fraction width */
245 245
246 u64 scaled_div; /* scaled divider value */ 246 u64 scaled_div; /* scaled divider value */
247 }; 247 } s;
248 u32 fixed; /* non-zero fixed divider value */ 248 u32 fixed; /* non-zero fixed divider value */
249 }; 249 } u;
250 u32 flags; /* BCM_CLK_DIV_FLAGS_* below */ 250 u32 flags; /* BCM_CLK_DIV_FLAGS_* below */
251}; 251};
252 252
@@ -263,28 +263,28 @@ struct bcm_clk_div {
263/* A fixed (non-zero) divider */ 263/* A fixed (non-zero) divider */
264#define FIXED_DIVIDER(_value) \ 264#define FIXED_DIVIDER(_value) \
265 { \ 265 { \
266 .fixed = (_value), \ 266 .u.fixed = (_value), \
267 .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \ 267 .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \
268 } 268 }
269 269
270/* A divider with an integral divisor */ 270/* A divider with an integral divisor */
271#define DIVIDER(_offset, _shift, _width) \ 271#define DIVIDER(_offset, _shift, _width) \
272 { \ 272 { \
273 .offset = (_offset), \ 273 .u.s.offset = (_offset), \
274 .shift = (_shift), \ 274 .u.s.shift = (_shift), \
275 .width = (_width), \ 275 .u.s.width = (_width), \
276 .scaled_div = BAD_SCALED_DIV_VALUE, \ 276 .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \
277 .flags = FLAG(DIV, EXISTS), \ 277 .flags = FLAG(DIV, EXISTS), \
278 } 278 }
279 279
280/* A divider whose divisor has an integer and fractional part */ 280/* A divider whose divisor has an integer and fractional part */
281#define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \ 281#define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \
282 { \ 282 { \
283 .offset = (_offset), \ 283 .u.s.offset = (_offset), \
284 .shift = (_shift), \ 284 .u.s.shift = (_shift), \
285 .width = (_width), \ 285 .u.s.width = (_width), \
286 .frac_width = (_frac_width), \ 286 .u.s.frac_width = (_frac_width), \
287 .scaled_div = BAD_SCALED_DIV_VALUE, \ 287 .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \
288 .flags = FLAG(DIV, EXISTS), \ 288 .flags = FLAG(DIV, EXISTS), \
289 } 289 }
290 290
@@ -380,7 +380,7 @@ struct kona_clk {
380 union { 380 union {
381 void *data; 381 void *data;
382 struct peri_clk_data *peri; 382 struct peri_clk_data *peri;
383 }; 383 } u;
384}; 384};
385#define to_kona_clk(_hw) \ 385#define to_kona_clk(_hw) \
386 container_of(_hw, struct kona_clk, hw) 386 container_of(_hw, struct kona_clk, hw)
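The clk-kona.h change names the previously anonymous union and struct (u and s) and updates the initializer macros to match. The likely motivation, stated here as an assumption rather than taken from the commit text, is that older gcc releases reject designated initializers that reach into anonymous aggregates, which the static DIVIDER()/FIXED_DIVIDER() tables rely on. A minimal sketch of the resulting initializer form with an invented type:

/* Sketch: named union/struct members keep designated initializers
 * portable to older compilers (exact gcc version is an assumption). */
struct demo_div {
        union {
                struct {
                        unsigned int offset;
                        unsigned int shift;
                        unsigned int width;
                } s;                    /* register-backed divider */
                unsigned int fixed;     /* fixed divider value */
        } u;
        unsigned int flags;
};

/* Both forms can now be written as plain designated initializers. */
static const struct demo_div reg_div   = { .u.s.offset = 0x14,
                                           .u.s.shift  = 4,
                                           .u.s.width  = 5,
                                           .flags      = 1 };
static const struct demo_div fixed_div = { .u.fixed = 2, .flags = 3 };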
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index ec22112e569f..3fbee4540228 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -144,6 +144,37 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
144 return true; 144 return true;
145} 145}
146 146
147static int _round_up_table(const struct clk_div_table *table, int div)
148{
149 const struct clk_div_table *clkt;
150 int up = INT_MAX;
151
152 for (clkt = table; clkt->div; clkt++) {
153 if (clkt->div == div)
154 return clkt->div;
155 else if (clkt->div < div)
156 continue;
157
158 if ((clkt->div - div) < (up - div))
159 up = clkt->div;
160 }
161
162 return up;
163}
164
165static int _div_round_up(struct clk_divider *divider,
166 unsigned long parent_rate, unsigned long rate)
167{
168 int div = DIV_ROUND_UP(parent_rate, rate);
169
170 if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
171 div = __roundup_pow_of_two(div);
172 if (divider->table)
173 div = _round_up_table(divider->table, div);
174
175 return div;
176}
177
147static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, 178static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
148 unsigned long *best_parent_rate) 179 unsigned long *best_parent_rate)
149{ 180{
@@ -159,7 +190,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
159 190
160 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { 191 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
161 parent_rate = *best_parent_rate; 192 parent_rate = *best_parent_rate;
162 bestdiv = DIV_ROUND_UP(parent_rate, rate); 193 bestdiv = _div_round_up(divider, parent_rate, rate);
163 bestdiv = bestdiv == 0 ? 1 : bestdiv; 194 bestdiv = bestdiv == 0 ? 1 : bestdiv;
164 bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv; 195 bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
165 return bestdiv; 196 return bestdiv;
@@ -219,6 +250,10 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
219 u32 val; 250 u32 val;
220 251
221 div = DIV_ROUND_UP(parent_rate, rate); 252 div = DIV_ROUND_UP(parent_rate, rate);
253
254 if (!_is_valid_div(divider, div))
255 return -EINVAL;
256
222 value = _get_val(divider, div); 257 value = _get_val(divider, div);
223 258
224 if (value > div_mask(divider)) 259 if (value > div_mask(divider))
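
The new _div_round_up()/_round_up_table() helpers round a computed divider up to the nearest value the hardware can actually program: either the next power of two or the next entry in a divider table. The following userspace sketch shows that selection logic in isolation; the table type and helper names are local to the example, not part of the clk API.

#include <limits.h>
#include <stdio.h>

struct div_table { unsigned int div; };	/* a zero .div terminates the table */

/* Round div up to the closest table entry that is >= div. */
static int round_up_table(const struct div_table *table, int div)
{
	const struct div_table *t;
	int up = INT_MAX;

	for (t = table; t->div; t++) {
		if ((int)t->div == div)
			return t->div;
		if ((int)t->div < div)
			continue;
		if ((int)t->div - div < up - div)
			up = t->div;
	}
	return up;
}

/* Round div up to the next power of two. */
static int round_up_pow2(int div)
{
	int p = 1;

	while (p < div)
		p <<= 1;
	return p;
}

int main(void)
{
	static const struct div_table table[] = { {1}, {2}, {4}, {6}, {8}, {0} };
	unsigned long parent = 48000000, rate = 7000000;
	int div = (parent + rate - 1) / rate;	/* DIV_ROUND_UP */

	printf("raw div %d, table-rounded %d, pow2-rounded %d\n",
	       div, round_up_table(table, div), round_up_pow2(div));
	return 0;
}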
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index dff0373f53c1..7cf2c093cc54 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1984,9 +1984,28 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
1984} 1984}
1985EXPORT_SYMBOL_GPL(__clk_register); 1985EXPORT_SYMBOL_GPL(__clk_register);
1986 1986
1987static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk) 1987/**
1988 * clk_register - allocate a new clock, register it and return an opaque cookie
1989 * @dev: device that is registering this clock
1990 * @hw: link to hardware-specific clock data
1991 *
1992 * clk_register is the primary interface for populating the clock tree with new
1993 * clock nodes. It returns a pointer to the newly allocated struct clk which
1994 * cannot be dereferenced by driver code but may be used in conjunction with the
1995 * rest of the clock API. In the event of an error clk_register will return an
1996 * error code; drivers must test for an error code after calling clk_register.
1997 */
1998struct clk *clk_register(struct device *dev, struct clk_hw *hw)
1988{ 1999{
1989 int i, ret; 2000 int i, ret;
2001 struct clk *clk;
2002
2003 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2004 if (!clk) {
2005 pr_err("%s: could not allocate clk\n", __func__);
2006 ret = -ENOMEM;
2007 goto fail_out;
2008 }
1990 2009
1991 clk->name = kstrdup(hw->init->name, GFP_KERNEL); 2010 clk->name = kstrdup(hw->init->name, GFP_KERNEL);
1992 if (!clk->name) { 2011 if (!clk->name) {
@@ -2026,7 +2045,7 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
2026 2045
2027 ret = __clk_init(dev, clk); 2046 ret = __clk_init(dev, clk);
2028 if (!ret) 2047 if (!ret)
2029 return 0; 2048 return clk;
2030 2049
2031fail_parent_names_copy: 2050fail_parent_names_copy:
2032 while (--i >= 0) 2051 while (--i >= 0)
@@ -2035,36 +2054,6 @@ fail_parent_names_copy:
2035fail_parent_names: 2054fail_parent_names:
2036 kfree(clk->name); 2055 kfree(clk->name);
2037fail_name: 2056fail_name:
2038 return ret;
2039}
2040
2041/**
2042 * clk_register - allocate a new clock, register it and return an opaque cookie
2043 * @dev: device that is registering this clock
2044 * @hw: link to hardware-specific clock data
2045 *
2046 * clk_register is the primary interface for populating the clock tree with new
2047 * clock nodes. It returns a pointer to the newly allocated struct clk which
2048 * cannot be dereferenced by driver code but may be used in conjunction with the
2049 * rest of the clock API. In the event of an error clk_register will return an
2050 * error code; drivers must test for an error code after calling clk_register.
2051 */
2052struct clk *clk_register(struct device *dev, struct clk_hw *hw)
2053{
2054 int ret;
2055 struct clk *clk;
2056
2057 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2058 if (!clk) {
2059 pr_err("%s: could not allocate clk\n", __func__);
2060 ret = -ENOMEM;
2061 goto fail_out;
2062 }
2063
2064 ret = _clk_register(dev, hw, clk);
2065 if (!ret)
2066 return clk;
2067
2068 kfree(clk); 2057 kfree(clk);
2069fail_out: 2058fail_out:
2070 return ERR_PTR(ret); 2059 return ERR_PTR(ret);
@@ -2151,9 +2140,10 @@ void clk_unregister(struct clk *clk)
2151 2140
2152 if (!hlist_empty(&clk->children)) { 2141 if (!hlist_empty(&clk->children)) {
2153 struct clk *child; 2142 struct clk *child;
2143 struct hlist_node *t;
2154 2144
2155 /* Reparent all children to the orphan list. */ 2145 /* Reparent all children to the orphan list. */
2156 hlist_for_each_entry(child, &clk->children, child_node) 2146 hlist_for_each_entry_safe(child, t, &clk->children, child_node)
2157 clk_set_parent(child, NULL); 2147 clk_set_parent(child, NULL);
2158 } 2148 }
2159 2149
@@ -2173,7 +2163,7 @@ EXPORT_SYMBOL_GPL(clk_unregister);
2173 2163
2174static void devm_clk_release(struct device *dev, void *res) 2164static void devm_clk_release(struct device *dev, void *res)
2175{ 2165{
2176 clk_unregister(res); 2166 clk_unregister(*(struct clk **)res);
2177} 2167}
2178 2168
2179/** 2169/**
@@ -2188,18 +2178,18 @@ static void devm_clk_release(struct device *dev, void *res)
2188struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) 2178struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2189{ 2179{
2190 struct clk *clk; 2180 struct clk *clk;
2191 int ret; 2181 struct clk **clkp;
2192 2182
2193 clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL); 2183 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
2194 if (!clk) 2184 if (!clkp)
2195 return ERR_PTR(-ENOMEM); 2185 return ERR_PTR(-ENOMEM);
2196 2186
2197 ret = _clk_register(dev, hw, clk); 2187 clk = clk_register(dev, hw);
2198 if (!ret) { 2188 if (!IS_ERR(clk)) {
2199 devres_add(dev, clk); 2189 *clkp = clk;
2190 devres_add(dev, clkp);
2200 } else { 2191 } else {
2201 devres_free(clk); 2192 devres_free(clkp);
2202 clk = ERR_PTR(ret);
2203 } 2193 }
2204 2194
2205 return clk; 2195 return clk;
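
The devm_clk_register() fix stores a struct clk pointer inside the devres area and dereferences it in the release callback, instead of treating the devres allocation itself as the clk. Below is a small standalone sketch of that indirection; the mock devres helpers and names here are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

struct clk { const char *name; };

/* Mock registration: the clk is allocated by the registration path itself. */
static struct clk *clk_register(const char *name)
{
	struct clk *clk = malloc(sizeof(*clk));

	if (clk)
		clk->name = name;
	return clk;
}

static void clk_unregister(struct clk *clk)
{
	printf("unregistering %s\n", clk->name);
	free(clk);
}

/* The release callback receives the devres data, which holds a pointer. */
static void devm_clk_release(void *res)
{
	clk_unregister(*(struct clk **)res);
}

int main(void)
{
	/* The devres area holds a struct clk *, not a struct clk. */
	struct clk **clkp = malloc(sizeof(*clkp));
	struct clk *clk = clk_register("osc0");

	if (!clkp || !clk) {
		free(clkp);
		free(clk);
		return 1;
	}
	*clkp = clk;

	/* Later, on device teardown, the managed release runs: */
	devm_clk_release(clkp);
	free(clkp);
	return 0;
}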
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c
index 2e5810c88d11..1f6324e29a80 100644
--- a/drivers/clk/shmobile/clk-mstp.c
+++ b/drivers/clk/shmobile/clk-mstp.c
@@ -156,6 +156,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name,
156static void __init cpg_mstp_clocks_init(struct device_node *np) 156static void __init cpg_mstp_clocks_init(struct device_node *np)
157{ 157{
158 struct mstp_clock_group *group; 158 struct mstp_clock_group *group;
159 const char *idxname;
159 struct clk **clks; 160 struct clk **clks;
160 unsigned int i; 161 unsigned int i;
161 162
@@ -184,6 +185,11 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
184 for (i = 0; i < MSTP_MAX_CLOCKS; ++i) 185 for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
185 clks[i] = ERR_PTR(-ENOENT); 186 clks[i] = ERR_PTR(-ENOENT);
186 187
188 if (of_find_property(np, "clock-indices", &i))
189 idxname = "clock-indices";
190 else
191 idxname = "renesas,clock-indices";
192
187 for (i = 0; i < MSTP_MAX_CLOCKS; ++i) { 193 for (i = 0; i < MSTP_MAX_CLOCKS; ++i) {
188 const char *parent_name; 194 const char *parent_name;
189 const char *name; 195 const char *name;
@@ -197,8 +203,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
197 continue; 203 continue;
198 204
199 parent_name = of_clk_get_parent_name(np, i); 205 parent_name = of_clk_get_parent_name(np, i);
200 ret = of_property_read_u32_index(np, "renesas,clock-indices", i, 206 ret = of_property_read_u32_index(np, idxname, i, &clkidx);
201 &clkidx);
202 if (parent_name == NULL || ret < 0) 207 if (parent_name == NULL || ret < 0)
203 break; 208 break;
204 209
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index 88dafb5e9627..de6da957a09d 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -20,6 +20,7 @@
20#include <linux/clk-provider.h> 20#include <linux/clk-provider.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/of.h> 22#include <linux/of.h>
23#include <linux/of_address.h>
23 24
24#include "clk.h" 25#include "clk.h"
25 26
@@ -43,6 +44,8 @@
43 44
44#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw) 45#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
45 46
47void __iomem *clk_mgr_base_addr;
48
46static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, 49static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
47 unsigned long parent_rate) 50 unsigned long parent_rate)
48{ 51{
@@ -87,6 +90,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
87 const char *clk_name = node->name; 90 const char *clk_name = node->name;
88 const char *parent_name[SOCFPGA_MAX_PARENTS]; 91 const char *parent_name[SOCFPGA_MAX_PARENTS];
89 struct clk_init_data init; 92 struct clk_init_data init;
93 struct device_node *clkmgr_np;
90 int rc; 94 int rc;
91 int i = 0; 95 int i = 0;
92 96
@@ -96,6 +100,9 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
96 if (WARN_ON(!pll_clk)) 100 if (WARN_ON(!pll_clk))
97 return NULL; 101 return NULL;
98 102
103 clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
104 clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
105 BUG_ON(!clk_mgr_base_addr);
99 pll_clk->hw.reg = clk_mgr_base_addr + reg; 106 pll_clk->hw.reg = clk_mgr_base_addr + reg;
100 107
101 of_property_read_string(node, "clock-output-names", &clk_name); 108 of_property_read_string(node, "clock-output-names", &clk_name);
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 35a960a993f9..43db947e5f0e 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -17,28 +17,11 @@
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */ 19 */
20#include <linux/clk.h>
21#include <linux/clkdev.h>
22#include <linux/clk-provider.h>
23#include <linux/io.h>
24#include <linux/of.h> 20#include <linux/of.h>
25#include <linux/of_address.h>
26 21
27#include "clk.h" 22#include "clk.h"
28 23
29void __iomem *clk_mgr_base_addr; 24CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init);
30 25CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init);
31static const struct of_device_id socfpga_child_clocks[] __initconst = { 26CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init);
32 { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, },
33 { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, },
34 { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, },
35 {},
36};
37
38static void __init socfpga_clkmgr_init(struct device_node *node)
39{
40 clk_mgr_base_addr = of_iomap(node, 0);
41 of_clk_init(socfpga_child_clocks);
42}
43CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init);
44 27
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index bca0a0badbfa..a886702f7c8b 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
521 gate->lock = odf_lock; 521 gate->lock = odf_lock;
522 522
523 div = kzalloc(sizeof(*div), GFP_KERNEL); 523 div = kzalloc(sizeof(*div), GFP_KERNEL);
524 if (!div) 524 if (!div) {
525 kfree(gate);
525 return ERR_PTR(-ENOMEM); 526 return ERR_PTR(-ENOMEM);
527 }
526 528
527 div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; 529 div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
528 div->reg = reg + pll_data->odf[odf].offset; 530 div->reg = reg + pll_data->odf[odf].offset;
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 0d20241e0770..6aad8abc69a2 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -58,9 +58,9 @@
58#define PLLDU_LFCON_SET_DIVN 600 58#define PLLDU_LFCON_SET_DIVN 600
59 59
60#define PLLE_BASE_DIVCML_SHIFT 24 60#define PLLE_BASE_DIVCML_SHIFT 24
61#define PLLE_BASE_DIVCML_WIDTH 4 61#define PLLE_BASE_DIVCML_MASK 0xf
62#define PLLE_BASE_DIVP_SHIFT 16 62#define PLLE_BASE_DIVP_SHIFT 16
63#define PLLE_BASE_DIVP_WIDTH 7 63#define PLLE_BASE_DIVP_WIDTH 6
64#define PLLE_BASE_DIVN_SHIFT 8 64#define PLLE_BASE_DIVN_SHIFT 8
65#define PLLE_BASE_DIVN_WIDTH 8 65#define PLLE_BASE_DIVN_WIDTH 8
66#define PLLE_BASE_DIVM_SHIFT 0 66#define PLLE_BASE_DIVM_SHIFT 0
@@ -183,6 +183,14 @@
183#define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\ 183#define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
184 mask(p->params->div_nmp->divp_width)) 184 mask(p->params->div_nmp->divp_width))
185 185
186#define divm_shift(p) (p)->params->div_nmp->divm_shift
187#define divn_shift(p) (p)->params->div_nmp->divn_shift
188#define divp_shift(p) (p)->params->div_nmp->divp_shift
189
190#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
191#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
192#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))
193
186#define divm_max(p) (divm_mask(p)) 194#define divm_max(p) (divm_mask(p))
187#define divn_max(p) (divn_mask(p)) 195#define divn_max(p) (divn_mask(p))
188#define divp_max(p) (1 << (divp_mask(p))) 196#define divp_max(p) (1 << (divp_mask(p)))
@@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
476 } else { 484 } else {
477 val = pll_readl_base(pll); 485 val = pll_readl_base(pll);
478 486
479 val &= ~((divm_mask(pll) << div_nmp->divm_shift) | 487 val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) |
480 (divn_mask(pll) << div_nmp->divn_shift) | 488 divp_mask_shifted(pll));
481 (divp_mask(pll) << div_nmp->divp_shift));
482 489
483 val |= ((cfg->m << div_nmp->divm_shift) | 490 val |= (cfg->m << divm_shift(pll)) |
484 (cfg->n << div_nmp->divn_shift) | 491 (cfg->n << divn_shift(pll)) |
485 (cfg->p << div_nmp->divp_shift)); 492 (cfg->p << divp_shift(pll));
486 493
487 pll_writel_base(val, pll); 494 pll_writel_base(val, pll);
488 } 495 }
@@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw)
730 if (pll->params->flags & TEGRA_PLLE_CONFIGURE) { 737 if (pll->params->flags & TEGRA_PLLE_CONFIGURE) {
731 /* configure dividers */ 738 /* configure dividers */
732 val = pll_readl_base(pll); 739 val = pll_readl_base(pll);
733 val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll)); 740 val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
734 val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT); 741 divm_mask_shifted(pll));
735 val |= sel.m << pll->params->div_nmp->divm_shift; 742 val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
736 val |= sel.n << pll->params->div_nmp->divn_shift; 743 val |= sel.m << divm_shift(pll);
737 val |= sel.p << pll->params->div_nmp->divp_shift; 744 val |= sel.n << divn_shift(pll);
745 val |= sel.p << divp_shift(pll);
738 val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; 746 val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
739 pll_writel_base(val, pll); 747 pll_writel_base(val, pll);
740 } 748 }
@@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw)
745 pll_writel_misc(val, pll); 753 pll_writel_misc(val, pll);
746 754
747 val = readl(pll->clk_base + PLLE_SS_CTRL); 755 val = readl(pll->clk_base + PLLE_SS_CTRL);
756 val &= ~PLLE_SS_COEFFICIENTS_MASK;
748 val |= PLLE_SS_DISABLE; 757 val |= PLLE_SS_DISABLE;
749 writel(val, pll->clk_base + PLLE_SS_CTRL); 758 writel(val, pll->clk_base + PLLE_SS_CTRL);
750 759
751 val |= pll_readl_base(pll); 760 val = pll_readl_base(pll);
752 val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE); 761 val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
753 pll_writel_base(val, pll); 762 pll_writel_base(val, pll);
754 763
@@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1292 pll_writel(val, PLLE_SS_CTRL, pll); 1301 pll_writel(val, PLLE_SS_CTRL, pll);
1293 1302
1294 val = pll_readl_base(pll); 1303 val = pll_readl_base(pll);
1295 val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll)); 1304 val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
1296 val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT); 1305 divm_mask_shifted(pll));
1297 val |= sel.m << pll->params->div_nmp->divm_shift; 1306 val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
1298 val |= sel.n << pll->params->div_nmp->divn_shift; 1307 val |= sel.m << divm_shift(pll);
1308 val |= sel.n << divn_shift(pll);
1299 val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; 1309 val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
1300 pll_writel_base(val, pll); 1310 pll_writel_base(val, pll);
1301 udelay(1); 1311 udelay(1);
@@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
1410 return clk; 1420 return clk;
1411} 1421}
1412 1422
1423static struct div_nmp pll_e_nmp = {
1424 .divn_shift = PLLE_BASE_DIVN_SHIFT,
1425 .divn_width = PLLE_BASE_DIVN_WIDTH,
1426 .divm_shift = PLLE_BASE_DIVM_SHIFT,
1427 .divm_width = PLLE_BASE_DIVM_WIDTH,
1428 .divp_shift = PLLE_BASE_DIVP_SHIFT,
1429 .divp_width = PLLE_BASE_DIVP_WIDTH,
1430};
1431
1413struct clk *tegra_clk_register_plle(const char *name, const char *parent_name, 1432struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
1414 void __iomem *clk_base, void __iomem *pmc, 1433 void __iomem *clk_base, void __iomem *pmc,
1415 unsigned long flags, struct tegra_clk_pll_params *pll_params, 1434 unsigned long flags, struct tegra_clk_pll_params *pll_params,
@@ -1420,6 +1439,10 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
1420 1439
1421 pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS; 1440 pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
1422 pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE; 1441 pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
1442
1443 if (!pll_params->div_nmp)
1444 pll_params->div_nmp = &pll_e_nmp;
1445
1423 pll = _tegra_init_pll(clk_base, pmc, pll_params, lock); 1446 pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
1424 if (IS_ERR(pll)) 1447 if (IS_ERR(pll))
1425 return ERR_CAST(pll); 1448 return ERR_CAST(pll);
@@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
1557 int m; 1580 int m;
1558 1581
1559 m = _pll_fixed_mdiv(pll_params, parent_rate); 1582 m = _pll_fixed_mdiv(pll_params, parent_rate);
1560 val = m << PLL_BASE_DIVM_SHIFT; 1583 val = m << divm_shift(pll);
1561 val |= (pll_params->vco_min / parent_rate) 1584 val |= (pll_params->vco_min / parent_rate) << divn_shift(pll);
1562 << PLL_BASE_DIVN_SHIFT;
1563 pll_writel_base(val, pll); 1585 pll_writel_base(val, pll);
1564 } 1586 }
1565 1587
@@ -1718,7 +1740,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
1718 "pll_re_vco"); 1740 "pll_re_vco");
1719 } else { 1741 } else {
1720 val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL); 1742 val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
1721 pll_writel(val, pll_params->aux_reg, pll); 1743 pll_writel(val_aux, pll_params->aux_reg, pll);
1722 } 1744 }
1723 1745
1724 clk = _tegra_clk_register_pll(pll, name, parent_name, flags, 1746 clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
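
The Tegra PLL changes replace open-coded "mask << shift" clears with divm/divn/divp_mask_shifted() helpers, which avoids accidentally clearing an unshifted mask (or a field width instead of a mask). The standalone sketch below shows the read-modify-write pattern on a fake register value; the field layout is invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical PLL base register layout. */
#define DIVM_SHIFT	0
#define DIVM_WIDTH	5
#define DIVN_SHIFT	8
#define DIVN_WIDTH	10
#define DIVP_SHIFT	20
#define DIVP_WIDTH	4

#define FIELD_MASK(width)		((1u << (width)) - 1)
#define MASK_SHIFTED(width, shift)	(FIELD_MASK(width) << (shift))

static uint32_t update_mnp(uint32_t val, unsigned int m, unsigned int n, unsigned int p)
{
	/* Clear the shifted masks, then OR in the shifted new values. */
	val &= ~(MASK_SHIFTED(DIVM_WIDTH, DIVM_SHIFT) |
		 MASK_SHIFTED(DIVN_WIDTH, DIVN_SHIFT) |
		 MASK_SHIFTED(DIVP_WIDTH, DIVP_SHIFT));
	val |= (m << DIVM_SHIFT) | (n << DIVN_SHIFT) | (p << DIVP_SHIFT);
	return val;
}

int main(void)
{
	uint32_t base = 0xdeadbeef;

	printf("before: 0x%08x after: 0x%08x\n",
	       base, update_mnp(base, 4, 100, 2));
	return 0;
}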
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 166e02f16c8a..cc37c342c4cb 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -764,7 +764,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
764 [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true }, 764 [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
765 [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true }, 765 [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
766 [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true }, 766 [tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
767 [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
768 [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true }, 767 [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
769 [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true }, 768 [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
770 [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true }, 769 [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
@@ -809,7 +808,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
809 [tegra_clk_trace] = { .dt_id = TEGRA124_CLK_TRACE, .present = true }, 808 [tegra_clk_trace] = { .dt_id = TEGRA124_CLK_TRACE, .present = true },
810 [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true }, 809 [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
811 [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true }, 810 [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
812 [tegra_clk_ndspeed] = { .dt_id = TEGRA124_CLK_NDSPEED, .present = true },
813 [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true }, 811 [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
814 [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true }, 812 [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
815 [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true }, 813 [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
@@ -952,7 +950,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
952 [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true }, 950 [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
953 [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true }, 951 [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
954 [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true }, 952 [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
955 [tegra_clk_uarte] = { .dt_id = TEGRA124_CLK_UARTE, .present = true },
956}; 953};
957 954
958static struct tegra_devclk devclks[] __initdata = { 955static struct tegra_devclk devclks[] __initdata = {
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index 2dc8b41a339d..422391242b39 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -100,9 +100,11 @@ void __init vexpress_osc_of_setup(struct device_node *node)
100 struct clk *clk; 100 struct clk *clk;
101 u32 range[2]; 101 u32 range[2];
102 102
103 vexpress_sysreg_of_early_init();
104
103 osc = kzalloc(sizeof(*osc), GFP_KERNEL); 105 osc = kzalloc(sizeof(*osc), GFP_KERNEL);
104 if (!osc) 106 if (!osc)
105 goto error; 107 return;
106 108
107 osc->func = vexpress_config_func_get_by_node(node); 109 osc->func = vexpress_config_func_get_by_node(node);
108 if (!osc->func) { 110 if (!osc->func) {
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 57e823c44d2a..5163ec13429d 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -66,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
66static struct clock_event_device __percpu *arch_timer_evt; 66static struct clock_event_device __percpu *arch_timer_evt;
67 67
68static bool arch_timer_use_virtual = true; 68static bool arch_timer_use_virtual = true;
69static bool arch_timer_c3stop;
69static bool arch_timer_mem_use_virtual; 70static bool arch_timer_mem_use_virtual;
70 71
71/* 72/*
@@ -263,7 +264,8 @@ static void __arch_timer_setup(unsigned type,
263 clk->features = CLOCK_EVT_FEAT_ONESHOT; 264 clk->features = CLOCK_EVT_FEAT_ONESHOT;
264 265
265 if (type == ARCH_CP15_TIMER) { 266 if (type == ARCH_CP15_TIMER) {
266 clk->features |= CLOCK_EVT_FEAT_C3STOP; 267 if (arch_timer_c3stop)
268 clk->features |= CLOCK_EVT_FEAT_C3STOP;
267 clk->name = "arch_sys_timer"; 269 clk->name = "arch_sys_timer";
268 clk->rating = 450; 270 clk->rating = 450;
269 clk->cpumask = cpumask_of(smp_processor_id()); 271 clk->cpumask = cpumask_of(smp_processor_id());
@@ -665,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np)
665 } 667 }
666 } 668 }
667 669
670 arch_timer_c3stop = !of_property_read_bool(np, "always-on");
671
668 arch_timer_register(); 672 arch_timer_register();
669 arch_timer_common_init(); 673 arch_timer_common_init();
670} 674}
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index a6ee6d7cd63f..acf5a329d538 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -416,8 +416,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
416 evt->set_mode = exynos4_tick_set_mode; 416 evt->set_mode = exynos4_tick_set_mode;
417 evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; 417 evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
418 evt->rating = 450; 418 evt->rating = 450;
419 clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
420 0xf, 0x7fffffff);
421 419
422 exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); 420 exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
423 421
@@ -430,9 +428,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
430 evt->irq); 428 evt->irq);
431 return -EIO; 429 return -EIO;
432 } 430 }
431 irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
433 } else { 432 } else {
434 enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); 433 enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
435 } 434 }
435 clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
436 0xf, 0x7fffffff);
436 437
437 return 0; 438 return 0;
438} 439}
@@ -450,7 +451,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
450 unsigned long action, void *hcpu) 451 unsigned long action, void *hcpu)
451{ 452{
452 struct mct_clock_event_device *mevt; 453 struct mct_clock_event_device *mevt;
453 unsigned int cpu;
454 454
455 /* 455 /*
456 * Grab cpu pointer in each case to avoid spurious 456 * Grab cpu pointer in each case to avoid spurious
@@ -461,12 +461,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
461 mevt = this_cpu_ptr(&percpu_mct_tick); 461 mevt = this_cpu_ptr(&percpu_mct_tick);
462 exynos4_local_timer_setup(&mevt->evt); 462 exynos4_local_timer_setup(&mevt->evt);
463 break; 463 break;
464 case CPU_ONLINE:
465 cpu = (unsigned long)hcpu;
466 if (mct_int_type == MCT_INT_SPI)
467 irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
468 cpumask_of(cpu));
469 break;
470 case CPU_DYING: 464 case CPU_DYING:
471 mevt = this_cpu_ptr(&percpu_mct_tick); 465 mevt = this_cpu_ptr(&percpu_mct_tick);
472 exynos4_local_timer_stop(&mevt->evt); 466 exynos4_local_timer_stop(&mevt->evt);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 00fdd1170284..a8d7ea14f183 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
100 || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) { 100 || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
101 __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); 101 __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
102 __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); 102 __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
103 clk_disable_unprepare(tcd->clk); 103 clk_disable(tcd->clk);
104 } 104 }
105 105
106 switch (m) { 106 switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
109 * of oneshot, we get lower overhead and improved accuracy. 109 * of oneshot, we get lower overhead and improved accuracy.
110 */ 110 */
111 case CLOCK_EVT_MODE_PERIODIC: 111 case CLOCK_EVT_MODE_PERIODIC:
112 clk_prepare_enable(tcd->clk); 112 clk_enable(tcd->clk);
113 113
114 /* slow clock, count up to RC, then irq and restart */ 114 /* slow clock, count up to RC, then irq and restart */
115 __raw_writel(timer_clock 115 __raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
126 break; 126 break;
127 127
128 case CLOCK_EVT_MODE_ONESHOT: 128 case CLOCK_EVT_MODE_ONESHOT:
129 clk_prepare_enable(tcd->clk); 129 clk_enable(tcd->clk);
130 130
131 /* slow clock, count up to RC, then irq and stop */ 131 /* slow clock, count up to RC, then irq and stop */
132 __raw_writel(timer_clock | ATMEL_TC_CPCSTOP 132 __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
194 ret = clk_prepare_enable(t2_clk); 194 ret = clk_prepare_enable(t2_clk);
195 if (ret) 195 if (ret)
196 return ret; 196 return ret;
197 clk_disable_unprepare(t2_clk); 197 clk_disable(t2_clk);
198 198
199 clkevt.regs = tc->regs; 199 clkevt.regs = tc->regs;
200 clkevt.clk = t2_clk; 200 clkevt.clk = t2_clk;
diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c
index b52e1c078b99..7f5374dbefd9 100644
--- a/drivers/clocksource/timer-marco.c
+++ b/drivers/clocksource/timer-marco.c
@@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
199 199
200 action->dev_id = ce; 200 action->dev_id = ce;
201 BUG_ON(setup_irq(ce->irq, action)); 201 BUG_ON(setup_irq(ce->irq, action));
202 irq_set_affinity(action->irq, cpumask_of(cpu)); 202 irq_force_affinity(action->irq, cpumask_of(cpu));
203 203
204 clockevents_register_device(ce); 204 clockevents_register_device(ce);
205 return 0; 205 return 0;
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index ca81809d159d..7ce442148c3f 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -212,4 +212,9 @@ error_free:
212 return ret; 212 return ret;
213} 213}
214 214
215CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add); 215static void __init zevio_timer_init(struct device_node *node)
216{
217 BUG_ON(zevio_timer_add(node));
218}
219
220CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 148d707a1d43..ccdd4c7e748b 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -369,7 +369,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
369 return; 369 return;
370 370
371 /* Can only change if privileged. */ 371 /* Can only change if privileged. */
372 if (!capable(CAP_NET_ADMIN)) { 372 if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
373 err = EPERM; 373 err = EPERM;
374 goto out; 374 goto out;
375 } 375 }
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0e9cce82844b..580503513f0f 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -92,11 +92,7 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
92 92
93config ARM_HIGHBANK_CPUFREQ 93config ARM_HIGHBANK_CPUFREQ
94 tristate "Calxeda Highbank-based" 94 tristate "Calxeda Highbank-based"
95 depends on ARCH_HIGHBANK 95 depends on ARCH_HIGHBANK && GENERIC_CPUFREQ_CPU0 && REGULATOR
96 select GENERIC_CPUFREQ_CPU0
97 select PM_OPP
98 select REGULATOR
99
100 default m 96 default m
101 help 97 help
102 This adds the CPUFreq driver for Calxeda Highbank SoC 98 This adds the CPUFreq driver for Calxeda Highbank SoC
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 1bf6bbac3e03..09b9129c7bd3 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
130 return -ENOENT; 130 return -ENOENT;
131 } 131 }
132 132
133 cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0"); 133 cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
134 if (IS_ERR(cpu_reg)) { 134 if (IS_ERR(cpu_reg)) {
135 /* 135 /*
136 * If cpu0 regulator supply node is present, but regulator is 136 * If cpu0 regulator supply node is present, but regulator is
@@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
145 PTR_ERR(cpu_reg)); 145 PTR_ERR(cpu_reg));
146 } 146 }
147 147
148 cpu_clk = devm_clk_get(cpu_dev, NULL); 148 cpu_clk = clk_get(cpu_dev, NULL);
149 if (IS_ERR(cpu_clk)) { 149 if (IS_ERR(cpu_clk)) {
150 ret = PTR_ERR(cpu_clk); 150 ret = PTR_ERR(cpu_clk);
151 pr_err("failed to get cpu0 clock: %d\n", ret); 151 pr_err("failed to get cpu0 clock: %d\n", ret);
152 goto out_put_node; 152 goto out_put_reg;
153 } 153 }
154 154
155 ret = of_init_opp_table(cpu_dev); 155 ret = of_init_opp_table(cpu_dev);
156 if (ret) { 156 if (ret) {
157 pr_err("failed to init OPP table: %d\n", ret); 157 pr_err("failed to init OPP table: %d\n", ret);
158 goto out_put_node; 158 goto out_put_clk;
159 } 159 }
160 160
161 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); 161 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
162 if (ret) { 162 if (ret) {
163 pr_err("failed to init cpufreq table: %d\n", ret); 163 pr_err("failed to init cpufreq table: %d\n", ret);
164 goto out_put_node; 164 goto out_put_clk;
165 } 165 }
166 166
167 of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); 167 of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
@@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
216 216
217out_free_table: 217out_free_table:
218 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); 218 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
219out_put_clk:
220 if (!IS_ERR(cpu_clk))
221 clk_put(cpu_clk);
222out_put_reg:
223 if (!IS_ERR(cpu_reg))
224 regulator_put(cpu_reg);
219out_put_node: 225out_put_node:
220 of_node_put(np); 226 of_node_put(np);
221 return ret; 227 return ret;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index ba43991ba98a..e1c6433b16e0 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
366 break; 366 break;
367 367
368 case CPUFREQ_GOV_LIMITS: 368 case CPUFREQ_GOV_LIMITS:
369 mutex_lock(&dbs_data->mutex);
370 if (!cpu_cdbs->cur_policy) {
371 mutex_unlock(&dbs_data->mutex);
372 break;
373 }
369 mutex_lock(&cpu_cdbs->timer_mutex); 374 mutex_lock(&cpu_cdbs->timer_mutex);
370 if (policy->max < cpu_cdbs->cur_policy->cur) 375 if (policy->max < cpu_cdbs->cur_policy->cur)
371 __cpufreq_driver_target(cpu_cdbs->cur_policy, 376 __cpufreq_driver_target(cpu_cdbs->cur_policy,
@@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
375 policy->min, CPUFREQ_RELATION_L); 380 policy->min, CPUFREQ_RELATION_L);
376 dbs_check_cpu(dbs_data, cpu); 381 dbs_check_cpu(dbs_data, cpu);
377 mutex_unlock(&cpu_cdbs->timer_mutex); 382 mutex_unlock(&cpu_cdbs->timer_mutex);
383 mutex_unlock(&dbs_data->mutex);
378 break; 384 break;
379 } 385 }
380 return 0; 386 return 0;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 099967302bf2..eab8ccfe6beb 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,6 +37,7 @@
37#define BYT_RATIOS 0x66a 37#define BYT_RATIOS 0x66a
38#define BYT_VIDS 0x66b 38#define BYT_VIDS 0x66b
39#define BYT_TURBO_RATIOS 0x66c 39#define BYT_TURBO_RATIOS 0x66c
40#define BYT_TURBO_VIDS 0x66d
40 41
41 42
42#define FRAC_BITS 6 43#define FRAC_BITS 6
@@ -70,8 +71,9 @@ struct pstate_data {
70}; 71};
71 72
72struct vid_data { 73struct vid_data {
73 int32_t min; 74 int min;
74 int32_t max; 75 int max;
76 int turbo;
75 int32_t ratio; 77 int32_t ratio;
76}; 78};
77 79
@@ -359,14 +361,14 @@ static int byt_get_min_pstate(void)
359{ 361{
360 u64 value; 362 u64 value;
361 rdmsrl(BYT_RATIOS, value); 363 rdmsrl(BYT_RATIOS, value);
362 return (value >> 8) & 0xFF; 364 return (value >> 8) & 0x3F;
363} 365}
364 366
365static int byt_get_max_pstate(void) 367static int byt_get_max_pstate(void)
366{ 368{
367 u64 value; 369 u64 value;
368 rdmsrl(BYT_RATIOS, value); 370 rdmsrl(BYT_RATIOS, value);
369 return (value >> 16) & 0xFF; 371 return (value >> 16) & 0x3F;
370} 372}
371 373
372static int byt_get_turbo_pstate(void) 374static int byt_get_turbo_pstate(void)
@@ -393,6 +395,9 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
393 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 395 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
394 vid = fp_toint(vid_fp); 396 vid = fp_toint(vid_fp);
395 397
398 if (pstate > cpudata->pstate.max_pstate)
399 vid = cpudata->vid.turbo;
400
396 val |= vid; 401 val |= vid;
397 402
398 wrmsrl(MSR_IA32_PERF_CTL, val); 403 wrmsrl(MSR_IA32_PERF_CTL, val);
@@ -402,13 +407,17 @@ static void byt_get_vid(struct cpudata *cpudata)
402{ 407{
403 u64 value; 408 u64 value;
404 409
410
405 rdmsrl(BYT_VIDS, value); 411 rdmsrl(BYT_VIDS, value);
406 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); 412 cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
407 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); 413 cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
408 cpudata->vid.ratio = div_fp( 414 cpudata->vid.ratio = div_fp(
409 cpudata->vid.max - cpudata->vid.min, 415 cpudata->vid.max - cpudata->vid.min,
410 int_tofp(cpudata->pstate.max_pstate - 416 int_tofp(cpudata->pstate.max_pstate -
411 cpudata->pstate.min_pstate)); 417 cpudata->pstate.min_pstate));
418
419 rdmsrl(BYT_TURBO_VIDS, value);
420 cpudata->vid.turbo = value & 0x7f;
412} 421}
413 422
414 423
@@ -545,12 +554,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
545 554
546 if (pstate_funcs.get_vid) 555 if (pstate_funcs.get_vid)
547 pstate_funcs.get_vid(cpu); 556 pstate_funcs.get_vid(cpu);
548 557 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
549 /*
550 * goto max pstate so we don't slow up boot if we are built-in if we are
551 * a module we will take care of it during normal operation
552 */
553 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
554} 558}
555 559
556static inline void intel_pstate_calc_busy(struct cpudata *cpu, 560static inline void intel_pstate_calc_busy(struct cpudata *cpu,
@@ -695,11 +699,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
695 cpu = all_cpu_data[cpunum]; 699 cpu = all_cpu_data[cpunum];
696 700
697 intel_pstate_get_cpu_pstates(cpu); 701 intel_pstate_get_cpu_pstates(cpu);
698 if (!cpu->pstate.current_pstate) {
699 all_cpu_data[cpunum] = NULL;
700 kfree(cpu);
701 return -ENODATA;
702 }
703 702
704 cpu->cpu = cpunum; 703 cpu->cpu = cpunum;
705 704
@@ -710,7 +709,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
710 cpu->timer.expires = jiffies + HZ/100; 709 cpu->timer.expires = jiffies + HZ/100;
711 intel_pstate_busy_pid_reset(cpu); 710 intel_pstate_busy_pid_reset(cpu);
712 intel_pstate_sample(cpu); 711 intel_pstate_sample(cpu);
713 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
714 712
715 add_timer_on(&cpu->timer, cpunum); 713 add_timer_on(&cpu->timer, cpunum);
716 714
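
The Baytrail fixes narrow the masks used when extracting ratio and VID fields from the BYT_RATIOS/BYT_VIDS MSRs (6-bit fields, so 0x3f rather than 0xff/0x7f) and add the turbo VID register. The sketch below shows how an over-wide mask leaks neighbouring bits when pulling such fields out of a 64-bit MSR-style value; the register value and layout are fabricated for the example.

#include <stdint.h>
#include <stdio.h>

/* Extract a narrow field: shift first, then mask with the field width. */
static unsigned int field(uint64_t msr, unsigned int shift, unsigned int width)
{
	return (msr >> shift) & ((1u << width) - 1);
}

int main(void)
{
	uint64_t ratios = 0x00578c00ULL;	/* fabricated register value */

	printf("min ratio (6-bit mask): %u\n", field(ratios, 8, 6));
	printf("max ratio (6-bit mask): %u\n", field(ratios, 16, 6));
	/* With a 0xff mask, bits above the 6-bit field leak into the result. */
	printf("min ratio if masked with 0xff: %llu\n",
	       (unsigned long long)((ratios >> 8) & 0xff));
	return 0;
}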
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index d00e5d1abd25..5c4369b5d834 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
242 * Sets a new clock ratio. 242 * Sets a new clock ratio.
243 */ 243 */
244 244
245static void longhaul_setstate(struct cpufreq_policy *policy, 245static int longhaul_setstate(struct cpufreq_policy *policy,
246 unsigned int table_index) 246 unsigned int table_index)
247{ 247{
248 unsigned int mults_index; 248 unsigned int mults_index;
@@ -258,10 +258,12 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
258 /* Safety precautions */ 258 /* Safety precautions */
259 mult = mults[mults_index & 0x1f]; 259 mult = mults[mults_index & 0x1f];
260 if (mult == -1) 260 if (mult == -1)
261 return; 261 return -EINVAL;
262
262 speed = calc_speed(mult); 263 speed = calc_speed(mult);
263 if ((speed > highest_speed) || (speed < lowest_speed)) 264 if ((speed > highest_speed) || (speed < lowest_speed))
264 return; 265 return -EINVAL;
266
265 /* Voltage transition before frequency transition? */ 267 /* Voltage transition before frequency transition? */
266 if (can_scale_voltage && longhaul_index < table_index) 268 if (can_scale_voltage && longhaul_index < table_index)
267 dir = 1; 269 dir = 1;
@@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
269 freqs.old = calc_speed(longhaul_get_cpu_mult()); 271 freqs.old = calc_speed(longhaul_get_cpu_mult());
270 freqs.new = speed; 272 freqs.new = speed;
271 273
272 cpufreq_freq_transition_begin(policy, &freqs);
273
274 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", 274 pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
275 fsb, mult/10, mult%10, print_speed(speed/1000)); 275 fsb, mult/10, mult%10, print_speed(speed/1000));
276retry_loop: 276retry_loop:
@@ -385,12 +385,14 @@ retry_loop:
385 goto retry_loop; 385 goto retry_loop;
386 } 386 }
387 } 387 }
388 /* Report true CPU frequency */
389 cpufreq_freq_transition_end(policy, &freqs, 0);
390 388
391 if (!bm_timeout) 389 if (!bm_timeout) {
392 printk(KERN_INFO PFX "Warning: Timeout while waiting for " 390 printk(KERN_INFO PFX "Warning: Timeout while waiting for "
393 "idle PCI bus.\n"); 391 "idle PCI bus.\n");
392 return -EBUSY;
393 }
394
395 return 0;
394} 396}
395 397
396/* 398/*
@@ -631,9 +633,10 @@ static int longhaul_target(struct cpufreq_policy *policy,
631 unsigned int i; 633 unsigned int i;
632 unsigned int dir = 0; 634 unsigned int dir = 0;
633 u8 vid, current_vid; 635 u8 vid, current_vid;
636 int retval = 0;
634 637
635 if (!can_scale_voltage) 638 if (!can_scale_voltage)
636 longhaul_setstate(policy, table_index); 639 retval = longhaul_setstate(policy, table_index);
637 else { 640 else {
638 /* On test system voltage transitions exceeding single 641 /* On test system voltage transitions exceeding single
639 * step up or down were turning motherboard off. Both 642 * step up or down were turning motherboard off. Both
@@ -648,7 +651,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
648 while (i != table_index) { 651 while (i != table_index) {
649 vid = (longhaul_table[i].driver_data >> 8) & 0x1f; 652 vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
650 if (vid != current_vid) { 653 if (vid != current_vid) {
651 longhaul_setstate(policy, i); 654 retval = longhaul_setstate(policy, i);
652 current_vid = vid; 655 current_vid = vid;
653 msleep(200); 656 msleep(200);
654 } 657 }
@@ -657,10 +660,11 @@ static int longhaul_target(struct cpufreq_policy *policy,
657 else 660 else
658 i--; 661 i--;
659 } 662 }
660 longhaul_setstate(policy, table_index); 663 retval = longhaul_setstate(policy, table_index);
661 } 664 }
665
662 longhaul_index = table_index; 666 longhaul_index = table_index;
663 return 0; 667 return retval;
664} 668}
665 669
666 670
@@ -968,7 +972,15 @@ static void __exit longhaul_exit(void)
968 972
969 for (i = 0; i < numscales; i++) { 973 for (i = 0; i < numscales; i++) {
970 if (mults[i] == maxmult) { 974 if (mults[i] == maxmult) {
975 struct cpufreq_freqs freqs;
976
977 freqs.old = policy->cur;
978 freqs.new = longhaul_table[i].frequency;
979 freqs.flags = 0;
980
981 cpufreq_freq_transition_begin(policy, &freqs);
971 longhaul_setstate(policy, i); 982 longhaul_setstate(policy, i);
983 cpufreq_freq_transition_end(policy, &freqs, 0);
972 break; 984 break;
973 } 985 }
974 } 986 }
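
longhaul_setstate() now returns an int so callers can tell whether a transition was rejected (bad multiplier, out-of-range speed) or timed out waiting for an idle bus. The following is a minimal sketch of that error-propagation shape; the frequency bounds and helper name are invented.

#include <errno.h>
#include <stdio.h>

#define LOWEST_KHZ	400000
#define HIGHEST_KHZ	1200000

/* Returns 0 on success or a negative errno-style code. */
static int setstate(int mult_x10, unsigned int fsb_khz)
{
	unsigned int speed;

	if (mult_x10 <= 0)
		return -EINVAL;		/* unknown multiplier */

	speed = fsb_khz * mult_x10 / 10;
	if (speed < LOWEST_KHZ || speed > HIGHEST_KHZ)
		return -EINVAL;		/* outside the supported range */

	/* ... program the hardware; a bus timeout would return -EBUSY ... */
	printf("set %u kHz\n", speed);
	return 0;
}

int main(void)
{
	int ret = setstate(90, 133000);	/* 9.0x multiplier, 133 MHz FSB */

	if (ret)
		fprintf(stderr, "transition failed: %d\n", ret);

	return setstate(200, 133000) == -EINVAL ? 0 : 1;	/* 20x is too fast */
}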
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index f0bc31f5db27..d4add8621944 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -62,7 +62,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
62 set_cpus_allowed_ptr(current, &cpus_allowed); 62 set_cpus_allowed_ptr(current, &cpus_allowed);
63 63
64 /* setting the cpu frequency */ 64 /* setting the cpu frequency */
65 clk_set_rate(policy->clk, freq); 65 clk_set_rate(policy->clk, freq * 1000);
66 66
67 return 0; 67 return 0;
68} 68}
@@ -92,7 +92,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
92 i++) 92 i++)
93 loongson2_clockmod_table[i].frequency = (rate * i) / 8; 93 loongson2_clockmod_table[i].frequency = (rate * i) / 8;
94 94
95 ret = clk_set_rate(cpuclk, rate); 95 ret = clk_set_rate(cpuclk, rate * 1000);
96 if (ret) { 96 if (ret) {
97 clk_put(cpuclk); 97 clk_put(cpuclk);
98 return ret; 98 return ret;
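
The loongson2 fix multiplies the cpufreq table frequency (kHz) by 1000 before handing it to clk_set_rate(), which takes Hz. A trivial sketch of that unit conversion, with an example table value chosen arbitrarily:

#include <stdio.h>

/* cpufreq frequency tables are in kHz; the clk API takes Hz. */
static unsigned long khz_to_hz(unsigned int khz)
{
	return (unsigned long)khz * 1000UL;
}

int main(void)
{
	unsigned int table_khz = 800000;	/* 800 MHz table entry */

	printf("clk_set_rate(cpuclk, %lu)\n", khz_to_hz(table_khz));
	return 0;
}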
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 49f120e1bc7b..78904e6ca4a0 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -138,22 +138,14 @@ static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
138static int powernow_k6_target(struct cpufreq_policy *policy, 138static int powernow_k6_target(struct cpufreq_policy *policy,
139 unsigned int best_i) 139 unsigned int best_i)
140{ 140{
141 struct cpufreq_freqs freqs;
142 141
143 if (clock_ratio[best_i].driver_data > max_multiplier) { 142 if (clock_ratio[best_i].driver_data > max_multiplier) {
144 printk(KERN_ERR PFX "invalid target frequency\n"); 143 printk(KERN_ERR PFX "invalid target frequency\n");
145 return -EINVAL; 144 return -EINVAL;
146 } 145 }
147 146
148 freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
149 freqs.new = busfreq * clock_ratio[best_i].driver_data;
150
151 cpufreq_freq_transition_begin(policy, &freqs);
152
153 powernow_k6_set_cpu_multiplier(best_i); 147 powernow_k6_set_cpu_multiplier(best_i);
154 148
155 cpufreq_freq_transition_end(policy, &freqs, 0);
156
157 return 0; 149 return 0;
158} 150}
159 151
@@ -227,9 +219,20 @@ have_busfreq:
227static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) 219static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
228{ 220{
229 unsigned int i; 221 unsigned int i;
230 for (i = 0; i < 8; i++) { 222
231 if (i == max_multiplier) 223 for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
224 if (clock_ratio[i].driver_data == max_multiplier) {
225 struct cpufreq_freqs freqs;
226
227 freqs.old = policy->cur;
228 freqs.new = clock_ratio[i].frequency;
229 freqs.flags = 0;
230
231 cpufreq_freq_transition_begin(policy, &freqs);
232 powernow_k6_target(policy, i); 232 powernow_k6_target(policy, i);
233 cpufreq_freq_transition_end(policy, &freqs, 0);
234 break;
235 }
233 } 236 }
234 return 0; 237 return 0;
235} 238}
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index f911645c3f6d..e61e224475ad 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -269,8 +269,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
269 269
270 freqs.new = powernow_table[index].frequency; 270 freqs.new = powernow_table[index].frequency;
271 271
272 cpufreq_freq_transition_begin(policy, &freqs);
273
274 /* Now do the magic poking into the MSRs. */ 272 /* Now do the magic poking into the MSRs. */
275 273
276 if (have_a0 == 1) /* A0 errata 5 */ 274 if (have_a0 == 1) /* A0 errata 5 */
@@ -290,8 +288,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
290 if (have_a0 == 1) 288 if (have_a0 == 1)
291 local_irq_enable(); 289 local_irq_enable();
292 290
293 cpufreq_freq_transition_end(policy, &freqs, 0);
294
295 return 0; 291 return 0;
296} 292}
297 293
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 9edccc63245d..af4968813e76 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -29,6 +29,7 @@
29 29
30#include <asm/cputhreads.h> 30#include <asm/cputhreads.h>
31#include <asm/reg.h> 31#include <asm/reg.h>
32#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
32 33
33#define POWERNV_MAX_PSTATES 256 34#define POWERNV_MAX_PSTATES 256
34 35
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index b7e677be1df0..0af618abebaf 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -138,6 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
138 struct cpufreq_frequency_table *table; 138 struct cpufreq_frequency_table *table;
139 struct cpu_data *data; 139 struct cpu_data *data;
140 unsigned int cpu = policy->cpu; 140 unsigned int cpu = policy->cpu;
141 u64 transition_latency_hz;
141 142
142 np = of_get_cpu_node(cpu, NULL); 143 np = of_get_cpu_node(cpu, NULL);
143 if (!np) 144 if (!np)
@@ -205,8 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
205 for_each_cpu(i, per_cpu(cpu_mask, cpu)) 206 for_each_cpu(i, per_cpu(cpu_mask, cpu))
206 per_cpu(cpu_data, i) = data; 207 per_cpu(cpu_data, i) = data;
207 208
209 transition_latency_hz = 12ULL * NSEC_PER_SEC;
208 policy->cpuinfo.transition_latency = 210 policy->cpuinfo.transition_latency =
209 (12 * NSEC_PER_SEC) / fsl_get_sys_freq(); 211 do_div(transition_latency_hz, fsl_get_sys_freq());
212
210 of_node_put(np); 213 of_node_put(np);
211 214
212 return 0; 215 return 0;
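
The transition-latency fix computes 12 * NSEC_PER_SEC in a u64 and divides with do_div(), because 12 seconds in nanoseconds (1.2e10) does not fit in 32 bits. The userspace sketch below demonstrates the overflow and the 64-bit fix; plain division stands in for do_div, and the platform frequency is made up.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000UL

int main(void)
{
	uint32_t sys_freq = 600000000;	/* 600 MHz platform clock */
	uint32_t secs = 12;
	uint32_t nsec = (uint32_t)NSEC_PER_SEC;

	/* 32-bit product wraps: 12 * 1e9 does not fit in a u32. */
	uint32_t wrong = secs * nsec / sys_freq;

	/* Promote to 64 bits before multiplying, then divide. */
	uint64_t latency_hz = 12ULL * NSEC_PER_SEC;
	uint32_t right = (uint32_t)(latency_hz / sys_freq);

	printf("32-bit math: %u ns, 64-bit math: %u ns\n", wrong, right);
	return 0;
}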
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 8d045afa7fb4..6f9dfa80563a 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -60,9 +60,7 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
60 policy->max = policy->cpuinfo.max_freq = 1000000; 60 policy->max = policy->cpuinfo.max_freq = 1000000;
61 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 61 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
62 policy->clk = clk_get(NULL, "MAIN_CLK"); 62 policy->clk = clk_get(NULL, "MAIN_CLK");
63 if (IS_ERR(policy->clk)) 63 return PTR_ERR_OR_ZERO(policy->clk);
64 return PTR_ERR(policy->clk);
65 return 0;
66} 64}
67 65
68static struct cpufreq_driver ucv2_driver = { 66static struct cpufreq_driver ucv2_driver = {
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 9f25f5296029..0eabd81e1a90 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -16,9 +16,13 @@
16 char *tmp; \ 16 char *tmp; \
17 \ 17 \
18 tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \ 18 tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
19 sprintf(tmp, format, param); \ 19 if (likely(tmp)) { \
20 strcat(str, tmp); \ 20 sprintf(tmp, format, param); \
21 kfree(tmp); \ 21 strcat(str, tmp); \
22 kfree(tmp); \
23 } else { \
24 strcat(str, "kmalloc failure in SPRINTFCAT"); \
25 } \
22} 26}
23 27
24static void report_jump_idx(u32 status, char *outstr) 28static void report_jump_idx(u32 status, char *outstr)
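
The SPRINTFCAT fix checks the kmalloc() result before formatting into it and appends a fallback message when the allocation fails. A standalone sketch of the same guard around a temporary formatting buffer follows; the helper name and buffer sizes are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Append "fmt % param" to str, tolerating allocation failure. */
static void sprintfcat(char *str, size_t strsz, const char *fmt, unsigned int param)
{
	size_t need = strlen(fmt) + 16;
	char *tmp = malloc(need);

	if (tmp) {
		snprintf(tmp, need, fmt, param);
		strncat(str, tmp, strsz - strlen(str) - 1);
		free(tmp);
	} else {
		strncat(str, "allocation failure in sprintfcat",
			strsz - strlen(str) - 1);
	}
}

int main(void)
{
	char out[128] = "error: ";

	sprintfcat(out, sizeof(out), "status 0x%08x", 0x4000001au);
	puts(out);
	return 0;
}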
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ba06d1d2f99e..5c5863842de9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -197,7 +197,7 @@ config AMCC_PPC440SPE_ADMA
197 197
198config TIMB_DMA 198config TIMB_DMA
199 tristate "Timberdale FPGA DMA support" 199 tristate "Timberdale FPGA DMA support"
200 depends on MFD_TIMBERDALE || HAS_IOMEM 200 depends on MFD_TIMBERDALE
201 select DMA_ENGINE 201 select DMA_ENGINE
202 help 202 help
203 Enable support for the Timberdale FPGA DMA engine. 203 Enable support for the Timberdale FPGA DMA engine.
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index a886713937fd..d5d30ed863ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
1009 dma_unmap_page(dev, unmap->addr[i], unmap->len, 1009 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1010 DMA_BIDIRECTIONAL); 1010 DMA_BIDIRECTIONAL);
1011 } 1011 }
1012 cnt = unmap->map_cnt;
1012 mempool_free(unmap, __get_unmap_pool(cnt)->pool); 1013 mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1013} 1014}
1014 1015
@@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1074 memset(unmap, 0, sizeof(*unmap)); 1075 memset(unmap, 0, sizeof(*unmap));
1075 kref_init(&unmap->kref); 1076 kref_init(&unmap->kref);
1076 unmap->dev = dev; 1077 unmap->dev = dev;
1078 unmap->map_cnt = nr;
1077 1079
1078 return unmap; 1080 return unmap;
1079} 1081}
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cfdbb92aae1d..7a740769c2fa 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1548 /* Disable BLOCK interrupts as well */ 1548 /* Disable BLOCK interrupts as well */
1549 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1549 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1550 1550
1551 err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
1552 IRQF_SHARED, "dw_dmac", dw);
1553 if (err)
1554 return err;
1555
1556 /* Create a pool of consistent memory blocks for hardware descriptors */ 1551 /* Create a pool of consistent memory blocks for hardware descriptors */
1557 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, 1552 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
1558 sizeof(struct dw_desc), 4, 0); 1553 sizeof(struct dw_desc), 4, 0);
@@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
1563 1558
1564 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); 1559 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1565 1560
1561 err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
1562 "dw_dmac", dw);
1563 if (err)
1564 return err;
1565
1566 INIT_LIST_HEAD(&dw->dma.channels); 1566 INIT_LIST_HEAD(&dw->dma.channels);
1567 for (i = 0; i < nr_channels; i++) { 1567 for (i = 0; i < nr_channels; i++) {
1568 struct dw_dma_chan *dwc = &dw->chan[i]; 1568 struct dw_dma_chan *dwc = &dw->chan[i];
@@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
1667 dw_dma_off(dw); 1667 dw_dma_off(dw);
1668 dma_async_device_unregister(&dw->dma); 1668 dma_async_device_unregister(&dw->dma);
1669 1669
1670 free_irq(chip->irq, dw);
1670 tasklet_kill(&dw->tasklet); 1671 tasklet_kill(&dw->tasklet);
1671 1672
1672 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, 1673 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index cd04eb7b182e..926360c2db6a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -182,11 +182,13 @@ static void edma_execute(struct edma_chan *echan)
182 echan->ecc->dummy_slot); 182 echan->ecc->dummy_slot);
183 } 183 }
184 184
185 edma_resume(echan->ch_num);
186
187 if (edesc->processed <= MAX_NR_SG) { 185 if (edesc->processed <= MAX_NR_SG) {
188 dev_dbg(dev, "first transfer starting %d\n", echan->ch_num); 186 dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
189 edma_start(echan->ch_num); 187 edma_start(echan->ch_num);
188 } else {
189 dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
190 echan->ch_num, edesc->processed);
191 edma_resume(echan->ch_num);
190 } 192 }
191 193
192 /* 194 /*
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 381e793184ba..b396a7fb53ab 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -968,7 +968,17 @@ static struct platform_driver fsl_edma_driver = {
968 .remove = fsl_edma_remove, 968 .remove = fsl_edma_remove,
969}; 969};
970 970
971module_platform_driver(fsl_edma_driver); 971static int __init fsl_edma_init(void)
972{
973 return platform_driver_register(&fsl_edma_driver);
974}
975subsys_initcall(fsl_edma_init);
976
977static void __exit fsl_edma_exit(void)
978{
979 platform_driver_unregister(&fsl_edma_driver);
980}
981module_exit(fsl_edma_exit);
972 982
973MODULE_ALIAS("platform:fsl-edma"); 983MODULE_ALIAS("platform:fsl-edma");
974MODULE_DESCRIPTION("Freescale eDMA engine driver"); 984MODULE_DESCRIPTION("Freescale eDMA engine driver");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 766b68ed505c..394cbc5c93e3 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
191 191
192static void mv_chan_activate(struct mv_xor_chan *chan) 192static void mv_chan_activate(struct mv_xor_chan *chan)
193{ 193{
194 u32 activation;
195
196 dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); 194 dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
197 activation = readl_relaxed(XOR_ACTIVATION(chan)); 195
198 activation |= 0x1; 196 /* writel ensures all descriptors are flushed before activation */
199 writel_relaxed(activation, XOR_ACTIVATION(chan)); 197 writel(BIT(0), XOR_ACTIVATION(chan));
200} 198}
201 199
202static char mv_chan_is_busy(struct mv_xor_chan *chan) 200static char mv_chan_is_busy(struct mv_xor_chan *chan)
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ab26d46bbe15..5ebdfbc1051e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -113,11 +113,9 @@ struct sa11x0_dma_phy {
113 struct sa11x0_dma_desc *txd_load; 113 struct sa11x0_dma_desc *txd_load;
114 unsigned sg_done; 114 unsigned sg_done;
115 struct sa11x0_dma_desc *txd_done; 115 struct sa11x0_dma_desc *txd_done;
116#ifdef CONFIG_PM_SLEEP
117 u32 dbs[2]; 116 u32 dbs[2];
118 u32 dbt[2]; 117 u32 dbt[2];
119 u32 dcsr; 118 u32 dcsr;
120#endif
121}; 119};
122 120
123struct sa11x0_dma_dev { 121struct sa11x0_dma_dev {
@@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
984 return 0; 982 return 0;
985} 983}
986 984
987#ifdef CONFIG_PM_SLEEP
988static int sa11x0_dma_suspend(struct device *dev) 985static int sa11x0_dma_suspend(struct device *dev)
989{ 986{
990 struct sa11x0_dma_dev *d = dev_get_drvdata(dev); 987 struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
@@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev)
1054 1051
1055 return 0; 1052 return 0;
1056} 1053}
1057#endif
1058 1054
1059static const struct dev_pm_ops sa11x0_dma_pm_ops = { 1055static const struct dev_pm_ops sa11x0_dma_pm_ops = {
1060 .suspend_noirq = sa11x0_dma_suspend, 1056 .suspend_noirq = sa11x0_dma_suspend,
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index a1bd8298d55f..03f7820fa333 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -666,7 +666,7 @@ static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
666 struct sirfsoc_dma *sdma = ofdma->of_dma_data; 666 struct sirfsoc_dma *sdma = ofdma->of_dma_data;
667 unsigned int request = dma_spec->args[0]; 667 unsigned int request = dma_spec->args[0];
668 668
669 if (request > SIRFSOC_DMA_CHANNELS) 669 if (request >= SIRFSOC_DMA_CHANNELS)
670 return NULL; 670 return NULL;
671 671
672 return dma_get_slave_channel(&sdma->channels[request].chan); 672 return dma_get_slave_channel(&sdma->channels[request].chan);
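Editorial note: the sirf-dma fix above is a classic off-by-one in a bounds check: with N channels the valid request indices are 0..N-1, so '>' still lets request == N through and indexes one element past the end of the array. A tiny illustration with a hypothetical 16-entry table:

#include <linux/errno.h>

#define DEMO_NR_CHANNELS	16
static int demo_chan[DEMO_NR_CHANNELS];

static int demo_lookup(unsigned int idx)
{
	/* 'idx > DEMO_NR_CHANNELS' would still accept idx == 16. */
	if (idx >= DEMO_NR_CHANNELS)
		return -EINVAL;
	return demo_chan[idx];
}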
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index c98764aeeec6..f477308b6e9c 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation)
237 237
238#define LOCAL_BUS 0xffc0 238#define LOCAL_BUS 0xffc0
239 239
240/* arbitrarily chosen maximum range for physical DMA: 128 TB */ 240/* OHCI-1394's default upper bound for physical DMA: 4 GB */
241#define FW_MAX_PHYSICAL_RANGE (128ULL << 40) 241#define FW_MAX_PHYSICAL_RANGE (1ULL << 32)
242 242
243void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); 243void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
244void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); 244void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 8db663219560..586f2f7f6993 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev,
3716 version >> 16, version & 0xff, ohci->card.index, 3716 version >> 16, version & 0xff, ohci->card.index,
3717 ohci->n_ir, ohci->n_it, ohci->quirks, 3717 ohci->n_ir, ohci->n_it, ohci->quirks,
3718 reg_read(ohci, OHCI1394_PhyUpperBound) ? 3718 reg_read(ohci, OHCI1394_PhyUpperBound) ?
3719 ", >4 GB phys DMA" : ""); 3719 ", physUB" : "");
3720 3720
3721 return 0; 3721 return 0;
3722 3722
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 3ee852c9925b..071c2c969eec 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -756,6 +756,7 @@ static const struct {
756 */ 756 */
757 { ACPI_SIG_IBFT }, 757 { ACPI_SIG_IBFT },
758 { "iBFT" }, 758 { "iBFT" },
759 { "BIFT" }, /* Broadcom iSCSI Offload */
759}; 760};
760 761
761static void __init acpi_find_ibft_region(void) 762static void __init acpi_find_ibft_region(void)
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index e73c6755a5eb..70304220a479 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -305,6 +305,8 @@ static struct ichx_desc ich6_desc = {
305 305
306 .ngpio = 50, 306 .ngpio = 50,
307 .have_blink = true, 307 .have_blink = true,
308 .regs = ichx_regs,
309 .reglen = ichx_reglen,
308}; 310};
309 311
310/* Intel 3100 */ 312/* Intel 3100 */
@@ -324,6 +326,8 @@ static struct ichx_desc i3100_desc = {
324 .uses_gpe0 = true, 326 .uses_gpe0 = true,
325 327
326 .ngpio = 50, 328 .ngpio = 50,
329 .regs = ichx_regs,
330 .reglen = ichx_reglen,
327}; 331};
328 332
329/* ICH7 and ICH8-based */ 333/* ICH7 and ICH8-based */
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 99a68310e7c0..3d53fd6880d1 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -894,9 +894,11 @@ static int mcp23s08_probe(struct spi_device *spi)
894 dev_err(&spi->dev, "invalid spi-present-mask\n"); 894 dev_err(&spi->dev, "invalid spi-present-mask\n");
895 return -ENODEV; 895 return -ENODEV;
896 } 896 }
897 897 for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
898 for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) 898 if ((spi_present_mask & (1 << addr)))
899 chips++;
899 pullups[addr] = 0; 900 pullups[addr] = 0;
901 }
900 } else { 902 } else {
901 type = spi_get_device_id(spi)->driver_data; 903 type = spi_get_device_id(spi)->driver_data;
902 pdata = dev_get_platdata(&spi->dev); 904 pdata = dev_get_platdata(&spi->dev);
@@ -919,12 +921,12 @@ static int mcp23s08_probe(struct spi_device *spi)
919 pullups[addr] = pdata->chip[addr].pullups; 921 pullups[addr] = pdata->chip[addr].pullups;
920 } 922 }
921 923
922 if (!chips)
923 return -ENODEV;
924
925 base = pdata->base; 924 base = pdata->base;
926 } 925 }
927 926
927 if (!chips)
928 return -ENODEV;
929
928 data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08), 930 data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08),
929 GFP_KERNEL); 931 GFP_KERNEL);
930 if (!data) 932 if (!data)
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index e9a0415834ea..30bcc539425d 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -2,7 +2,7 @@
2 * SPEAr platform SPI chipselect abstraction over gpiolib 2 * SPEAr platform SPI chipselect abstraction over gpiolib
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Shiraz Hashim <shiraz.hashim@st.com> 5 * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -205,6 +205,6 @@ static int __init spics_gpio_init(void)
205} 205}
206subsys_initcall(spics_gpio_init); 206subsys_initcall(spics_gpio_init);
207 207
208MODULE_AUTHOR("Shiraz Hashim <shiraz.hashim@st.com>"); 208MODULE_AUTHOR("Shiraz Hashim <shiraz.linux.kernel@gmail.com>");
209MODULE_DESCRIPTION("ST Microlectronics SPEAr SPI Chip Select Abstraction"); 209MODULE_DESCRIPTION("ST Microlectronics SPEAr SPI Chip Select Abstraction");
210MODULE_LICENSE("GPL"); 210MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index bf0f8b476696..401add28933f 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -233,7 +233,7 @@ static void acpi_gpiochip_request_interrupts(struct acpi_gpio_chip *acpi_gpio)
233{ 233{
234 struct gpio_chip *chip = acpi_gpio->chip; 234 struct gpio_chip *chip = acpi_gpio->chip;
235 235
236 if (!chip->dev || !chip->to_irq) 236 if (!chip->to_irq)
237 return; 237 return;
238 238
239 INIT_LIST_HEAD(&acpi_gpio->events); 239 INIT_LIST_HEAD(&acpi_gpio->events);
@@ -253,7 +253,7 @@ static void acpi_gpiochip_free_interrupts(struct acpi_gpio_chip *acpi_gpio)
253 struct acpi_gpio_event *event, *ep; 253 struct acpi_gpio_event *event, *ep;
254 struct gpio_chip *chip = acpi_gpio->chip; 254 struct gpio_chip *chip = acpi_gpio->chip;
255 255
256 if (!chip->dev || !chip->to_irq) 256 if (!chip->to_irq)
257 return; 257 return;
258 258
259 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { 259 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
@@ -451,7 +451,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
451 if (function == ACPI_WRITE) 451 if (function == ACPI_WRITE)
452 gpiod_set_raw_value(desc, !!((1 << i) & *value)); 452 gpiod_set_raw_value(desc, !!((1 << i) & *value));
453 else 453 else
454 *value |= gpiod_get_raw_value(desc) << i; 454 *value |= (u64)gpiod_get_raw_value(desc) << i;
455 } 455 }
456 456
457out: 457out:
@@ -501,6 +501,9 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
501 acpi_handle handle; 501 acpi_handle handle;
502 acpi_status status; 502 acpi_status status;
503 503
504 if (!chip || !chip->dev)
505 return;
506
504 handle = ACPI_HANDLE(chip->dev); 507 handle = ACPI_HANDLE(chip->dev);
505 if (!handle) 508 if (!handle)
506 return; 509 return;
@@ -531,6 +534,9 @@ void acpi_gpiochip_remove(struct gpio_chip *chip)
531 acpi_handle handle; 534 acpi_handle handle;
532 acpi_status status; 535 acpi_status status;
533 536
537 if (!chip || !chip->dev)
538 return;
539
534 handle = ACPI_HANDLE(chip->dev); 540 handle = ACPI_HANDLE(chip->dev);
535 if (!handle) 541 if (!handle)
536 return; 542 return;
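Editorial note: the (u64) cast added in acpi_gpio_adr_space_handler() matters because gpiod_get_raw_value() returns an int; without widening, the shift is evaluated in 32 bits and misbehaves once i reaches 32, even though *value is 64 bits wide. A hypothetical helper showing the pitfall:

#include <linux/types.h>

static void demo_set_bit(u64 *value, unsigned int i, int bit)
{
	/* Without the cast, 'bit << i' is an int shift and i >= 32 breaks. */
	*value |= (u64)bit << i;
}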
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 761013f8b82f..f48817d97480 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1387,8 +1387,8 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
1387{ 1387{
1388 struct gpio_chip *chip = d->host_data; 1388 struct gpio_chip *chip = d->host_data;
1389 1389
1390 irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
1391 irq_set_chip_data(irq, chip); 1390 irq_set_chip_data(irq, chip);
1391 irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
1392#ifdef CONFIG_ARM 1392#ifdef CONFIG_ARM
1393 set_irq_flags(irq, IRQF_VALID); 1393 set_irq_flags(irq, IRQF_VALID);
1394#else 1394#else
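Editorial note: the gpiolib reordering above sets the chip data before installing the chip and handler, because the moment the handler is live the irqchip callbacks can run and fetch that data. A hypothetical callback illustrating the dependency:

#include <linux/irq.h>
#include <linux/gpio/driver.h>
#include <linux/printk.h>

static void demo_gpio_irq_mask(struct irq_data *d)
{
	/* Would be NULL if chip data were set only after the handler. */
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	pr_debug("masking hwirq %lu on %s\n", d->hwirq, chip->label);
}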
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 9d25dbbe6771..48e38ba22783 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -23,7 +23,7 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
23 23
24drm-usb-y := drm_usb.o 24drm-usb-y := drm_usb.o
25 25
26drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o 26drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o
27drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 27drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
28drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o 28drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
29drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o 29drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 977cfb35837a..635f6ffc27c2 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -572,7 +572,7 @@ static u32 cbr_scan2(struct ast_private *ast)
572 for (loop = 0; loop < CBR_PASSNUM2; loop++) { 572 for (loop = 0; loop < CBR_PASSNUM2; loop++) {
573 if ((data = cbr_test2(ast)) != 0) { 573 if ((data = cbr_test2(ast)) != 0) {
574 data2 &= data; 574 data2 &= data;
575 if (!data) 575 if (!data2)
576 return 0; 576 return 0;
577 break; 577 break;
578 } 578 }
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 741965c001a6..7eb52dd44b01 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -1,5 +1,6 @@
1#include <linux/io.h> 1#include <linux/io.h>
2#include <linux/fb.h> 2#include <linux/fb.h>
3#include <linux/console.h>
3 4
4#include <drm/drmP.h> 5#include <drm/drmP.h>
5#include <drm/drm_crtc.h> 6#include <drm/drm_crtc.h>
@@ -87,8 +88,6 @@ struct bochs_device {
87 struct bochs_framebuffer gfb; 88 struct bochs_framebuffer gfb;
88 struct drm_fb_helper helper; 89 struct drm_fb_helper helper;
89 int size; 90 int size;
90 int x1, y1, x2, y2; /* dirty rect */
91 spinlock_t dirty_lock;
92 bool initialized; 91 bool initialized;
93 } fb; 92 } fb;
94}; 93};
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 395bba261c9a..9c13df29fd20 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -95,6 +95,49 @@ static struct drm_driver bochs_driver = {
95}; 95};
96 96
97/* ---------------------------------------------------------------------- */ 97/* ---------------------------------------------------------------------- */
98/* pm interface */
99
100static int bochs_pm_suspend(struct device *dev)
101{
102 struct pci_dev *pdev = to_pci_dev(dev);
103 struct drm_device *drm_dev = pci_get_drvdata(pdev);
104 struct bochs_device *bochs = drm_dev->dev_private;
105
106 drm_kms_helper_poll_disable(drm_dev);
107
108 if (bochs->fb.initialized) {
109 console_lock();
110 fb_set_suspend(bochs->fb.helper.fbdev, 1);
111 console_unlock();
112 }
113
114 return 0;
115}
116
117static int bochs_pm_resume(struct device *dev)
118{
119 struct pci_dev *pdev = to_pci_dev(dev);
120 struct drm_device *drm_dev = pci_get_drvdata(pdev);
121 struct bochs_device *bochs = drm_dev->dev_private;
122
123 drm_helper_resume_force_mode(drm_dev);
124
125 if (bochs->fb.initialized) {
126 console_lock();
127 fb_set_suspend(bochs->fb.helper.fbdev, 0);
128 console_unlock();
129 }
130
131 drm_kms_helper_poll_enable(drm_dev);
132 return 0;
133}
134
135static const struct dev_pm_ops bochs_pm_ops = {
136 SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend,
137 bochs_pm_resume)
138};
139
140/* ---------------------------------------------------------------------- */
98/* pci interface */ 141/* pci interface */
99 142
100static int bochs_kick_out_firmware_fb(struct pci_dev *pdev) 143static int bochs_kick_out_firmware_fb(struct pci_dev *pdev)
@@ -155,6 +198,7 @@ static struct pci_driver bochs_pci_driver = {
155 .id_table = bochs_pci_tbl, 198 .id_table = bochs_pci_tbl,
156 .probe = bochs_pci_probe, 199 .probe = bochs_pci_probe,
157 .remove = bochs_pci_remove, 200 .remove = bochs_pci_remove,
201 .driver.pm = &bochs_pm_ops,
158}; 202};
159 203
160/* ---------------------------------------------------------------------- */ 204/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 4da5206b7cc9..561b84474122 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -190,7 +190,6 @@ int bochs_fbdev_init(struct bochs_device *bochs)
190 int ret; 190 int ret;
191 191
192 bochs->fb.helper.funcs = &bochs_fb_helper_funcs; 192 bochs->fb.helper.funcs = &bochs_fb_helper_funcs;
193 spin_lock_init(&bochs->fb.dirty_lock);
194 193
195 ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper, 194 ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
196 1, 1); 195 1, 1);
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 953fc8aea69c..08ce520f61a5 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/console.h> 12#include <linux/console.h>
13#include <drm/drmP.h> 13#include <drm/drmP.h>
14#include <drm/drm_crtc_helper.h>
14 15
15#include "cirrus_drv.h" 16#include "cirrus_drv.h"
16 17
@@ -75,6 +76,41 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
75 drm_put_dev(dev); 76 drm_put_dev(dev);
76} 77}
77 78
79static int cirrus_pm_suspend(struct device *dev)
80{
81 struct pci_dev *pdev = to_pci_dev(dev);
82 struct drm_device *drm_dev = pci_get_drvdata(pdev);
83 struct cirrus_device *cdev = drm_dev->dev_private;
84
85 drm_kms_helper_poll_disable(drm_dev);
86
87 if (cdev->mode_info.gfbdev) {
88 console_lock();
89 fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 1);
90 console_unlock();
91 }
92
93 return 0;
94}
95
96static int cirrus_pm_resume(struct device *dev)
97{
98 struct pci_dev *pdev = to_pci_dev(dev);
99 struct drm_device *drm_dev = pci_get_drvdata(pdev);
100 struct cirrus_device *cdev = drm_dev->dev_private;
101
102 drm_helper_resume_force_mode(drm_dev);
103
104 if (cdev->mode_info.gfbdev) {
105 console_lock();
106 fb_set_suspend(cdev->mode_info.gfbdev->helper.fbdev, 0);
107 console_unlock();
108 }
109
110 drm_kms_helper_poll_enable(drm_dev);
111 return 0;
112}
113
78static const struct file_operations cirrus_driver_fops = { 114static const struct file_operations cirrus_driver_fops = {
79 .owner = THIS_MODULE, 115 .owner = THIS_MODULE,
80 .open = drm_open, 116 .open = drm_open,
@@ -103,11 +139,17 @@ static struct drm_driver driver = {
103 .dumb_destroy = drm_gem_dumb_destroy, 139 .dumb_destroy = drm_gem_dumb_destroy,
104}; 140};
105 141
142static const struct dev_pm_ops cirrus_pm_ops = {
143 SET_SYSTEM_SLEEP_PM_OPS(cirrus_pm_suspend,
144 cirrus_pm_resume)
145};
146
106static struct pci_driver cirrus_pci_driver = { 147static struct pci_driver cirrus_pci_driver = {
107 .name = DRIVER_NAME, 148 .name = DRIVER_NAME,
108 .id_table = pciidlist, 149 .id_table = pciidlist,
109 .probe = cirrus_pci_probe, 150 .probe = cirrus_pci_probe,
110 .remove = cirrus_pci_remove, 151 .remove = cirrus_pci_remove,
152 .driver.pm = &cirrus_pm_ops,
111}; 153};
112 154
113static int __init cirrus_init(void) 155static int __init cirrus_init(void)
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 2d64aea83df2..f59433b7610c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -308,6 +308,9 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
308 308
309 WREG_HDR(hdr); 309 WREG_HDR(hdr);
310 cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0); 310 cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
311
312 /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
313 outb(0x20, 0x3c0);
311 return 0; 314 return 0;
312} 315}
313 316
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index c43825e8f5c1..df281b54db01 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -72,147 +72,6 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
72} 72}
73EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head); 73EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
74 74
75static bool drm_kms_helper_poll = true;
76module_param_named(poll, drm_kms_helper_poll, bool, 0600);
77
78static void drm_mode_validate_flag(struct drm_connector *connector,
79 int flags)
80{
81 struct drm_display_mode *mode;
82
83 if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
84 DRM_MODE_FLAG_3D_MASK))
85 return;
86
87 list_for_each_entry(mode, &connector->modes, head) {
88 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
89 !(flags & DRM_MODE_FLAG_INTERLACE))
90 mode->status = MODE_NO_INTERLACE;
91 if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
92 !(flags & DRM_MODE_FLAG_DBLSCAN))
93 mode->status = MODE_NO_DBLESCAN;
94 if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
95 !(flags & DRM_MODE_FLAG_3D_MASK))
96 mode->status = MODE_NO_STEREO;
97 }
98
99 return;
100}
101
102/**
103 * drm_helper_probe_single_connector_modes - get complete set of display modes
104 * @connector: connector to probe
105 * @maxX: max width for modes
106 * @maxY: max height for modes
107 *
108 * Based on the helper callbacks implemented by @connector try to detect all
109 * valid modes. Modes will first be added to the connector's probed_modes list,
110 * then culled (based on validity and the @maxX, @maxY parameters) and put into
111 * the normal modes list.
112 *
113 * Intended to be used as a generic implementation of the ->fill_modes()
114 * @connector vfunc for drivers that use the crtc helpers for output mode
115 * filtering and detection.
116 *
117 * Returns:
118 * The number of modes found on @connector.
119 */
120int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
121 uint32_t maxX, uint32_t maxY)
122{
123 struct drm_device *dev = connector->dev;
124 struct drm_display_mode *mode;
125 struct drm_connector_helper_funcs *connector_funcs =
126 connector->helper_private;
127 int count = 0;
128 int mode_flags = 0;
129 bool verbose_prune = true;
130
131 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
132
133 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
134 drm_get_connector_name(connector));
135 /* set all modes to the unverified state */
136 list_for_each_entry(mode, &connector->modes, head)
137 mode->status = MODE_UNVERIFIED;
138
139 if (connector->force) {
140 if (connector->force == DRM_FORCE_ON)
141 connector->status = connector_status_connected;
142 else
143 connector->status = connector_status_disconnected;
144 if (connector->funcs->force)
145 connector->funcs->force(connector);
146 } else {
147 connector->status = connector->funcs->detect(connector, true);
148 }
149
150 /* Re-enable polling in case the global poll config changed. */
151 if (drm_kms_helper_poll != dev->mode_config.poll_running)
152 drm_kms_helper_poll_enable(dev);
153
154 dev->mode_config.poll_running = drm_kms_helper_poll;
155
156 if (connector->status == connector_status_disconnected) {
157 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
158 connector->base.id, drm_get_connector_name(connector));
159 drm_mode_connector_update_edid_property(connector, NULL);
160 verbose_prune = false;
161 goto prune;
162 }
163
164#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
165 count = drm_load_edid_firmware(connector);
166 if (count == 0)
167#endif
168 count = (*connector_funcs->get_modes)(connector);
169
170 if (count == 0 && connector->status == connector_status_connected)
171 count = drm_add_modes_noedid(connector, 1024, 768);
172 if (count == 0)
173 goto prune;
174
175 drm_mode_connector_list_update(connector);
176
177 if (maxX && maxY)
178 drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
179
180 if (connector->interlace_allowed)
181 mode_flags |= DRM_MODE_FLAG_INTERLACE;
182 if (connector->doublescan_allowed)
183 mode_flags |= DRM_MODE_FLAG_DBLSCAN;
184 if (connector->stereo_allowed)
185 mode_flags |= DRM_MODE_FLAG_3D_MASK;
186 drm_mode_validate_flag(connector, mode_flags);
187
188 list_for_each_entry(mode, &connector->modes, head) {
189 if (mode->status == MODE_OK)
190 mode->status = connector_funcs->mode_valid(connector,
191 mode);
192 }
193
194prune:
195 drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
196
197 if (list_empty(&connector->modes))
198 return 0;
199
200 list_for_each_entry(mode, &connector->modes, head)
201 mode->vrefresh = drm_mode_vrefresh(mode);
202
203 drm_mode_sort(&connector->modes);
204
205 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
206 drm_get_connector_name(connector));
207 list_for_each_entry(mode, &connector->modes, head) {
208 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
209 drm_mode_debug_printmodeline(mode);
210 }
211
212 return count;
213}
214EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
215
216/** 75/**
217 * drm_helper_encoder_in_use - check if a given encoder is in use 76 * drm_helper_encoder_in_use - check if a given encoder is in use
218 * @encoder: encoder to check 77 * @encoder: encoder to check
@@ -1020,232 +879,3 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
1020 drm_modeset_unlock_all(dev); 879 drm_modeset_unlock_all(dev);
1021} 880}
1022EXPORT_SYMBOL(drm_helper_resume_force_mode); 881EXPORT_SYMBOL(drm_helper_resume_force_mode);
1023
1024/**
1025 * drm_kms_helper_hotplug_event - fire off KMS hotplug events
1026 * @dev: drm_device whose connector state changed
1027 *
1028 * This function fires off the uevent for userspace and also calls the
1029 * output_poll_changed function, which is most commonly used to inform the fbdev
1030 * emulation code and allow it to update the fbcon output configuration.
1031 *
1032 * Drivers should call this from their hotplug handling code when a change is
1033 * detected. Note that this function does not do any output detection of its
1034 * own, like drm_helper_hpd_irq_event() does - this is assumed to be done by the
1035 * driver already.
1036 *
1037 * This function must be called from process context with no mode
1038 * setting locks held.
1039 */
1040void drm_kms_helper_hotplug_event(struct drm_device *dev)
1041{
1042 /* send a uevent + call fbdev */
1043 drm_sysfs_hotplug_event(dev);
1044 if (dev->mode_config.funcs->output_poll_changed)
1045 dev->mode_config.funcs->output_poll_changed(dev);
1046}
1047EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
1048
1049#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
1050static void output_poll_execute(struct work_struct *work)
1051{
1052 struct delayed_work *delayed_work = to_delayed_work(work);
1053 struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
1054 struct drm_connector *connector;
1055 enum drm_connector_status old_status;
1056 bool repoll = false, changed = false;
1057
1058 if (!drm_kms_helper_poll)
1059 return;
1060
1061 mutex_lock(&dev->mode_config.mutex);
1062 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1063
1064 /* Ignore forced connectors. */
1065 if (connector->force)
1066 continue;
1067
1068 /* Ignore HPD capable connectors and connectors where we don't
1069 * want any hotplug detection at all for polling. */
1070 if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
1071 continue;
1072
1073 repoll = true;
1074
1075 old_status = connector->status;
1076 /* if we are connected and don't want to poll for disconnect
1077 skip it */
1078 if (old_status == connector_status_connected &&
1079 !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
1080 continue;
1081
1082 connector->status = connector->funcs->detect(connector, false);
1083 if (old_status != connector->status) {
1084 const char *old, *new;
1085
1086 old = drm_get_connector_status_name(old_status);
1087 new = drm_get_connector_status_name(connector->status);
1088
1089 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
1090 "status updated from %s to %s\n",
1091 connector->base.id,
1092 drm_get_connector_name(connector),
1093 old, new);
1094
1095 changed = true;
1096 }
1097 }
1098
1099 mutex_unlock(&dev->mode_config.mutex);
1100
1101 if (changed)
1102 drm_kms_helper_hotplug_event(dev);
1103
1104 if (repoll)
1105 schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
1106}
1107
1108/**
1109 * drm_kms_helper_poll_disable - disable output polling
1110 * @dev: drm_device
1111 *
1112 * This function disables the output polling work.
1113 *
1114 * Drivers can call this helper from their device suspend implementation. It is
1115 * not an error to call this even when output polling isn't enabled or already
1116 * disabled.
1117 */
1118void drm_kms_helper_poll_disable(struct drm_device *dev)
1119{
1120 if (!dev->mode_config.poll_enabled)
1121 return;
1122 cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
1123}
1124EXPORT_SYMBOL(drm_kms_helper_poll_disable);
1125
1126/**
1127 * drm_kms_helper_poll_enable - re-enable output polling.
1128 * @dev: drm_device
1129 *
1130 * This function re-enables the output polling work.
1131 *
1132 * Drivers can call this helper from their device resume implementation. It is
1133 * an error to call this when the output polling support has not yet been set
1134 * up.
1135 */
1136void drm_kms_helper_poll_enable(struct drm_device *dev)
1137{
1138 bool poll = false;
1139 struct drm_connector *connector;
1140
1141 if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
1142 return;
1143
1144 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1145 if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
1146 DRM_CONNECTOR_POLL_DISCONNECT))
1147 poll = true;
1148 }
1149
1150 if (poll)
1151 schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
1152}
1153EXPORT_SYMBOL(drm_kms_helper_poll_enable);
1154
1155/**
1156 * drm_kms_helper_poll_init - initialize and enable output polling
1157 * @dev: drm_device
1158 *
1159 * This function initializes and then also enables output polling support for
1160 * @dev. Drivers which do not have reliable hotplug support in hardware can use
1161 * this helper infrastructure to regularly poll such connectors for changes in
1162 * their connection state.
1163 *
1164 * Drivers can control which connectors are polled by setting the
1165 * DRM_CONNECTOR_POLL_CONNECT and DRM_CONNECTOR_POLL_DISCONNECT flags. On
1166 * connectors where probing live outputs can result in visual distortion drivers
1167 * should not set the DRM_CONNECTOR_POLL_DISCONNECT flag to avoid this.
1168 * Connectors which have no flag or only DRM_CONNECTOR_POLL_HPD set are
1169 * completely ignored by the polling logic.
1170 *
1171 * Note that a connector can be both polled and probed from the hotplug handler,
1172 * in case the hotplug interrupt is known to be unreliable.
1173 */
1174void drm_kms_helper_poll_init(struct drm_device *dev)
1175{
1176 INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
1177 dev->mode_config.poll_enabled = true;
1178
1179 drm_kms_helper_poll_enable(dev);
1180}
1181EXPORT_SYMBOL(drm_kms_helper_poll_init);
1182
1183/**
1184 * drm_kms_helper_poll_fini - disable output polling and clean it up
1185 * @dev: drm_device
1186 */
1187void drm_kms_helper_poll_fini(struct drm_device *dev)
1188{
1189 drm_kms_helper_poll_disable(dev);
1190}
1191EXPORT_SYMBOL(drm_kms_helper_poll_fini);
1192
1193/**
1194 * drm_helper_hpd_irq_event - hotplug processing
1195 * @dev: drm_device
1196 *
1197 * Drivers can use this helper function to run a detect cycle on all connectors
1198 * which have the DRM_CONNECTOR_POLL_HPD flag set in their &polled member. All
1199 * other connectors are ignored, which is useful to avoid reprobing fixed
1200 * panels.
1201 *
1202 * This helper function is useful for drivers which can't or don't track hotplug
1203 * interrupts for each connector.
1204 *
1205 * Drivers which support hotplug interrupts for each connector individually and
1206 * which have a more fine-grained detect logic should bypass this code and
1207 * directly call drm_kms_helper_hotplug_event() in case the connector state
1208 * changed.
1209 *
1210 * This function must be called from process context with no mode
1211 * setting locks held.
1212 *
1213 * Note that a connector can be both polled and probed from the hotplug handler,
1214 * in case the hotplug interrupt is known to be unreliable.
1215 */
1216bool drm_helper_hpd_irq_event(struct drm_device *dev)
1217{
1218 struct drm_connector *connector;
1219 enum drm_connector_status old_status;
1220 bool changed = false;
1221
1222 if (!dev->mode_config.poll_enabled)
1223 return false;
1224
1225 mutex_lock(&dev->mode_config.mutex);
1226 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1227
1228 /* Only handle HPD capable connectors. */
1229 if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
1230 continue;
1231
1232 old_status = connector->status;
1233
1234 connector->status = connector->funcs->detect(connector, false);
1235 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
1236 connector->base.id,
1237 drm_get_connector_name(connector),
1238 drm_get_connector_status_name(old_status),
1239 drm_get_connector_status_name(connector->status));
1240 if (old_status != connector->status)
1241 changed = true;
1242 }
1243
1244 mutex_unlock(&dev->mode_config.mutex);
1245
1246 if (changed)
1247 drm_kms_helper_hotplug_event(dev);
1248
1249 return changed;
1250}
1251EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 27671489477d..4b6e6f3ba0a1 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -577,7 +577,9 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
577 577
578/* 578/*
579 * Transfer a single I2C-over-AUX message and handle various error conditions, 579 * Transfer a single I2C-over-AUX message and handle various error conditions,
580 * retrying the transaction as appropriate. 580 * retrying the transaction as appropriate. It is assumed that the
581 * aux->transfer function does not modify anything in the msg other than the
582 * reply field.
581 */ 583 */
582static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) 584static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
583{ 585{
@@ -665,11 +667,26 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
665{ 667{
666 struct drm_dp_aux *aux = adapter->algo_data; 668 struct drm_dp_aux *aux = adapter->algo_data;
667 unsigned int i, j; 669 unsigned int i, j;
670 struct drm_dp_aux_msg msg;
671 int err = 0;
668 672
669 for (i = 0; i < num; i++) { 673 memset(&msg, 0, sizeof(msg));
670 struct drm_dp_aux_msg msg;
671 int err;
672 674
675 for (i = 0; i < num; i++) {
676 msg.address = msgs[i].addr;
677 msg.request = (msgs[i].flags & I2C_M_RD) ?
678 DP_AUX_I2C_READ :
679 DP_AUX_I2C_WRITE;
680 msg.request |= DP_AUX_I2C_MOT;
681 /* Send a bare address packet to start the transaction.
682 * Zero sized messages specify an address only (bare
683 * address) transaction.
684 */
685 msg.buffer = NULL;
686 msg.size = 0;
687 err = drm_dp_i2c_do_msg(aux, &msg);
688 if (err < 0)
689 break;
673 /* 690 /*
674 * Many hardware implementations support FIFOs larger than a 691 * Many hardware implementations support FIFOs larger than a
675 * single byte, but it has been empirically determined that 692 * single byte, but it has been empirically determined that
@@ -678,30 +695,28 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
678 * transferred byte-by-byte. 695 * transferred byte-by-byte.
679 */ 696 */
680 for (j = 0; j < msgs[i].len; j++) { 697 for (j = 0; j < msgs[i].len; j++) {
681 memset(&msg, 0, sizeof(msg));
682 msg.address = msgs[i].addr;
683
684 msg.request = (msgs[i].flags & I2C_M_RD) ?
685 DP_AUX_I2C_READ :
686 DP_AUX_I2C_WRITE;
687
688 /*
689 * All messages except the last one are middle-of-
690 * transfer messages.
691 */
692 if ((i < num - 1) || (j < msgs[i].len - 1))
693 msg.request |= DP_AUX_I2C_MOT;
694
695 msg.buffer = msgs[i].buf + j; 698 msg.buffer = msgs[i].buf + j;
696 msg.size = 1; 699 msg.size = 1;
697 700
698 err = drm_dp_i2c_do_msg(aux, &msg); 701 err = drm_dp_i2c_do_msg(aux, &msg);
699 if (err < 0) 702 if (err < 0)
700 return err; 703 break;
701 } 704 }
705 if (err < 0)
706 break;
702 } 707 }
708 if (err >= 0)
709 err = num;
710 /* Send a bare address packet to close out the transaction.
711 * Zero sized messages specify an address only (bare
712 * address) transaction.
713 */
714 msg.request &= ~DP_AUX_I2C_MOT;
715 msg.buffer = NULL;
716 msg.size = 0;
717 (void)drm_dp_i2c_do_msg(aux, &msg);
703 718
704 return num; 719 return err;
705} 720}
706 721
707static const struct i2c_algorithm drm_dp_i2c_algo = { 722static const struct i2c_algorithm drm_dp_i2c_algo = {
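Editorial note: the drm_dp_helper rework above brackets each i2c_msg with zero-length "bare address" AUX transactions, one with DP_AUX_I2C_MOT set to open the transfer and a final one with MOT cleared to close it, while payload bytes still move one at a time. Sketched as a comment (hypothetical two-byte read):

/*
 * AUX traffic for one 2-byte I2C read under the loop above:
 *
 *   1. request = DP_AUX_I2C_READ | DP_AUX_I2C_MOT, size = 0  (bare address, open)
 *   2. request = DP_AUX_I2C_READ | DP_AUX_I2C_MOT, size = 1  (byte 0)
 *   3. request = DP_AUX_I2C_READ | DP_AUX_I2C_MOT, size = 1  (byte 1)
 *   4. request = DP_AUX_I2C_READ,                  size = 0  (bare address, close)
 */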
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 71e2d3fcd6ee..04a209e2b66d 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -207,8 +207,6 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
207 return 0; 207 return 0;
208 } 208 }
209 209
210 WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
211 node->start, node->size);
212 return -ENOSPC; 210 return -ENOSPC;
213} 211}
214EXPORT_SYMBOL(drm_mm_reserve_node); 212EXPORT_SYMBOL(drm_mm_reserve_node);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index e768d35ff22e..d2b1c03b3d71 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -203,9 +203,9 @@ EXPORT_SYMBOL(drm_primary_helper_update);
203 * 203 *
204 * Provides a default plane disable handler for primary planes. This handler 204 * Provides a default plane disable handler for primary planes. This handler
205 * is called in response to a userspace SetPlane operation on the plane with a 205 * is called in response to a userspace SetPlane operation on the plane with a
206 * NULL framebuffer parameter. We call the driver's modeset handler with a NULL 206 * NULL framebuffer parameter. It unconditionally fails the disable call with
207 * framebuffer to disable the CRTC if no other planes are currently enabled. 207 * -EINVAL, since the only way to disable the primary plane without driver support
208 * If other planes are still enabled on the same CRTC, we return -EBUSY. 208 * is to disable the entire CRTC, which does not match the plane ->disable hook.
209 * 209 *
210 * Note that some hardware may be able to disable the primary plane without 210 * Note that some hardware may be able to disable the primary plane without
211 * disabling the whole CRTC. Drivers for such hardware should provide their 211 * disabling the whole CRTC. Drivers for such hardware should provide their
@@ -214,34 +214,11 @@ EXPORT_SYMBOL(drm_primary_helper_update);
214 * disabled primary plane). 214 * disabled primary plane).
215 * 215 *
216 * RETURNS: 216 * RETURNS:
217 * Zero on success, error code on failure 217 * Unconditionally returns -EINVAL.
218 */ 218 */
219int drm_primary_helper_disable(struct drm_plane *plane) 219int drm_primary_helper_disable(struct drm_plane *plane)
220{ 220{
221 struct drm_plane *p; 221 return -EINVAL;
222 struct drm_mode_set set = {
223 .crtc = plane->crtc,
224 .fb = NULL,
225 };
226
227 if (plane->crtc == NULL || plane->fb == NULL)
228 /* Already disabled */
229 return 0;
230
231 list_for_each_entry(p, &plane->dev->mode_config.plane_list, head)
232 if (p != plane && p->fb) {
233 DRM_DEBUG_KMS("Cannot disable primary plane while other planes are still active on CRTC.\n");
234 return -EBUSY;
235 }
236
237 /*
238 * N.B. We call set_config() directly here rather than
239 * drm_mode_set_config_internal() since drm_mode_setplane() already
240 * handles the basic refcounting and we don't need the special
241 * cross-CRTC refcounting (no chance of stealing connectors from
242 * other CRTC's with this update).
243 */
244 return plane->crtc->funcs->set_config(&set);
245} 222}
246EXPORT_SYMBOL(drm_primary_helper_disable); 223EXPORT_SYMBOL(drm_primary_helper_disable);
247 224
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
new file mode 100644
index 000000000000..e70f54d4a581
--- /dev/null
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -0,0 +1,426 @@
1/*
2 * Copyright (c) 2006-2008 Intel Corporation
3 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
4 *
5 * DRM core CRTC related functions
6 *
7 * Permission to use, copy, modify, distribute, and sell this software and its
8 * documentation for any purpose is hereby granted without fee, provided that
9 * the above copyright notice appear in all copies and that both that copyright
10 * notice and this permission notice appear in supporting documentation, and
11 * that the name of the copyright holders not be used in advertising or
12 * publicity pertaining to distribution of the software without specific,
13 * written prior permission. The copyright holders make no representations
14 * about the suitability of this software for any purpose. It is provided "as
15 * is" without express or implied warranty.
16 *
17 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
18 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
19 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
20 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
21 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
22 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
23 * OF THIS SOFTWARE.
24 *
25 * Authors:
26 * Keith Packard
27 * Eric Anholt <eric@anholt.net>
28 * Dave Airlie <airlied@linux.ie>
29 * Jesse Barnes <jesse.barnes@intel.com>
30 */
31
32#include <linux/export.h>
33#include <linux/moduleparam.h>
34
35#include <drm/drmP.h>
36#include <drm/drm_crtc.h>
37#include <drm/drm_fourcc.h>
38#include <drm/drm_crtc_helper.h>
39#include <drm/drm_fb_helper.h>
40#include <drm/drm_edid.h>
41
42/**
43 * DOC: output probing helper overview
44 *
45 * This library provides some helper code for output probing. It provides an
46 * implementation of the core connector->fill_modes interface with
47 * drm_helper_probe_single_connector_modes.
48 *
49 * It also provides support for polling connectors with a work item and for
50 * generic hotplug interrupt handling where the driver doesn't or cannot keep
51 * track of a per-connector hpd interrupt.
52 *
53 * This helper library can be used independently of the modeset helper library.
54 * Drivers can also overwrite different parts e.g. use their own hotplug
55 * handling code to avoid probing unrelated outputs.
56 */
57
58static bool drm_kms_helper_poll = true;
59module_param_named(poll, drm_kms_helper_poll, bool, 0600);
60
61static void drm_mode_validate_flag(struct drm_connector *connector,
62 int flags)
63{
64 struct drm_display_mode *mode;
65
66 if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
67 DRM_MODE_FLAG_3D_MASK))
68 return;
69
70 list_for_each_entry(mode, &connector->modes, head) {
71 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
72 !(flags & DRM_MODE_FLAG_INTERLACE))
73 mode->status = MODE_NO_INTERLACE;
74 if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
75 !(flags & DRM_MODE_FLAG_DBLSCAN))
76 mode->status = MODE_NO_DBLESCAN;
77 if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
78 !(flags & DRM_MODE_FLAG_3D_MASK))
79 mode->status = MODE_NO_STEREO;
80 }
81
82 return;
83}
84
85/**
86 * drm_helper_probe_single_connector_modes - get complete set of display modes
87 * @connector: connector to probe
88 * @maxX: max width for modes
89 * @maxY: max height for modes
90 *
91 * Based on the helper callbacks implemented by @connector try to detect all
92 * valid modes. Modes will first be added to the connector's probed_modes list,
93 * then culled (based on validity and the @maxX, @maxY parameters) and put into
94 * the normal modes list.
95 *
96 * Intended to be used as a generic implementation of the ->fill_modes()
97 * @connector vfunc for drivers that use the crtc helpers for output mode
98 * filtering and detection.
99 *
100 * Returns:
101 * The number of modes found on @connector.
102 */
103int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
104 uint32_t maxX, uint32_t maxY)
105{
106 struct drm_device *dev = connector->dev;
107 struct drm_display_mode *mode;
108 struct drm_connector_helper_funcs *connector_funcs =
109 connector->helper_private;
110 int count = 0;
111 int mode_flags = 0;
112 bool verbose_prune = true;
113
114 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
115
116 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
117 drm_get_connector_name(connector));
118 /* set all modes to the unverified state */
119 list_for_each_entry(mode, &connector->modes, head)
120 mode->status = MODE_UNVERIFIED;
121
122 if (connector->force) {
123 if (connector->force == DRM_FORCE_ON)
124 connector->status = connector_status_connected;
125 else
126 connector->status = connector_status_disconnected;
127 if (connector->funcs->force)
128 connector->funcs->force(connector);
129 } else {
130 connector->status = connector->funcs->detect(connector, true);
131 }
132
133 /* Re-enable polling in case the global poll config changed. */
134 if (drm_kms_helper_poll != dev->mode_config.poll_running)
135 drm_kms_helper_poll_enable(dev);
136
137 dev->mode_config.poll_running = drm_kms_helper_poll;
138
139 if (connector->status == connector_status_disconnected) {
140 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
141 connector->base.id, drm_get_connector_name(connector));
142 drm_mode_connector_update_edid_property(connector, NULL);
143 verbose_prune = false;
144 goto prune;
145 }
146
147#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
148 count = drm_load_edid_firmware(connector);
149 if (count == 0)
150#endif
151 count = (*connector_funcs->get_modes)(connector);
152
153 if (count == 0 && connector->status == connector_status_connected)
154 count = drm_add_modes_noedid(connector, 1024, 768);
155 if (count == 0)
156 goto prune;
157
158 drm_mode_connector_list_update(connector);
159
160 if (maxX && maxY)
161 drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
162
163 if (connector->interlace_allowed)
164 mode_flags |= DRM_MODE_FLAG_INTERLACE;
165 if (connector->doublescan_allowed)
166 mode_flags |= DRM_MODE_FLAG_DBLSCAN;
167 if (connector->stereo_allowed)
168 mode_flags |= DRM_MODE_FLAG_3D_MASK;
169 drm_mode_validate_flag(connector, mode_flags);
170
171 list_for_each_entry(mode, &connector->modes, head) {
172 if (mode->status == MODE_OK)
173 mode->status = connector_funcs->mode_valid(connector,
174 mode);
175 }
176
177prune:
178 drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
179
180 if (list_empty(&connector->modes))
181 return 0;
182
183 list_for_each_entry(mode, &connector->modes, head)
184 mode->vrefresh = drm_mode_vrefresh(mode);
185
186 drm_mode_sort(&connector->modes);
187
188 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
189 drm_get_connector_name(connector));
190 list_for_each_entry(mode, &connector->modes, head) {
191 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
192 drm_mode_debug_printmodeline(mode);
193 }
194
195 return count;
196}
197EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
198
199/**
200 * drm_kms_helper_hotplug_event - fire off KMS hotplug events
201 * @dev: drm_device whose connector state changed
202 *
203 * This function fires off the uevent for userspace and also calls the
204 * output_poll_changed function, which is most commonly used to inform the fbdev
205 * emulation code and allow it to update the fbcon output configuration.
206 *
207 * Drivers should call this from their hotplug handling code when a change is
208 * detected. Note that this function does not do any output detection of its
209 * own, like drm_helper_hpd_irq_event() does - this is assumed to be done by the
210 * driver already.
211 *
212 * This function must be called from process context with no mode
213 * setting locks held.
214 */
215void drm_kms_helper_hotplug_event(struct drm_device *dev)
216{
217 /* send a uevent + call fbdev */
218 drm_sysfs_hotplug_event(dev);
219 if (dev->mode_config.funcs->output_poll_changed)
220 dev->mode_config.funcs->output_poll_changed(dev);
221}
222EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
223
224#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
225static void output_poll_execute(struct work_struct *work)
226{
227 struct delayed_work *delayed_work = to_delayed_work(work);
228 struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
229 struct drm_connector *connector;
230 enum drm_connector_status old_status;
231 bool repoll = false, changed = false;
232
233 if (!drm_kms_helper_poll)
234 return;
235
236 mutex_lock(&dev->mode_config.mutex);
237 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
238
239 /* Ignore forced connectors. */
240 if (connector->force)
241 continue;
242
243 /* Ignore HPD capable connectors and connectors where we don't
244 * want any hotplug detection at all for polling. */
245 if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
246 continue;
247
248 repoll = true;
249
250 old_status = connector->status;
251 /* if we are connected and don't want to poll for disconnect
252 skip it */
253 if (old_status == connector_status_connected &&
254 !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
255 continue;
256
257 connector->status = connector->funcs->detect(connector, false);
258 if (old_status != connector->status) {
259 const char *old, *new;
260
261 old = drm_get_connector_status_name(old_status);
262 new = drm_get_connector_status_name(connector->status);
263
264 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
265 "status updated from %s to %s\n",
266 connector->base.id,
267 drm_get_connector_name(connector),
268 old, new);
269
270 changed = true;
271 }
272 }
273
274 mutex_unlock(&dev->mode_config.mutex);
275
276 if (changed)
277 drm_kms_helper_hotplug_event(dev);
278
279 if (repoll)
280 schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
281}
282
283/**
284 * drm_kms_helper_poll_disable - disable output polling
285 * @dev: drm_device
286 *
287 * This function disables the output polling work.
288 *
289 * Drivers can call this helper from their device suspend implementation. It is
290 * not an error to call this even when output polling isn't enabled or already
291 * disabled.
292 */
293void drm_kms_helper_poll_disable(struct drm_device *dev)
294{
295 if (!dev->mode_config.poll_enabled)
296 return;
297 cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
298}
299EXPORT_SYMBOL(drm_kms_helper_poll_disable);
300
301/**
302 * drm_kms_helper_poll_enable - re-enable output polling.
303 * @dev: drm_device
304 *
305 * This function re-enables the output polling work.
306 *
307 * Drivers can call this helper from their device resume implementation. It is
308 * an error to call this when the output polling support has not yet been set
309 * up.
310 */
311void drm_kms_helper_poll_enable(struct drm_device *dev)
312{
313 bool poll = false;
314 struct drm_connector *connector;
315
316 if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
317 return;
318
319 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
320 if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
321 DRM_CONNECTOR_POLL_DISCONNECT))
322 poll = true;
323 }
324
325 if (poll)
326 schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
327}
328EXPORT_SYMBOL(drm_kms_helper_poll_enable);
329
330/**
331 * drm_kms_helper_poll_init - initialize and enable output polling
332 * @dev: drm_device
333 *
334 * This function initializes and then also enables output polling support for
335 * @dev. Drivers which do not have reliable hotplug support in hardware can use
336 * this helper infrastructure to regularly poll such connectors for changes in
337 * their connection state.
338 *
339 * Drivers can control which connectors are polled by setting the
340 * DRM_CONNECTOR_POLL_CONNECT and DRM_CONNECTOR_POLL_DISCONNECT flags. On
341 * connectors where probing live outputs can result in visual distortion drivers
342 * should not set the DRM_CONNECTOR_POLL_DISCONNECT flag to avoid this.
343 * Connectors which have no flag or only DRM_CONNECTOR_POLL_HPD set are
344 * completely ignored by the polling logic.
345 *
346 * Note that a connector can be both polled and probed from the hotplug handler,
347 * in case the hotplug interrupt is known to be unreliable.
348 */
349void drm_kms_helper_poll_init(struct drm_device *dev)
350{
351 INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
352 dev->mode_config.poll_enabled = true;
353
354 drm_kms_helper_poll_enable(dev);
355}
356EXPORT_SYMBOL(drm_kms_helper_poll_init);
357
358/**
359 * drm_kms_helper_poll_fini - disable output polling and clean it up
360 * @dev: drm_device
361 */
362void drm_kms_helper_poll_fini(struct drm_device *dev)
363{
364 drm_kms_helper_poll_disable(dev);
365}
366EXPORT_SYMBOL(drm_kms_helper_poll_fini);
367
368/**
369 * drm_helper_hpd_irq_event - hotplug processing
370 * @dev: drm_device
371 *
372 * Drivers can use this helper function to run a detect cycle on all connectors
373 * which have the DRM_CONNECTOR_POLL_HPD flag set in their &polled member. All
374 * other connectors are ignored, which is useful to avoid reprobing fixed
375 * panels.
376 *
377 * This helper function is useful for drivers which can't or don't track hotplug
378 * interrupts for each connector.
379 *
380 * Drivers which support hotplug interrupts for each connector individually and
381 * which have a more fine-grained detect logic should bypass this code and
382 * directly call drm_kms_helper_hotplug_event() in case the connector state
383 * changed.
384 *
385 * This function must be called from process context with no mode
386 * setting locks held.
387 *
388 * Note that a connector can be both polled and probed from the hotplug handler,
389 * in case the hotplug interrupt is known to be unreliable.
390 */
391bool drm_helper_hpd_irq_event(struct drm_device *dev)
392{
393 struct drm_connector *connector;
394 enum drm_connector_status old_status;
395 bool changed = false;
396
397 if (!dev->mode_config.poll_enabled)
398 return false;
399
400 mutex_lock(&dev->mode_config.mutex);
401 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
402
403 /* Only handle HPD capable connectors. */
404 if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
405 continue;
406
407 old_status = connector->status;
408
409 connector->status = connector->funcs->detect(connector, false);
410 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
411 connector->base.id,
412 drm_get_connector_name(connector),
413 drm_get_connector_status_name(old_status),
414 drm_get_connector_status_name(connector->status));
415 if (old_status != connector->status)
416 changed = true;
417 }
418
419 mutex_unlock(&dev->mode_config.mutex);
420
421 if (changed)
422 drm_kms_helper_hotplug_event(dev);
423
424 return changed;
425}
426EXPORT_SYMBOL(drm_helper_hpd_irq_event);
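Editorial note: the DOC block at the top of the new file describes how drivers are expected to consume these helpers. A hedged sketch of that wiring (struct foo_device, foo_connector_detect, foo_load and foo_unload are hypothetical names, not part of this patch):

#include <linux/workqueue.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

struct foo_device {
	struct drm_device *drm;
	struct work_struct hotplug_work;
};

static enum drm_connector_status
foo_connector_detect(struct drm_connector *connector, bool force)
{
	return connector_status_connected;	/* a real driver probes hardware */
}

static const struct drm_connector_funcs foo_connector_funcs = {
	.dpms		= drm_helper_connector_dpms,
	.detect		= foo_connector_detect,
	.fill_modes	= drm_helper_probe_single_connector_modes,
	.destroy	= drm_connector_cleanup,
};

static void foo_hotplug_work(struct work_struct *work)
{
	struct foo_device *foo = container_of(work, struct foo_device,
					      hotplug_work);

	/* Process context, no modeset locks held, as required above. */
	drm_helper_hpd_irq_event(foo->drm);
}

static int foo_load(struct foo_device *foo)
{
	INIT_WORK(&foo->hotplug_work, foo_hotplug_work);
	/* ...create CRTCs, encoders and connectors here... */
	drm_kms_helper_poll_init(foo->drm);	/* poll non-HPD connectors */
	return 0;
}

static void foo_unload(struct foo_device *foo)
{
	drm_kms_helper_poll_fini(foo->drm);
	cancel_work_sync(&foo->hotplug_work);
}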
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index e930d4fe29c7..1ef5ab9c9d51 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -145,6 +145,7 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
145 145
146 plane->crtc = crtc; 146 plane->crtc = crtc;
147 plane->fb = crtc->primary->fb; 147 plane->fb = crtc->primary->fb;
148 drm_framebuffer_reference(plane->fb);
148 149
149 return 0; 150 return 0;
150} 151}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index c786cd4f457b..2a3ad24276f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -263,7 +263,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
263 buffer->sgt = sgt; 263 buffer->sgt = sgt;
264 exynos_gem_obj->base.import_attach = attach; 264 exynos_gem_obj->base.import_attach = attach;
265 265
266 DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr, 266 DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
267 buffer->size); 267 buffer->size);
268 268
269 return &exynos_gem_obj->base; 269 return &exynos_gem_obj->base;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index eb73e3bf2a0c..4ac438187568 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1426,9 +1426,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1426 1426
1427 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1427 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1428 dsi->reg_base = devm_ioremap_resource(&pdev->dev, res); 1428 dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
1429 if (!dsi->reg_base) { 1429 if (IS_ERR(dsi->reg_base)) {
1430 dev_err(&pdev->dev, "failed to remap io region\n"); 1430 dev_err(&pdev->dev, "failed to remap io region\n");
1431 return -EADDRNOTAVAIL; 1431 return PTR_ERR(dsi->reg_base);
1432 } 1432 }
1433 1433
1434 dsi->phy = devm_phy_get(&pdev->dev, "dsim"); 1434 dsi->phy = devm_phy_get(&pdev->dev, "dsim");
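Editorial note: the exynos_dsi fix above reflects that devm_ioremap_resource() never returns NULL; errors come back ERR_PTR()-encoded and must be tested with IS_ERR() and propagated with PTR_ERR(). The idiomatic pattern, as a hypothetical probe:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))		/* never NULL on failure */
		return PTR_ERR(base);

	return 0;
}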
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 7afead9c3f30..852f2dadaebd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -220,7 +220,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
220 220
221 win_data->enabled = true; 221 win_data->enabled = true;
222 222
223 DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr); 223 DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
224 224
225 if (ctx->vblank_on) 225 if (ctx->vblank_on)
226 schedule_work(&ctx->work); 226 schedule_work(&ctx->work);
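Editorial note: the two exynos debug-print changes switch to the %pad format specifier, which prints a dma_addr_t portably whether it is 32 or 64 bits wide; it takes a pointer to the value, hence the added '&'. A minimal illustration:

#include <linux/printk.h>
#include <linux/types.h>

static void demo_print_dma_addr(dma_addr_t addr)
{
	pr_debug("dma_addr = %pad\n", &addr);	/* pass a pointer, not the value */
}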
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 96177eec0a0e..eedb023af27d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
1833 flush_workqueue(dev_priv->wq); 1833 flush_workqueue(dev_priv->wq);
1834 1834
1835 mutex_lock(&dev->struct_mutex); 1835 mutex_lock(&dev->struct_mutex);
1836 i915_gem_free_all_phys_object(dev);
1837 i915_gem_cleanup_ringbuffer(dev); 1836 i915_gem_cleanup_ringbuffer(dev);
1838 i915_gem_context_fini(dev); 1837 i915_gem_context_fini(dev);
1839 WARN_ON(dev_priv->mm.aliasing_ppgtt); 1838 WARN_ON(dev_priv->mm.aliasing_ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0905cd915589..388c028e223c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -242,18 +242,6 @@ struct intel_ddi_plls {
242#define WATCH_LISTS 0 242#define WATCH_LISTS 0
243#define WATCH_GTT 0 243#define WATCH_GTT 0
244 244
245#define I915_GEM_PHYS_CURSOR_0 1
246#define I915_GEM_PHYS_CURSOR_1 2
247#define I915_GEM_PHYS_OVERLAY_REGS 3
248#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
249
250struct drm_i915_gem_phys_object {
251 int id;
252 struct page **page_list;
253 drm_dma_handle_t *handle;
254 struct drm_i915_gem_object *cur_obj;
255};
256
257struct opregion_header; 245struct opregion_header;
258struct opregion_acpi; 246struct opregion_acpi;
259struct opregion_swsci; 247struct opregion_swsci;
@@ -1187,9 +1175,6 @@ struct i915_gem_mm {
1187 /** Bit 6 swizzling required for Y tiling */ 1175 /** Bit 6 swizzling required for Y tiling */
1188 uint32_t bit_6_swizzle_y; 1176 uint32_t bit_6_swizzle_y;
1189 1177
1190 /* storage for physical objects */
1191 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
1192
1193 /* accounting, useful for userland debugging */ 1178 /* accounting, useful for userland debugging */
1194 spinlock_t object_stat_lock; 1179 spinlock_t object_stat_lock;
1195 size_t object_memory; 1180 size_t object_memory;
@@ -1308,6 +1293,7 @@ struct intel_vbt_data {
1308 1293
1309 struct { 1294 struct {
1310 u16 pwm_freq_hz; 1295 u16 pwm_freq_hz;
1296 bool present;
1311 bool active_low_pwm; 1297 bool active_low_pwm;
1312 } backlight; 1298 } backlight;
1313 1299
@@ -1768,7 +1754,7 @@ struct drm_i915_gem_object {
1768 struct drm_file *pin_filp; 1754 struct drm_file *pin_filp;
1769 1755
1770 /** for phy allocated objects */ 1756 /** for phy allocated objects */
1771 struct drm_i915_gem_phys_object *phys_obj; 1757 drm_dma_handle_t *phys_handle;
1772}; 1758};
1773 1759
1774#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 1760#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -1953,6 +1939,9 @@ struct drm_i915_cmd_table {
1953#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) 1939#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
1954#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 1940#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
1955 ((dev)->pdev->device & 0x00F0) == 0x0020) 1941 ((dev)->pdev->device & 0x00F0) == 0x0020)
1942/* ULX machines are also considered ULT. */
1943#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \
1944 (dev)->pdev->device == 0x0A1E)
1956#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 1945#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
1957 1946
1958/* 1947/*
@@ -2200,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
2200#define PIN_MAPPABLE 0x1 2189#define PIN_MAPPABLE 0x1
2201#define PIN_NONBLOCK 0x2 2190#define PIN_NONBLOCK 0x2
2202#define PIN_GLOBAL 0x4 2191#define PIN_GLOBAL 0x4
2192#define PIN_OFFSET_BIAS 0x8
2193#define PIN_OFFSET_MASK (~4095)
2203int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 2194int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
2204 struct i915_address_space *vm, 2195 struct i915_address_space *vm,
2205 uint32_t alignment, 2196 uint32_t alignment,
2206 unsigned flags); 2197 uint64_t flags);
2207int __must_check i915_vma_unbind(struct i915_vma *vma); 2198int __must_check i915_vma_unbind(struct i915_vma *vma);
2208int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2199int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2209void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2200void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
@@ -2330,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
2330 u32 alignment, 2321 u32 alignment,
2331 struct intel_ring_buffer *pipelined); 2322 struct intel_ring_buffer *pipelined);
2332void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); 2323void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
2333int i915_gem_attach_phys_object(struct drm_device *dev, 2324int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
2334 struct drm_i915_gem_object *obj,
2335 int id,
2336 int align); 2325 int align);
2337void i915_gem_detach_phys_object(struct drm_device *dev,
2338 struct drm_i915_gem_object *obj);
2339void i915_gem_free_all_phys_object(struct drm_device *dev);
2340int i915_gem_open(struct drm_device *dev, struct drm_file *file); 2326int i915_gem_open(struct drm_device *dev, struct drm_file *file);
2341void i915_gem_release(struct drm_device *dev, struct drm_file *file); 2327void i915_gem_release(struct drm_device *dev, struct drm_file *file);
2342 2328
@@ -2431,20 +2417,18 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
2431int i915_gem_context_enable(struct drm_i915_private *dev_priv); 2417int i915_gem_context_enable(struct drm_i915_private *dev_priv);
2432void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 2418void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
2433int i915_switch_context(struct intel_ring_buffer *ring, 2419int i915_switch_context(struct intel_ring_buffer *ring,
2434 struct drm_file *file, struct i915_hw_context *to); 2420 struct i915_hw_context *to);
2435struct i915_hw_context * 2421struct i915_hw_context *
2436i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); 2422i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
2437void i915_gem_context_free(struct kref *ctx_ref); 2423void i915_gem_context_free(struct kref *ctx_ref);
2438static inline void i915_gem_context_reference(struct i915_hw_context *ctx) 2424static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
2439{ 2425{
2440 if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev)) 2426 kref_get(&ctx->ref);
2441 kref_get(&ctx->ref);
2442} 2427}
2443 2428
2444static inline void i915_gem_context_unreference(struct i915_hw_context *ctx) 2429static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
2445{ 2430{
2446 if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev)) 2431 kref_put(&ctx->ref, i915_gem_context_free);
2447 kref_put(&ctx->ref, i915_gem_context_free);
2448} 2432}
2449 2433
2450static inline bool i915_gem_context_is_default(const struct i915_hw_context *c) 2434static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
@@ -2463,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
2463 int min_size, 2447 int min_size,
2464 unsigned alignment, 2448 unsigned alignment,
2465 unsigned cache_level, 2449 unsigned cache_level,
2450 unsigned long start,
2451 unsigned long end,
2466 unsigned flags); 2452 unsigned flags);
2467int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 2453int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
2468int i915_gem_evict_everything(struct drm_device *dev); 2454int i915_gem_evict_everything(struct drm_device *dev);
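An aside, not part of the patch: the new PIN_OFFSET_BIAS / PIN_OFFSET_MASK values above pack a minimum GTT offset into the now 64-bit pin flags. The boolean pin flags all live below bit 12, so any page-aligned bias survives the mask. A small standalone sketch of the encode/decode, mirroring what i915_gem_object_bind_to_vm() does in the i915_gem.c hunk further down:

#include <stdint.h>
#include <stdio.h>

#define PIN_MAPPABLE    0x1
#define PIN_NONBLOCK    0x2
#define PIN_GLOBAL      0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)         /* flag bits sit below the page size */

int main(void)
{
        /* Request: mappable pin, placed no lower than 256 KiB (page aligned). */
        uint64_t flags = PIN_MAPPABLE | PIN_OFFSET_BIAS | (256 * 1024);

        /* Decode: only honour the bias when the flag says one was supplied. */
        unsigned long start = (flags & PIN_OFFSET_BIAS) ? (flags & PIN_OFFSET_MASK) : 0;

        printf("minimum offset = 0x%lx\n", start);      /* prints 0x40000 */
        return 0;
}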
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6370a761d137..3326770c9ed2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
43static __must_check int 43static __must_check int
44i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 44i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
45 bool readonly); 45 bool readonly);
46static int i915_gem_phys_pwrite(struct drm_device *dev,
47 struct drm_i915_gem_object *obj,
48 struct drm_i915_gem_pwrite *args,
49 struct drm_file *file);
50 46
51static void i915_gem_write_fence(struct drm_device *dev, int reg, 47static void i915_gem_write_fence(struct drm_device *dev, int reg,
52 struct drm_i915_gem_object *obj); 48 struct drm_i915_gem_object *obj);
@@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
209 return 0; 205 return 0;
210} 206}
211 207
208static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
209{
210 drm_dma_handle_t *phys = obj->phys_handle;
211
212 if (!phys)
213 return;
214
215 if (obj->madv == I915_MADV_WILLNEED) {
216 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
217 char *vaddr = phys->vaddr;
218 int i;
219
220 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
221 struct page *page = shmem_read_mapping_page(mapping, i);
222 if (!IS_ERR(page)) {
223 char *dst = kmap_atomic(page);
224 memcpy(dst, vaddr, PAGE_SIZE);
225 drm_clflush_virt_range(dst, PAGE_SIZE);
226 kunmap_atomic(dst);
227
228 set_page_dirty(page);
229 mark_page_accessed(page);
230 page_cache_release(page);
231 }
232 vaddr += PAGE_SIZE;
233 }
234 i915_gem_chipset_flush(obj->base.dev);
235 }
236
237#ifdef CONFIG_X86
238 set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
239#endif
240 drm_pci_free(obj->base.dev, phys);
241 obj->phys_handle = NULL;
242}
243
244int
245i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
246 int align)
247{
248 drm_dma_handle_t *phys;
249 struct address_space *mapping;
250 char *vaddr;
251 int i;
252
253 if (obj->phys_handle) {
254 if ((unsigned long)obj->phys_handle->vaddr & (align -1))
255 return -EBUSY;
256
257 return 0;
258 }
259
260 if (obj->madv != I915_MADV_WILLNEED)
261 return -EFAULT;
262
263 if (obj->base.filp == NULL)
264 return -EINVAL;
265
266 /* create a new object */
267 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
268 if (!phys)
269 return -ENOMEM;
270
271 vaddr = phys->vaddr;
272#ifdef CONFIG_X86
273 set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
274#endif
275 mapping = file_inode(obj->base.filp)->i_mapping;
276 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
277 struct page *page;
278 char *src;
279
280 page = shmem_read_mapping_page(mapping, i);
281 if (IS_ERR(page)) {
282#ifdef CONFIG_X86
283 set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
284#endif
285 drm_pci_free(obj->base.dev, phys);
286 return PTR_ERR(page);
287 }
288
289 src = kmap_atomic(page);
290 memcpy(vaddr, src, PAGE_SIZE);
291 kunmap_atomic(src);
292
293 mark_page_accessed(page);
294 page_cache_release(page);
295
296 vaddr += PAGE_SIZE;
297 }
298
299 obj->phys_handle = phys;
300 return 0;
301}
302
303static int
304i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
305 struct drm_i915_gem_pwrite *args,
306 struct drm_file *file_priv)
307{
308 struct drm_device *dev = obj->base.dev;
309 void *vaddr = obj->phys_handle->vaddr + args->offset;
310 char __user *user_data = to_user_ptr(args->data_ptr);
311
312 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
313 unsigned long unwritten;
314
315 /* The physical object once assigned is fixed for the lifetime
316 * of the obj, so we can safely drop the lock and continue
317 * to access vaddr.
318 */
319 mutex_unlock(&dev->struct_mutex);
320 unwritten = copy_from_user(vaddr, user_data, args->size);
321 mutex_lock(&dev->struct_mutex);
322 if (unwritten)
323 return -EFAULT;
324 }
325
326 i915_gem_chipset_flush(dev);
327 return 0;
328}
329
212void *i915_gem_object_alloc(struct drm_device *dev) 330void *i915_gem_object_alloc(struct drm_device *dev)
213{ 331{
214 struct drm_i915_private *dev_priv = dev->dev_private; 332 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
921 * pread/pwrite currently are reading and writing from the CPU 1039 * pread/pwrite currently are reading and writing from the CPU
922 * perspective, requiring manual detiling by the client. 1040 * perspective, requiring manual detiling by the client.
923 */ 1041 */
924 if (obj->phys_obj) { 1042 if (obj->phys_handle) {
925 ret = i915_gem_phys_pwrite(dev, obj, args, file); 1043 ret = i915_gem_phys_pwrite(obj, args, file);
926 goto out; 1044 goto out;
927 } 1045 }
928 1046
@@ -2790,7 +2908,7 @@ int i915_gpu_idle(struct drm_device *dev)
2790 2908
2791 /* Flush everything onto the inactive list. */ 2909 /* Flush everything onto the inactive list. */
2792 for_each_ring(ring, dev_priv, i) { 2910 for_each_ring(ring, dev_priv, i) {
2793 ret = i915_switch_context(ring, NULL, ring->default_context); 2911 ret = i915_switch_context(ring, ring->default_context);
2794 if (ret) 2912 if (ret)
2795 return ret; 2913 return ret;
2796 2914
@@ -3208,12 +3326,14 @@ static struct i915_vma *
3208i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, 3326i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3209 struct i915_address_space *vm, 3327 struct i915_address_space *vm,
3210 unsigned alignment, 3328 unsigned alignment,
3211 unsigned flags) 3329 uint64_t flags)
3212{ 3330{
3213 struct drm_device *dev = obj->base.dev; 3331 struct drm_device *dev = obj->base.dev;
3214 struct drm_i915_private *dev_priv = dev->dev_private; 3332 struct drm_i915_private *dev_priv = dev->dev_private;
3215 u32 size, fence_size, fence_alignment, unfenced_alignment; 3333 u32 size, fence_size, fence_alignment, unfenced_alignment;
3216 size_t gtt_max = 3334 unsigned long start =
3335 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3336 unsigned long end =
3217 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; 3337 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3218 struct i915_vma *vma; 3338 struct i915_vma *vma;
3219 int ret; 3339 int ret;
@@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3242 /* If the object is bigger than the entire aperture, reject it early 3362 /* If the object is bigger than the entire aperture, reject it early
3243 * before evicting everything in a vain attempt to find space. 3363 * before evicting everything in a vain attempt to find space.
3244 */ 3364 */
3245 if (obj->base.size > gtt_max) { 3365 if (obj->base.size > end) {
3246 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", 3366 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
3247 obj->base.size, 3367 obj->base.size,
3248 flags & PIN_MAPPABLE ? "mappable" : "total", 3368 flags & PIN_MAPPABLE ? "mappable" : "total",
3249 gtt_max); 3369 end);
3250 return ERR_PTR(-E2BIG); 3370 return ERR_PTR(-E2BIG);
3251 } 3371 }
3252 3372
@@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3263search_free: 3383search_free:
3264 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, 3384 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3265 size, alignment, 3385 size, alignment,
3266 obj->cache_level, 0, gtt_max, 3386 obj->cache_level,
3387 start, end,
3267 DRM_MM_SEARCH_DEFAULT, 3388 DRM_MM_SEARCH_DEFAULT,
3268 DRM_MM_CREATE_DEFAULT); 3389 DRM_MM_CREATE_DEFAULT);
3269 if (ret) { 3390 if (ret) {
3270 ret = i915_gem_evict_something(dev, vm, size, alignment, 3391 ret = i915_gem_evict_something(dev, vm, size, alignment,
3271 obj->cache_level, flags); 3392 obj->cache_level,
3393 start, end,
3394 flags);
3272 if (ret == 0) 3395 if (ret == 0)
3273 goto search_free; 3396 goto search_free;
3274 3397
@@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3828 return ret; 3951 return ret;
3829} 3952}
3830 3953
3954static bool
3955i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
3956{
3957 struct drm_i915_gem_object *obj = vma->obj;
3958
3959 if (alignment &&
3960 vma->node.start & (alignment - 1))
3961 return true;
3962
3963 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
3964 return true;
3965
3966 if (flags & PIN_OFFSET_BIAS &&
3967 vma->node.start < (flags & PIN_OFFSET_MASK))
3968 return true;
3969
3970 return false;
3971}
3972
3831int 3973int
3832i915_gem_object_pin(struct drm_i915_gem_object *obj, 3974i915_gem_object_pin(struct drm_i915_gem_object *obj,
3833 struct i915_address_space *vm, 3975 struct i915_address_space *vm,
3834 uint32_t alignment, 3976 uint32_t alignment,
3835 unsigned flags) 3977 uint64_t flags)
3836{ 3978{
3837 struct i915_vma *vma; 3979 struct i915_vma *vma;
3838 int ret; 3980 int ret;
@@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3845 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) 3987 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3846 return -EBUSY; 3988 return -EBUSY;
3847 3989
3848 if ((alignment && 3990 if (i915_vma_misplaced(vma, alignment, flags)) {
3849 vma->node.start & (alignment - 1)) ||
3850 (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
3851 WARN(vma->pin_count, 3991 WARN(vma->pin_count,
3852 "bo is already pinned with incorrect alignment:" 3992 "bo is already pinned with incorrect alignment:"
3853 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," 3993 " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
3854 " obj->map_and_fenceable=%d\n", 3994 " obj->map_and_fenceable=%d\n",
3855 i915_gem_obj_offset(obj, vm), alignment, 3995 i915_gem_obj_offset(obj, vm), alignment,
3856 flags & PIN_MAPPABLE, 3996 !!(flags & PIN_MAPPABLE),
3857 obj->map_and_fenceable); 3997 obj->map_and_fenceable);
3858 ret = i915_vma_unbind(vma); 3998 ret = i915_vma_unbind(vma);
3859 if (ret) 3999 if (ret)
@@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4163 4303
4164 trace_i915_gem_object_destroy(obj); 4304 trace_i915_gem_object_destroy(obj);
4165 4305
4166 if (obj->phys_obj)
4167 i915_gem_detach_phys_object(dev, obj);
4168
4169 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { 4306 list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4170 int ret; 4307 int ret;
4171 4308
@@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4183 } 4320 }
4184 } 4321 }
4185 4322
4323 i915_gem_object_detach_phys(obj);
4324
4186 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up 4325 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4187 * before progressing. */ 4326 * before progressing. */
4188 if (obj->stolen) 4327 if (obj->stolen)
@@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
4646 register_shrinker(&dev_priv->mm.inactive_shrinker); 4785 register_shrinker(&dev_priv->mm.inactive_shrinker);
4647} 4786}
4648 4787
4649/*
4650 * Create a physically contiguous memory object for this object
4651 * e.g. for cursor + overlay regs
4652 */
4653static int i915_gem_init_phys_object(struct drm_device *dev,
4654 int id, int size, int align)
4655{
4656 struct drm_i915_private *dev_priv = dev->dev_private;
4657 struct drm_i915_gem_phys_object *phys_obj;
4658 int ret;
4659
4660 if (dev_priv->mm.phys_objs[id - 1] || !size)
4661 return 0;
4662
4663 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4664 if (!phys_obj)
4665 return -ENOMEM;
4666
4667 phys_obj->id = id;
4668
4669 phys_obj->handle = drm_pci_alloc(dev, size, align);
4670 if (!phys_obj->handle) {
4671 ret = -ENOMEM;
4672 goto kfree_obj;
4673 }
4674#ifdef CONFIG_X86
4675 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4676#endif
4677
4678 dev_priv->mm.phys_objs[id - 1] = phys_obj;
4679
4680 return 0;
4681kfree_obj:
4682 kfree(phys_obj);
4683 return ret;
4684}
4685
4686static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4687{
4688 struct drm_i915_private *dev_priv = dev->dev_private;
4689 struct drm_i915_gem_phys_object *phys_obj;
4690
4691 if (!dev_priv->mm.phys_objs[id - 1])
4692 return;
4693
4694 phys_obj = dev_priv->mm.phys_objs[id - 1];
4695 if (phys_obj->cur_obj) {
4696 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4697 }
4698
4699#ifdef CONFIG_X86
4700 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4701#endif
4702 drm_pci_free(dev, phys_obj->handle);
4703 kfree(phys_obj);
4704 dev_priv->mm.phys_objs[id - 1] = NULL;
4705}
4706
4707void i915_gem_free_all_phys_object(struct drm_device *dev)
4708{
4709 int i;
4710
4711 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4712 i915_gem_free_phys_object(dev, i);
4713}
4714
4715void i915_gem_detach_phys_object(struct drm_device *dev,
4716 struct drm_i915_gem_object *obj)
4717{
4718 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4719 char *vaddr;
4720 int i;
4721 int page_count;
4722
4723 if (!obj->phys_obj)
4724 return;
4725 vaddr = obj->phys_obj->handle->vaddr;
4726
4727 page_count = obj->base.size / PAGE_SIZE;
4728 for (i = 0; i < page_count; i++) {
4729 struct page *page = shmem_read_mapping_page(mapping, i);
4730 if (!IS_ERR(page)) {
4731 char *dst = kmap_atomic(page);
4732 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4733 kunmap_atomic(dst);
4734
4735 drm_clflush_pages(&page, 1);
4736
4737 set_page_dirty(page);
4738 mark_page_accessed(page);
4739 page_cache_release(page);
4740 }
4741 }
4742 i915_gem_chipset_flush(dev);
4743
4744 obj->phys_obj->cur_obj = NULL;
4745 obj->phys_obj = NULL;
4746}
4747
4748int
4749i915_gem_attach_phys_object(struct drm_device *dev,
4750 struct drm_i915_gem_object *obj,
4751 int id,
4752 int align)
4753{
4754 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4755 struct drm_i915_private *dev_priv = dev->dev_private;
4756 int ret = 0;
4757 int page_count;
4758 int i;
4759
4760 if (id > I915_MAX_PHYS_OBJECT)
4761 return -EINVAL;
4762
4763 if (obj->phys_obj) {
4764 if (obj->phys_obj->id == id)
4765 return 0;
4766 i915_gem_detach_phys_object(dev, obj);
4767 }
4768
4769 /* create a new object */
4770 if (!dev_priv->mm.phys_objs[id - 1]) {
4771 ret = i915_gem_init_phys_object(dev, id,
4772 obj->base.size, align);
4773 if (ret) {
4774 DRM_ERROR("failed to init phys object %d size: %zu\n",
4775 id, obj->base.size);
4776 return ret;
4777 }
4778 }
4779
4780 /* bind to the object */
4781 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4782 obj->phys_obj->cur_obj = obj;
4783
4784 page_count = obj->base.size / PAGE_SIZE;
4785
4786 for (i = 0; i < page_count; i++) {
4787 struct page *page;
4788 char *dst, *src;
4789
4790 page = shmem_read_mapping_page(mapping, i);
4791 if (IS_ERR(page))
4792 return PTR_ERR(page);
4793
4794 src = kmap_atomic(page);
4795 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4796 memcpy(dst, src, PAGE_SIZE);
4797 kunmap_atomic(src);
4798
4799 mark_page_accessed(page);
4800 page_cache_release(page);
4801 }
4802
4803 return 0;
4804}
4805
4806static int
4807i915_gem_phys_pwrite(struct drm_device *dev,
4808 struct drm_i915_gem_object *obj,
4809 struct drm_i915_gem_pwrite *args,
4810 struct drm_file *file_priv)
4811{
4812 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4813 char __user *user_data = to_user_ptr(args->data_ptr);
4814
4815 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4816 unsigned long unwritten;
4817
4818 /* The physical object once assigned is fixed for the lifetime
4819 * of the obj, so we can safely drop the lock and continue
4820 * to access vaddr.
4821 */
4822 mutex_unlock(&dev->struct_mutex);
4823 unwritten = copy_from_user(vaddr, user_data, args->size);
4824 mutex_lock(&dev->struct_mutex);
4825 if (unwritten)
4826 return -EFAULT;
4827 }
4828
4829 i915_gem_chipset_flush(dev);
4830 return 0;
4831}
4832
4833void i915_gem_release(struct drm_device *dev, struct drm_file *file) 4788void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4834{ 4789{
4835 struct drm_i915_file_private *file_priv = file->driver_priv; 4790 struct drm_i915_file_private *file_priv = file->driver_priv;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 6043062ffce7..d72db15afa02 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -96,9 +96,6 @@
96#define GEN6_CONTEXT_ALIGN (64<<10) 96#define GEN6_CONTEXT_ALIGN (64<<10)
97#define GEN7_CONTEXT_ALIGN 4096 97#define GEN7_CONTEXT_ALIGN 4096
98 98
99static int do_switch(struct intel_ring_buffer *ring,
100 struct i915_hw_context *to);
101
102static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) 99static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
103{ 100{
104 struct drm_device *dev = ppgtt->base.dev; 101 struct drm_device *dev = ppgtt->base.dev;
@@ -185,13 +182,15 @@ void i915_gem_context_free(struct kref *ctx_ref)
185 typeof(*ctx), ref); 182 typeof(*ctx), ref);
186 struct i915_hw_ppgtt *ppgtt = NULL; 183 struct i915_hw_ppgtt *ppgtt = NULL;
187 184
188 /* We refcount even the aliasing PPGTT to keep the code symmetric */ 185 if (ctx->obj) {
189 if (USES_PPGTT(ctx->obj->base.dev)) 186 /* We refcount even the aliasing PPGTT to keep the code symmetric */
190 ppgtt = ctx_to_ppgtt(ctx); 187 if (USES_PPGTT(ctx->obj->base.dev))
188 ppgtt = ctx_to_ppgtt(ctx);
191 189
192 /* XXX: Free up the object before tearing down the address space, in 190 /* XXX: Free up the object before tearing down the address space, in
193 * case we're bound in the PPGTT */ 191 * case we're bound in the PPGTT */
194 drm_gem_object_unreference(&ctx->obj->base); 192 drm_gem_object_unreference(&ctx->obj->base);
193 }
195 194
196 if (ppgtt) 195 if (ppgtt)
197 kref_put(&ppgtt->ref, ppgtt_release); 196 kref_put(&ppgtt->ref, ppgtt_release);
@@ -232,32 +231,32 @@ __create_hw_context(struct drm_device *dev,
232 return ERR_PTR(-ENOMEM); 231 return ERR_PTR(-ENOMEM);
233 232
234 kref_init(&ctx->ref); 233 kref_init(&ctx->ref);
235 ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size); 234 list_add_tail(&ctx->link, &dev_priv->context_list);
236 INIT_LIST_HEAD(&ctx->link);
237 if (ctx->obj == NULL) {
238 kfree(ctx);
239 DRM_DEBUG_DRIVER("Context object allocated failed\n");
240 return ERR_PTR(-ENOMEM);
241 }
242 235
243 if (INTEL_INFO(dev)->gen >= 7) { 236 if (dev_priv->hw_context_size) {
244 ret = i915_gem_object_set_cache_level(ctx->obj, 237 ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
245 I915_CACHE_L3_LLC); 238 if (ctx->obj == NULL) {
246 /* Failure shouldn't ever happen this early */ 239 ret = -ENOMEM;
247 if (WARN_ON(ret))
248 goto err_out; 240 goto err_out;
249 } 241 }
250 242
251 list_add_tail(&ctx->link, &dev_priv->context_list); 243 if (INTEL_INFO(dev)->gen >= 7) {
244 ret = i915_gem_object_set_cache_level(ctx->obj,
245 I915_CACHE_L3_LLC);
246 /* Failure shouldn't ever happen this early */
247 if (WARN_ON(ret))
248 goto err_out;
249 }
250 }
252 251
253 /* Default context will never have a file_priv */ 252 /* Default context will never have a file_priv */
254 if (file_priv == NULL) 253 if (file_priv != NULL) {
255 return ctx; 254 ret = idr_alloc(&file_priv->context_idr, ctx,
256 255 DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
257 ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0, 256 if (ret < 0)
258 GFP_KERNEL); 257 goto err_out;
259 if (ret < 0) 258 } else
260 goto err_out; 259 ret = DEFAULT_CONTEXT_ID;
261 260
262 ctx->file_priv = file_priv; 261 ctx->file_priv = file_priv;
263 ctx->id = ret; 262 ctx->id = ret;
@@ -294,7 +293,7 @@ i915_gem_create_context(struct drm_device *dev,
294 if (IS_ERR(ctx)) 293 if (IS_ERR(ctx))
295 return ctx; 294 return ctx;
296 295
297 if (is_global_default_ctx) { 296 if (is_global_default_ctx && ctx->obj) {
298 /* We may need to do things with the shrinker which 297 /* We may need to do things with the shrinker which
299 * require us to immediately switch back to the default 298 * require us to immediately switch back to the default
300 * context. This can cause a problem as pinning the 299 * context. This can cause a problem as pinning the
@@ -342,7 +341,7 @@ i915_gem_create_context(struct drm_device *dev,
342 return ctx; 341 return ctx;
343 342
344err_unpin: 343err_unpin:
345 if (is_global_default_ctx) 344 if (is_global_default_ctx && ctx->obj)
346 i915_gem_object_ggtt_unpin(ctx->obj); 345 i915_gem_object_ggtt_unpin(ctx->obj);
347err_destroy: 346err_destroy:
348 i915_gem_context_unreference(ctx); 347 i915_gem_context_unreference(ctx);
@@ -352,32 +351,22 @@ err_destroy:
352void i915_gem_context_reset(struct drm_device *dev) 351void i915_gem_context_reset(struct drm_device *dev)
353{ 352{
354 struct drm_i915_private *dev_priv = dev->dev_private; 353 struct drm_i915_private *dev_priv = dev->dev_private;
355 struct intel_ring_buffer *ring;
356 int i; 354 int i;
357 355
358 if (!HAS_HW_CONTEXTS(dev))
359 return;
360
361 /* Prevent the hardware from restoring the last context (which hung) on 356 /* Prevent the hardware from restoring the last context (which hung) on
362 * the next switch */ 357 * the next switch */
363 for (i = 0; i < I915_NUM_RINGS; i++) { 358 for (i = 0; i < I915_NUM_RINGS; i++) {
364 struct i915_hw_context *dctx; 359 struct intel_ring_buffer *ring = &dev_priv->ring[i];
365 if (!(INTEL_INFO(dev)->ring_mask & (1<<i))) 360 struct i915_hw_context *dctx = ring->default_context;
366 continue;
367 361
368 /* Do a fake switch to the default context */ 362 /* Do a fake switch to the default context */
369 ring = &dev_priv->ring[i]; 363 if (ring->last_context == dctx)
370 dctx = ring->default_context;
371 if (WARN_ON(!dctx))
372 continue; 364 continue;
373 365
374 if (!ring->last_context) 366 if (!ring->last_context)
375 continue; 367 continue;
376 368
377 if (ring->last_context == dctx) 369 if (dctx->obj && i == RCS) {
378 continue;
379
380 if (i == RCS) {
381 WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj, 370 WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
382 get_context_alignment(dev), 0)); 371 get_context_alignment(dev), 0));
383 /* Fake a finish/inactive */ 372 /* Fake a finish/inactive */
@@ -394,44 +383,35 @@ void i915_gem_context_reset(struct drm_device *dev)
394int i915_gem_context_init(struct drm_device *dev) 383int i915_gem_context_init(struct drm_device *dev)
395{ 384{
396 struct drm_i915_private *dev_priv = dev->dev_private; 385 struct drm_i915_private *dev_priv = dev->dev_private;
397 struct intel_ring_buffer *ring; 386 struct i915_hw_context *ctx;
398 int i; 387 int i;
399 388
400 if (!HAS_HW_CONTEXTS(dev))
401 return 0;
402
403 /* Init should only be called once per module load. Eventually the 389 /* Init should only be called once per module load. Eventually the
404 * restriction on the context_disabled check can be loosened. */ 390 * restriction on the context_disabled check can be loosened. */
405 if (WARN_ON(dev_priv->ring[RCS].default_context)) 391 if (WARN_ON(dev_priv->ring[RCS].default_context))
406 return 0; 392 return 0;
407 393
408 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); 394 if (HAS_HW_CONTEXTS(dev)) {
409 395 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
410 if (dev_priv->hw_context_size > (1<<20)) { 396 if (dev_priv->hw_context_size > (1<<20)) {
411 DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size\n"); 397 DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
412 return -E2BIG; 398 dev_priv->hw_context_size);
399 dev_priv->hw_context_size = 0;
400 }
413 } 401 }
414 402
415 dev_priv->ring[RCS].default_context = 403 ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
416 i915_gem_create_context(dev, NULL, USES_PPGTT(dev)); 404 if (IS_ERR(ctx)) {
417 405 DRM_ERROR("Failed to create default global context (error %ld)\n",
418 if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) { 406 PTR_ERR(ctx));
419 DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n", 407 return PTR_ERR(ctx);
420 PTR_ERR(dev_priv->ring[RCS].default_context));
421 return PTR_ERR(dev_priv->ring[RCS].default_context);
422 } 408 }
423 409
424 for (i = RCS + 1; i < I915_NUM_RINGS; i++) { 410 /* NB: RCS will hold a ref for all rings */
425 if (!(INTEL_INFO(dev)->ring_mask & (1<<i))) 411 for (i = 0; i < I915_NUM_RINGS; i++)
426 continue; 412 dev_priv->ring[i].default_context = ctx;
427
428 ring = &dev_priv->ring[i];
429 413
430 /* NB: RCS will hold a ref for all rings */ 414 DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
431 ring->default_context = dev_priv->ring[RCS].default_context;
432 }
433
434 DRM_DEBUG_DRIVER("HW context support initialized\n");
435 return 0; 415 return 0;
436} 416}
437 417
@@ -441,33 +421,30 @@ void i915_gem_context_fini(struct drm_device *dev)
441 struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context; 421 struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
442 int i; 422 int i;
443 423
444 if (!HAS_HW_CONTEXTS(dev)) 424 if (dctx->obj) {
445 return; 425 /* The only known way to stop the gpu from accessing the hw context is
446 426 * to reset it. Do this as the very last operation to avoid confusing
447 /* The only known way to stop the gpu from accessing the hw context is 427 * other code, leading to spurious errors. */
448 * to reset it. Do this as the very last operation to avoid confusing 428 intel_gpu_reset(dev);
449 * other code, leading to spurious errors. */ 429
450 intel_gpu_reset(dev); 430 /* When default context is created and switched to, base object refcount
451 431 * will be 2 (+1 from object creation and +1 from do_switch()).
452 /* When default context is created and switched to, base object refcount 432 * i915_gem_context_fini() will be called after gpu_idle() has switched
453 * will be 2 (+1 from object creation and +1 from do_switch()). 433 * to default context. So we need to unreference the base object once
454 * i915_gem_context_fini() will be called after gpu_idle() has switched 434 * to offset the do_switch part, so that i915_gem_context_unreference()
455 * to default context. So we need to unreference the base object once 435 * can then free the base object correctly. */
456 * to offset the do_switch part, so that i915_gem_context_unreference() 436 WARN_ON(!dev_priv->ring[RCS].last_context);
457 * can then free the base object correctly. */ 437 if (dev_priv->ring[RCS].last_context == dctx) {
458 WARN_ON(!dev_priv->ring[RCS].last_context); 438 /* Fake switch to NULL context */
459 if (dev_priv->ring[RCS].last_context == dctx) { 439 WARN_ON(dctx->obj->active);
460 /* Fake switch to NULL context */ 440 i915_gem_object_ggtt_unpin(dctx->obj);
461 WARN_ON(dctx->obj->active); 441 i915_gem_context_unreference(dctx);
462 i915_gem_object_ggtt_unpin(dctx->obj); 442 dev_priv->ring[RCS].last_context = NULL;
463 i915_gem_context_unreference(dctx); 443 }
464 dev_priv->ring[RCS].last_context = NULL;
465 } 444 }
466 445
467 for (i = 0; i < I915_NUM_RINGS; i++) { 446 for (i = 0; i < I915_NUM_RINGS; i++) {
468 struct intel_ring_buffer *ring = &dev_priv->ring[i]; 447 struct intel_ring_buffer *ring = &dev_priv->ring[i];
469 if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
470 continue;
471 448
472 if (ring->last_context) 449 if (ring->last_context)
473 i915_gem_context_unreference(ring->last_context); 450 i915_gem_context_unreference(ring->last_context);
@@ -478,7 +455,6 @@ void i915_gem_context_fini(struct drm_device *dev)
478 455
479 i915_gem_object_ggtt_unpin(dctx->obj); 456 i915_gem_object_ggtt_unpin(dctx->obj);
480 i915_gem_context_unreference(dctx); 457 i915_gem_context_unreference(dctx);
481 dev_priv->mm.aliasing_ppgtt = NULL;
482} 458}
483 459
484int i915_gem_context_enable(struct drm_i915_private *dev_priv) 460int i915_gem_context_enable(struct drm_i915_private *dev_priv)
@@ -486,9 +462,6 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
486 struct intel_ring_buffer *ring; 462 struct intel_ring_buffer *ring;
487 int ret, i; 463 int ret, i;
488 464
489 if (!HAS_HW_CONTEXTS(dev_priv->dev))
490 return 0;
491
492 /* This is the only place the aliasing PPGTT gets enabled, which means 465 /* This is the only place the aliasing PPGTT gets enabled, which means
493 * it has to happen before we bail on reset */ 466 * it has to happen before we bail on reset */
494 if (dev_priv->mm.aliasing_ppgtt) { 467 if (dev_priv->mm.aliasing_ppgtt) {
@@ -503,7 +476,7 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
503 BUG_ON(!dev_priv->ring[RCS].default_context); 476 BUG_ON(!dev_priv->ring[RCS].default_context);
504 477
505 for_each_ring(ring, dev_priv, i) { 478 for_each_ring(ring, dev_priv, i) {
506 ret = do_switch(ring, ring->default_context); 479 ret = i915_switch_context(ring, ring->default_context);
507 if (ret) 480 if (ret)
508 return ret; 481 return ret;
509 } 482 }
@@ -526,19 +499,6 @@ static int context_idr_cleanup(int id, void *p, void *data)
526int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) 499int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
527{ 500{
528 struct drm_i915_file_private *file_priv = file->driver_priv; 501 struct drm_i915_file_private *file_priv = file->driver_priv;
529 struct drm_i915_private *dev_priv = dev->dev_private;
530
531 if (!HAS_HW_CONTEXTS(dev)) {
532 /* Cheat for hang stats */
533 file_priv->private_default_ctx =
534 kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
535
536 if (file_priv->private_default_ctx == NULL)
537 return -ENOMEM;
538
539 file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
540 return 0;
541 }
542 502
543 idr_init(&file_priv->context_idr); 503 idr_init(&file_priv->context_idr);
544 504
@@ -559,14 +519,10 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
559{ 519{
560 struct drm_i915_file_private *file_priv = file->driver_priv; 520 struct drm_i915_file_private *file_priv = file->driver_priv;
561 521
562 if (!HAS_HW_CONTEXTS(dev)) {
563 kfree(file_priv->private_default_ctx);
564 return;
565 }
566
567 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); 522 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
568 i915_gem_context_unreference(file_priv->private_default_ctx);
569 idr_destroy(&file_priv->context_idr); 523 idr_destroy(&file_priv->context_idr);
524
525 i915_gem_context_unreference(file_priv->private_default_ctx);
570} 526}
571 527
572struct i915_hw_context * 528struct i915_hw_context *
@@ -574,9 +530,6 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
574{ 530{
575 struct i915_hw_context *ctx; 531 struct i915_hw_context *ctx;
576 532
577 if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
578 return file_priv->private_default_ctx;
579
580 ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id); 533 ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
581 if (!ctx) 534 if (!ctx)
582 return ERR_PTR(-ENOENT); 535 return ERR_PTR(-ENOENT);
@@ -758,7 +711,6 @@ unpin_out:
758/** 711/**
759 * i915_switch_context() - perform a GPU context switch. 712 * i915_switch_context() - perform a GPU context switch.
760 * @ring: ring for which we'll execute the context switch 713 * @ring: ring for which we'll execute the context switch
761 * @file_priv: file_priv associated with the context, may be NULL
762 * @to: the context to switch to 714 * @to: the context to switch to
763 * 715 *
764 * The context life cycle is simple. The context refcount is incremented and 716 * The context life cycle is simple. The context refcount is incremented and
@@ -767,24 +719,30 @@ unpin_out:
767 * object while letting the normal object tracking destroy the backing BO. 719 * object while letting the normal object tracking destroy the backing BO.
768 */ 720 */
769int i915_switch_context(struct intel_ring_buffer *ring, 721int i915_switch_context(struct intel_ring_buffer *ring,
770 struct drm_file *file,
771 struct i915_hw_context *to) 722 struct i915_hw_context *to)
772{ 723{
773 struct drm_i915_private *dev_priv = ring->dev->dev_private; 724 struct drm_i915_private *dev_priv = ring->dev->dev_private;
774 725
775 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 726 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
776 727
777 BUG_ON(file && to == NULL); 728 if (to->obj == NULL) { /* We have the fake context */
778 729 if (to != ring->last_context) {
779 /* We have the fake context */ 730 i915_gem_context_reference(to);
780 if (!HAS_HW_CONTEXTS(ring->dev)) { 731 if (ring->last_context)
781 ring->last_context = to; 732 i915_gem_context_unreference(ring->last_context);
733 ring->last_context = to;
734 }
782 return 0; 735 return 0;
783 } 736 }
784 737
785 return do_switch(ring, to); 738 return do_switch(ring, to);
786} 739}
787 740
741static bool hw_context_enabled(struct drm_device *dev)
742{
743 return to_i915(dev)->hw_context_size;
744}
745
788int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 746int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
789 struct drm_file *file) 747 struct drm_file *file)
790{ 748{
@@ -793,7 +751,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
793 struct i915_hw_context *ctx; 751 struct i915_hw_context *ctx;
794 int ret; 752 int ret;
795 753
796 if (!HAS_HW_CONTEXTS(dev)) 754 if (!hw_context_enabled(dev))
797 return -ENODEV; 755 return -ENODEV;
798 756
799 ret = i915_mutex_lock_interruptible(dev); 757 ret = i915_mutex_lock_interruptible(dev);
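An aside, not part of the patch: the context rework above gives every file descriptor a real, refcounted context object even when the hardware has no context support; ctx->obj == NULL then marks a "fake" context used purely for bookkeeping, and the HAS_HW_CONTEXTS() special cases disappear from the callers. A generic sketch of that shape, with illustrative names that are not taken from the driver:

#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct sw_context {
        struct kref ref;
        void *hw_state;         /* NULL => "fake" context, bookkeeping only */
};

/* hw_size == 0 means the hardware offers no context support. */
static struct sw_context *sw_context_create(size_t hw_size)
{
        struct sw_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return ERR_PTR(-ENOMEM);
        kref_init(&ctx->ref);

        if (hw_size) {
                ctx->hw_state = kzalloc(hw_size, GFP_KERNEL);
                if (!ctx->hw_state) {
                        kfree(ctx);
                        return ERR_PTR(-ENOMEM);
                }
        }
        /* Callers reference/unreference unconditionally from here on. */
        return ctx;
}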
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 75fca63dc8c1..bbf4b12d842e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
68int 68int
69i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, 69i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
70 int min_size, unsigned alignment, unsigned cache_level, 70 int min_size, unsigned alignment, unsigned cache_level,
71 unsigned long start, unsigned long end,
71 unsigned flags) 72 unsigned flags)
72{ 73{
73 struct drm_i915_private *dev_priv = dev->dev_private;
74 struct list_head eviction_list, unwind_list; 74 struct list_head eviction_list, unwind_list;
75 struct i915_vma *vma; 75 struct i915_vma *vma;
76 int ret = 0; 76 int ret = 0;
@@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
102 */ 102 */
103 103
104 INIT_LIST_HEAD(&unwind_list); 104 INIT_LIST_HEAD(&unwind_list);
105 if (flags & PIN_MAPPABLE) { 105 if (start != 0 || end != vm->total) {
106 BUG_ON(!i915_is_ggtt(vm));
107 drm_mm_init_scan_with_range(&vm->mm, min_size, 106 drm_mm_init_scan_with_range(&vm->mm, min_size,
108 alignment, cache_level, 0, 107 alignment, cache_level,
109 dev_priv->gtt.mappable_end); 108 start, end);
110 } else 109 } else
111 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); 110 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
112 111
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7447160155a3..20fef6c50267 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,9 @@
35 35
36#define __EXEC_OBJECT_HAS_PIN (1<<31) 36#define __EXEC_OBJECT_HAS_PIN (1<<31)
37#define __EXEC_OBJECT_HAS_FENCE (1<<30) 37#define __EXEC_OBJECT_HAS_FENCE (1<<30)
38#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
39
40#define BATCH_OFFSET_BIAS (256*1024)
38 41
39struct eb_vmas { 42struct eb_vmas {
40 struct list_head vmas; 43 struct list_head vmas;
@@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
545 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; 548 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
546 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; 549 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
547 bool need_fence; 550 bool need_fence;
548 unsigned flags; 551 uint64_t flags;
549 int ret; 552 int ret;
550 553
551 flags = 0; 554 flags = 0;
@@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
559 562
560 if (entry->flags & EXEC_OBJECT_NEEDS_GTT) 563 if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
561 flags |= PIN_GLOBAL; 564 flags |= PIN_GLOBAL;
565 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
566 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
562 567
563 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); 568 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
564 if (ret) 569 if (ret)
@@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
592 return 0; 597 return 0;
593} 598}
594 599
600static bool
601eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
602{
603 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
604 struct drm_i915_gem_object *obj = vma->obj;
605 bool need_fence, need_mappable;
606
607 need_fence =
608 has_fenced_gpu_access &&
609 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
610 obj->tiling_mode != I915_TILING_NONE;
611 need_mappable = need_fence || need_reloc_mappable(vma);
612
613 WARN_ON((need_mappable || need_fence) &&
614 !i915_is_ggtt(vma->vm));
615
616 if (entry->alignment &&
617 vma->node.start & (entry->alignment - 1))
618 return true;
619
620 if (need_mappable && !obj->map_and_fenceable)
621 return true;
622
623 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
624 vma->node.start < BATCH_OFFSET_BIAS)
625 return true;
626
627 return false;
628}
629
595static int 630static int
596i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 631i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
597 struct list_head *vmas, 632 struct list_head *vmas,
@@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
653 688
654 /* Unbind any ill-fitting objects or pin. */ 689 /* Unbind any ill-fitting objects or pin. */
655 list_for_each_entry(vma, vmas, exec_list) { 690 list_for_each_entry(vma, vmas, exec_list) {
656 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
657 bool need_fence, need_mappable;
658
659 obj = vma->obj;
660
661 if (!drm_mm_node_allocated(&vma->node)) 691 if (!drm_mm_node_allocated(&vma->node))
662 continue; 692 continue;
663 693
664 need_fence = 694 if (eb_vma_misplaced(vma, has_fenced_gpu_access))
665 has_fenced_gpu_access &&
666 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
667 obj->tiling_mode != I915_TILING_NONE;
668 need_mappable = need_fence || need_reloc_mappable(vma);
669
670 WARN_ON((need_mappable || need_fence) &&
671 !i915_is_ggtt(vma->vm));
672
673 if ((entry->alignment &&
674 vma->node.start & (entry->alignment - 1)) ||
675 (need_mappable && !obj->map_and_fenceable))
676 ret = i915_vma_unbind(vma); 695 ret = i915_vma_unbind(vma);
677 else 696 else
678 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); 697 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
773 * relocations were valid. 792 * relocations were valid.
774 */ 793 */
775 for (j = 0; j < exec[i].relocation_count; j++) { 794 for (j = 0; j < exec[i].relocation_count; j++) {
776 if (copy_to_user(&user_relocs[j].presumed_offset, 795 if (__copy_to_user(&user_relocs[j].presumed_offset,
777 &invalid_offset, 796 &invalid_offset,
778 sizeof(invalid_offset))) { 797 sizeof(invalid_offset))) {
779 ret = -EFAULT; 798 ret = -EFAULT;
780 mutex_lock(&dev->struct_mutex); 799 mutex_lock(&dev->struct_mutex);
781 goto err; 800 goto err;
@@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
999 return 0; 1018 return 0;
1000} 1019}
1001 1020
1021static struct drm_i915_gem_object *
1022eb_get_batch(struct eb_vmas *eb)
1023{
1024 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
1025
1026 /*
1027 * SNA is doing fancy tricks with compressing batch buffers, which leads
1028 * to negative relocation deltas. Usually that works out ok since the
1029 * relocate address is still positive, except when the batch is placed
1030 * very low in the GTT. Ensure this doesn't happen.
1031 *
1032 * Note that actual hangs have only been observed on gen7, but for
1033 * paranoia do it everywhere.
1034 */
1035 vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
1036
1037 return vma->obj;
1038}
1039
1002static int 1040static int
1003i915_gem_do_execbuffer(struct drm_device *dev, void *data, 1041i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1004 struct drm_file *file, 1042 struct drm_file *file,
@@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1153 goto err; 1191 goto err;
1154 1192
1155 /* take note of the batch buffer before we might reorder the lists */ 1193 /* take note of the batch buffer before we might reorder the lists */
1156 batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj; 1194 batch_obj = eb_get_batch(eb);
1157 1195
1158 /* Move the objects en-masse into the GTT, evicting if necessary. */ 1196 /* Move the objects en-masse into the GTT, evicting if necessary. */
1159 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 1197 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1221,7 +1259,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1221 if (ret) 1259 if (ret)
1222 goto err; 1260 goto err;
1223 1261
1224 ret = i915_switch_context(ring, file, ctx); 1262 ret = i915_switch_context(ring, ctx);
1225 if (ret) 1263 if (ret)
1226 goto err; 1264 goto err;
1227 1265
@@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1355 1393
1356 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); 1394 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1357 if (!ret) { 1395 if (!ret) {
1396 struct drm_i915_gem_exec_object __user *user_exec_list =
1397 to_user_ptr(args->buffers_ptr);
1398
1358 /* Copy the new buffer offsets back to the user's exec list. */ 1399 /* Copy the new buffer offsets back to the user's exec list. */
1359 for (i = 0; i < args->buffer_count; i++) 1400 for (i = 0; i < args->buffer_count; i++) {
1360 exec_list[i].offset = exec2_list[i].offset; 1401 ret = __copy_to_user(&user_exec_list[i].offset,
1361 /* ... and back out to userspace */ 1402 &exec2_list[i].offset,
1362 ret = copy_to_user(to_user_ptr(args->buffers_ptr), 1403 sizeof(user_exec_list[i].offset));
1363 exec_list, 1404 if (ret) {
1364 sizeof(*exec_list) * args->buffer_count); 1405 ret = -EFAULT;
1365 if (ret) { 1406 DRM_DEBUG("failed to copy %d exec entries "
1366 ret = -EFAULT; 1407 "back to user (%d)\n",
1367 DRM_DEBUG("failed to copy %d exec entries " 1408 args->buffer_count, ret);
1368 "back to user (%d)\n", 1409 break;
1369 args->buffer_count, ret); 1410 }
1370 } 1411 }
1371 } 1412 }
1372 1413
@@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1412 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); 1453 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1413 if (!ret) { 1454 if (!ret) {
1414 /* Copy the new buffer offsets back to the user's exec list. */ 1455 /* Copy the new buffer offsets back to the user's exec list. */
1415 ret = copy_to_user(to_user_ptr(args->buffers_ptr), 1456 struct drm_i915_gem_exec_object2 *user_exec_list =
1416 exec2_list, 1457 to_user_ptr(args->buffers_ptr);
1417 sizeof(*exec2_list) * args->buffer_count); 1458 int i;
1418 if (ret) { 1459
1419 ret = -EFAULT; 1460 for (i = 0; i < args->buffer_count; i++) {
1420 DRM_DEBUG("failed to copy %d exec entries " 1461 ret = __copy_to_user(&user_exec_list[i].offset,
1421 "back to user (%d)\n", 1462 &exec2_list[i].offset,
1422 args->buffer_count, ret); 1463 sizeof(user_exec_list[i].offset));
1464 if (ret) {
1465 ret = -EFAULT;
1466 DRM_DEBUG("failed to copy %d exec entries "
1467 "back to user\n",
1468 args->buffer_count);
1469 break;
1470 }
1423 } 1471 }
1424 } 1472 }
1425 1473
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ab5e93c30aa2..5deb22864c52 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,25 +34,35 @@ static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);
34 34
35bool intel_enable_ppgtt(struct drm_device *dev, bool full) 35bool intel_enable_ppgtt(struct drm_device *dev, bool full)
36{ 36{
37 if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) 37 if (i915.enable_ppgtt == 0)
38 return false; 38 return false;
39 39
40 if (i915.enable_ppgtt == 1 && full) 40 if (i915.enable_ppgtt == 1 && full)
41 return false; 41 return false;
42 42
43 return true;
44}
45
46static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
47{
48 if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
49 return 0;
50
51 if (enable_ppgtt == 1)
52 return 1;
53
54 if (enable_ppgtt == 2 && HAS_PPGTT(dev))
55 return 2;
56
43#ifdef CONFIG_INTEL_IOMMU 57#ifdef CONFIG_INTEL_IOMMU
44 /* Disable ppgtt on SNB if VT-d is on. */ 58 /* Disable ppgtt on SNB if VT-d is on. */
45 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { 59 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
46 DRM_INFO("Disabling PPGTT because VT-d is on\n"); 60 DRM_INFO("Disabling PPGTT because VT-d is on\n");
47 return false; 61 return 0;
48 } 62 }
49#endif 63#endif
50 64
51 /* Full ppgtt disabled by default for now due to issues. */ 65 return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
52 if (full)
53 return false; /* HAS_PPGTT(dev) */
54 else
55 return HAS_ALIASING_PPGTT(dev);
56} 66}
57 67
58#define GEN6_PPGTT_PD_ENTRIES 512 68#define GEN6_PPGTT_PD_ENTRIES 512
@@ -1079,7 +1089,9 @@ alloc:
1079 if (ret == -ENOSPC && !retried) { 1089 if (ret == -ENOSPC && !retried) {
1080 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, 1090 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
1081 GEN6_PD_SIZE, GEN6_PD_ALIGN, 1091 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1082 I915_CACHE_NONE, 0); 1092 I915_CACHE_NONE,
1093 0, dev_priv->gtt.base.total,
1094 0);
1083 if (ret) 1095 if (ret)
1084 return ret; 1096 return ret;
1085 1097
@@ -2031,6 +2043,14 @@ int i915_gem_gtt_init(struct drm_device *dev)
2031 gtt->base.total >> 20); 2043 gtt->base.total >> 20);
2032 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); 2044 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
2033 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); 2045 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
2046 /*
2047 * i915.enable_ppgtt is read-only, so do an early pass to validate the
2048 * user's requested state against the hardware/driver capabilities. We
2049 * do this now so that we can print out any log messages once rather
2050 * than every time we check intel_enable_ppgtt().
2051 */
2052 i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
2053 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
2034 2054
2035 return 0; 2055 return 0;
2036} 2056}
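An aside, not part of the patch: the gtt hunk above adds a one-time sanitize pass for the read-only i915.enable_ppgtt module parameter, clamping the requested value against hardware capability once at init and writing it back, so later checks stay trivial and the log message fires only once. The same pattern in a standalone sketch, where "foo" is a stand-in feature rather than a real i915 parameter:

#include <stdbool.h>
#include <stdio.h>

struct hw_caps { bool has_foo; bool has_full_foo; };

static int enable_foo = -1;     /* module parameter: -1 auto, 0 off, 1 basic, 2 full */

static int sanitize_enable_foo(int requested, const struct hw_caps *caps)
{
        if (requested == 0 || !caps->has_foo)
                return 0;                       /* disabled or unsupported */
        if (requested == 2 && caps->has_full_foo)
                return 2;                       /* full mode explicitly asked for */
        return 1;                               /* everything else falls back to basic */
}

int main(void)
{
        struct hw_caps caps = { .has_foo = true, .has_full_foo = false };

        /* Done once at init; every later check just reads enable_foo. */
        enable_foo = sanitize_enable_foo(enable_foo, &caps);
        printf("foo mode: %d\n", enable_foo);   /* prints 1 */
        return 0;
}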
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7753249b3a95..f98ba4e6e70b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1362 spin_lock(&dev_priv->irq_lock); 1362 spin_lock(&dev_priv->irq_lock);
1363 for (i = 1; i < HPD_NUM_PINS; i++) { 1363 for (i = 1; i < HPD_NUM_PINS; i++) {
1364 1364
1365 WARN_ONCE(hpd[i] & hotplug_trigger && 1365 if (hpd[i] & hotplug_trigger &&
1366 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED, 1366 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1367 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", 1367 /*
1368 hotplug_trigger, i, hpd[i]); 1368 * On GMCH platforms the interrupt mask bits only
1369 * prevent irq generation, not the setting of the
1370 * hotplug bits itself. So only WARN about unexpected
1371 * interrupts on saner platforms.
1372 */
1373 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1374 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1375 hotplug_trigger, i, hpd[i]);
1376
1377 continue;
1378 }
1369 1379
1370 if (!(hpd[i] & hotplug_trigger) || 1380 if (!(hpd[i] & hotplug_trigger) ||
1371 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1381 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9f5b18d9d885..c77af69c2d8f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -827,6 +827,7 @@ enum punit_power_well {
827# define MI_FLUSH_ENABLE (1 << 12) 827# define MI_FLUSH_ENABLE (1 << 12)
828# define ASYNC_FLIP_PERF_DISABLE (1 << 14) 828# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
829# define MODE_IDLE (1 << 9) 829# define MODE_IDLE (1 << 9)
830# define STOP_RING (1 << 8)
830 831
831#define GEN6_GT_MODE 0x20d0 832#define GEN6_GT_MODE 0x20d0
832#define GEN7_GT_MODE 0x7008 833#define GEN7_GT_MODE 0x7008
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4867f4cc0938..aff4a113cda3 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -287,6 +287,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
287 const struct bdb_lfp_backlight_data *backlight_data; 287 const struct bdb_lfp_backlight_data *backlight_data;
288 const struct bdb_lfp_backlight_data_entry *entry; 288 const struct bdb_lfp_backlight_data_entry *entry;
289 289
290 /* Err to enabling backlight if no backlight block. */
291 dev_priv->vbt.backlight.present = true;
292
290 backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT); 293 backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
291 if (!backlight_data) 294 if (!backlight_data)
292 return; 295 return;
@@ -299,6 +302,13 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
299 302
300 entry = &backlight_data->data[panel_type]; 303 entry = &backlight_data->data[panel_type];
301 304
305 dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
306 if (!dev_priv->vbt.backlight.present) {
307 DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
308 entry->type);
309 return;
310 }
311
302 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; 312 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
303 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; 313 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
304 DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, " 314 DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
@@ -550,47 +560,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
550 560
551 dev_priv->vbt.edp_pps = *edp_pps; 561 dev_priv->vbt.edp_pps = *edp_pps;
552 562
553 dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 : 563 switch (edp_link_params->rate) {
554 DP_LINK_BW_1_62; 564 case EDP_RATE_1_62:
565 dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
566 break;
567 case EDP_RATE_2_7:
568 dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
569 break;
570 default:
571 DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
572 edp_link_params->rate);
573 break;
574 }
575
555 switch (edp_link_params->lanes) { 576 switch (edp_link_params->lanes) {
556 case 0: 577 case EDP_LANE_1:
557 dev_priv->vbt.edp_lanes = 1; 578 dev_priv->vbt.edp_lanes = 1;
558 break; 579 break;
559 case 1: 580 case EDP_LANE_2:
560 dev_priv->vbt.edp_lanes = 2; 581 dev_priv->vbt.edp_lanes = 2;
561 break; 582 break;
562 case 3: 583 case EDP_LANE_4:
563 default:
564 dev_priv->vbt.edp_lanes = 4; 584 dev_priv->vbt.edp_lanes = 4;
565 break; 585 break;
586 default:
587 DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
588 edp_link_params->lanes);
589 break;
566 } 590 }
591
567 switch (edp_link_params->preemphasis) { 592 switch (edp_link_params->preemphasis) {
568 case 0: 593 case EDP_PREEMPHASIS_NONE:
569 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0; 594 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
570 break; 595 break;
571 case 1: 596 case EDP_PREEMPHASIS_3_5dB:
572 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; 597 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
573 break; 598 break;
574 case 2: 599 case EDP_PREEMPHASIS_6dB:
575 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6; 600 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
576 break; 601 break;
577 case 3: 602 case EDP_PREEMPHASIS_9_5dB:
578 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; 603 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
579 break; 604 break;
605 default:
606 DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
607 edp_link_params->preemphasis);
608 break;
580 } 609 }
610
581 switch (edp_link_params->vswing) { 611 switch (edp_link_params->vswing) {
582 case 0: 612 case EDP_VSWING_0_4V:
583 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400; 613 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
584 break; 614 break;
585 case 1: 615 case EDP_VSWING_0_6V:
586 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600; 616 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
587 break; 617 break;
588 case 2: 618 case EDP_VSWING_0_8V:
589 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800; 619 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
590 break; 620 break;
591 case 3: 621 case EDP_VSWING_1_2V:
592 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200; 622 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
593 break; 623 break;
624 default:
625 DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
626 edp_link_params->vswing);
627 break;
594 } 628 }
595} 629}
596 630
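
The eDP parsing above replaces implicit defaults with explicit switch cases plus a logged default branch, so an out-of-range VBT field shows up in the logs instead of being silently coerced. A small sketch of that defensive-parsing shape follows; the RAW_* constants and the "0 means keep the caller's default" convention are invented for the example.

/* Illustration only, not part of the patch: explicit switch cases plus a
 * logged default for VBT-style fields. Constants are invented here. */
#include <stdio.h>

#define RAW_LANES_1 0
#define RAW_LANES_2 1
#define RAW_LANES_4 3

static int parse_lane_count(unsigned int raw)
{
	switch (raw) {
	case RAW_LANES_1: return 1;
	case RAW_LANES_2: return 2;
	case RAW_LANES_4: return 4;
	default:
		fprintf(stderr, "unknown lane count value %u, keeping default\n", raw);
		return 0;
	}
}

int main(void)
{
	printf("%d\n", parse_lane_count(RAW_LANES_4));	/* 4 */
	printf("%d\n", parse_lane_count(2));		/* unknown: warns, returns 0 */
	return 0;
}
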
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 83b7629e4367..f27f7b282465 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -374,6 +374,9 @@ struct bdb_lvds_lfp_data {
374 struct bdb_lvds_lfp_data_entry data[16]; 374 struct bdb_lvds_lfp_data_entry data[16];
375} __packed; 375} __packed;
376 376
377#define BDB_BACKLIGHT_TYPE_NONE 0
378#define BDB_BACKLIGHT_TYPE_PWM 2
379
377struct bdb_lfp_backlight_data_entry { 380struct bdb_lfp_backlight_data_entry {
378 u8 type:2; 381 u8 type:2;
379 u8 active_low_pwm:1; 382 u8 active_low_pwm:1;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dae976f51d83..5b60e25baa32 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7825 addr = i915_gem_obj_ggtt_offset(obj); 7825 addr = i915_gem_obj_ggtt_offset(obj);
7826 } else { 7826 } else {
7827 int align = IS_I830(dev) ? 16 * 1024 : 256; 7827 int align = IS_I830(dev) ? 16 * 1024 : 256;
7828 ret = i915_gem_attach_phys_object(dev, obj, 7828 ret = i915_gem_object_attach_phys(obj, align);
7829 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
7830 align);
7831 if (ret) { 7829 if (ret) {
7832 DRM_DEBUG_KMS("failed to attach phys object\n"); 7830 DRM_DEBUG_KMS("failed to attach phys object\n");
7833 goto fail_locked; 7831 goto fail_locked;
7834 } 7832 }
7835 addr = obj->phys_obj->handle->busaddr; 7833 addr = obj->phys_handle->busaddr;
7836 } 7834 }
7837 7835
7838 if (IS_GEN2(dev)) 7836 if (IS_GEN2(dev))
@@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7840 7838
7841 finish: 7839 finish:
7842 if (intel_crtc->cursor_bo) { 7840 if (intel_crtc->cursor_bo) {
7843 if (INTEL_INFO(dev)->cursor_needs_physical) { 7841 if (!INTEL_INFO(dev)->cursor_needs_physical)
7844 if (intel_crtc->cursor_bo != obj)
7845 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
7846 } else
7847 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); 7842 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
7848 drm_gem_object_unreference(&intel_crtc->cursor_bo->base); 7843 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
7849 } 7844 }
@@ -9654,11 +9649,22 @@ intel_pipe_config_compare(struct drm_device *dev,
9654 PIPE_CONF_CHECK_I(pipe_src_w); 9649 PIPE_CONF_CHECK_I(pipe_src_w);
9655 PIPE_CONF_CHECK_I(pipe_src_h); 9650 PIPE_CONF_CHECK_I(pipe_src_h);
9656 9651
9657 PIPE_CONF_CHECK_I(gmch_pfit.control); 9652 /*
9658 /* pfit ratios are autocomputed by the hw on gen4+ */ 9653 * FIXME: BIOS likes to set up a cloned config with lvds+external
9659 if (INTEL_INFO(dev)->gen < 4) 9654 * screen. Since we don't yet re-compute the pipe config when moving
9660 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); 9655 * just the lvds port away to another pipe the sw tracking won't match.
9661 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); 9656 *
9657 * Proper atomic modesets with recomputed global state will fix this.
9658 * Until then just don't check gmch state for inherited modes.
9659 */
9660 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
9661 PIPE_CONF_CHECK_I(gmch_pfit.control);
9662 /* pfit ratios are autocomputed by the hw on gen4+ */
9663 if (INTEL_INFO(dev)->gen < 4)
9664 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
9665 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
9666 }
9667
9662 PIPE_CONF_CHECK_I(pch_pfit.enabled); 9668 PIPE_CONF_CHECK_I(pch_pfit.enabled);
9663 if (current_config->pch_pfit.enabled) { 9669 if (current_config->pch_pfit.enabled) {
9664 PIPE_CONF_CHECK_I(pch_pfit.pos); 9670 PIPE_CONF_CHECK_I(pch_pfit.pos);
@@ -11384,15 +11390,6 @@ void intel_modeset_init(struct drm_device *dev)
11384 } 11390 }
11385} 11391}
11386 11392
11387static void
11388intel_connector_break_all_links(struct intel_connector *connector)
11389{
11390 connector->base.dpms = DRM_MODE_DPMS_OFF;
11391 connector->base.encoder = NULL;
11392 connector->encoder->connectors_active = false;
11393 connector->encoder->base.crtc = NULL;
11394}
11395
11396static void intel_enable_pipe_a(struct drm_device *dev) 11393static void intel_enable_pipe_a(struct drm_device *dev)
11397{ 11394{
11398 struct intel_connector *connector; 11395 struct intel_connector *connector;
@@ -11474,8 +11471,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
11474 if (connector->encoder->base.crtc != &crtc->base) 11471 if (connector->encoder->base.crtc != &crtc->base)
11475 continue; 11472 continue;
11476 11473
11477 intel_connector_break_all_links(connector); 11474 connector->base.dpms = DRM_MODE_DPMS_OFF;
11475 connector->base.encoder = NULL;
11478 } 11476 }
11477 /* multiple connectors may have the same encoder:
11478 * handle them and break crtc link separately */
11479 list_for_each_entry(connector, &dev->mode_config.connector_list,
11480 base.head)
11481 if (connector->encoder->base.crtc == &crtc->base) {
11482 connector->encoder->base.crtc = NULL;
11483 connector->encoder->connectors_active = false;
11484 }
11479 11485
11480 WARN_ON(crtc->active); 11486 WARN_ON(crtc->active);
11481 crtc->base.enabled = false; 11487 crtc->base.enabled = false;
@@ -11557,6 +11563,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
11557 drm_get_encoder_name(&encoder->base)); 11563 drm_get_encoder_name(&encoder->base));
11558 encoder->disable(encoder); 11564 encoder->disable(encoder);
11559 } 11565 }
11566 encoder->base.crtc = NULL;
11567 encoder->connectors_active = false;
11560 11568
11561 /* Inconsistent output/port/pipe state happens presumably due to 11569 /* Inconsistent output/port/pipe state happens presumably due to
11562 * a bug in one of the get_hw_state functions. Or someplace else 11570 * a bug in one of the get_hw_state functions. Or someplace else
@@ -11567,8 +11575,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
11567 base.head) { 11575 base.head) {
11568 if (connector->encoder != encoder) 11576 if (connector->encoder != encoder)
11569 continue; 11577 continue;
11570 11578 connector->base.dpms = DRM_MODE_DPMS_OFF;
11571 intel_connector_break_all_links(connector); 11579 connector->base.encoder = NULL;
11572 } 11580 }
11573 } 11581 }
11574 /* Enabled encoders without active connectors will be fixed in 11582 /* Enabled encoders without active connectors will be fixed in
@@ -11616,6 +11624,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
11616 base.head) { 11624 base.head) {
11617 memset(&crtc->config, 0, sizeof(crtc->config)); 11625 memset(&crtc->config, 0, sizeof(crtc->config));
11618 11626
11627 crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
11628
11619 crtc->active = dev_priv->display.get_pipe_config(crtc, 11629 crtc->active = dev_priv->display.get_pipe_config(crtc,
11620 &crtc->config); 11630 &crtc->config);
11621 11631
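
The sanitize changes above split the old intel_connector_break_all_links() into two passes because several connectors can share one encoder: per-connector state is cleared first, and the shared encoder->crtc link is torn down in a second walk. A toy standalone sketch of the two-pass idea, with the structures reduced to the fields needed here:

/* Illustration only, not part of the patch: two-pass link teardown when
 * connectors can share an encoder. Structures are reduced stand-ins. */
#include <stdio.h>
#include <stddef.h>

struct crtc { int id; };
struct encoder { struct crtc *crtc; int connectors_active; };
struct connector { struct encoder *encoder; int dpms_on; };

int main(void)
{
	struct crtc crtc = { 0 };
	struct encoder enc = { &crtc, 1 };
	struct connector conns[2] = { { &enc, 1 }, { &enc, 1 } };
	size_t i;

	/* pass 1: clear per-connector state for connectors on this crtc */
	for (i = 0; i < 2; i++)
		if (conns[i].encoder && conns[i].encoder->crtc == &crtc)
			conns[i].dpms_on = 0;

	/* pass 2: tear down the shared encoder->crtc link after all of its
	 * connectors have been handled */
	for (i = 0; i < 2; i++)
		if (conns[i].encoder && conns[i].encoder->crtc == &crtc) {
			conns[i].encoder->connectors_active = 0;
			conns[i].encoder->crtc = NULL;
		}

	printf("encoder->crtc cleared: %s\n", enc.crtc ? "no" : "yes");
	return 0;
}
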
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a0dad1a2f819..2a00cb828d20 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -105,7 +105,8 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
105 case DP_LINK_BW_2_7: 105 case DP_LINK_BW_2_7:
106 break; 106 break;
107 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ 107 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
108 if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) && 108 if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
109 INTEL_INFO(dev)->gen >= 8) &&
109 intel_dp->dpcd[DP_DPCD_REV] >= 0x12) 110 intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
110 max_link_bw = DP_LINK_BW_5_4; 111 max_link_bw = DP_LINK_BW_5_4;
111 else 112 else
@@ -120,6 +121,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
120 return max_link_bw; 121 return max_link_bw;
121} 122}
122 123
124static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
125{
126 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
127 struct drm_device *dev = intel_dig_port->base.base.dev;
128 u8 source_max, sink_max;
129
130 source_max = 4;
131 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
132 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
133 source_max = 2;
134
135 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
136
137 return min(source_max, sink_max);
138}
139
123/* 140/*
124 * The units on the numbers in the next two are... bizarre. Examples will 141 * The units on the numbers in the next two are... bizarre. Examples will
125 * make it clearer; this one parallels an example in the eDP spec. 142 * make it clearer; this one parallels an example in the eDP spec.
@@ -170,7 +187,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
170 } 187 }
171 188
172 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); 189 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
173 max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); 190 max_lanes = intel_dp_max_lane_count(intel_dp);
174 191
175 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 192 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
176 mode_rate = intel_dp_link_required(target_clock, 18); 193 mode_rate = intel_dp_link_required(target_clock, 18);
@@ -575,7 +592,8 @@ out:
575 return ret; 592 return ret;
576} 593}
577 594
578#define HEADER_SIZE 4 595#define BARE_ADDRESS_SIZE 3
596#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
579static ssize_t 597static ssize_t
580intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) 598intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
581{ 599{
@@ -592,7 +610,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
592 switch (msg->request & ~DP_AUX_I2C_MOT) { 610 switch (msg->request & ~DP_AUX_I2C_MOT) {
593 case DP_AUX_NATIVE_WRITE: 611 case DP_AUX_NATIVE_WRITE:
594 case DP_AUX_I2C_WRITE: 612 case DP_AUX_I2C_WRITE:
595 txsize = HEADER_SIZE + msg->size; 613 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
596 rxsize = 1; 614 rxsize = 1;
597 615
598 if (WARN_ON(txsize > 20)) 616 if (WARN_ON(txsize > 20))
@@ -611,7 +629,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
611 629
612 case DP_AUX_NATIVE_READ: 630 case DP_AUX_NATIVE_READ:
613 case DP_AUX_I2C_READ: 631 case DP_AUX_I2C_READ:
614 txsize = HEADER_SIZE; 632 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
615 rxsize = msg->size + 1; 633 rxsize = msg->size + 1;
616 634
617 if (WARN_ON(rxsize > 20)) 635 if (WARN_ON(rxsize > 20))
@@ -749,8 +767,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
749 struct intel_crtc *intel_crtc = encoder->new_crtc; 767 struct intel_crtc *intel_crtc = encoder->new_crtc;
750 struct intel_connector *intel_connector = intel_dp->attached_connector; 768 struct intel_connector *intel_connector = intel_dp->attached_connector;
751 int lane_count, clock; 769 int lane_count, clock;
752 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 770 int min_lane_count = 1;
771 int max_lane_count = intel_dp_max_lane_count(intel_dp);
753 /* Conveniently, the link BW constants become indices with a shift...*/ 772 /* Conveniently, the link BW constants become indices with a shift...*/
773 int min_clock = 0;
754 int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; 774 int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
755 int bpp, mode_rate; 775 int bpp, mode_rate;
756 static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; 776 static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
@@ -783,19 +803,38 @@ intel_dp_compute_config(struct intel_encoder *encoder,
783 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 803 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
784 * bpc in between. */ 804 * bpc in between. */
785 bpp = pipe_config->pipe_bpp; 805 bpp = pipe_config->pipe_bpp;
786 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && 806 if (is_edp(intel_dp)) {
787 dev_priv->vbt.edp_bpp < bpp) { 807 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
788 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", 808 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
789 dev_priv->vbt.edp_bpp); 809 dev_priv->vbt.edp_bpp);
790 bpp = dev_priv->vbt.edp_bpp; 810 bpp = dev_priv->vbt.edp_bpp;
811 }
812
813 if (IS_BROADWELL(dev)) {
814 /* Yes, it's an ugly hack. */
815 min_lane_count = max_lane_count;
816 DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
817 min_lane_count);
818 } else if (dev_priv->vbt.edp_lanes) {
819 min_lane_count = min(dev_priv->vbt.edp_lanes,
820 max_lane_count);
821 DRM_DEBUG_KMS("using min %u lanes per VBT\n",
822 min_lane_count);
823 }
824
825 if (dev_priv->vbt.edp_rate) {
826 min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
827 DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
828 bws[min_clock]);
829 }
791 } 830 }
792 831
793 for (; bpp >= 6*3; bpp -= 2*3) { 832 for (; bpp >= 6*3; bpp -= 2*3) {
794 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 833 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
795 bpp); 834 bpp);
796 835
797 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 836 for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
798 for (clock = 0; clock <= max_clock; clock++) { 837 for (clock = min_clock; clock <= max_clock; clock++) {
799 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); 838 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
800 link_avail = intel_dp_max_data_rate(link_clock, 839 link_avail = intel_dp_max_data_rate(link_clock,
801 lane_count); 840 lane_count);
@@ -3618,7 +3657,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3618{ 3657{
3619 struct drm_connector *connector = &intel_connector->base; 3658 struct drm_connector *connector = &intel_connector->base;
3620 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3659 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3621 struct drm_device *dev = intel_dig_port->base.base.dev; 3660 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3661 struct drm_device *dev = intel_encoder->base.dev;
3622 struct drm_i915_private *dev_priv = dev->dev_private; 3662 struct drm_i915_private *dev_priv = dev->dev_private;
3623 struct drm_display_mode *fixed_mode = NULL; 3663 struct drm_display_mode *fixed_mode = NULL;
3624 bool has_dpcd; 3664 bool has_dpcd;
@@ -3628,6 +3668,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3628 if (!is_edp(intel_dp)) 3668 if (!is_edp(intel_dp))
3629 return true; 3669 return true;
3630 3670
3671 /* The VDD bit needs a power domain reference, so if the bit is already
3672 * enabled when we boot, grab this reference. */
3673 if (edp_have_panel_vdd(intel_dp)) {
3674 enum intel_display_power_domain power_domain;
3675 power_domain = intel_display_port_power_domain(intel_encoder);
3676 intel_display_power_get(dev_priv, power_domain);
3677 }
3678
3631 /* Cache DPCD and EDID for edp. */ 3679 /* Cache DPCD and EDID for edp. */
3632 intel_edp_panel_vdd_on(intel_dp); 3680 intel_edp_panel_vdd_on(intel_dp);
3633 has_dpcd = intel_dp_get_dpcd(intel_dp); 3681 has_dpcd = intel_dp_get_dpcd(intel_dp);
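
With intel_dp_max_lane_count() returning min(source, sink) lanes and the VBT able to raise min_lane_count and min_clock, the compute_config loop now searches a narrowed window: bpp descending, then lane count and link clock ascending between the min and max bounds, first fit wins. A rough standalone sketch of that search using simplified bandwidth helpers; the exact unit handling of the real intel_dp_link_required()/intel_dp_max_data_rate() is not reproduced.

/* Illustration only, not part of the patch: bpp descending, lanes and link
 * clock ascending within the [min, max] windows, first fit wins. */
#include <stdio.h>

static const int link_rate_khz[] = { 162000, 270000, 540000 };

static int required_bw(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 7) / 8;
}

static int available_bw(int rate_khz, int lanes)
{
	return (rate_khz * lanes * 8) / 10;	/* 8b/10b coding overhead */
}

int main(void)
{
	int pixel_clock = 148500;		/* 1080p-ish mode, kHz */
	int min_lanes = 1, max_lanes = 4;	/* e.g. narrowed by VBT/port */
	int min_clock = 0, max_clock = 2;
	int bpp, lanes, clock;

	for (bpp = 30; bpp >= 18; bpp -= 6)
		for (lanes = min_lanes; lanes <= max_lanes; lanes <<= 1)
			for (clock = min_clock; clock <= max_clock; clock++)
				if (available_bw(link_rate_khz[clock], lanes) >=
				    required_bw(pixel_clock, bpp)) {
					printf("picked bpp %d, %d lanes, %d kHz\n",
					       bpp, lanes, link_rate_khz[clock]);
					return 0;
				}

	printf("mode does not fit\n");
	return 1;
}
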
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0542de982260..328b1a70264b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -236,7 +236,8 @@ struct intel_crtc_config {
236 * tracked with quirk flags so that fastboot and state checker can act 236 * tracked with quirk flags so that fastboot and state checker can act
237 * accordingly. 237 * accordingly.
238 */ 238 */
239#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ 239#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
240#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
240 unsigned long quirks; 241 unsigned long quirks;
241 242
242 /* User requested mode, only valid as a starting point to 243 /* User requested mode, only valid as a starting point to
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index b4d44e62f0c7..f73ba5e6b7a8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper,
132 132
133 mutex_lock(&dev->struct_mutex); 133 mutex_lock(&dev->struct_mutex);
134 134
135 if (intel_fb &&
136 (sizes->fb_width > intel_fb->base.width ||
137 sizes->fb_height > intel_fb->base.height)) {
138 DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
139 " releasing it\n",
140 intel_fb->base.width, intel_fb->base.height,
141 sizes->fb_width, sizes->fb_height);
142 drm_framebuffer_unreference(&intel_fb->base);
143 intel_fb = ifbdev->fb = NULL;
144 }
135 if (!intel_fb || WARN_ON(!intel_fb->obj)) { 145 if (!intel_fb || WARN_ON(!intel_fb->obj)) {
136 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); 146 DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
137 ret = intelfb_alloc(helper, sizes); 147 ret = intelfb_alloc(helper, sizes);
@@ -377,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
377 height); 387 height);
378 } 388 }
379 389
390 /* No preferred mode marked by the EDID? Are there any modes? */
391 if (!modes[i] && !list_empty(&connector->modes)) {
392 DRM_DEBUG_KMS("using first mode listed on connector %s\n",
393 drm_get_connector_name(connector));
394 modes[i] = list_first_entry(&connector->modes,
395 struct drm_display_mode,
396 head);
397 }
398
380 /* last resort: use current mode */ 399 /* last resort: use current mode */
381 if (!modes[i]) { 400 if (!modes[i]) {
382 /* 401 /*
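
The fbdev hunk above adds a middle step to initial mode selection: if the EDID marks no preferred mode but the connector does list modes, take the first listed one before falling back to the current mode. A tiny sketch of that fallback order; struct mode and the sample list are made up, and the last-resort "current mode" step is left out.

/* Illustration only, not part of the patch: preferred mode first, else the
 * first listed mode. */
#include <stdio.h>
#include <stddef.h>

struct mode { const char *name; int preferred; };

static const struct mode *pick_mode(const struct mode *modes, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (modes[i].preferred)
			return &modes[i];

	return n ? &modes[0] : NULL;	/* no preferred mode: take the first one */
}

int main(void)
{
	const struct mode list[] = { { "1024x768", 0 }, { "1920x1080", 0 } };
	const struct mode *m = pick_mode(list, 2);

	printf("picked %s\n", m ? m->name : "none");
	return 0;
}
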
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index b0413e190625..157267aa3561 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
821 } 821 }
822} 822}
823 823
824static int hdmi_portclock_limit(struct intel_hdmi *hdmi) 824static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
825{ 825{
826 struct drm_device *dev = intel_hdmi_to_dev(hdmi); 826 struct drm_device *dev = intel_hdmi_to_dev(hdmi);
827 827
828 if (!hdmi->has_hdmi_sink || IS_G4X(dev)) 828 if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
829 return 165000; 829 return 165000;
830 else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) 830 else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
831 return 300000; 831 return 300000;
@@ -837,7 +837,8 @@ static enum drm_mode_status
837intel_hdmi_mode_valid(struct drm_connector *connector, 837intel_hdmi_mode_valid(struct drm_connector *connector,
838 struct drm_display_mode *mode) 838 struct drm_display_mode *mode)
839{ 839{
840 if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector))) 840 if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
841 true))
841 return MODE_CLOCK_HIGH; 842 return MODE_CLOCK_HIGH;
842 if (mode->clock < 20000) 843 if (mode->clock < 20000)
843 return MODE_CLOCK_LOW; 844 return MODE_CLOCK_LOW;
@@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
879 struct drm_device *dev = encoder->base.dev; 880 struct drm_device *dev = encoder->base.dev;
880 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 881 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
881 int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2; 882 int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
882 int portclock_limit = hdmi_portclock_limit(intel_hdmi); 883 int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
883 int desired_bpp; 884 int desired_bpp;
884 885
885 if (intel_hdmi->color_range_auto) { 886 if (intel_hdmi->color_range_auto) {
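
hdmi_portclock_limit() gains a respect_dvi_limit flag so that intel_hdmi_mode_valid() keeps the 165 MHz single-link DVI cap while intel_hdmi_compute_config() passes false and skips it. A minimal sketch of the helper's shape; the 225000 fallback and the single fast_platform flag are placeholders for branches not visible in this hunk.

/* Illustration only, not part of the patch: a clock-limit helper with a
 * flag controlling whether the DVI cap applies. */
#include <stdio.h>
#include <stdbool.h>

static int portclock_limit_khz(bool hdmi_sink, bool fast_platform,
			       bool respect_dvi_limit)
{
	if (respect_dvi_limit && !hdmi_sink)
		return 165000;			/* single-link TMDS limit */
	return fast_platform ? 300000 : 225000;	/* placeholder values */
}

int main(void)
{
	/* mode_valid style call: be strict about DVI sinks */
	printf("%d\n", portclock_limit_khz(false, true, true));	/* 165000 */
	/* compute_config style call: ignore the DVI cap */
	printf("%d\n", portclock_limit_khz(false, true, false));	/* 300000 */
	return 0;
}
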
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d8adc9104dca..129db0c7d835 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
193 struct overlay_registers __iomem *regs; 193 struct overlay_registers __iomem *regs;
194 194
195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; 196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
197 else 197 else
198 regs = io_mapping_map_wc(dev_priv->gtt.mappable, 198 regs = io_mapping_map_wc(dev_priv->gtt.mappable,
199 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 199 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
1340 overlay->reg_bo = reg_bo; 1340 overlay->reg_bo = reg_bo;
1341 1341
1342 if (OVERLAY_NEEDS_PHYSICAL(dev)) { 1342 if (OVERLAY_NEEDS_PHYSICAL(dev)) {
1343 ret = i915_gem_attach_phys_object(dev, reg_bo, 1343 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
1344 I915_GEM_PHYS_OVERLAY_REGS,
1345 PAGE_SIZE);
1346 if (ret) { 1344 if (ret) {
1347 DRM_ERROR("failed to attach phys overlay regs\n"); 1345 DRM_ERROR("failed to attach phys overlay regs\n");
1348 goto out_free_bo; 1346 goto out_free_bo;
1349 } 1347 }
1350 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; 1348 overlay->flip_addr = reg_bo->phys_handle->busaddr;
1351 } else { 1349 } else {
1352 ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); 1350 ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
1353 if (ret) { 1351 if (ret) {
@@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1428 /* Cast to make sparse happy, but it's wc memory anyway, so 1426 /* Cast to make sparse happy, but it's wc memory anyway, so
1429 * equivalent to the wc io mapping on X86. */ 1427 * equivalent to the wc io mapping on X86. */
1430 regs = (struct overlay_registers __iomem *) 1428 regs = (struct overlay_registers __iomem *)
1431 overlay->reg_bo->phys_obj->handle->vaddr; 1429 overlay->reg_bo->phys_handle->vaddr;
1432 else 1430 else
1433 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 1431 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1434 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 1432 i915_gem_obj_ggtt_offset(overlay->reg_bo));
@@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1462 error->dovsta = I915_READ(DOVSTA); 1460 error->dovsta = I915_READ(DOVSTA);
1463 error->isr = I915_READ(ISR); 1461 error->isr = I915_READ(ISR);
1464 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1462 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
1465 error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; 1463 error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
1466 else 1464 else
1467 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); 1465 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
1468 1466
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index cb058408c70e..cb8cfb7e0974 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -492,6 +492,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
492 enum pipe pipe = intel_get_pipe_from_connector(connector); 492 enum pipe pipe = intel_get_pipe_from_connector(connector);
493 u32 freq; 493 u32 freq;
494 unsigned long flags; 494 unsigned long flags;
495 u64 n;
495 496
496 if (!panel->backlight.present || pipe == INVALID_PIPE) 497 if (!panel->backlight.present || pipe == INVALID_PIPE)
497 return; 498 return;
@@ -502,10 +503,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
502 503
503 /* scale to hardware max, but be careful to not overflow */ 504 /* scale to hardware max, but be careful to not overflow */
504 freq = panel->backlight.max; 505 freq = panel->backlight.max;
505 if (freq < max) 506 n = (u64)level * freq;
506 level = level * freq / max; 507 do_div(n, max);
507 else 508 level = n;
508 level = freq / max * level;
509 509
510 panel->backlight.level = level; 510 panel->backlight.level = level;
511 if (panel->backlight.device) 511 if (panel->backlight.device)
@@ -1065,6 +1065,11 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
1065 unsigned long flags; 1065 unsigned long flags;
1066 int ret; 1066 int ret;
1067 1067
1068 if (!dev_priv->vbt.backlight.present) {
1069 DRM_DEBUG_KMS("native backlight control not available per VBT\n");
1070 return 0;
1071 }
1072
1068 /* set level and max in panel struct */ 1073 /* set level and max in panel struct */
1069 spin_lock_irqsave(&dev_priv->backlight_lock, flags); 1074 spin_lock_irqsave(&dev_priv->backlight_lock, flags);
1070 ret = dev_priv->display.setup_backlight(intel_connector); 1075 ret = dev_priv->display.setup_backlight(intel_connector);
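
intel_panel_set_backlight() switches from branching on freq < max to a single 64-bit multiply followed by do_div(). A 32-bit multiply of level * freq can overflow, and the old divide-first branch threw away the remainder; widening to 64 bits keeps both the range and the precision. A standalone demonstration with made-up level/max/freq values:

/* Illustration only, not part of the patch: scale level from [0, max] to
 * [0, freq] without overflowing 32 bits or losing the remainder. */
#include <stdio.h>
#include <stdint.h>

static uint32_t scale(uint32_t level, uint32_t max, uint32_t freq)
{
	return (uint32_t)(((uint64_t)level * freq) / max);	/* kernel: do_div() */
}

int main(void)
{
	uint32_t level = 200000, max = 255000, freq = 937500;	/* made-up values */

	printf("scaled       = %u\n", scale(level, max, freq));	/* 735294 */

	/* the old divide-first branch: 937500 / 255000 == 3, so 600000 */
	printf("divide-first = %u\n", (freq / max) * level);
	return 0;
}
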
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 5874716774a7..d93dcf683e8c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1545,6 +1545,16 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1545 1545
1546 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 1546 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1547 1547
1548 if (IS_I915GM(dev) && enabled) {
1549 struct intel_framebuffer *fb;
1550
1551 fb = to_intel_framebuffer(enabled->primary->fb);
1552
1553 /* self-refresh seems busted with untiled */
1554 if (fb->obj->tiling_mode == I915_TILING_NONE)
1555 enabled = NULL;
1556 }
1557
1548 /* 1558 /*
1549 * Overlay gets an aggressive default since video jitter is bad. 1559 * Overlay gets an aggressive default since video jitter is bad.
1550 */ 1560 */
@@ -2085,6 +2095,43 @@ static void intel_print_wm_latency(struct drm_device *dev,
2085 } 2095 }
2086} 2096}
2087 2097
2098static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2099 uint16_t wm[5], uint16_t min)
2100{
2101 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2102
2103 if (wm[0] >= min)
2104 return false;
2105
2106 wm[0] = max(wm[0], min);
2107 for (level = 1; level <= max_level; level++)
2108 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2109
2110 return true;
2111}
2112
2113static void snb_wm_latency_quirk(struct drm_device *dev)
2114{
2115 struct drm_i915_private *dev_priv = dev->dev_private;
2116 bool changed;
2117
2118 /*
2119 * The BIOS provided WM memory latency values are often
2120 * inadequate for high resolution displays. Adjust them.
2121 */
2122 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2123 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2124 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2125
2126 if (!changed)
2127 return;
2128
2129 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2130 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2131 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2132 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2133}
2134
2088static void ilk_setup_wm_latency(struct drm_device *dev) 2135static void ilk_setup_wm_latency(struct drm_device *dev)
2089{ 2136{
2090 struct drm_i915_private *dev_priv = dev->dev_private; 2137 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2102,6 +2149,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
2102 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); 2149 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2103 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); 2150 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2104 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2151 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2152
2153 if (IS_GEN6(dev))
2154 snb_wm_latency_quirk(dev);
2105} 2155}
2106 2156
2107static void ilk_compute_wm_parameters(struct drm_crtc *crtc, 2157static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
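
snb_wm_latency_quirk() enforces a floor on the level-0 memory latency and bumps the higher levels to at least DIV_ROUND_UP(min, 5), then reports only when something actually changed. A standalone sketch of ilk_increase_wm_latency()'s arithmetic with sample numbers; it simply mirrors what the hunk does, without restating why the higher levels divide by 5.

/* Illustration only, not part of the patch: enforce a floor on wm[0] and
 * raise the higher levels to at least DIV_ROUND_UP(min, 5). */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static bool increase_wm_latency(uint16_t wm[5], uint16_t min)
{
	int level;

	if (wm[0] >= min)
		return false;

	wm[0] = min;
	for (level = 1; level < 5; level++)
		if (wm[level] < DIV_ROUND_UP(min, 5))
			wm[level] = DIV_ROUND_UP(min, 5);

	return true;
}

int main(void)
{
	uint16_t wm[5] = { 7, 1, 2, 3, 4 };

	if (increase_wm_latency(wm, 12))
		printf("raised to: %d %d %d %d %d\n",
		       wm[0], wm[1], wm[2], wm[3], wm[4]);	/* 12 3 3 3 4 */
	return 0;
}
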
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6bc68bdcf433..79fb4cc2137c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
437 I915_WRITE(HWS_PGA, addr); 437 I915_WRITE(HWS_PGA, addr);
438} 438}
439 439
440static int init_ring_common(struct intel_ring_buffer *ring) 440static bool stop_ring(struct intel_ring_buffer *ring)
441{ 441{
442 struct drm_device *dev = ring->dev; 442 struct drm_i915_private *dev_priv = to_i915(ring->dev);
443 struct drm_i915_private *dev_priv = dev->dev_private;
444 struct drm_i915_gem_object *obj = ring->obj;
445 int ret = 0;
446 u32 head;
447 443
448 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 444 if (!IS_GEN2(ring->dev)) {
445 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
446 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
447 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
448 return false;
449 }
450 }
449 451
450 /* Stop the ring if it's running. */
451 I915_WRITE_CTL(ring, 0); 452 I915_WRITE_CTL(ring, 0);
452 I915_WRITE_HEAD(ring, 0); 453 I915_WRITE_HEAD(ring, 0);
453 ring->write_tail(ring, 0); 454 ring->write_tail(ring, 0);
454 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
455 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
456 455
457 if (I915_NEED_GFX_HWS(dev)) 456 if (!IS_GEN2(ring->dev)) {
458 intel_ring_setup_status_page(ring); 457 (void)I915_READ_CTL(ring);
459 else 458 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
460 ring_setup_phys_status_page(ring); 459 }
461 460
462 head = I915_READ_HEAD(ring) & HEAD_ADDR; 461 return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
462}
463 463
464 /* G45 ring initialization fails to reset head to zero */ 464static int init_ring_common(struct intel_ring_buffer *ring)
465 if (head != 0) { 465{
466 struct drm_device *dev = ring->dev;
467 struct drm_i915_private *dev_priv = dev->dev_private;
468 struct drm_i915_gem_object *obj = ring->obj;
469 int ret = 0;
470
471 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
472
473 if (!stop_ring(ring)) {
474 /* G45 ring initialization often fails to reset head to zero */
466 DRM_DEBUG_KMS("%s head not reset to zero " 475 DRM_DEBUG_KMS("%s head not reset to zero "
467 "ctl %08x head %08x tail %08x start %08x\n", 476 "ctl %08x head %08x tail %08x start %08x\n",
468 ring->name, 477 ring->name,
@@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
471 I915_READ_TAIL(ring), 480 I915_READ_TAIL(ring),
472 I915_READ_START(ring)); 481 I915_READ_START(ring));
473 482
474 I915_WRITE_HEAD(ring, 0); 483 if (!stop_ring(ring)) {
475
476 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
477 DRM_ERROR("failed to set %s head to zero " 484 DRM_ERROR("failed to set %s head to zero "
478 "ctl %08x head %08x tail %08x start %08x\n", 485 "ctl %08x head %08x tail %08x start %08x\n",
479 ring->name, 486 ring->name,
@@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring)
481 I915_READ_HEAD(ring), 488 I915_READ_HEAD(ring),
482 I915_READ_TAIL(ring), 489 I915_READ_TAIL(ring),
483 I915_READ_START(ring)); 490 I915_READ_START(ring));
491 ret = -EIO;
492 goto out;
484 } 493 }
485 } 494 }
486 495
496 if (I915_NEED_GFX_HWS(dev))
497 intel_ring_setup_status_page(ring);
498 else
499 ring_setup_phys_status_page(ring);
500
487 /* Initialize the ring. This must happen _after_ we've cleared the ring 501 /* Initialize the ring. This must happen _after_ we've cleared the ring
488 * registers with the above sequence (the readback of the HEAD registers 502 * registers with the above sequence (the readback of the HEAD registers
489 * also enforces ordering), otherwise the hw might lose the new ring 503 * also enforces ordering), otherwise the hw might lose the new ring
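
init_ring_common() is restructured around a stop_ring() helper: request a stop, verify the engine went idle and its head returned to zero, and retry once before failing with -EIO instead of silently continuing. A toy sketch of that stop-verify-retry control flow; the counter stands in for hardware that ignores the first stop request, and no real register access is modelled.

/* Illustration only, not part of the patch: stop, verify, retry once,
 * then fail hard. */
#include <stdio.h>
#include <stdbool.h>

static int stop_requests_needed = 2;	/* pretend the first stop attempt is ignored */

static bool stop_ring(void)
{
	/* real code: set STOP_RING, wait for MODE_IDLE, clear CTL/HEAD/TAIL */
	return --stop_requests_needed <= 0;
}

static int init_ring(void)
{
	if (!stop_ring()) {
		fprintf(stderr, "head not reset to zero, retrying\n");
		if (!stop_ring()) {
			fprintf(stderr, "failed to stop ring\n");
			return -5;	/* stands in for -EIO */
		}
	}
	/* ... program start, head, tail and re-enable the ring ... */
	return 0;
}

int main(void)
{
	return init_ring() ? 1 : 0;
}
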
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 270a6a973438..2b91c4b4d34b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@ struct intel_hw_status_page {
34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) 34#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
35 35
36#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) 36#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
37#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
37 38
38enum intel_ring_hangcheck_action { 39enum intel_ring_hangcheck_action {
39 HANGCHECK_IDLE = 0, 40 HANGCHECK_IDLE = 0,
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d27155adf5db..46be00d66df3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2424,8 +2424,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2424 if (ret < 0) 2424 if (ret < 0)
2425 goto err1; 2425 goto err1;
2426 2426
2427 ret = sysfs_create_link(&encoder->ddc.dev.kobj, 2427 ret = sysfs_create_link(&drm_connector->kdev->kobj,
2428 &drm_connector->kdev->kobj, 2428 &encoder->ddc.dev.kobj,
2429 encoder->ddc.dev.kobj.name); 2429 encoder->ddc.dev.kobj.name);
2430 if (ret < 0) 2430 if (ret < 0)
2431 goto err2; 2431 goto err2;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index f729dc71d5be..d0c75779d3f6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
185{ 185{
186 __raw_i915_write32(dev_priv, FORCEWAKE_VLV, 186 __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
187 _MASKED_BIT_DISABLE(0xffff)); 187 _MASKED_BIT_DISABLE(0xffff));
188 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
189 _MASKED_BIT_DISABLE(0xffff));
188 /* something from same cacheline, but !FORCEWAKE_VLV */ 190 /* something from same cacheline, but !FORCEWAKE_VLV */
189 __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); 191 __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
190} 192}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 3e6c0f3ed592..ef9957dbac94 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -510,9 +510,8 @@ static void update_cursor(struct drm_crtc *crtc)
510 MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN); 510 MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
511 } else { 511 } else {
512 /* disable cursor: */ 512 /* disable cursor: */
513 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0); 513 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
514 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma), 514 mdp4_kms->blank_cursor_iova);
515 MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
516 } 515 }
517 516
518 /* and drop the iova ref + obj rev when done scanning out: */ 517 /* and drop the iova ref + obj rev when done scanning out: */
@@ -574,11 +573,9 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
574 573
575 if (old_bo) { 574 if (old_bo) {
576 /* drop our previous reference: */ 575 /* drop our previous reference: */
577 msm_gem_put_iova(old_bo, mdp4_kms->id); 576 drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
578 drm_gem_object_unreference_unlocked(old_bo);
579 } 577 }
580 578
581 crtc_flush(crtc);
582 request_pending(crtc, PENDING_CURSOR); 579 request_pending(crtc, PENDING_CURSOR);
583 580
584 return 0; 581 return 0;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index c740ccd1cc67..8edd531cb621 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -70,12 +70,12 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
70 70
71 VERB("status=%08x", status); 71 VERB("status=%08x", status);
72 72
73 mdp_dispatch_irqs(mdp_kms, status);
74
73 for (id = 0; id < priv->num_crtcs; id++) 75 for (id = 0; id < priv->num_crtcs; id++)
74 if (status & mdp4_crtc_vblank(priv->crtcs[id])) 76 if (status & mdp4_crtc_vblank(priv->crtcs[id]))
75 drm_handle_vblank(dev, id); 77 drm_handle_vblank(dev, id);
76 78
77 mdp_dispatch_irqs(mdp_kms, status);
78
79 return IRQ_HANDLED; 79 return IRQ_HANDLED;
80} 80}
81 81
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 272e707c9487..0bb4faa17523 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -144,6 +144,10 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
144static void mdp4_destroy(struct msm_kms *kms) 144static void mdp4_destroy(struct msm_kms *kms)
145{ 145{
146 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); 146 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
147 if (mdp4_kms->blank_cursor_iova)
148 msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
149 if (mdp4_kms->blank_cursor_bo)
150 drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
147 kfree(mdp4_kms); 151 kfree(mdp4_kms);
148} 152}
149 153
@@ -372,6 +376,23 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
372 goto fail; 376 goto fail;
373 } 377 }
374 378
379 mutex_lock(&dev->struct_mutex);
380 mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
381 mutex_unlock(&dev->struct_mutex);
382 if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
383 ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
384 dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
385 mdp4_kms->blank_cursor_bo = NULL;
386 goto fail;
387 }
388
389 ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
390 &mdp4_kms->blank_cursor_iova);
391 if (ret) {
392 dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
393 goto fail;
394 }
395
375 return kms; 396 return kms;
376 397
377fail: 398fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 66a4d31aec80..715520c54cde 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -44,6 +44,10 @@ struct mdp4_kms {
44 struct clk *lut_clk; 44 struct clk *lut_clk;
45 45
46 struct mdp_irq error_handler; 46 struct mdp_irq error_handler;
47
48 /* empty/blank cursor bo to use when cursor is "disabled" */
49 struct drm_gem_object *blank_cursor_bo;
50 uint32_t blank_cursor_iova;
47}; 51};
48#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) 52#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
49 53
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 353d494a497f..f2b985bc2adf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -71,11 +71,11 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
71 71
72 VERB("status=%08x", status); 72 VERB("status=%08x", status);
73 73
74 mdp_dispatch_irqs(mdp_kms, status);
75
74 for (id = 0; id < priv->num_crtcs; id++) 76 for (id = 0; id < priv->num_crtcs; id++)
75 if (status & mdp5_crtc_vblank(priv->crtcs[id])) 77 if (status & mdp5_crtc_vblank(priv->crtcs[id]))
76 drm_handle_vblank(dev, id); 78 drm_handle_vblank(dev, id);
77
78 mdp_dispatch_irqs(mdp_kms, status);
79} 79}
80 80
81irqreturn_t mdp5_irq(struct msm_kms *kms) 81irqreturn_t mdp5_irq(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 6c6d7d4c9b4e..a752ab83b810 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -62,11 +62,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
62 dma_addr_t paddr; 62 dma_addr_t paddr;
63 int ret, size; 63 int ret, size;
64 64
65 /* only doing ARGB32 since this is what is needed to alpha-blend
66 * with video overlays:
67 */
68 sizes->surface_bpp = 32; 65 sizes->surface_bpp = 32;
69 sizes->surface_depth = 32; 66 sizes->surface_depth = 24;
70 67
71 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, 68 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
72 sizes->surface_height, sizes->surface_bpp, 69 sizes->surface_height, sizes->surface_bpp,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 3da8264d3039..bb8026daebc9 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -118,8 +118,10 @@ static void put_pages(struct drm_gem_object *obj)
118 118
119 if (iommu_present(&platform_bus_type)) 119 if (iommu_present(&platform_bus_type))
120 drm_gem_put_pages(obj, msm_obj->pages, true, false); 120 drm_gem_put_pages(obj, msm_obj->pages, true, false);
121 else 121 else {
122 drm_mm_remove_node(msm_obj->vram_node); 122 drm_mm_remove_node(msm_obj->vram_node);
123 drm_free_large(msm_obj->pages);
124 }
123 125
124 msm_obj->pages = NULL; 126 msm_obj->pages = NULL;
125 } 127 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 7762665ad8fd..876de9ac3793 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
1009 } 1009 }
1010 1010
1011 if (outp == 8) 1011 if (outp == 8)
1012 return false; 1012 return conf;
1013 1013
1014 data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1); 1014 data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
1015 if (data == 0x0000) 1015 if (data == 0x0000)
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
index 1dc37b1ddbfa..b0d0fb2f4d08 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
@@ -863,7 +863,7 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
863{ 863{
864 mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 864 mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
865 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); 865 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
866 mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); 866 mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW);
867 867
868 mmio_list(0x40800c, 0x00000000, 8, 1); 868 mmio_list(0x40800c, 0x00000000, 8, 1);
869 mmio_list(0x408010, 0x80000000, 0, 0); 869 mmio_list(0x408010, 0x80000000, 0, 0);
@@ -877,6 +877,8 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
877 mmio_list(0x418e24, 0x00000000, 8, 0); 877 mmio_list(0x418e24, 0x00000000, 8, 0);
878 mmio_list(0x418e28, 0x80000030, 0, 0); 878 mmio_list(0x418e28, 0x80000030, 0, 0);
879 879
880 mmio_list(0x4064c8, 0x018002c0, 0, 0);
881
880 mmio_list(0x418810, 0x80000000, 12, 2); 882 mmio_list(0x418810, 0x80000000, 12, 2);
881 mmio_list(0x419848, 0x10000000, 12, 2); 883 mmio_list(0x419848, 0x10000000, 12, 2);
882 mmio_list(0x419c2c, 0x10000000, 12, 2); 884 mmio_list(0x419c2c, 0x10000000, 12, 2);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index e9df94f96d78..222e8ebb669d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -109,7 +109,7 @@ nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
109 return; 109 return;
110 } 110 }
111 111
112 addr = (u64)(addr >> 8) << 8; 112 addr = (addr & 0xffffff00) << 8;
113 if (!addr) { 113 if (!addr) {
114 addr = (u64)nv_rd32(bios, 0x001700) << 16; 114 addr = (u64)nv_rd32(bios, 0x001700) << 16;
115 addr += 0xf0000; 115 addr += 0xf0000;
@@ -168,7 +168,8 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
168 */ 168 */
169 i = 16; 169 i = 16;
170 do { 170 do {
171 if ((nv_rd32(bios, 0x300000) & 0xffff) == 0xaa55) 171 u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff;
172 if (data == 0xaa55)
172 break; 173 break;
173 } while (i--); 174 } while (i--);
174 175
@@ -176,14 +177,15 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
176 goto out; 177 goto out;
177 178
178 /* read entire bios image to system memory */ 179 /* read entire bios image to system memory */
179 bios->size = ((nv_rd32(bios, 0x300000) >> 16) & 0xff) * 512; 180 bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff;
181 bios->size = bios->size * 512;
180 if (!bios->size) 182 if (!bios->size)
181 goto out; 183 goto out;
182 184
183 bios->data = kmalloc(bios->size, GFP_KERNEL); 185 bios->data = kmalloc(bios->size, GFP_KERNEL);
184 if (bios->data) { 186 if (bios->data) {
185 for (i = 0; i < bios->size; i+=4) 187 for (i = 0; i < bios->size; i += 4)
186 nv_wo32(bios, i, nv_rd32(bios, 0x300000 + i)); 188 ((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
187 } 189 }
188 190
189 /* check the PCI record header */ 191 /* check the PCI record header */
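
The PROM shadow loop now funnels its raw 32-bit reads through le32_to_cpu() and stores them as u32 words, so the 0xaa55 signature test and the copied image come out right on big-endian hosts as well. A small standalone illustration of reading a little-endian word portably; the 4-byte rom[] array stands in for the real option-ROM window.

/* Illustration only, not part of the patch: read a little-endian 32-bit
 * word portably, regardless of host byte order. */
#include <stdio.h>
#include <stdint.h>

static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* a PCI option ROM starts with the bytes 0x55 0xaa */
	const uint8_t rom[4] = { 0x55, 0xaa, 0x00, 0x00 };
	uint32_t word = le32_to_host(rom);

	if ((word & 0xffff) == 0xaa55)
		printf("ROM signature found\n");
	return 0;
}
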
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 43fec17ea540..bbf117be572f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
40 case 0x00: return 2; 40 case 0x00: return 2;
41 case 0x19: return 1; 41 case 0x19: return 1;
42 case 0x1c: return 0; 42 case 0x1c: return 0;
43 case 0x1e: return 2;
43 default: 44 default:
44 break; 45 break;
45 } 46 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 83face3f608f..279206997e5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -389,9 +389,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
389 acpi_status status; 389 acpi_status status;
390 acpi_handle dhandle, rom_handle; 390 acpi_handle dhandle, rom_handle;
391 391
392 if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
393 return false;
394
395 dhandle = ACPI_HANDLE(&pdev->dev); 392 dhandle = ACPI_HANDLE(&pdev->dev);
396 if (!dhandle) 393 if (!dhandle)
397 return false; 394 return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 3ff030dc1ee3..da764a4ed958 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -764,9 +764,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
764 } 764 }
765 765
766 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); 766 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
767 mutex_unlock(&chan->cli->mutex);
768 if (ret) 767 if (ret)
769 goto fail_unreserve; 768 goto fail_unreserve;
769 mutex_unlock(&chan->cli->mutex);
770 770
771 /* Update the crtc struct and cleanup */ 771 /* Update the crtc struct and cleanup */
772 crtc->primary->fb = fb; 772 crtc->primary->fb = fb;
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 355157e4f78d..e3c47a8005ff 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -33,6 +33,7 @@ struct omap_crtc {
33 int pipe; 33 int pipe;
34 enum omap_channel channel; 34 enum omap_channel channel;
35 struct omap_overlay_manager_info info; 35 struct omap_overlay_manager_info info;
36 struct drm_encoder *current_encoder;
36 37
37 /* 38 /*
38 * Temporary: eventually this will go away, but it is needed 39 * Temporary: eventually this will go away, but it is needed
@@ -120,13 +121,25 @@ static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
120{ 121{
121} 122}
122 123
124static void set_enabled(struct drm_crtc *crtc, bool enable);
125
123static int omap_crtc_enable(struct omap_overlay_manager *mgr) 126static int omap_crtc_enable(struct omap_overlay_manager *mgr)
124{ 127{
128 struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
129
130 dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
131 dispc_mgr_set_timings(omap_crtc->channel,
132 &omap_crtc->timings);
133 set_enabled(&omap_crtc->base, true);
134
125 return 0; 135 return 0;
126} 136}
127 137
128static void omap_crtc_disable(struct omap_overlay_manager *mgr) 138static void omap_crtc_disable(struct omap_overlay_manager *mgr)
129{ 139{
140 struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
141
142 set_enabled(&omap_crtc->base, false);
130} 143}
131 144
132static void omap_crtc_set_timings(struct omap_overlay_manager *mgr, 145static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
@@ -184,7 +197,6 @@ static void omap_crtc_destroy(struct drm_crtc *crtc)
184 WARN_ON(omap_crtc->apply_irq.registered); 197 WARN_ON(omap_crtc->apply_irq.registered);
185 omap_irq_unregister(crtc->dev, &omap_crtc->error_irq); 198 omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
186 199
187 omap_crtc->plane->funcs->destroy(omap_crtc->plane);
188 drm_crtc_cleanup(crtc); 200 drm_crtc_cleanup(crtc);
189 201
190 kfree(omap_crtc); 202 kfree(omap_crtc);
@@ -338,17 +350,23 @@ static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
338 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 350 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
339 struct drm_plane *primary = crtc->primary; 351 struct drm_plane *primary = crtc->primary;
340 struct drm_gem_object *bo; 352 struct drm_gem_object *bo;
353 unsigned long flags;
341 354
342 DBG("%d -> %d (event=%p)", primary->fb ? primary->fb->base.id : -1, 355 DBG("%d -> %d (event=%p)", primary->fb ? primary->fb->base.id : -1,
343 fb->base.id, event); 356 fb->base.id, event);
344 357
358 spin_lock_irqsave(&dev->event_lock, flags);
359
345 if (omap_crtc->old_fb) { 360 if (omap_crtc->old_fb) {
361 spin_unlock_irqrestore(&dev->event_lock, flags);
346 dev_err(dev->dev, "already a pending flip\n"); 362 dev_err(dev->dev, "already a pending flip\n");
347 return -EINVAL; 363 return -EINVAL;
348 } 364 }
349 365
350 omap_crtc->event = event; 366 omap_crtc->event = event;
351 primary->fb = fb; 367 omap_crtc->old_fb = primary->fb = fb;
368
369 spin_unlock_irqrestore(&dev->event_lock, flags);
352 370
353 /* 371 /*
354 * Hold a reference temporarily until the crtc is updated 372 * Hold a reference temporarily until the crtc is updated
@@ -528,38 +546,46 @@ static void set_enabled(struct drm_crtc *crtc, bool enable)
528 struct drm_device *dev = crtc->dev; 546 struct drm_device *dev = crtc->dev;
529 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 547 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
530 enum omap_channel channel = omap_crtc->channel; 548 enum omap_channel channel = omap_crtc->channel;
531 struct omap_irq_wait *wait = NULL; 549 struct omap_irq_wait *wait;
550 u32 framedone_irq, vsync_irq;
551 int ret;
532 552
533 if (dispc_mgr_is_enabled(channel) == enable) 553 if (dispc_mgr_is_enabled(channel) == enable)
534 return; 554 return;
535 555
536 /* ignore sync-lost irqs during enable/disable */ 556 /*
557 * Digit output produces some sync lost interrupts during the first
558 * frame when enabling, so we need to ignore those.
559 */
537 omap_irq_unregister(crtc->dev, &omap_crtc->error_irq); 560 omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
538 561
539 if (dispc_mgr_get_framedone_irq(channel)) { 562 framedone_irq = dispc_mgr_get_framedone_irq(channel);
540 if (!enable) { 563 vsync_irq = dispc_mgr_get_vsync_irq(channel);
541 wait = omap_irq_wait_init(dev, 564
542 dispc_mgr_get_framedone_irq(channel), 1); 565 if (enable) {
543 } 566 wait = omap_irq_wait_init(dev, vsync_irq, 1);
544 } else { 567 } else {
545 /* 568 /*
546 * When we disable digit output, we need to wait until fields 569 * When we disable the digit output, we need to wait for
547 * are done. Otherwise the DSS is still working, and turning 570 * FRAMEDONE to know that DISPC has finished with the output.
548 * off the clocks prevents DSS from going to OFF mode. And when 571 *
549 * enabling, we need to wait for the extra sync losts 572 * OMAP2/3 does not have FRAMEDONE irq for digit output, and in
573 * that case we need to use vsync interrupt, and wait for both
574 * even and odd frames.
550 */ 575 */
551 wait = omap_irq_wait_init(dev, 576
552 dispc_mgr_get_vsync_irq(channel), 2); 577 if (framedone_irq)
578 wait = omap_irq_wait_init(dev, framedone_irq, 1);
579 else
580 wait = omap_irq_wait_init(dev, vsync_irq, 2);
553 } 581 }
554 582
555 dispc_mgr_enable(channel, enable); 583 dispc_mgr_enable(channel, enable);
556 584
557 if (wait) { 585 ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
558 int ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100)); 586 if (ret) {
559 if (ret) { 587 dev_err(dev->dev, "%s: timeout waiting for %s\n",
560 dev_err(dev->dev, "%s: timeout waiting for %s\n", 588 omap_crtc->name, enable ? "enable" : "disable");
561 omap_crtc->name, enable ? "enable" : "disable");
562 }
563 } 589 }
564 590
565 omap_irq_register(crtc->dev, &omap_crtc->error_irq); 591 omap_irq_register(crtc->dev, &omap_crtc->error_irq);
@@ -586,8 +612,12 @@ static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
586 } 612 }
587 } 613 }
588 614
615 if (omap_crtc->current_encoder && encoder != omap_crtc->current_encoder)
616 omap_encoder_set_enabled(omap_crtc->current_encoder, false);
617
618 omap_crtc->current_encoder = encoder;
619
589 if (!omap_crtc->enabled) { 620 if (!omap_crtc->enabled) {
590 set_enabled(&omap_crtc->base, false);
591 if (encoder) 621 if (encoder)
592 omap_encoder_set_enabled(encoder, false); 622 omap_encoder_set_enabled(encoder, false);
593 } else { 623 } else {
@@ -596,13 +626,7 @@ static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
596 omap_encoder_update(encoder, omap_crtc->mgr, 626 omap_encoder_update(encoder, omap_crtc->mgr,
597 &omap_crtc->timings); 627 &omap_crtc->timings);
598 omap_encoder_set_enabled(encoder, true); 628 omap_encoder_set_enabled(encoder, true);
599 omap_crtc->full_update = false;
600 } 629 }
601
602 dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
603 dispc_mgr_set_timings(omap_crtc->channel,
604 &omap_crtc->timings);
605 set_enabled(&omap_crtc->base, true);
606 } 630 }
607 631
608 omap_crtc->full_update = false; 632 omap_crtc->full_update = false;
@@ -613,10 +637,30 @@ static void omap_crtc_post_apply(struct omap_drm_apply *apply)
613 /* nothing needed for post-apply */ 637 /* nothing needed for post-apply */
614} 638}
615 639
640void omap_crtc_flush(struct drm_crtc *crtc)
641{
642 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
643 int loops = 0;
644
645 while (!list_empty(&omap_crtc->pending_applies) ||
646 !list_empty(&omap_crtc->queued_applies) ||
647 omap_crtc->event || omap_crtc->old_fb) {
648
649 if (++loops > 10) {
650 dev_err(crtc->dev->dev,
651 "omap_crtc_flush() timeout\n");
652 break;
653 }
654
655 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
656 }
657}
658
616static const char *channel_names[] = { 659static const char *channel_names[] = {
617 [OMAP_DSS_CHANNEL_LCD] = "lcd", 660 [OMAP_DSS_CHANNEL_LCD] = "lcd",
618 [OMAP_DSS_CHANNEL_DIGIT] = "tv", 661 [OMAP_DSS_CHANNEL_DIGIT] = "tv",
619 [OMAP_DSS_CHANNEL_LCD2] = "lcd2", 662 [OMAP_DSS_CHANNEL_LCD2] = "lcd2",
663 [OMAP_DSS_CHANNEL_LCD3] = "lcd3",
620}; 664};
621 665
622void omap_crtc_pre_init(void) 666void omap_crtc_pre_init(void)
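The omap_crtc.c page-flip hunk above closes a check-then-act race: the test for an already-pending flip and the recording of the new framebuffer now both happen under dev->event_lock, so two concurrent flip requests can no longer both pass the check. A minimal userspace sketch of that pattern (hypothetical names, a pthread mutex standing in for the spinlock; not the driver code itself):

    #include <pthread.h>
    #include <stdio.h>
    #include <errno.h>

    static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *old_fb;                 /* non-NULL while a flip is pending */

    /* Claim the pending-flip slot atomically, mirroring the locked check. */
    static int queue_flip(void *new_fb)
    {
            pthread_mutex_lock(&event_lock);
            if (old_fb) {
                    pthread_mutex_unlock(&event_lock);
                    return -EINVAL;      /* already a pending flip */
            }
            old_fb = new_fb;             /* claim before dropping the lock */
            pthread_mutex_unlock(&event_lock);
            return 0;
    }

    int main(void)
    {
            int first  = queue_flip((void *)0x1);
            int second = queue_flip((void *)0x2);  /* rejected until the first completes */

            printf("first=%d second=%d\n", first, second);
            return 0;
    }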
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index bf39fcc49e0f..c8270e4b26f3 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -513,12 +513,18 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
513static int dev_unload(struct drm_device *dev) 513static int dev_unload(struct drm_device *dev)
514{ 514{
515 struct omap_drm_private *priv = dev->dev_private; 515 struct omap_drm_private *priv = dev->dev_private;
516 int i;
516 517
517 DBG("unload: dev=%p", dev); 518 DBG("unload: dev=%p", dev);
518 519
519 drm_kms_helper_poll_fini(dev); 520 drm_kms_helper_poll_fini(dev);
520 521
521 omap_fbdev_free(dev); 522 omap_fbdev_free(dev);
523
524 /* flush crtcs so the fbs get released */
525 for (i = 0; i < priv->num_crtcs; i++)
526 omap_crtc_flush(priv->crtcs[i]);
527
522 omap_modeset_free(dev); 528 omap_modeset_free(dev);
523 omap_gem_deinit(dev); 529 omap_gem_deinit(dev);
524 530
@@ -696,10 +702,11 @@ static int pdev_remove(struct platform_device *device)
696{ 702{
697 DBG(""); 703 DBG("");
698 704
705 drm_put_dev(platform_get_drvdata(device));
706
699 omap_disconnect_dssdevs(); 707 omap_disconnect_dssdevs();
700 omap_crtc_pre_uninit(); 708 omap_crtc_pre_uninit();
701 709
702 drm_put_dev(platform_get_drvdata(device));
703 return 0; 710 return 0;
704} 711}
705 712
@@ -726,18 +733,33 @@ static struct platform_driver pdev = {
726 733
727static int __init omap_drm_init(void) 734static int __init omap_drm_init(void)
728{ 735{
736 int r;
737
729 DBG("init"); 738 DBG("init");
730 if (platform_driver_register(&omap_dmm_driver)) { 739
731 /* we can continue on without DMM.. so not fatal */ 740 r = platform_driver_register(&omap_dmm_driver);
732 dev_err(NULL, "DMM registration failed\n"); 741 if (r) {
742 pr_err("DMM driver registration failed\n");
743 return r;
744 }
745
746 r = platform_driver_register(&pdev);
747 if (r) {
748 pr_err("omapdrm driver registration failed\n");
749 platform_driver_unregister(&omap_dmm_driver);
750 return r;
733 } 751 }
734 return platform_driver_register(&pdev); 752
753 return 0;
735} 754}
736 755
737static void __exit omap_drm_fini(void) 756static void __exit omap_drm_fini(void)
738{ 757{
739 DBG("fini"); 758 DBG("fini");
759
740 platform_driver_unregister(&pdev); 760 platform_driver_unregister(&pdev);
761
762 platform_driver_unregister(&omap_dmm_driver);
741} 763}
742 764
743/* need late_initcall() so we load after dss_driver's are loaded */ 765/* need late_initcall() so we load after dss_driver's are loaded */
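The reworked omap_drm_init() above treats a DMM registration failure as fatal and, when the second registration fails, unwinds the first in reverse order. The error-unwind shape, reduced to a standalone sketch with stand-in register/unregister functions (hypothetical names):

    #include <stdio.h>

    /* Stand-ins for platform_driver_register()/unregister(); 0 on success. */
    static int  register_dmm(void)   { puts("dmm registered");      return 0;  }
    static void unregister_dmm(void) { puts("dmm unregistered");               }
    static int  register_drm(void)   { puts("drm register failed"); return -1; }

    static int drm_init(void)
    {
            int r = register_dmm();
            if (r)
                    return r;            /* nothing to undo yet */

            r = register_drm();
            if (r) {
                    unregister_dmm();    /* undo in reverse order */
                    return r;
            }
            return 0;
    }

    int main(void)
    {
            printf("init -> %d\n", drm_init());
            return 0;
    }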
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 428b2981fd68..284b80fc3c54 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -163,6 +163,7 @@ void omap_crtc_pre_init(void);
163void omap_crtc_pre_uninit(void); 163void omap_crtc_pre_uninit(void);
164struct drm_crtc *omap_crtc_init(struct drm_device *dev, 164struct drm_crtc *omap_crtc_init(struct drm_device *dev,
165 struct drm_plane *plane, enum omap_channel channel, int id); 165 struct drm_plane *plane, enum omap_channel channel, int id);
166void omap_crtc_flush(struct drm_crtc *crtc);
166 167
167struct drm_plane *omap_plane_init(struct drm_device *dev, 168struct drm_plane *omap_plane_init(struct drm_device *dev,
168 int plane_id, bool private_plane); 169 int plane_id, bool private_plane);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index d2b8c49bfb4a..8b019602ffe6 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -218,6 +218,20 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
218 info->rotation_type = OMAP_DSS_ROT_TILER; 218 info->rotation_type = OMAP_DSS_ROT_TILER;
219 info->screen_width = omap_gem_tiled_stride(plane->bo, orient); 219 info->screen_width = omap_gem_tiled_stride(plane->bo, orient);
220 } else { 220 } else {
221 switch (win->rotation & 0xf) {
222 case 0:
223 case BIT(DRM_ROTATE_0):
224 /* OK */
225 break;
226
227 default:
228 dev_warn(fb->dev->dev,
229 "rotation '%d' ignored for non-tiled fb\n",
230 win->rotation);
231 win->rotation = 0;
232 break;
233 }
234
221 info->paddr = get_linear_addr(plane, format, 0, x, y); 235 info->paddr = get_linear_addr(plane, format, 0, x, y);
222 info->rotation_type = OMAP_DSS_ROT_DMA; 236 info->rotation_type = OMAP_DSS_ROT_DMA;
223 info->screen_width = plane->pitch; 237 info->screen_width = plane->pitch;
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 002988d09021..1388ca7f87e8 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -371,6 +371,9 @@ void omap_fbdev_free(struct drm_device *dev)
371 371
372 fbdev = to_omap_fbdev(priv->fbdev); 372 fbdev = to_omap_fbdev(priv->fbdev);
373 373
374 /* release the ref taken in omap_fbdev_create() */
375 omap_gem_put_paddr(fbdev->bo);
376
374 /* this will free the backing object */ 377 /* this will free the backing object */
375 if (fbdev->fb) { 378 if (fbdev->fb) {
376 drm_framebuffer_unregister_private(fbdev->fb); 379 drm_framebuffer_unregister_private(fbdev->fb);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index c8d972763889..95dbce286a41 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -980,12 +980,9 @@ int omap_gem_resume(struct device *dev)
980#ifdef CONFIG_DEBUG_FS 980#ifdef CONFIG_DEBUG_FS
981void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) 981void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
982{ 982{
983 struct drm_device *dev = obj->dev;
984 struct omap_gem_object *omap_obj = to_omap_bo(obj); 983 struct omap_gem_object *omap_obj = to_omap_bo(obj);
985 uint64_t off; 984 uint64_t off;
986 985
987 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
988
989 off = drm_vma_node_start(&obj->vma_node); 986 off = drm_vma_node_start(&obj->vma_node);
990 987
991 seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d", 988 seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
@@ -1050,10 +1047,10 @@ static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
1050{ 1047{
1051 struct omap_gem_object *omap_obj = waiter->omap_obj; 1048 struct omap_gem_object *omap_obj = waiter->omap_obj;
1052 if ((waiter->op & OMAP_GEM_READ) && 1049 if ((waiter->op & OMAP_GEM_READ) &&
1053 (omap_obj->sync->read_complete < waiter->read_target)) 1050 (omap_obj->sync->write_complete < waiter->write_target))
1054 return true; 1051 return true;
1055 if ((waiter->op & OMAP_GEM_WRITE) && 1052 if ((waiter->op & OMAP_GEM_WRITE) &&
1056 (omap_obj->sync->write_complete < waiter->write_target)) 1053 (omap_obj->sync->read_complete < waiter->read_target))
1057 return true; 1054 return true;
1058 return false; 1055 return false;
1059} 1056}
@@ -1229,6 +1226,8 @@ int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
1229 } 1226 }
1230 1227
1231 spin_unlock(&sync_lock); 1228 spin_unlock(&sync_lock);
1229
1230 kfree(waiter);
1232 } 1231 }
1233 1232
1234 /* no waiting.. */ 1233 /* no waiting.. */
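The is_waiting() hunk above swaps which completion counter each operation checks: as I read it, a waiter asking for read access blocks until outstanding writes reach its target, and a writer blocks until outstanding reads do. A self-contained sketch of that predicate with hypothetical counter values:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define OP_READ  (1 << 0)
    #define OP_WRITE (1 << 1)

    struct sync { uint32_t read_complete, write_complete; };

    /* Still waiting if the opposite direction has not reached the target. */
    static bool is_waiting(unsigned op, const struct sync *s,
                           uint32_t read_target, uint32_t write_target)
    {
            if ((op & OP_READ) && s->write_complete < write_target)
                    return true;
            if ((op & OP_WRITE) && s->read_complete < read_target)
                    return true;
            return false;
    }

    int main(void)
    {
            struct sync s = { .read_complete = 2, .write_complete = 1 };

            /* one write still outstanding: readers wait, writers do not */
            printf("reader waiting: %d\n", is_waiting(OP_READ, &s, 2, 2));
            printf("writer waiting: %d\n", is_waiting(OP_WRITE, &s, 2, 2));
            return 0;
    }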
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 046d5e660c04..3cf31ee59aac 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -225,6 +225,11 @@ int omap_plane_mode_set(struct drm_plane *plane,
225 omap_plane->apply_done_cb.arg = arg; 225 omap_plane->apply_done_cb.arg = arg;
226 } 226 }
227 227
228 if (plane->fb)
229 drm_framebuffer_unreference(plane->fb);
230
231 drm_framebuffer_reference(fb);
232
228 plane->fb = fb; 233 plane->fb = fb;
229 plane->crtc = crtc; 234 plane->crtc = crtc;
230 235
@@ -241,10 +246,13 @@ static int omap_plane_update(struct drm_plane *plane,
241 struct omap_plane *omap_plane = to_omap_plane(plane); 246 struct omap_plane *omap_plane = to_omap_plane(plane);
242 omap_plane->enabled = true; 247 omap_plane->enabled = true;
243 248
244 if (plane->fb) 249 /* omap_plane_mode_set() takes adjusted src */
245 drm_framebuffer_unreference(plane->fb); 250 switch (omap_plane->win.rotation & 0xf) {
246 251 case BIT(DRM_ROTATE_90):
247 drm_framebuffer_reference(fb); 252 case BIT(DRM_ROTATE_270):
253 swap(src_w, src_h);
254 break;
255 }
248 256
249 return omap_plane_mode_set(plane, crtc, fb, 257 return omap_plane_mode_set(plane, crtc, fb,
250 crtc_x, crtc_y, crtc_w, crtc_h, 258 crtc_x, crtc_y, crtc_w, crtc_h,
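The omap_plane_update() hunk above transposes the source rectangle before calling omap_plane_mode_set() when the window is rotated by 90 or 270 degrees. The dimension swap in isolation, as a sketch (rotation bit positions assumed to mirror the DRM_ROTATE_* convention):

    #include <stdio.h>

    /* Assumed bit layout: rotation angle in bits 0..3, like DRM_ROTATE_*. */
    #define ROTATE_90  (1 << 1)
    #define ROTATE_270 (1 << 3)

    static void adjust_src(unsigned rotation, unsigned *src_w, unsigned *src_h)
    {
            unsigned tmp;

            switch (rotation & 0xf) {
            case ROTATE_90:
            case ROTATE_270:
                    /* scanout is rotated, so the source rectangle is transposed */
                    tmp = *src_w;
                    *src_w = *src_h;
                    *src_h = tmp;
                    break;
            }
    }

    int main(void)
    {
            unsigned w = 1920, h = 1080;

            adjust_src(ROTATE_90, &w, &h);
            printf("%ux%u\n", w, h);     /* prints 1080x1920 */
            return 0;
    }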
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index fb187c78978f..c31c12b4e666 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1177,27 +1177,43 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1177 1177
1178 /* Set NUM_BANKS. */ 1178 /* Set NUM_BANKS. */
1179 if (rdev->family >= CHIP_TAHITI) { 1179 if (rdev->family >= CHIP_TAHITI) {
1180 unsigned tileb, index, num_banks, tile_split_bytes; 1180 unsigned index, num_banks;
1181 1181
1182 /* Calculate the macrotile mode index. */ 1182 if (rdev->family >= CHIP_BONAIRE) {
1183 tile_split_bytes = 64 << tile_split; 1183 unsigned tileb, tile_split_bytes;
1184 tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
1185 tileb = min(tile_split_bytes, tileb);
1186 1184
1187 for (index = 0; tileb > 64; index++) { 1185 /* Calculate the macrotile mode index. */
1188 tileb >>= 1; 1186 tile_split_bytes = 64 << tile_split;
1189 } 1187 tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
1188 tileb = min(tile_split_bytes, tileb);
1190 1189
1191 if (index >= 16) { 1190 for (index = 0; tileb > 64; index++)
1192 DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", 1191 tileb >>= 1;
1193 target_fb->bits_per_pixel, tile_split); 1192
1194 return -EINVAL; 1193 if (index >= 16) {
1195 } 1194 DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
1195 target_fb->bits_per_pixel, tile_split);
1196 return -EINVAL;
1197 }
1196 1198
1197 if (rdev->family >= CHIP_BONAIRE)
1198 num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; 1199 num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
1199 else 1200 } else {
1201 switch (target_fb->bits_per_pixel) {
1202 case 8:
1203 index = 10;
1204 break;
1205 case 16:
1206 index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP;
1207 break;
1208 default:
1209 case 32:
1210 index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP;
1211 break;
1212 }
1213
1200 num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; 1214 num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
1215 }
1216
1201 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); 1217 fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
1202 } else { 1218 } else {
1203 /* NI and older. */ 1219 /* NI and older. */
@@ -1720,8 +1736,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1720 } 1736 }
1721 /* otherwise, pick one of the plls */ 1737 /* otherwise, pick one of the plls */
1722 if ((rdev->family == CHIP_KAVERI) || 1738 if ((rdev->family == CHIP_KAVERI) ||
1723 (rdev->family == CHIP_KABINI)) { 1739 (rdev->family == CHIP_KABINI) ||
1724 /* KB/KV has PPLL1 and PPLL2 */ 1740 (rdev->family == CHIP_MULLINS)) {
1741 /* KB/KV/ML has PPLL1 and PPLL2 */
1725 pll_in_use = radeon_get_pll_use_mask(crtc); 1742 pll_in_use = radeon_get_pll_use_mask(crtc);
1726 if (!(pll_in_use & (1 << ATOM_PPLL2))) 1743 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1727 return ATOM_PPLL2; 1744 return ATOM_PPLL2;
@@ -1885,6 +1902,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
1885 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) 1902 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1886 is_tvcv = true; 1903 is_tvcv = true;
1887 1904
1905 if (!radeon_crtc->adjusted_clock)
1906 return -EINVAL;
1907
1888 atombios_crtc_set_pll(crtc, adjusted_mode); 1908 atombios_crtc_set_pll(crtc, adjusted_mode);
1889 1909
1890 if (ASIC_IS_DCE4(rdev)) 1910 if (ASIC_IS_DCE4(rdev))
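In the NUM_BANKS hunk above, CIK-class parts derive the macrotile mode index by clamping the 8x8 tile size to the tile split and halving until it reaches 64 bytes, while SI parts pick a fixed scanout index by bits per pixel. The index calculation alone, as a pure-function sketch:

    #include <stdio.h>

    /* tile_split is the encoded split (64 << tile_split bytes); bpp is the
     * scanout buffer's bits per pixel. Returns -1 for a bogus combination. */
    static int macrotile_index(unsigned tile_split, unsigned bpp)
    {
            unsigned tile_split_bytes = 64u << tile_split;
            unsigned tileb = 8 * 8 * bpp / 8;        /* bytes in an 8x8 tile */
            unsigned index;

            if (tileb > tile_split_bytes)
                    tileb = tile_split_bytes;

            for (index = 0; tileb > 64; index++)
                    tileb >>= 1;

            return index >= 16 ? -1 : (int)index;
    }

    int main(void)
    {
            printf("32bpp, split=4: index %d\n", macrotile_index(4, 32));
            return 0;
    }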
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 8b0ab170cef9..54e4f52549af 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -142,7 +142,8 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
142 return recv_bytes; 142 return recv_bytes;
143} 143}
144 144
145#define HEADER_SIZE 4 145#define BARE_ADDRESS_SIZE 3
146#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
146 147
147static ssize_t 148static ssize_t
148radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) 149radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
@@ -160,13 +161,19 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
160 tx_buf[0] = msg->address & 0xff; 161 tx_buf[0] = msg->address & 0xff;
161 tx_buf[1] = msg->address >> 8; 162 tx_buf[1] = msg->address >> 8;
162 tx_buf[2] = msg->request << 4; 163 tx_buf[2] = msg->request << 4;
163 tx_buf[3] = msg->size - 1; 164 tx_buf[3] = msg->size ? (msg->size - 1) : 0;
164 165
165 switch (msg->request & ~DP_AUX_I2C_MOT) { 166 switch (msg->request & ~DP_AUX_I2C_MOT) {
166 case DP_AUX_NATIVE_WRITE: 167 case DP_AUX_NATIVE_WRITE:
167 case DP_AUX_I2C_WRITE: 168 case DP_AUX_I2C_WRITE:
169 /* tx_size needs to be 4 even for bare address packets since the atom
170 * table needs the info in tx_buf[3].
171 */
168 tx_size = HEADER_SIZE + msg->size; 172 tx_size = HEADER_SIZE + msg->size;
169 tx_buf[3] |= tx_size << 4; 173 if (msg->size == 0)
174 tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
175 else
176 tx_buf[3] |= tx_size << 4;
170 memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size); 177 memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size);
171 ret = radeon_process_aux_ch(chan, 178 ret = radeon_process_aux_ch(chan,
172 tx_buf, tx_size, NULL, 0, delay, &ack); 179 tx_buf, tx_size, NULL, 0, delay, &ack);
@@ -176,8 +183,14 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
176 break; 183 break;
177 case DP_AUX_NATIVE_READ: 184 case DP_AUX_NATIVE_READ:
178 case DP_AUX_I2C_READ: 185 case DP_AUX_I2C_READ:
186 /* tx_size needs to be 4 even for bare address packets since the atom
187 * table needs the info in tx_buf[3].
188 */
179 tx_size = HEADER_SIZE; 189 tx_size = HEADER_SIZE;
180 tx_buf[3] |= tx_size << 4; 190 if (msg->size == 0)
191 tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
192 else
193 tx_buf[3] |= tx_size << 4;
181 ret = radeon_process_aux_ch(chan, 194 ret = radeon_process_aux_ch(chan,
182 tx_buf, tx_size, msg->buffer, msg->size, delay, &ack); 195 tx_buf, tx_size, msg->buffer, msg->size, delay, &ack);
183 break; 196 break;
@@ -186,7 +199,7 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
186 break; 199 break;
187 } 200 }
188 201
189 if (ret > 0) 202 if (ret >= 0)
190 msg->reply = ack >> 4; 203 msg->reply = ack >> 4;
191 204
192 return ret; 205 return ret;
@@ -194,98 +207,16 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
194 207
195void radeon_dp_aux_init(struct radeon_connector *radeon_connector) 208void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
196{ 209{
197 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
198
199 dig_connector->dp_i2c_bus->aux.dev = radeon_connector->base.kdev;
200 dig_connector->dp_i2c_bus->aux.transfer = radeon_dp_aux_transfer;
201}
202
203int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
204 u8 write_byte, u8 *read_byte)
205{
206 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
207 struct radeon_i2c_chan *auxch = i2c_get_adapdata(adapter);
208 u16 address = algo_data->address;
209 u8 msg[5];
210 u8 reply[2];
211 unsigned retry;
212 int msg_bytes;
213 int reply_bytes = 1;
214 int ret; 210 int ret;
215 u8 ack;
216 211
217 /* Set up the address */ 212 radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
218 msg[0] = address; 213 radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
219 msg[1] = address >> 8; 214 radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer;
215 ret = drm_dp_aux_register_i2c_bus(&radeon_connector->ddc_bus->aux);
216 if (!ret)
217 radeon_connector->ddc_bus->has_aux = true;
220 218
221 /* Set up the command byte */ 219 WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
222 if (mode & MODE_I2C_READ) {
223 msg[2] = DP_AUX_I2C_READ << 4;
224 msg_bytes = 4;
225 msg[3] = msg_bytes << 4;
226 } else {
227 msg[2] = DP_AUX_I2C_WRITE << 4;
228 msg_bytes = 5;
229 msg[3] = msg_bytes << 4;
230 msg[4] = write_byte;
231 }
232
233 /* special handling for start/stop */
234 if (mode & (MODE_I2C_START | MODE_I2C_STOP))
235 msg[3] = 3 << 4;
236
237 /* Set MOT bit for all but stop */
238 if ((mode & MODE_I2C_STOP) == 0)
239 msg[2] |= DP_AUX_I2C_MOT << 4;
240
241 for (retry = 0; retry < 7; retry++) {
242 ret = radeon_process_aux_ch(auxch,
243 msg, msg_bytes, reply, reply_bytes, 0, &ack);
244 if (ret == -EBUSY)
245 continue;
246 else if (ret < 0) {
247 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
248 return ret;
249 }
250
251 switch ((ack >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
252 case DP_AUX_NATIVE_REPLY_ACK:
253 /* I2C-over-AUX Reply field is only valid
254 * when paired with AUX ACK.
255 */
256 break;
257 case DP_AUX_NATIVE_REPLY_NACK:
258 DRM_DEBUG_KMS("aux_ch native nack\n");
259 return -EREMOTEIO;
260 case DP_AUX_NATIVE_REPLY_DEFER:
261 DRM_DEBUG_KMS("aux_ch native defer\n");
262 usleep_range(500, 600);
263 continue;
264 default:
265 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
266 return -EREMOTEIO;
267 }
268
269 switch ((ack >> 4) & DP_AUX_I2C_REPLY_MASK) {
270 case DP_AUX_I2C_REPLY_ACK:
271 if (mode == MODE_I2C_READ)
272 *read_byte = reply[0];
273 return ret;
274 case DP_AUX_I2C_REPLY_NACK:
275 DRM_DEBUG_KMS("aux_i2c nack\n");
276 return -EREMOTEIO;
277 case DP_AUX_I2C_REPLY_DEFER:
278 DRM_DEBUG_KMS("aux_i2c defer\n");
279 usleep_range(400, 500);
280 break;
281 default:
282 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
283 return -EREMOTEIO;
284 }
285 }
286
287 DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
288 return -EREMOTEIO;
289} 220}
290 221
291/***** general DP utility functions *****/ 222/***** general DP utility functions *****/
@@ -420,12 +351,11 @@ static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
420 351
421u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector) 352u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
422{ 353{
423 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
424 struct drm_device *dev = radeon_connector->base.dev; 354 struct drm_device *dev = radeon_connector->base.dev;
425 struct radeon_device *rdev = dev->dev_private; 355 struct radeon_device *rdev = dev->dev_private;
426 356
427 return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0, 357 return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
428 dig_connector->dp_i2c_bus->rec.i2c_id, 0); 358 radeon_connector->ddc_bus->rec.i2c_id, 0);
429} 359}
430 360
431static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector) 361static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
@@ -436,11 +366,11 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
436 if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 366 if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
437 return; 367 return;
438 368
439 if (drm_dp_dpcd_read(&dig_connector->dp_i2c_bus->aux, DP_SINK_OUI, buf, 3)) 369 if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
440 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", 370 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
441 buf[0], buf[1], buf[2]); 371 buf[0], buf[1], buf[2]);
442 372
443 if (drm_dp_dpcd_read(&dig_connector->dp_i2c_bus->aux, DP_BRANCH_OUI, buf, 3)) 373 if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
444 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 374 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
445 buf[0], buf[1], buf[2]); 375 buf[0], buf[1], buf[2]);
446} 376}
@@ -451,7 +381,7 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
451 u8 msg[DP_DPCD_SIZE]; 381 u8 msg[DP_DPCD_SIZE];
452 int ret, i; 382 int ret, i;
453 383
454 ret = drm_dp_dpcd_read(&dig_connector->dp_i2c_bus->aux, DP_DPCD_REV, msg, 384 ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
455 DP_DPCD_SIZE); 385 DP_DPCD_SIZE);
456 if (ret > 0) { 386 if (ret > 0) {
457 memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); 387 memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
@@ -489,21 +419,23 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
489 419
490 if (dp_bridge != ENCODER_OBJECT_ID_NONE) { 420 if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
491 /* DP bridge chips */ 421 /* DP bridge chips */
492 drm_dp_dpcd_readb(&dig_connector->dp_i2c_bus->aux, 422 if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
493 DP_EDP_CONFIGURATION_CAP, &tmp); 423 DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
494 if (tmp & 1) 424 if (tmp & 1)
495 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 425 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
496 else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || 426 else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
497 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) 427 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
498 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; 428 panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
499 else 429 else
500 panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; 430 panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
431 }
501 } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 432 } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
502 /* eDP */ 433 /* eDP */
503 drm_dp_dpcd_readb(&dig_connector->dp_i2c_bus->aux, 434 if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
504 DP_EDP_CONFIGURATION_CAP, &tmp); 435 DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
505 if (tmp & 1) 436 if (tmp & 1)
506 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; 437 panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
438 }
507 } 439 }
508 440
509 return panel_mode; 441 return panel_mode;
@@ -554,7 +486,8 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
554 u8 link_status[DP_LINK_STATUS_SIZE]; 486 u8 link_status[DP_LINK_STATUS_SIZE];
555 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 487 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
556 488
557 if (drm_dp_dpcd_read_link_status(&dig->dp_i2c_bus->aux, link_status) <= 0) 489 if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status)
490 <= 0)
558 return false; 491 return false;
559 if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) 492 if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
560 return false; 493 return false;
@@ -574,7 +507,7 @@ void radeon_dp_set_rx_power_state(struct drm_connector *connector,
574 507
575 /* power up/down the sink */ 508 /* power up/down the sink */
576 if (dig_connector->dpcd[0] >= 0x11) { 509 if (dig_connector->dpcd[0] >= 0x11) {
577 drm_dp_dpcd_writeb(&dig_connector->dp_i2c_bus->aux, 510 drm_dp_dpcd_writeb(&radeon_connector->ddc_bus->aux,
578 DP_SET_POWER, power_state); 511 DP_SET_POWER, power_state);
579 usleep_range(1000, 2000); 512 usleep_range(1000, 2000);
580 } 513 }
@@ -878,11 +811,15 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
878 else 811 else
879 dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; 812 dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
880 813
881 drm_dp_dpcd_readb(&dig_connector->dp_i2c_bus->aux, DP_MAX_LANE_COUNT, &tmp); 814 if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
882 if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) 815 == 1) {
883 dp_info.tp3_supported = true; 816 if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
884 else 817 dp_info.tp3_supported = true;
818 else
819 dp_info.tp3_supported = false;
820 } else {
885 dp_info.tp3_supported = false; 821 dp_info.tp3_supported = false;
822 }
886 823
887 memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); 824 memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
888 dp_info.rdev = rdev; 825 dp_info.rdev = rdev;
@@ -890,7 +827,7 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
890 dp_info.connector = connector; 827 dp_info.connector = connector;
891 dp_info.dp_lane_count = dig_connector->dp_lane_count; 828 dp_info.dp_lane_count = dig_connector->dp_lane_count;
892 dp_info.dp_clock = dig_connector->dp_clock; 829 dp_info.dp_clock = dig_connector->dp_clock;
893 dp_info.aux = &dig_connector->dp_i2c_bus->aux; 830 dp_info.aux = &radeon_connector->ddc_bus->aux;
894 831
895 if (radeon_dp_link_train_init(&dp_info)) 832 if (radeon_dp_link_train_init(&dp_info))
896 goto done; 833 goto done;
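The radeon_dp_aux_transfer() hunks above special-case zero-length (bare address) messages: byte 3 of the AUX header carries msg->size - 1 in its low nibble only when there is a payload, and for a bare address the high nibble advertises the 3 address/command bytes rather than the full 4-byte header. A sketch of just that byte-3 encoding for the write direction (hypothetical helper name):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define BARE_ADDRESS_SIZE 3
    #define HEADER_SIZE       (BARE_ADDRESS_SIZE + 1)

    /* Byte 3 of the AUX request header for a write carrying 'size' bytes. */
    static uint8_t aux_header_byte3(size_t size)
    {
            uint8_t b = size ? (uint8_t)(size - 1) : 0;  /* low nibble: len - 1 */
            size_t tx_size = HEADER_SIZE + size;

            if (size == 0)
                    b |= BARE_ADDRESS_SIZE << 4;         /* bare address probe */
            else
                    b |= (uint8_t)(tx_size << 4);
            return b;
    }

    int main(void)
    {
            printf("bare address: 0x%02x\n", aux_header_byte3(0));
            printf("1-byte write: 0x%02x\n", aux_header_byte3(1));
            return 0;
    }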
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index cad89a977527..10dae4106c08 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -21,8 +21,10 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/firmware.h>
24#include "drmP.h" 25#include "drmP.h"
25#include "radeon.h" 26#include "radeon.h"
27#include "radeon_ucode.h"
26#include "cikd.h" 28#include "cikd.h"
27#include "r600_dpm.h" 29#include "r600_dpm.h"
28#include "ci_dpm.h" 30#include "ci_dpm.h"
@@ -202,24 +204,29 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
202 struct ci_power_info *pi = ci_get_pi(rdev); 204 struct ci_power_info *pi = ci_get_pi(rdev);
203 205
204 switch (rdev->pdev->device) { 206 switch (rdev->pdev->device) {
207 case 0x6649:
205 case 0x6650: 208 case 0x6650:
209 case 0x6651:
206 case 0x6658: 210 case 0x6658:
207 case 0x665C: 211 case 0x665C:
212 case 0x665D:
208 default: 213 default:
209 pi->powertune_defaults = &defaults_bonaire_xt; 214 pi->powertune_defaults = &defaults_bonaire_xt;
210 break; 215 break;
211 case 0x6651:
212 case 0x665D:
213 pi->powertune_defaults = &defaults_bonaire_pro;
214 break;
215 case 0x6640: 216 case 0x6640:
216 pi->powertune_defaults = &defaults_saturn_xt;
217 break;
218 case 0x6641: 217 case 0x6641:
219 pi->powertune_defaults = &defaults_saturn_pro; 218 case 0x6646:
219 case 0x6647:
220 pi->powertune_defaults = &defaults_saturn_xt;
220 break; 221 break;
221 case 0x67B8: 222 case 0x67B8:
222 case 0x67B0: 223 case 0x67B0:
224 pi->powertune_defaults = &defaults_hawaii_xt;
225 break;
226 case 0x67BA:
227 case 0x67B1:
228 pi->powertune_defaults = &defaults_hawaii_pro;
229 break;
223 case 0x67A0: 230 case 0x67A0:
224 case 0x67A1: 231 case 0x67A1:
225 case 0x67A2: 232 case 0x67A2:
@@ -228,11 +235,7 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
228 case 0x67AA: 235 case 0x67AA:
229 case 0x67B9: 236 case 0x67B9:
230 case 0x67BE: 237 case 0x67BE:
231 pi->powertune_defaults = &defaults_hawaii_xt; 238 pi->powertune_defaults = &defaults_bonaire_xt;
232 break;
233 case 0x67BA:
234 case 0x67B1:
235 pi->powertune_defaults = &defaults_hawaii_pro;
236 break; 239 break;
237 } 240 }
238 241
@@ -5146,6 +5149,12 @@ int ci_dpm_init(struct radeon_device *rdev)
5146 pi->mclk_dpm_key_disabled = 0; 5149 pi->mclk_dpm_key_disabled = 0;
5147 pi->pcie_dpm_key_disabled = 0; 5150 pi->pcie_dpm_key_disabled = 0;
5148 5151
5152 /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
5153 if ((rdev->pdev->device == 0x6658) &&
5154 (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
5155 pi->mclk_dpm_key_disabled = 1;
5156 }
5157
5149 pi->caps_sclk_ds = true; 5158 pi->caps_sclk_ds = true;
5150 5159
5151 pi->mclk_strobe_mode_threshold = 40000; 5160 pi->mclk_strobe_mode_threshold = 40000;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 745143c2358f..d2fd98968085 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -38,6 +38,7 @@ MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
38MODULE_FIRMWARE("radeon/BONAIRE_ce.bin"); 38MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
39MODULE_FIRMWARE("radeon/BONAIRE_mec.bin"); 39MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
40MODULE_FIRMWARE("radeon/BONAIRE_mc.bin"); 40MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
41MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin");
41MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin"); 42MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
42MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin"); 43MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
43MODULE_FIRMWARE("radeon/BONAIRE_smc.bin"); 44MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
@@ -46,6 +47,7 @@ MODULE_FIRMWARE("radeon/HAWAII_me.bin");
46MODULE_FIRMWARE("radeon/HAWAII_ce.bin"); 47MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
47MODULE_FIRMWARE("radeon/HAWAII_mec.bin"); 48MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
48MODULE_FIRMWARE("radeon/HAWAII_mc.bin"); 49MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
50MODULE_FIRMWARE("radeon/HAWAII_mc2.bin");
49MODULE_FIRMWARE("radeon/HAWAII_rlc.bin"); 51MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
50MODULE_FIRMWARE("radeon/HAWAII_sdma.bin"); 52MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
51MODULE_FIRMWARE("radeon/HAWAII_smc.bin"); 53MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
@@ -61,6 +63,12 @@ MODULE_FIRMWARE("radeon/KABINI_ce.bin");
61MODULE_FIRMWARE("radeon/KABINI_mec.bin"); 63MODULE_FIRMWARE("radeon/KABINI_mec.bin");
62MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); 64MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
63MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); 65MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
66MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
67MODULE_FIRMWARE("radeon/MULLINS_me.bin");
68MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
69MODULE_FIRMWARE("radeon/MULLINS_mec.bin");
70MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
71MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");
64 72
65extern int r600_ih_ring_alloc(struct radeon_device *rdev); 73extern int r600_ih_ring_alloc(struct radeon_device *rdev);
66extern void r600_ih_ring_fini(struct radeon_device *rdev); 74extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -1471,6 +1479,43 @@ static const u32 hawaii_mgcg_cgcg_init[] =
1471 0xd80c, 0xff000ff0, 0x00000100 1479 0xd80c, 0xff000ff0, 0x00000100
1472}; 1480};
1473 1481
1482static const u32 godavari_golden_registers[] =
1483{
1484 0x55e4, 0xff607fff, 0xfc000100,
1485 0x6ed8, 0x00010101, 0x00010000,
1486 0x9830, 0xffffffff, 0x00000000,
1487 0x98302, 0xf00fffff, 0x00000400,
1488 0x6130, 0xffffffff, 0x00010000,
1489 0x5bb0, 0x000000f0, 0x00000070,
1490 0x5bc0, 0xf0311fff, 0x80300000,
1491 0x98f8, 0x73773777, 0x12010001,
1492 0x98fc, 0xffffffff, 0x00000010,
1493 0x8030, 0x00001f0f, 0x0000100a,
1494 0x2f48, 0x73773777, 0x12010001,
1495 0x2408, 0x000fffff, 0x000c007f,
1496 0x8a14, 0xf000003f, 0x00000007,
1497 0x8b24, 0xffffffff, 0x00ff0fff,
1498 0x30a04, 0x0000ff0f, 0x00000000,
1499 0x28a4c, 0x07ffffff, 0x06000000,
1500 0x4d8, 0x00000fff, 0x00000100,
1501 0xd014, 0x00010000, 0x00810001,
1502 0xd814, 0x00010000, 0x00810001,
1503 0x3e78, 0x00000001, 0x00000002,
1504 0xc768, 0x00000008, 0x00000008,
1505 0xc770, 0x00000f00, 0x00000800,
1506 0xc774, 0x00000f00, 0x00000800,
1507 0xc798, 0x00ffffff, 0x00ff7fbf,
1508 0xc79c, 0x00ffffff, 0x00ff7faf,
1509 0x8c00, 0x000000ff, 0x00000001,
1510 0x214f8, 0x01ff01ff, 0x00000002,
1511 0x21498, 0x007ff800, 0x00200000,
1512 0x2015c, 0xffffffff, 0x00000f40,
1513 0x88c4, 0x001f3ae3, 0x00000082,
1514 0x88d4, 0x0000001f, 0x00000010,
1515 0x30934, 0xffffffff, 0x00000000
1516};
1517
1518
1474static void cik_init_golden_registers(struct radeon_device *rdev) 1519static void cik_init_golden_registers(struct radeon_device *rdev)
1475{ 1520{
1476 switch (rdev->family) { 1521 switch (rdev->family) {
@@ -1502,6 +1547,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
1502 kalindi_golden_spm_registers, 1547 kalindi_golden_spm_registers,
1503 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); 1548 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
1504 break; 1549 break;
1550 case CHIP_MULLINS:
1551 radeon_program_register_sequence(rdev,
1552 kalindi_mgcg_cgcg_init,
1553 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
1554 radeon_program_register_sequence(rdev,
1555 godavari_golden_registers,
1556 (const u32)ARRAY_SIZE(godavari_golden_registers));
1557 radeon_program_register_sequence(rdev,
1558 kalindi_golden_common_registers,
1559 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
1560 radeon_program_register_sequence(rdev,
1561 kalindi_golden_spm_registers,
1562 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
1563 break;
1505 case CHIP_KAVERI: 1564 case CHIP_KAVERI:
1506 radeon_program_register_sequence(rdev, 1565 radeon_program_register_sequence(rdev,
1507 spectre_mgcg_cgcg_init, 1566 spectre_mgcg_cgcg_init,
@@ -1703,20 +1762,20 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
1703 const __be32 *fw_data; 1762 const __be32 *fw_data;
1704 u32 running, blackout = 0; 1763 u32 running, blackout = 0;
1705 u32 *io_mc_regs; 1764 u32 *io_mc_regs;
1706 int i, ucode_size, regs_size; 1765 int i, regs_size, ucode_size;
1707 1766
1708 if (!rdev->mc_fw) 1767 if (!rdev->mc_fw)
1709 return -EINVAL; 1768 return -EINVAL;
1710 1769
1770 ucode_size = rdev->mc_fw->size / 4;
1771
1711 switch (rdev->family) { 1772 switch (rdev->family) {
1712 case CHIP_BONAIRE: 1773 case CHIP_BONAIRE:
1713 io_mc_regs = (u32 *)&bonaire_io_mc_regs; 1774 io_mc_regs = (u32 *)&bonaire_io_mc_regs;
1714 ucode_size = CIK_MC_UCODE_SIZE;
1715 regs_size = BONAIRE_IO_MC_REGS_SIZE; 1775 regs_size = BONAIRE_IO_MC_REGS_SIZE;
1716 break; 1776 break;
1717 case CHIP_HAWAII: 1777 case CHIP_HAWAII:
1718 io_mc_regs = (u32 *)&hawaii_io_mc_regs; 1778 io_mc_regs = (u32 *)&hawaii_io_mc_regs;
1719 ucode_size = HAWAII_MC_UCODE_SIZE;
1720 regs_size = HAWAII_IO_MC_REGS_SIZE; 1779 regs_size = HAWAII_IO_MC_REGS_SIZE;
1721 break; 1780 break;
1722 default: 1781 default:
@@ -1783,7 +1842,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
1783 const char *chip_name; 1842 const char *chip_name;
1784 size_t pfp_req_size, me_req_size, ce_req_size, 1843 size_t pfp_req_size, me_req_size, ce_req_size,
1785 mec_req_size, rlc_req_size, mc_req_size = 0, 1844 mec_req_size, rlc_req_size, mc_req_size = 0,
1786 sdma_req_size, smc_req_size = 0; 1845 sdma_req_size, smc_req_size = 0, mc2_req_size = 0;
1787 char fw_name[30]; 1846 char fw_name[30];
1788 int err; 1847 int err;
1789 1848
@@ -1797,7 +1856,8 @@ static int cik_init_microcode(struct radeon_device *rdev)
1797 ce_req_size = CIK_CE_UCODE_SIZE * 4; 1856 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1798 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1857 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1799 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; 1858 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
1800 mc_req_size = CIK_MC_UCODE_SIZE * 4; 1859 mc_req_size = BONAIRE_MC_UCODE_SIZE * 4;
1860 mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4;
1801 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1861 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1802 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4); 1862 smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
1803 break; 1863 break;
@@ -1809,6 +1869,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
1809 mec_req_size = CIK_MEC_UCODE_SIZE * 4; 1869 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1810 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4; 1870 rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
1811 mc_req_size = HAWAII_MC_UCODE_SIZE * 4; 1871 mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
1872 mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4;
1812 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1873 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1813 smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4); 1874 smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
1814 break; 1875 break;
@@ -1830,6 +1891,15 @@ static int cik_init_microcode(struct radeon_device *rdev)
1830 rlc_req_size = KB_RLC_UCODE_SIZE * 4; 1891 rlc_req_size = KB_RLC_UCODE_SIZE * 4;
1831 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; 1892 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1832 break; 1893 break;
1894 case CHIP_MULLINS:
1895 chip_name = "MULLINS";
1896 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
1897 me_req_size = CIK_ME_UCODE_SIZE * 4;
1898 ce_req_size = CIK_CE_UCODE_SIZE * 4;
1899 mec_req_size = CIK_MEC_UCODE_SIZE * 4;
1900 rlc_req_size = ML_RLC_UCODE_SIZE * 4;
1901 sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
1902 break;
1833 default: BUG(); 1903 default: BUG();
1834 } 1904 }
1835 1905
@@ -1904,16 +1974,22 @@ static int cik_init_microcode(struct radeon_device *rdev)
1904 1974
1905 /* No SMC, MC ucode on APUs */ 1975 /* No SMC, MC ucode on APUs */
1906 if (!(rdev->flags & RADEON_IS_IGP)) { 1976 if (!(rdev->flags & RADEON_IS_IGP)) {
1907 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 1977 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
1908 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1978 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1909 if (err) 1979 if (err) {
1910 goto out; 1980 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
1911 if (rdev->mc_fw->size != mc_req_size) { 1981 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1982 if (err)
1983 goto out;
1984 }
1985 if ((rdev->mc_fw->size != mc_req_size) &&
1986 (rdev->mc_fw->size != mc2_req_size)){
1912 printk(KERN_ERR 1987 printk(KERN_ERR
1913 "cik_mc: Bogus length %zu in firmware \"%s\"\n", 1988 "cik_mc: Bogus length %zu in firmware \"%s\"\n",
1914 rdev->mc_fw->size, fw_name); 1989 rdev->mc_fw->size, fw_name);
1915 err = -EINVAL; 1990 err = -EINVAL;
1916 } 1991 }
1992 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
1917 1993
1918 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 1994 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1919 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 1995 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
@@ -3262,6 +3338,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
3262 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 3338 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
3263 break; 3339 break;
3264 case CHIP_KABINI: 3340 case CHIP_KABINI:
3341 case CHIP_MULLINS:
3265 default: 3342 default:
3266 rdev->config.cik.max_shader_engines = 1; 3343 rdev->config.cik.max_shader_engines = 1;
3267 rdev->config.cik.max_tile_pipes = 2; 3344 rdev->config.cik.max_tile_pipes = 2;
@@ -3692,6 +3769,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
3692 r = radeon_fence_emit(rdev, fence, ring->idx); 3769 r = radeon_fence_emit(rdev, fence, ring->idx);
3693 if (r) { 3770 if (r) {
3694 radeon_ring_unlock_undo(rdev, ring); 3771 radeon_ring_unlock_undo(rdev, ring);
3772 radeon_semaphore_free(rdev, &sem, NULL);
3695 return r; 3773 return r;
3696 } 3774 }
3697 3775
@@ -5790,6 +5868,9 @@ static int cik_rlc_resume(struct radeon_device *rdev)
5790 case CHIP_KABINI: 5868 case CHIP_KABINI:
5791 size = KB_RLC_UCODE_SIZE; 5869 size = KB_RLC_UCODE_SIZE;
5792 break; 5870 break;
5871 case CHIP_MULLINS:
5872 size = ML_RLC_UCODE_SIZE;
5873 break;
5793 } 5874 }
5794 5875
5795 cik_rlc_stop(rdev); 5876 cik_rlc_stop(rdev);
@@ -6538,6 +6619,7 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
6538 buffer[count++] = cpu_to_le32(0x00000000); 6619 buffer[count++] = cpu_to_le32(0x00000000);
6539 break; 6620 break;
6540 case CHIP_KABINI: 6621 case CHIP_KABINI:
6622 case CHIP_MULLINS:
6541 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ 6623 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
6542 buffer[count++] = cpu_to_le32(0x00000000); 6624 buffer[count++] = cpu_to_le32(0x00000000);
6543 break; 6625 break;
@@ -6683,6 +6765,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
6683 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 6765 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
6684 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 6766 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
6685 } 6767 }
6768 /* pflip */
6769 if (rdev->num_crtc >= 2) {
6770 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
6771 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
6772 }
6773 if (rdev->num_crtc >= 4) {
6774 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
6775 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
6776 }
6777 if (rdev->num_crtc >= 6) {
6778 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
6779 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
6780 }
6686 6781
6687 /* dac hotplug */ 6782 /* dac hotplug */
6688 WREG32(DAC_AUTODETECT_INT_CONTROL, 0); 6783 WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
@@ -7039,6 +7134,25 @@ int cik_irq_set(struct radeon_device *rdev)
7039 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); 7134 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
7040 } 7135 }
7041 7136
7137 if (rdev->num_crtc >= 2) {
7138 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
7139 GRPH_PFLIP_INT_MASK);
7140 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
7141 GRPH_PFLIP_INT_MASK);
7142 }
7143 if (rdev->num_crtc >= 4) {
7144 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
7145 GRPH_PFLIP_INT_MASK);
7146 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
7147 GRPH_PFLIP_INT_MASK);
7148 }
7149 if (rdev->num_crtc >= 6) {
7150 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
7151 GRPH_PFLIP_INT_MASK);
7152 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
7153 GRPH_PFLIP_INT_MASK);
7154 }
7155
7042 WREG32(DC_HPD1_INT_CONTROL, hpd1); 7156 WREG32(DC_HPD1_INT_CONTROL, hpd1);
7043 WREG32(DC_HPD2_INT_CONTROL, hpd2); 7157 WREG32(DC_HPD2_INT_CONTROL, hpd2);
7044 WREG32(DC_HPD3_INT_CONTROL, hpd3); 7158 WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -7075,6 +7189,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
7075 rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); 7189 rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
7076 rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6); 7190 rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
7077 7191
7192 rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS +
7193 EVERGREEN_CRTC0_REGISTER_OFFSET);
7194 rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS +
7195 EVERGREEN_CRTC1_REGISTER_OFFSET);
7196 if (rdev->num_crtc >= 4) {
7197 rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS +
7198 EVERGREEN_CRTC2_REGISTER_OFFSET);
7199 rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS +
7200 EVERGREEN_CRTC3_REGISTER_OFFSET);
7201 }
7202 if (rdev->num_crtc >= 6) {
7203 rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS +
7204 EVERGREEN_CRTC4_REGISTER_OFFSET);
7205 rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS +
7206 EVERGREEN_CRTC5_REGISTER_OFFSET);
7207 }
7208
7209 if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
7210 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
7211 GRPH_PFLIP_INT_CLEAR);
7212 if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
7213 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
7214 GRPH_PFLIP_INT_CLEAR);
7078 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) 7215 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
7079 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); 7216 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
7080 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) 7217 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
@@ -7085,6 +7222,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
7085 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); 7222 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
7086 7223
7087 if (rdev->num_crtc >= 4) { 7224 if (rdev->num_crtc >= 4) {
7225 if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
7226 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
7227 GRPH_PFLIP_INT_CLEAR);
7228 if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
7229 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
7230 GRPH_PFLIP_INT_CLEAR);
7088 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) 7231 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
7089 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); 7232 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
7090 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) 7233 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
@@ -7096,6 +7239,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
7096 } 7239 }
7097 7240
7098 if (rdev->num_crtc >= 6) { 7241 if (rdev->num_crtc >= 6) {
7242 if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
7243 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
7244 GRPH_PFLIP_INT_CLEAR);
7245 if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
7246 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET,
7247 GRPH_PFLIP_INT_CLEAR);
7099 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) 7248 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
7100 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); 7249 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
7101 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) 7250 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
@@ -7447,6 +7596,15 @@ restart_ih:
7447 break; 7596 break;
7448 } 7597 }
7449 break; 7598 break;
7599 case 8: /* D1 page flip */
7600 case 10: /* D2 page flip */
7601 case 12: /* D3 page flip */
7602 case 14: /* D4 page flip */
7603 case 16: /* D5 page flip */
7604 case 18: /* D6 page flip */
7605 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
7606 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
7607 break;
7450 case 42: /* HPD hotplug */ 7608 case 42: /* HPD hotplug */
7451 switch (src_data) { 7609 switch (src_data) {
7452 case 0: 7610 case 0:
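cik_init_microcode() above now prefers the newer *_mc2.bin image, silently falls back to *_mc.bin, and accepts either expected length. The try-then-fall-back shape, sketched in userspace with fopen() standing in for request_firmware() (file names here are only illustrative):

    #include <stdio.h>
    #include <stddef.h>

    /* Try the preferred image first, then the legacy one; 'name' reports
     * which path was attempted last. Returns NULL if neither exists. */
    static FILE *load_mc_firmware(const char *chip, char *name, size_t len)
    {
            FILE *fw;

            snprintf(name, len, "radeon/%s_mc2.bin", chip);
            fw = fopen(name, "rb");
            if (!fw) {
                    snprintf(name, len, "radeon/%s_mc.bin", chip);
                    fw = fopen(name, "rb");
            }
            return fw;
    }

    int main(void)
    {
            char name[64];
            FILE *fw = load_mc_firmware("BONAIRE", name, sizeof(name));

            printf("%s: %s\n", name, fw ? "loaded" : "not found");
            if (fw)
                    fclose(fw);
            return 0;
    }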
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 89b4afa5041c..72e464c79a88 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -562,6 +562,7 @@ int cik_copy_dma(struct radeon_device *rdev,
562 r = radeon_fence_emit(rdev, fence, ring->idx); 562 r = radeon_fence_emit(rdev, fence, ring->idx);
563 if (r) { 563 if (r) {
564 radeon_ring_unlock_undo(rdev, ring); 564 radeon_ring_unlock_undo(rdev, ring);
565 radeon_semaphore_free(rdev, &sem, NULL);
565 return r; 566 return r;
566 } 567 }
567 568
@@ -597,7 +598,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
597 tmp = 0xCAFEDEAD; 598 tmp = 0xCAFEDEAD;
598 writel(tmp, ptr); 599 writel(tmp, ptr);
599 600
600 r = radeon_ring_lock(rdev, ring, 4); 601 r = radeon_ring_lock(rdev, ring, 5);
601 if (r) { 602 if (r) {
602 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); 603 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
603 return r; 604 return r;
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 213873270d5f..dd7926394a8f 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -888,6 +888,15 @@
888# define DC_HPD6_RX_INTERRUPT (1 << 18) 888# define DC_HPD6_RX_INTERRUPT (1 << 18)
889#define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780 889#define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780
890 890
891/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
892#define GRPH_INT_STATUS 0x6858
893# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
894# define GRPH_PFLIP_INT_CLEAR (1 << 8)
895/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
896#define GRPH_INT_CONTROL 0x685c
897# define GRPH_PFLIP_INT_MASK (1 << 0)
898# define GRPH_PFLIP_INT_TYPE (1 << 8)
899
891#define DAC_AUTODETECT_INT_CONTROL 0x67c8 900#define DAC_AUTODETECT_INT_CONTROL 0x67c8
892 901
893#define DC_HPD1_INT_STATUS 0x601c 902#define DC_HPD1_INT_STATUS 0x601c
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 94e858751994..0a65dc7e93e7 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -309,11 +309,17 @@ int dce6_audio_init(struct radeon_device *rdev)
309 309
310 rdev->audio.enabled = true; 310 rdev->audio.enabled = true;
311 311
312 if (ASIC_IS_DCE8(rdev)) 312 if (ASIC_IS_DCE81(rdev)) /* KV: 4 streams, 7 endpoints */
313 rdev->audio.num_pins = 7;
314 else if (ASIC_IS_DCE83(rdev)) /* KB: 2 streams, 3 endpoints */
315 rdev->audio.num_pins = 3;
316 else if (ASIC_IS_DCE8(rdev)) /* BN/HW: 6 streams, 7 endpoints */
317 rdev->audio.num_pins = 7;
318 else if (ASIC_IS_DCE61(rdev)) /* TN: 4 streams, 6 endpoints */
313 rdev->audio.num_pins = 6; 319 rdev->audio.num_pins = 6;
314 else if (ASIC_IS_DCE61(rdev)) 320 else if (ASIC_IS_DCE64(rdev)) /* OL: 2 streams, 2 endpoints */
315 rdev->audio.num_pins = 4; 321 rdev->audio.num_pins = 2;
316 else 322 else /* SI: 6 streams, 6 endpoints */
317 rdev->audio.num_pins = 6; 323 rdev->audio.num_pins = 6;
318 324
319 for (i = 0; i < rdev->audio.num_pins; i++) { 325 for (i = 0; i < rdev->audio.num_pins; i++) {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b406546440da..0f7a51a3694f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4371,7 +4371,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
4371 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 4371 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
4372 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; 4372 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
4373 u32 grbm_int_cntl = 0; 4373 u32 grbm_int_cntl = 0;
4374 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
4375 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; 4374 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
4376 u32 dma_cntl, dma_cntl1 = 0; 4375 u32 dma_cntl, dma_cntl1 = 0;
4377 u32 thermal_int = 0; 4376 u32 thermal_int = 0;
@@ -4554,15 +4553,21 @@ int evergreen_irq_set(struct radeon_device *rdev)
4554 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); 4553 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
4555 } 4554 }
4556 4555
4557 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); 4556 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
4558 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); 4557 GRPH_PFLIP_INT_MASK);
4558 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
4559 GRPH_PFLIP_INT_MASK);
4559 if (rdev->num_crtc >= 4) { 4560 if (rdev->num_crtc >= 4) {
4560 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); 4561 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
4561 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); 4562 GRPH_PFLIP_INT_MASK);
4563 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
4564 GRPH_PFLIP_INT_MASK);
4562 } 4565 }
4563 if (rdev->num_crtc >= 6) { 4566 if (rdev->num_crtc >= 6) {
4564 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); 4567 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
4565 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); 4568 GRPH_PFLIP_INT_MASK);
4569 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
4570 GRPH_PFLIP_INT_MASK);
4566 } 4571 }
4567 4572
4568 WREG32(DC_HPD1_INT_CONTROL, hpd1); 4573 WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -4951,6 +4956,15 @@ restart_ih:
4951 break; 4956 break;
4952 } 4957 }
4953 break; 4958 break;
4959 case 8: /* D1 page flip */
4960 case 10: /* D2 page flip */
4961 case 12: /* D3 page flip */
4962 case 14: /* D4 page flip */
4963 case 16: /* D5 page flip */
4964 case 18: /* D6 page flip */
4965 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
4966 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
4967 break;
4954 case 42: /* HPD hotplug */ 4968 case 42: /* HPD hotplug */
4955 switch (src_data) { 4969 switch (src_data) {
4956 case 0: 4970 case 0:
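Both the evergreen and CIK interrupt handlers above fold the six page-flip interrupt sources into one case and recover the CRTC index arithmetically: source IDs 8, 10, 12, 14, 16, 18 map to CRTCs 0 through 5 via (src_id - 8) >> 1, so for example src_id 14 yields CRTC 3 (D4). A one-screen check of that mapping:

    #include <stdio.h>

    int main(void)
    {
            /* page-flip source IDs 8,10,...,18 map to CRTC 0..5 */
            for (int src_id = 8; src_id <= 18; src_id += 2)
                    printf("src_id %2d -> crtc %d\n", src_id, (src_id - 8) >> 1);
            return 0;
    }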
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 287fe966d7de..478caefe0fef 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -151,6 +151,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
151 r = radeon_fence_emit(rdev, fence, ring->idx); 151 r = radeon_fence_emit(rdev, fence, ring->idx);
152 if (r) { 152 if (r) {
153 radeon_ring_unlock_undo(rdev, ring); 153 radeon_ring_unlock_undo(rdev, ring);
154 radeon_semaphore_free(rdev, &sem, NULL);
154 return r; 155 return r;
155 } 156 }
156 157
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 16ec9d56a234..3f6e817d97ee 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -546,6 +546,52 @@ static int kv_set_divider_value(struct radeon_device *rdev,
546 return 0; 546 return 0;
547} 547}
548 548
549static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
550 struct sumo_vid_mapping_table *vid_mapping_table,
551 u32 vid_2bit)
552{
553 struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
554 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
555 u32 i;
556
557 if (vddc_sclk_table && vddc_sclk_table->count) {
558 if (vid_2bit < vddc_sclk_table->count)
559 return vddc_sclk_table->entries[vid_2bit].v;
560 else
561 return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
562 } else {
563 for (i = 0; i < vid_mapping_table->num_entries; i++) {
564 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
565 return vid_mapping_table->entries[i].vid_7bit;
566 }
567 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
568 }
569}
570
571static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
572 struct sumo_vid_mapping_table *vid_mapping_table,
573 u32 vid_7bit)
574{
575 struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
576 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
577 u32 i;
578
579 if (vddc_sclk_table && vddc_sclk_table->count) {
580 for (i = 0; i < vddc_sclk_table->count; i++) {
581 if (vddc_sclk_table->entries[i].v == vid_7bit)
582 return i;
583 }
584 return vddc_sclk_table->count - 1;
585 } else {
586 for (i = 0; i < vid_mapping_table->num_entries; i++) {
587 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
588 return vid_mapping_table->entries[i].vid_2bit;
589 }
590
591 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
592 }
593}
594
549static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev, 595static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
550 u16 voltage) 596 u16 voltage)
551{ 597{
@@ -556,9 +602,9 @@ static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
556 u32 vid_2bit) 602 u32 vid_2bit)
557{ 603{
558 struct kv_power_info *pi = kv_get_pi(rdev); 604 struct kv_power_info *pi = kv_get_pi(rdev);
559 u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev, 605 u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
560 &pi->sys_info.vid_mapping_table, 606 &pi->sys_info.vid_mapping_table,
561 vid_2bit); 607 vid_2bit);
562 608
563 return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit); 609 return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
564} 610}
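The new kv_convert_vid2_to_vid7() prefers the vddc-vs-sclk dependency table when it has entries, indexing it directly by the 2-bit VID and clamping to the last entry, and only falls back to the BIOS VID mapping table otherwise; kv_convert_vid7_to_vid2() is the inverse lookup. A simplified standalone sketch of that lookup order, using hypothetical stand-in structs rather than the kernel types:

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the kernel tables. */
struct dep_entry { unsigned v; };
struct dep_table { unsigned count; struct dep_entry entries[8]; };
struct map_entry { unsigned vid_2bit, vid_7bit; };
struct map_table { unsigned num_entries; struct map_entry entries[4]; };

static unsigned vid2_to_vid7(const struct dep_table *sclk,
			     const struct map_table *map, unsigned vid_2bit)
{
	unsigned i;

	/* Preferred source: the vddc/sclk dependency table, clamped to the last entry. */
	if (sclk && sclk->count) {
		if (vid_2bit < sclk->count)
			return sclk->entries[vid_2bit].v;
		return sclk->entries[sclk->count - 1].v;
	}
	/* Fallback: the BIOS VID mapping table. */
	for (i = 0; i < map->num_entries; i++)
		if (map->entries[i].vid_2bit == vid_2bit)
			return map->entries[i].vid_7bit;
	return map->entries[map->num_entries - 1].vid_7bit;
}

int main(void)
{
	struct dep_table sclk = { 3, { { 0x10 }, { 0x20 }, { 0x30 } } };
	struct map_table map = { 2, { { 0, 0x08 }, { 1, 0x18 } } };

	printf("vid2 1 -> vid7 0x%02x\n", vid2_to_vid7(&sclk, &map, 1));
	printf("vid2 5 -> vid7 0x%02x (clamped)\n", vid2_to_vid7(&sclk, &map, 5));
	return 0;
}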
@@ -639,7 +685,7 @@ static int kv_force_lowest_valid(struct radeon_device *rdev)
639 685
640static int kv_unforce_levels(struct radeon_device *rdev) 686static int kv_unforce_levels(struct radeon_device *rdev)
641{ 687{
642 if (rdev->family == CHIP_KABINI) 688 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
643 return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); 689 return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
644 else 690 else
645 return kv_set_enabled_levels(rdev); 691 return kv_set_enabled_levels(rdev);
@@ -1362,13 +1408,20 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
 	struct radeon_uvd_clock_voltage_dependency_table *table =
 		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
 	int ret;
+	u32 mask;
 
 	if (!gate) {
-		if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
+		if (table->count)
 			pi->uvd_boot_level = table->count - 1;
 		else
 			pi->uvd_boot_level = 0;
 
+		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
+			mask = 1 << pi->uvd_boot_level;
+		} else {
+			mask = 0x1f;
+		}
+
 		ret = kv_copy_bytes_to_smc(rdev,
 					   pi->dpm_table_start +
 					   offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
@@ -1377,11 +1430,9 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
 		if (ret)
 			return ret;
 
-		if (!pi->caps_uvd_dpm ||
-		    pi->caps_stable_p_state)
-			kv_send_msg_to_smc_with_parameter(rdev,
-							  PPSMC_MSG_UVDDPM_SetEnabledMask,
-							  (1 << pi->uvd_boot_level));
+		kv_send_msg_to_smc_with_parameter(rdev,
+						  PPSMC_MSG_UVDDPM_SetEnabledMask,
+						  mask);
 	}
 
 	return kv_enable_uvd_dpm(rdev, !gate);
@@ -1617,7 +1668,7 @@ static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
1617 if (pi->acp_power_gated == gate) 1668 if (pi->acp_power_gated == gate)
1618 return; 1669 return;
1619 1670
1620 if (rdev->family == CHIP_KABINI) 1671 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
1621 return; 1672 return;
1622 1673
1623 pi->acp_power_gated = gate; 1674 pi->acp_power_gated = gate;
@@ -1786,7 +1837,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
1786 } 1837 }
1787 } 1838 }
1788 1839
1789 if (rdev->family == CHIP_KABINI) { 1840 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
1790 if (pi->enable_dpm) { 1841 if (pi->enable_dpm) {
1791 kv_set_valid_clock_range(rdev, new_ps); 1842 kv_set_valid_clock_range(rdev, new_ps);
1792 kv_update_dfs_bypass_settings(rdev, new_ps); 1843 kv_update_dfs_bypass_settings(rdev, new_ps);
@@ -1812,6 +1863,8 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
1812 return ret; 1863 return ret;
1813 } 1864 }
1814 kv_update_sclk_t(rdev); 1865 kv_update_sclk_t(rdev);
1866 if (rdev->family == CHIP_MULLINS)
1867 kv_enable_nb_dpm(rdev);
1815 } 1868 }
1816 } else { 1869 } else {
1817 if (pi->enable_dpm) { 1870 if (pi->enable_dpm) {
@@ -1862,7 +1915,7 @@ void kv_dpm_reset_asic(struct radeon_device *rdev)
1862{ 1915{
1863 struct kv_power_info *pi = kv_get_pi(rdev); 1916 struct kv_power_info *pi = kv_get_pi(rdev);
1864 1917
1865 if (rdev->family == CHIP_KABINI) { 1918 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
1866 kv_force_lowest_valid(rdev); 1919 kv_force_lowest_valid(rdev);
1867 kv_init_graphics_levels(rdev); 1920 kv_init_graphics_levels(rdev);
1868 kv_program_bootup_state(rdev); 1921 kv_program_bootup_state(rdev);
@@ -1901,14 +1954,41 @@ static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
1901static void kv_patch_voltage_values(struct radeon_device *rdev) 1954static void kv_patch_voltage_values(struct radeon_device *rdev)
1902{ 1955{
1903 int i; 1956 int i;
1904 struct radeon_uvd_clock_voltage_dependency_table *table = 1957 struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
1905 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; 1958 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1959 struct radeon_vce_clock_voltage_dependency_table *vce_table =
1960 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
1961 struct radeon_clock_voltage_dependency_table *samu_table =
1962 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
1963 struct radeon_clock_voltage_dependency_table *acp_table =
1964 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
1906 1965
1907 if (table->count) { 1966 if (uvd_table->count) {
1908 for (i = 0; i < table->count; i++) 1967 for (i = 0; i < uvd_table->count; i++)
1909 table->entries[i].v = 1968 uvd_table->entries[i].v =
1910 kv_convert_8bit_index_to_voltage(rdev, 1969 kv_convert_8bit_index_to_voltage(rdev,
1911 table->entries[i].v); 1970 uvd_table->entries[i].v);
1971 }
1972
1973 if (vce_table->count) {
1974 for (i = 0; i < vce_table->count; i++)
1975 vce_table->entries[i].v =
1976 kv_convert_8bit_index_to_voltage(rdev,
1977 vce_table->entries[i].v);
1978 }
1979
1980 if (samu_table->count) {
1981 for (i = 0; i < samu_table->count; i++)
1982 samu_table->entries[i].v =
1983 kv_convert_8bit_index_to_voltage(rdev,
1984 samu_table->entries[i].v);
1985 }
1986
1987 if (acp_table->count) {
1988 for (i = 0; i < acp_table->count; i++)
1989 acp_table->entries[i].v =
1990 kv_convert_8bit_index_to_voltage(rdev,
1991 acp_table->entries[i].v);
1912 } 1992 }
1913 1993
1914} 1994}
@@ -1941,7 +2021,7 @@ static int kv_force_dpm_highest(struct radeon_device *rdev)
1941 break; 2021 break;
1942 } 2022 }
1943 2023
1944 if (rdev->family == CHIP_KABINI) 2024 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
1945 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); 2025 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1946 else 2026 else
1947 return kv_set_enabled_level(rdev, i); 2027 return kv_set_enabled_level(rdev, i);
@@ -1961,7 +2041,7 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev)
1961 break; 2041 break;
1962 } 2042 }
1963 2043
1964 if (rdev->family == CHIP_KABINI) 2044 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
1965 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); 2045 return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
1966 else 2046 else
1967 return kv_set_enabled_level(rdev, i); 2047 return kv_set_enabled_level(rdev, i);
@@ -2118,7 +2198,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
2118 else 2198 else
2119 pi->battery_state = false; 2199 pi->battery_state = false;
2120 2200
2121 if (rdev->family == CHIP_KABINI) { 2201 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
2122 ps->dpm0_pg_nb_ps_lo = 0x1; 2202 ps->dpm0_pg_nb_ps_lo = 0x1;
2123 ps->dpm0_pg_nb_ps_hi = 0x0; 2203 ps->dpm0_pg_nb_ps_hi = 0x0;
2124 ps->dpmx_nb_ps_lo = 0x1; 2204 ps->dpmx_nb_ps_lo = 0x1;
@@ -2179,7 +2259,7 @@ static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
2179 if (pi->lowest_valid > pi->highest_valid) 2259 if (pi->lowest_valid > pi->highest_valid)
2180 return -EINVAL; 2260 return -EINVAL;
2181 2261
2182 if (rdev->family == CHIP_KABINI) { 2262 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
2183 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { 2263 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2184 pi->graphics_level[i].GnbSlow = 1; 2264 pi->graphics_level[i].GnbSlow = 1;
2185 pi->graphics_level[i].ForceNbPs1 = 0; 2265 pi->graphics_level[i].ForceNbPs1 = 0;
@@ -2253,9 +2333,9 @@ static void kv_init_graphics_levels(struct radeon_device *rdev)
2253 break; 2333 break;
2254 2334
2255 kv_set_divider_value(rdev, i, table->entries[i].clk); 2335 kv_set_divider_value(rdev, i, table->entries[i].clk);
2256 vid_2bit = sumo_convert_vid7_to_vid2(rdev, 2336 vid_2bit = kv_convert_vid7_to_vid2(rdev,
2257 &pi->sys_info.vid_mapping_table, 2337 &pi->sys_info.vid_mapping_table,
2258 table->entries[i].v); 2338 table->entries[i].v);
2259 kv_set_vid(rdev, i, vid_2bit); 2339 kv_set_vid(rdev, i, vid_2bit);
2260 kv_set_at(rdev, i, pi->at[i]); 2340 kv_set_at(rdev, i, pi->at[i]);
2261 kv_dpm_power_level_enabled_for_throttle(rdev, i, true); 2341 kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
@@ -2324,7 +2404,7 @@ static void kv_program_nbps_index_settings(struct radeon_device *rdev,
2324 struct kv_power_info *pi = kv_get_pi(rdev); 2404 struct kv_power_info *pi = kv_get_pi(rdev);
2325 u32 nbdpmconfig1; 2405 u32 nbdpmconfig1;
2326 2406
2327 if (rdev->family == CHIP_KABINI) 2407 if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
2328 return; 2408 return;
2329 2409
2330 if (pi->sys_info.nb_dpm_enable) { 2410 if (pi->sys_info.nb_dpm_enable) {
@@ -2631,9 +2711,6 @@ int kv_dpm_init(struct radeon_device *rdev)
2631 2711
2632 pi->sram_end = SMC_RAM_END; 2712 pi->sram_end = SMC_RAM_END;
2633 2713
2634 if (rdev->family == CHIP_KABINI)
2635 pi->high_voltage_t = 4001;
2636
2637 pi->enable_nb_dpm = true; 2714 pi->enable_nb_dpm = true;
2638 2715
2639 pi->caps_power_containment = true; 2716 pi->caps_power_containment = true;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6e887d004eba..bbc189fd3ddc 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2839,6 +2839,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 	r = radeon_fence_emit(rdev, fence, ring->idx);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
+		radeon_semaphore_free(rdev, &sem, NULL);
 		return r;
 	}
 
@@ -3505,7 +3506,6 @@ int r600_irq_set(struct radeon_device *rdev)
 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
 	u32 grbm_int_cntl = 0;
 	u32 hdmi0, hdmi1;
-	u32 d1grph = 0, d2grph = 0;
 	u32 dma_cntl;
 	u32 thermal_int = 0;
 
@@ -3614,8 +3614,8 @@ int r600_irq_set(struct radeon_device *rdev)
 	WREG32(CP_INT_CNTL, cp_int_cntl);
 	WREG32(DMA_CNTL, dma_cntl);
 	WREG32(DxMODE_INT_MASK, mode_int);
-	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
-	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 	if (ASIC_IS_DCE3(rdev)) {
 		WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -3918,6 +3918,14 @@ restart_ih:
 			break;
 		}
 		break;
+	case 9: /* D1 pflip */
+		DRM_DEBUG("IH: D1 flip\n");
+		radeon_crtc_handle_flip(rdev, 0);
+		break;
+	case 11: /* D2 pflip */
+		DRM_DEBUG("IH: D2 flip\n");
+		radeon_crtc_handle_flip(rdev, 1);
+		break;
 	case 19: /* HPD/DAC hotplug */
 		switch (src_data) {
 		case 0:
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 53fcb28f5578..4969cef44a19 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -489,6 +489,7 @@ int r600_copy_dma(struct radeon_device *rdev,
 	r = radeon_fence_emit(rdev, fence, ring->idx);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
+		radeon_semaphore_free(rdev, &sem, NULL);
 		return r;
 	}
 
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index cbf7e3269f84..9c61b74ef441 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -158,16 +158,18 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
 	u32 line_time_us, vblank_lines;
 	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		radeon_crtc = to_radeon_crtc(crtc);
-		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-			line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
-				radeon_crtc->hw_mode.clock;
-			vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
-				radeon_crtc->hw_mode.crtc_vdisplay +
-				(radeon_crtc->v_border * 2);
-			vblank_time_us = vblank_lines * line_time_us;
-			break;
+	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			radeon_crtc = to_radeon_crtc(crtc);
+			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+				line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
+					radeon_crtc->hw_mode.clock;
+				vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
+					radeon_crtc->hw_mode.crtc_vdisplay +
+					(radeon_crtc->v_border * 2);
+				vblank_time_us = vblank_lines * line_time_us;
+				break;
+			}
 		}
 	}
 
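The rework only touches the guard around the loop; the vblank-time arithmetic is unchanged: line time in microseconds is crtc_htotal * 1000 divided by the pixel clock in kHz, and the vblank time is that multiplied by the number of blanking lines. A worked example with made-up 1080p timings (illustrative only, not values from the patch):

#include <stdio.h>

int main(void)
{
	/* Hypothetical 1920x1080 timing: example numbers, not from the patch. */
	unsigned crtc_htotal = 2200, clock_khz = 148500;	/* 148.5 MHz pixel clock */
	unsigned crtc_vblank_end = 1125, crtc_vdisplay = 1080, v_border = 0;

	unsigned line_time_us = (crtc_htotal * 1000) / clock_khz;	/* ~14 us per line */
	unsigned vblank_lines = crtc_vblank_end - crtc_vdisplay + (v_border * 2);
	unsigned vblank_time_us = vblank_lines * line_time_us;

	printf("line time %u us, %u blanking lines, vblank %u us\n",
	       line_time_us, vblank_lines, vblank_time_us);
	return 0;
}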
@@ -181,14 +183,15 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
 	struct radeon_crtc *radeon_crtc;
 	u32 vrefresh = 0;
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		radeon_crtc = to_radeon_crtc(crtc);
-		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
-			vrefresh = radeon_crtc->hw_mode.vrefresh;
-			break;
+	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+			radeon_crtc = to_radeon_crtc(crtc);
+			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+				vrefresh = radeon_crtc->hw_mode.vrefresh;
+				break;
+			}
 		}
 	}
-
 	return vrefresh;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f21db7a0b34d..8149e7cf4303 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -730,6 +730,12 @@ struct cik_irq_stat_regs {
730 u32 disp_int_cont4; 730 u32 disp_int_cont4;
731 u32 disp_int_cont5; 731 u32 disp_int_cont5;
732 u32 disp_int_cont6; 732 u32 disp_int_cont6;
733 u32 d1grph_int;
734 u32 d2grph_int;
735 u32 d3grph_int;
736 u32 d4grph_int;
737 u32 d5grph_int;
738 u32 d6grph_int;
733}; 739};
734 740
735union radeon_irq_stat_regs { 741union radeon_irq_stat_regs {
@@ -739,7 +745,7 @@ union radeon_irq_stat_regs {
739 struct cik_irq_stat_regs cik; 745 struct cik_irq_stat_regs cik;
740}; 746};
741 747
742#define RADEON_MAX_HPD_PINS 6 748#define RADEON_MAX_HPD_PINS 7
743#define RADEON_MAX_CRTCS 6 749#define RADEON_MAX_CRTCS 6
744#define RADEON_MAX_AFMT_BLOCKS 7 750#define RADEON_MAX_AFMT_BLOCKS 7
745 751
@@ -1636,6 +1642,7 @@ struct radeon_vce {
1636 unsigned fb_version; 1642 unsigned fb_version;
1637 atomic_t handles[RADEON_MAX_VCE_HANDLES]; 1643 atomic_t handles[RADEON_MAX_VCE_HANDLES];
1638 struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; 1644 struct drm_file *filp[RADEON_MAX_VCE_HANDLES];
1645 unsigned img_size[RADEON_MAX_VCE_HANDLES];
1639 struct delayed_work idle_work; 1646 struct delayed_work idle_work;
1640}; 1647};
1641 1648
@@ -1649,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
1649 uint32_t handle, struct radeon_fence **fence); 1656 uint32_t handle, struct radeon_fence **fence);
1650void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp); 1657void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
1651void radeon_vce_note_usage(struct radeon_device *rdev); 1658void radeon_vce_note_usage(struct radeon_device *rdev);
1652int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi); 1659int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
1653int radeon_vce_cs_parse(struct radeon_cs_parser *p); 1660int radeon_vce_cs_parse(struct radeon_cs_parser *p);
1654bool radeon_vce_semaphore_emit(struct radeon_device *rdev, 1661bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
1655 struct radeon_ring *ring, 1662 struct radeon_ring *ring,
@@ -2321,6 +2328,7 @@ struct radeon_device {
2321 bool have_disp_power_ref; 2328 bool have_disp_power_ref;
2322}; 2329};
2323 2330
2331bool radeon_is_px(struct drm_device *dev);
2324int radeon_device_init(struct radeon_device *rdev, 2332int radeon_device_init(struct radeon_device *rdev,
2325 struct drm_device *ddev, 2333 struct drm_device *ddev,
2326 struct pci_dev *pdev, 2334 struct pci_dev *pdev,
@@ -2631,6 +2639,10 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
2631#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND)) 2639#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
2632#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN)) 2640#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
2633#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE)) 2641#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
2642#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
2643#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
2644#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
2645 (rdev->family == CHIP_MULLINS))
2634 2646
2635#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ 2647#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
2636 (rdev->ddev->pdev->device == 0x6850) || \ 2648 (rdev->ddev->pdev->device == 0x6850) || \
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index b8a24a75d4ff..be20e62dac83 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2516,6 +2516,7 @@ int radeon_asic_init(struct radeon_device *rdev)
2516 break; 2516 break;
2517 case CHIP_KAVERI: 2517 case CHIP_KAVERI:
2518 case CHIP_KABINI: 2518 case CHIP_KABINI:
2519 case CHIP_MULLINS:
2519 rdev->asic = &kv_asic; 2520 rdev->asic = &kv_asic;
2520 /* set num crtcs */ 2521 /* set num crtcs */
2521 if (rdev->family == CHIP_KAVERI) { 2522 if (rdev->family == CHIP_KAVERI) {
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index fa9a9c02751e..a9fb0d016d38 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -59,7 +59,7 @@ struct atpx_mux {
 	u16 mux;
 } __packed;
 
-bool radeon_is_px(void) {
+bool radeon_has_atpx(void) {
 	return radeon_atpx_priv.atpx_detected;
 }
 
@@ -528,6 +528,13 @@ static bool radeon_atpx_detect(void)
528 has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); 528 has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
529 } 529 }
530 530
531 /* some newer PX laptops mark the dGPU as a non-VGA display device */
532 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
533 vga_count++;
534
535 has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
536 }
537
531 if (has_atpx && vga_count == 2) { 538 if (has_atpx && vga_count == 2) {
532 acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer); 539 acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
533 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", 540 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index b3633d9a5317..9ab30976287d 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
196 } 196 }
197 } 197 }
198 198
199 if (!found) {
200 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
201 dhandle = ACPI_HANDLE(&pdev->dev);
202 if (!dhandle)
203 continue;
204
205 status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
206 if (!ACPI_FAILURE(status)) {
207 found = true;
208 break;
209 }
210 }
211 }
212
199 if (!found) 213 if (!found)
200 return false; 214 return false;
201 215
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index c566b486ca08..ea50e0ae7bf7 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1261,21 +1261,6 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
1261 .force = radeon_dvi_force, 1261 .force = radeon_dvi_force,
1262}; 1262};
1263 1263
1264static void radeon_dp_connector_destroy(struct drm_connector *connector)
1265{
1266 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1267 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
1268
1269 if (radeon_connector->edid)
1270 kfree(radeon_connector->edid);
1271 if (radeon_dig_connector->dp_i2c_bus)
1272 radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
1273 kfree(radeon_connector->con_priv);
1274 drm_sysfs_connector_remove(connector);
1275 drm_connector_cleanup(connector);
1276 kfree(connector);
1277}
1278
1279static int radeon_dp_get_modes(struct drm_connector *connector) 1264static int radeon_dp_get_modes(struct drm_connector *connector)
1280{ 1265{
1281 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1266 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1553,7 +1538,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
1553 .detect = radeon_dp_detect, 1538 .detect = radeon_dp_detect,
1554 .fill_modes = drm_helper_probe_single_connector_modes, 1539 .fill_modes = drm_helper_probe_single_connector_modes,
1555 .set_property = radeon_connector_set_property, 1540 .set_property = radeon_connector_set_property,
1556 .destroy = radeon_dp_connector_destroy, 1541 .destroy = radeon_connector_destroy,
1557 .force = radeon_dvi_force, 1542 .force = radeon_dvi_force,
1558}; 1543};
1559 1544
@@ -1562,7 +1547,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
1562 .detect = radeon_dp_detect, 1547 .detect = radeon_dp_detect,
1563 .fill_modes = drm_helper_probe_single_connector_modes, 1548 .fill_modes = drm_helper_probe_single_connector_modes,
1564 .set_property = radeon_lvds_set_property, 1549 .set_property = radeon_lvds_set_property,
1565 .destroy = radeon_dp_connector_destroy, 1550 .destroy = radeon_connector_destroy,
1566 .force = radeon_dvi_force, 1551 .force = radeon_dvi_force,
1567}; 1552};
1568 1553
@@ -1571,7 +1556,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
1571 .detect = radeon_dp_detect, 1556 .detect = radeon_dp_detect,
1572 .fill_modes = drm_helper_probe_single_connector_modes, 1557 .fill_modes = drm_helper_probe_single_connector_modes,
1573 .set_property = radeon_lvds_set_property, 1558 .set_property = radeon_lvds_set_property,
1574 .destroy = radeon_dp_connector_destroy, 1559 .destroy = radeon_connector_destroy,
1575 .force = radeon_dvi_force, 1560 .force = radeon_dvi_force,
1576}; 1561};
1577 1562
@@ -1668,17 +1653,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1668 radeon_dig_connector->igp_lane_info = igp_lane_info; 1653 radeon_dig_connector->igp_lane_info = igp_lane_info;
1669 radeon_connector->con_priv = radeon_dig_connector; 1654 radeon_connector->con_priv = radeon_dig_connector;
1670 if (i2c_bus->valid) { 1655 if (i2c_bus->valid) {
1671 /* add DP i2c bus */ 1656 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1672 if (connector_type == DRM_MODE_CONNECTOR_eDP) 1657 if (radeon_connector->ddc_bus)
1673 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
1674 else
1675 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
1676 if (radeon_dig_connector->dp_i2c_bus)
1677 has_aux = true; 1658 has_aux = true;
1678 else 1659 else
1679 DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
1680 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1681 if (!radeon_connector->ddc_bus)
1682 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1660 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1683 } 1661 }
1684 switch (connector_type) { 1662 switch (connector_type) {
@@ -1893,10 +1871,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1893 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); 1871 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
1894 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); 1872 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1895 if (i2c_bus->valid) { 1873 if (i2c_bus->valid) {
1896 /* add DP i2c bus */
1897 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
1898 if (!radeon_dig_connector->dp_i2c_bus)
1899 DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
1900 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus); 1874 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1901 if (radeon_connector->ddc_bus) 1875 if (radeon_connector->ddc_bus)
1902 has_aux = true; 1876 has_aux = true;
@@ -1942,14 +1916,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1942 drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type); 1916 drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
1943 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); 1917 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1944 if (i2c_bus->valid) { 1918 if (i2c_bus->valid) {
1945 /* add DP i2c bus */ 1919 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1946 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch"); 1920 if (radeon_connector->ddc_bus)
1947 if (radeon_dig_connector->dp_i2c_bus)
1948 has_aux = true; 1921 has_aux = true;
1949 else 1922 else
1950 DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
1951 radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
1952 if (!radeon_connector->ddc_bus)
1953 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1923 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1954 } 1924 }
1955 drm_object_attach_property(&radeon_connector->base.base, 1925 drm_object_attach_property(&radeon_connector->base.base,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 2b6e0ebcc13a..41ecf8a60611 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
152 uint32_t domain = r->write_domain ? 152 uint32_t domain = r->write_domain ?
153 r->write_domain : r->read_domains; 153 r->write_domain : r->read_domains;
154 154
155 if (domain & RADEON_GEM_DOMAIN_CPU) {
156 DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
157 "for command submission\n");
158 return -EINVAL;
159 }
160
155 p->relocs[i].domain = domain; 161 p->relocs[i].domain = domain;
156 if (domain == RADEON_GEM_DOMAIN_VRAM) 162 if (domain == RADEON_GEM_DOMAIN_VRAM)
157 domain |= RADEON_GEM_DOMAIN_GTT; 163 domain |= RADEON_GEM_DOMAIN_GTT;
@@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
342 return -EINVAL; 348 return -EINVAL;
343 349
344 /* we only support VM on some SI+ rings */ 350 /* we only support VM on some SI+ rings */
345 if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) && 351 if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
346 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { 352 if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
347 DRM_ERROR("Ring %d requires VM!\n", p->ring); 353 DRM_ERROR("Ring %d requires VM!\n", p->ring);
348 return -EINVAL; 354 return -EINVAL;
355 }
356 } else {
357 if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
358 DRM_ERROR("VM not supported on ring %d!\n",
359 p->ring);
360 return -EINVAL;
361 }
349 } 362 }
350 } 363 }
351 364
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 835516d2d257..14671406212f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -99,14 +99,18 @@ static const char radeon_family_name[][16] = {
 	"KAVERI",
 	"KABINI",
 	"HAWAII",
+	"MULLINS",
 	"LAST",
 };
 
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_is_px(void);
-#else
-static inline bool radeon_is_px(void) { return false; }
-#endif
+bool radeon_is_px(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (rdev->flags & RADEON_IS_PX)
+		return true;
+	return false;
+}
 
 /**
  * radeon_program_register_sequence - program an array of registers.
@@ -1082,7 +1086,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
1082{ 1086{
1083 struct drm_device *dev = pci_get_drvdata(pdev); 1087 struct drm_device *dev = pci_get_drvdata(pdev);
1084 1088
1085 if (radeon_is_px() && state == VGA_SWITCHEROO_OFF) 1089 if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1086 return; 1090 return;
1087 1091
1088 if (state == VGA_SWITCHEROO_ON) { 1092 if (state == VGA_SWITCHEROO_ON) {
@@ -1301,9 +1305,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	 * ignore it */
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 
-	if (radeon_runtime_pm == 1)
-		runtime = true;
-	if ((radeon_runtime_pm == -1) && radeon_is_px())
+	if (rdev->flags & RADEON_IS_PX)
 		runtime = true;
 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
 	if (runtime)
@@ -1531,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1531 1533
1532 radeon_restore_bios_scratch_regs(rdev); 1534 radeon_restore_bios_scratch_regs(rdev);
1533 1535
1534 if (fbcon) {
1535 radeon_fbdev_set_suspend(rdev, 0);
1536 console_unlock();
1537 }
1538
1539 /* init dig PHYs, disp eng pll */ 1536 /* init dig PHYs, disp eng pll */
1540 if (rdev->is_atom_bios) { 1537 if (rdev->is_atom_bios) {
1541 radeon_atom_encoder_init(rdev); 1538 radeon_atom_encoder_init(rdev);
@@ -1560,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1560 } 1557 }
1561 1558
1562 drm_kms_helper_poll_enable(dev); 1559 drm_kms_helper_poll_enable(dev);
1560
1561 if (fbcon) {
1562 radeon_fbdev_set_suspend(rdev, 0);
1563 console_unlock();
1564 }
1565
1563 return 0; 1566 return 0;
1564} 1567}
1565 1568
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 386cfa4c194d..356b733caafe 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -284,6 +284,10 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
284 u32 update_pending; 284 u32 update_pending;
285 int vpos, hpos; 285 int vpos, hpos;
286 286
287 /* can happen during initialization */
288 if (radeon_crtc == NULL)
289 return;
290
287 spin_lock_irqsave(&rdev->ddev->event_lock, flags); 291 spin_lock_irqsave(&rdev->ddev->event_lock, flags);
288 work = radeon_crtc->unpin_work; 292 work = radeon_crtc->unpin_work;
289 if (work == NULL || 293 if (work == NULL ||
@@ -759,19 +763,18 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 
 	if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
 	    ENCODER_OBJECT_ID_NONE) {
-		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
-
-		if (dig->dp_i2c_bus)
+		if (radeon_connector->ddc_bus->has_aux)
 			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
-							      &dig->dp_i2c_bus->adapter);
+							      &radeon_connector->ddc_bus->aux.ddc);
 	} else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
 		   (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
 		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
 
 		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
-		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
+		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
+		    radeon_connector->ddc_bus->has_aux)
 			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
-							      &dig->dp_i2c_bus->adapter);
+							      &radeon_connector->ddc_bus->aux.ddc);
 		else if (radeon_connector->ddc_bus && !radeon_connector->edid)
 			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
 							      &radeon_connector->ddc_bus->adapter);
@@ -827,20 +830,52 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
827 830
828 /* make sure nominator is large enough */ 831 /* make sure nominator is large enough */
829 if (*nom < nom_min) { 832 if (*nom < nom_min) {
830 tmp = (nom_min + *nom - 1) / *nom; 833 tmp = DIV_ROUND_UP(nom_min, *nom);
831 *nom *= tmp; 834 *nom *= tmp;
832 *den *= tmp; 835 *den *= tmp;
833 } 836 }
834 837
835 /* make sure the denominator is large enough */ 838 /* make sure the denominator is large enough */
836 if (*den < den_min) { 839 if (*den < den_min) {
837 tmp = (den_min + *den - 1) / *den; 840 tmp = DIV_ROUND_UP(den_min, *den);
838 *nom *= tmp; 841 *nom *= tmp;
839 *den *= tmp; 842 *den *= tmp;
840 } 843 }
841} 844}
842 845
843/** 846/**
847 * avivo_get_fb_ref_div - feedback and ref divider calculation
848 *
849 * @nom: nominator
850 * @den: denominator
851 * @post_div: post divider
852 * @fb_div_max: feedback divider maximum
853 * @ref_div_max: reference divider maximum
854 * @fb_div: resulting feedback divider
855 * @ref_div: resulting reference divider
856 *
857 * Calculate feedback and reference divider for a given post divider. Makes
858 * sure we stay within the limits.
859 */
860static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
861 unsigned fb_div_max, unsigned ref_div_max,
862 unsigned *fb_div, unsigned *ref_div)
863{
864 /* limit reference * post divider to a maximum */
865 ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
866
867 /* get matching reference and feedback divider */
868 *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
869 *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
870
871 /* limit fb divider to its maximum */
872 if (*fb_div > fb_div_max) {
873 *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
874 *fb_div = fb_div_max;
875 }
876}
877
878/**
844 * radeon_compute_pll_avivo - compute PLL paramaters 879 * radeon_compute_pll_avivo - compute PLL paramaters
845 * 880 *
846 * @pll: information about the PLL 881 * @pll: information about the PLL
@@ -861,11 +896,14 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
861 u32 *ref_div_p, 896 u32 *ref_div_p,
862 u32 *post_div_p) 897 u32 *post_div_p)
863{ 898{
899 unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
900 freq : freq / 10;
901
864 unsigned fb_div_min, fb_div_max, fb_div; 902 unsigned fb_div_min, fb_div_max, fb_div;
865 unsigned post_div_min, post_div_max, post_div; 903 unsigned post_div_min, post_div_max, post_div;
866 unsigned ref_div_min, ref_div_max, ref_div; 904 unsigned ref_div_min, ref_div_max, ref_div;
867 unsigned post_div_best, diff_best; 905 unsigned post_div_best, diff_best;
868 unsigned nom, den, tmp; 906 unsigned nom, den;
869 907
870 /* determine allowed feedback divider range */ 908 /* determine allowed feedback divider range */
871 fb_div_min = pll->min_feedback_div; 909 fb_div_min = pll->min_feedback_div;
@@ -881,14 +919,18 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
881 ref_div_min = pll->reference_div; 919 ref_div_min = pll->reference_div;
882 else 920 else
883 ref_div_min = pll->min_ref_div; 921 ref_div_min = pll->min_ref_div;
884 ref_div_max = pll->max_ref_div; 922
923 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
924 pll->flags & RADEON_PLL_USE_REF_DIV)
925 ref_div_max = pll->reference_div;
926 else
927 ref_div_max = pll->max_ref_div;
885 928
886 /* determine allowed post divider range */ 929 /* determine allowed post divider range */
887 if (pll->flags & RADEON_PLL_USE_POST_DIV) { 930 if (pll->flags & RADEON_PLL_USE_POST_DIV) {
888 post_div_min = pll->post_div; 931 post_div_min = pll->post_div;
889 post_div_max = pll->post_div; 932 post_div_max = pll->post_div;
890 } else { 933 } else {
891 unsigned target_clock = freq / 10;
892 unsigned vco_min, vco_max; 934 unsigned vco_min, vco_max;
893 935
894 if (pll->flags & RADEON_PLL_IS_LCD) { 936 if (pll->flags & RADEON_PLL_IS_LCD) {
@@ -899,6 +941,11 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
899 vco_max = pll->pll_out_max; 941 vco_max = pll->pll_out_max;
900 } 942 }
901 943
944 if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
945 vco_min *= 10;
946 vco_max *= 10;
947 }
948
902 post_div_min = vco_min / target_clock; 949 post_div_min = vco_min / target_clock;
903 if ((target_clock * post_div_min) < vco_min) 950 if ((target_clock * post_div_min) < vco_min)
904 ++post_div_min; 951 ++post_div_min;
@@ -913,7 +960,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
913 } 960 }
914 961
915 /* represent the searched ratio as fractional number */ 962 /* represent the searched ratio as fractional number */
916 nom = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ? freq : freq / 10; 963 nom = target_clock;
917 den = pll->reference_freq; 964 den = pll->reference_freq;
918 965
919 /* reduce the numbers to a simpler ratio */ 966 /* reduce the numbers to a simpler ratio */
@@ -927,7 +974,12 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
927 diff_best = ~0; 974 diff_best = ~0;
928 975
929 for (post_div = post_div_min; post_div <= post_div_max; ++post_div) { 976 for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
930 unsigned diff = abs(den - den / post_div * post_div); 977 unsigned diff;
978 avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
979 ref_div_max, &fb_div, &ref_div);
980 diff = abs(target_clock - (pll->reference_freq * fb_div) /
981 (ref_div * post_div));
982
931 if (diff < diff_best || (diff == diff_best && 983 if (diff < diff_best || (diff == diff_best &&
932 !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) { 984 !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {
933 985
@@ -937,29 +989,24 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
 	}
 	post_div = post_div_best;
 
-	/* get matching reference and feedback divider */
-	ref_div = max(den / post_div, 1u);
-	fb_div = nom;
-
-	/* we're almost done, but reference and feedback
-	   divider might be to large now */
-
-	tmp = ref_div;
-
-	if (fb_div > fb_div_max) {
-		ref_div = ref_div * fb_div_max / fb_div;
-		fb_div = fb_div_max;
-	}
-
-	if (ref_div > ref_div_max) {
-		ref_div = ref_div_max;
-		fb_div = nom * ref_div_max / tmp;
-	}
+	/* get the feedback and reference divider for the optimal value */
+	avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
+			     &fb_div, &ref_div);
 
 	/* reduce the numbers to a simpler ratio once more */
 	/* this also makes sure that the reference divider is large enough */
 	avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
 
+	/* avoid high jitter with small fractional dividers */
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
+		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
+		if (fb_div < fb_div_min) {
+			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
+			fb_div *= tmp;
+			ref_div *= tmp;
+		}
+	}
+
 	/* and finally save the result */
 	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
 		*fb_div_p = fb_div / 10;
@@ -976,7 +1023,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
976 *post_div_p = post_div; 1023 *post_div_p = post_div;
977 1024
978 DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n", 1025 DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
979 freq, *dot_clock_p, *fb_div_p, *frac_fb_div_p, 1026 freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
980 ref_div, post_div); 1027 ref_div, post_div);
981} 1028}
982 1029
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d0eba48dd74e..c00a2f585185 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -115,6 +115,7 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
 				      unsigned int flags,
 				      int *vpos, int *hpos, ktime_t *stime,
 				      ktime_t *etime);
+extern bool radeon_is_px(struct drm_device *dev);
 extern const struct drm_ioctl_desc radeon_ioctls_kms[];
 extern int radeon_max_kms_ioctl;
 int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -144,11 +145,9 @@ void radeon_debugfs_cleanup(struct drm_minor *minor);
 #if defined(CONFIG_VGA_SWITCHEROO)
 void radeon_register_atpx_handler(void);
 void radeon_unregister_atpx_handler(void);
-bool radeon_is_px(void);
 #else
 static inline void radeon_register_atpx_handler(void) {}
 static inline void radeon_unregister_atpx_handler(void) {}
-static inline bool radeon_is_px(void) { return false; }
 #endif
 
 int radeon_no_wb;
@@ -186,7 +185,7 @@ module_param_named(dynclks, radeon_dynclks, int, 0444);
 MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx");
 module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444);
 
-MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing");
+MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, radeon_vram_limit, int, 0600);
 
 MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
@@ -405,12 +404,7 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;
 
-	if (radeon_runtime_pm == 0) {
-		pm_runtime_forbid(dev);
-		return -EBUSY;
-	}
-
-	if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+	if (!radeon_is_px(drm_dev)) {
 		pm_runtime_forbid(dev);
 		return -EBUSY;
 	}
@@ -434,10 +428,7 @@ static int radeon_pmops_runtime_resume(struct device *dev)
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	int ret;
 
-	if (radeon_runtime_pm == 0)
-		return -EINVAL;
-
-	if (radeon_runtime_pm == -1 && !radeon_is_px())
+	if (!radeon_is_px(drm_dev))
 		return -EINVAL;
 
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -462,14 +453,7 @@ static int radeon_pmops_runtime_idle(struct device *dev)
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 	struct drm_crtc *crtc;
 
-	if (radeon_runtime_pm == 0) {
-		pm_runtime_forbid(dev);
-		return -EBUSY;
-	}
-
-	/* are we PX enabled? */
-	if (radeon_runtime_pm == -1 && !radeon_is_px()) {
-		DRM_DEBUG_DRIVER("failing to power off - not px\n");
+	if (!radeon_is_px(drm_dev)) {
 		pm_runtime_forbid(dev);
 		return -EBUSY;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 614ad549297f..4b7b87f71a63 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -97,6 +97,7 @@ enum radeon_family {
97 CHIP_KAVERI, 97 CHIP_KAVERI,
98 CHIP_KABINI, 98 CHIP_KABINI,
99 CHIP_HAWAII, 99 CHIP_HAWAII,
100 CHIP_MULLINS,
100 CHIP_LAST, 101 CHIP_LAST,
101}; 102};
102 103
@@ -115,6 +116,7 @@ enum radeon_chip_flags {
115 RADEON_NEW_MEMMAP = 0x00400000UL, 116 RADEON_NEW_MEMMAP = 0x00400000UL,
116 RADEON_IS_PCI = 0x00800000UL, 117 RADEON_IS_PCI = 0x00800000UL,
117 RADEON_IS_IGPGART = 0x01000000UL, 118 RADEON_IS_IGPGART = 0x01000000UL,
119 RADEON_IS_PX = 0x02000000UL,
118}; 120};
119 121
120#endif 122#endif
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index e24ca6ab96de..7b944142a9fd 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -64,8 +64,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
64 radeon_router_select_ddc_port(radeon_connector); 64 radeon_router_select_ddc_port(radeon_connector);
65 65
66 if (use_aux) { 66 if (use_aux) {
67 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 67 ret = i2c_transfer(&radeon_connector->ddc_bus->aux.ddc, msgs, 2);
68 ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
69 } else { 68 } else {
70 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); 69 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
71 } 70 }
@@ -950,16 +949,16 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
 	/* set the radeon bit adapter */
 	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
 		 "Radeon i2c bit bus %s", name);
-	i2c->adapter.algo_data = &i2c->algo.bit;
-	i2c->algo.bit.pre_xfer = pre_xfer;
-	i2c->algo.bit.post_xfer = post_xfer;
-	i2c->algo.bit.setsda = set_data;
-	i2c->algo.bit.setscl = set_clock;
-	i2c->algo.bit.getsda = get_data;
-	i2c->algo.bit.getscl = get_clock;
-	i2c->algo.bit.udelay = 10;
-	i2c->algo.bit.timeout = usecs_to_jiffies(2200); /* from VESA */
-	i2c->algo.bit.data = i2c;
+	i2c->adapter.algo_data = &i2c->bit;
+	i2c->bit.pre_xfer = pre_xfer;
+	i2c->bit.post_xfer = post_xfer;
+	i2c->bit.setsda = set_data;
+	i2c->bit.setscl = set_clock;
+	i2c->bit.getsda = get_data;
+	i2c->bit.getscl = get_clock;
+	i2c->bit.udelay = 10;
+	i2c->bit.timeout = usecs_to_jiffies(2200); /* from VESA */
+	i2c->bit.data = i2c;
 	ret = i2c_bit_add_bus(&i2c->adapter);
 	if (ret) {
 		DRM_ERROR("Failed to register bit i2c %s\n", name);
@@ -974,46 +973,13 @@ out_free:
974 973
975} 974}
976 975
977struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
978 struct radeon_i2c_bus_rec *rec,
979 const char *name)
980{
981 struct radeon_i2c_chan *i2c;
982 int ret;
983
984 i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
985 if (i2c == NULL)
986 return NULL;
987
988 i2c->rec = *rec;
989 i2c->adapter.owner = THIS_MODULE;
990 i2c->adapter.class = I2C_CLASS_DDC;
991 i2c->adapter.dev.parent = &dev->pdev->dev;
992 i2c->dev = dev;
993 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
994 "Radeon aux bus %s", name);
995 i2c_set_adapdata(&i2c->adapter, i2c);
996 i2c->adapter.algo_data = &i2c->algo.dp;
997 i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
998 i2c->algo.dp.address = 0;
999 ret = i2c_dp_aux_add_bus(&i2c->adapter);
1000 if (ret) {
1001 DRM_INFO("Failed to register i2c %s\n", name);
1002 goto out_free;
1003 }
1004
1005 return i2c;
1006out_free:
1007 kfree(i2c);
1008 return NULL;
1009
1010}
1011
1012void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) 976void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
1013{ 977{
1014 if (!i2c) 978 if (!i2c)
1015 return; 979 return;
1016 i2c_del_adapter(&i2c->adapter); 980 i2c_del_adapter(&i2c->adapter);
981 if (i2c->has_aux)
982 drm_dp_aux_unregister_i2c_bus(&i2c->aux);
1017 kfree(i2c); 983 kfree(i2c);
1018} 984}
1019 985
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3e49342a20e6..eaaedba04675 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -35,9 +35,9 @@
 #include <linux/pm_runtime.h>
 
 #if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_is_px(void);
+bool radeon_has_atpx(void);
 #else
-static inline bool radeon_is_px(void) { return false; }
+static inline bool radeon_has_atpx(void) { return false; }
 #endif
 
 /**
@@ -107,6 +107,11 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
107 flags |= RADEON_IS_PCI; 107 flags |= RADEON_IS_PCI;
108 } 108 }
109 109
110 if ((radeon_runtime_pm != 0) &&
111 radeon_has_atpx() &&
112 ((flags & RADEON_IS_IGP) == 0))
113 flags |= RADEON_IS_PX;
114
110 /* radeon_device_init should report only fatal error 115 /* radeon_device_init should report only fatal error
111 * like memory allocation failure or iomapping failure, 116 * like memory allocation failure or iomapping failure,
112 * or memory manager initialization failure, it must 117 * or memory manager initialization failure, it must
@@ -137,8 +142,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 				"Error during ACPI methods call\n");
 	}
 
-	if ((radeon_runtime_pm == 1) ||
-	    ((radeon_runtime_pm == -1) && radeon_is_px())) {
+	if (radeon_is_px(dev)) {
 		pm_runtime_use_autosuspend(dev->dev);
 		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
 		pm_runtime_set_active(dev->dev);
@@ -568,28 +572,34 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
568 } 572 }
569 573
570 r = radeon_vm_init(rdev, &fpriv->vm); 574 r = radeon_vm_init(rdev, &fpriv->vm);
571 if (r)
572 return r;
573
574 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
575 if (r)
576 return r;
577
578 /* map the ib pool buffer read only into
579 * virtual address space */
580 bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
581 rdev->ring_tmp_bo.bo);
582 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
583 RADEON_VM_PAGE_READABLE |
584 RADEON_VM_PAGE_SNOOPED);
585
586 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
587 if (r) { 575 if (r) {
588 radeon_vm_fini(rdev, &fpriv->vm);
589 kfree(fpriv); 576 kfree(fpriv);
590 return r; 577 return r;
591 } 578 }
592 579
580 if (rdev->accel_working) {
581 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
582 if (r) {
583 radeon_vm_fini(rdev, &fpriv->vm);
584 kfree(fpriv);
585 return r;
586 }
587
588 /* map the ib pool buffer read only into
589 * virtual address space */
590 bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
591 rdev->ring_tmp_bo.bo);
592 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
593 RADEON_VM_PAGE_READABLE |
594 RADEON_VM_PAGE_SNOOPED);
595
596 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
597 if (r) {
598 radeon_vm_fini(rdev, &fpriv->vm);
599 kfree(fpriv);
600 return r;
601 }
602 }
593 file_priv->driver_priv = fpriv; 603 file_priv->driver_priv = fpriv;
594 } 604 }
595 605
@@ -617,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
617 struct radeon_bo_va *bo_va; 627 struct radeon_bo_va *bo_va;
618 int r; 628 int r;
619 629
620 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 630 if (rdev->accel_working) {
621 if (!r) { 631 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
622 bo_va = radeon_vm_bo_find(&fpriv->vm, 632 if (!r) {
623 rdev->ring_tmp_bo.bo); 633 bo_va = radeon_vm_bo_find(&fpriv->vm,
624 if (bo_va) 634 rdev->ring_tmp_bo.bo);
625 radeon_vm_bo_rmv(rdev, bo_va); 635 if (bo_va)
626 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 636 radeon_vm_bo_rmv(rdev, bo_va);
637 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
638 }
627 } 639 }
628 640
629 radeon_vm_fini(rdev, &fpriv->vm); 641 radeon_vm_fini(rdev, &fpriv->vm);
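The hunk above derives a PX (PowerXpress) flag from three conditions: runtime PM not explicitly disabled, ATPX present, and a discrete (non-IGP) GPU. A minimal userspace sketch of that decision, with placeholder flag bits rather than the driver's real RADEON_IS_* values:

#include <stdbool.h>
#include <stdio.h>

#define IS_IGP (1u << 0)   /* placeholder bits, not the kernel's values */
#define IS_PX  (1u << 1)

static unsigned int decide_px(unsigned int flags, int runtime_pm, bool has_atpx)
{
        /* runtime PM not forced off, ATPX handshake present, discrete GPU */
        if ((runtime_pm != 0) && has_atpx && ((flags & IS_IGP) == 0))
                flags |= IS_PX;
        return flags;
}

int main(void)
{
        /* discrete GPU, ATPX present, runtime_pm left at auto (-1): PX is set */
        printf("flags=%#x\n", decide_px(0, -1, true));
        return 0;
}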
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 832d9fa1a4c4..6ddf31a2d34e 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -187,12 +187,10 @@ struct radeon_pll {
187struct radeon_i2c_chan { 187struct radeon_i2c_chan {
188 struct i2c_adapter adapter; 188 struct i2c_adapter adapter;
189 struct drm_device *dev; 189 struct drm_device *dev;
190 union { 190 struct i2c_algo_bit_data bit;
191 struct i2c_algo_bit_data bit;
192 struct i2c_algo_dp_aux_data dp;
193 } algo;
194 struct radeon_i2c_bus_rec rec; 191 struct radeon_i2c_bus_rec rec;
195 struct drm_dp_aux aux; 192 struct drm_dp_aux aux;
193 bool has_aux;
196}; 194};
197 195
198/* mostly for macs, but really any system without connector tables */ 196/* mostly for macs, but really any system without connector tables */
@@ -440,7 +438,6 @@ struct radeon_encoder {
440struct radeon_connector_atom_dig { 438struct radeon_connector_atom_dig {
441 uint32_t igp_lane_info; 439 uint32_t igp_lane_info;
442 /* displayport */ 440 /* displayport */
443 struct radeon_i2c_chan *dp_i2c_bus;
444 u8 dpcd[DP_RECEIVER_CAP_SIZE]; 441 u8 dpcd[DP_RECEIVER_CAP_SIZE];
445 u8 dp_sink_type; 442 u8 dp_sink_type;
446 int dp_clock; 443 int dp_clock;
@@ -702,8 +699,6 @@ extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
702 uint8_t lane_set); 699 uint8_t lane_set);
703extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder); 700extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
704extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder); 701extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
705extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
706 u8 write_byte, u8 *read_byte);
707void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); 702void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
708 703
709extern void radeon_i2c_init(struct radeon_device *rdev); 704extern void radeon_i2c_init(struct radeon_device *rdev);
@@ -715,9 +710,6 @@ extern void radeon_i2c_add(struct radeon_device *rdev,
715 const char *name); 710 const char *name);
716extern struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev, 711extern struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
717 struct radeon_i2c_bus_rec *i2c_bus); 712 struct radeon_i2c_bus_rec *i2c_bus);
718extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
719 struct radeon_i2c_bus_rec *rec,
720 const char *name);
721extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, 713extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
722 struct radeon_i2c_bus_rec *rec, 714 struct radeon_i2c_bus_rec *rec,
723 const char *name); 715 const char *name);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 19bec0dbfa38..4faa4d6f9bb4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
458 * into account. We don't want to disallow buffer moves 458 * into account. We don't want to disallow buffer moves
459 * completely. 459 * completely.
460 */ 460 */
461 if (current_domain != RADEON_GEM_DOMAIN_CPU && 461 if ((lobj->alt_domain & current_domain) != 0 &&
462 (domain & current_domain) == 0 && /* will be moved */ 462 (domain & current_domain) == 0 && /* will be moved */
463 bytes_moved > bytes_moved_threshold) { 463 bytes_moved > bytes_moved_threshold) {
464 /* don't move it */ 464 /* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
699 rbo = container_of(bo, struct radeon_bo, tbo); 699 rbo = container_of(bo, struct radeon_bo, tbo);
700 radeon_bo_check_tiling(rbo, 0, 0); 700 radeon_bo_check_tiling(rbo, 0, 0);
701 rdev = rbo->rdev; 701 rdev = rbo->rdev;
702 if (bo->mem.mem_type == TTM_PL_VRAM) { 702 if (bo->mem.mem_type != TTM_PL_VRAM)
703 size = bo->mem.num_pages << PAGE_SHIFT; 703 return 0;
704 offset = bo->mem.start << PAGE_SHIFT; 704
705 if ((offset + size) > rdev->mc.visible_vram_size) { 705 size = bo->mem.num_pages << PAGE_SHIFT;
706 /* hurrah the memory is not visible ! */ 706 offset = bo->mem.start << PAGE_SHIFT;
707 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); 707 if ((offset + size) <= rdev->mc.visible_vram_size)
708 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; 708 return 0;
709 r = ttm_bo_validate(bo, &rbo->placement, false, false); 709
710 if (unlikely(r != 0)) 710 /* hurrah the memory is not visible ! */
711 return r; 711 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
712 offset = bo->mem.start << PAGE_SHIFT; 712 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
713 /* this should not happen */ 713 r = ttm_bo_validate(bo, &rbo->placement, false, false);
714 if ((offset + size) > rdev->mc.visible_vram_size) 714 if (unlikely(r == -ENOMEM)) {
715 return -EINVAL; 715 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
716 } 716 return ttm_bo_validate(bo, &rbo->placement, false, false);
717 } else if (unlikely(r != 0)) {
718 return r;
717 } 719 }
720
721 offset = bo->mem.start << PAGE_SHIFT;
722 /* this should never happen */
723 if ((offset + size) > rdev->mc.visible_vram_size)
724 return -EINVAL;
725
718 return 0; 726 return 0;
719} 727}
720 728
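The rewritten fault handler above replaces nested ifs with early returns and adds a GTT fallback when re-validating into visible VRAM fails with -ENOMEM. A compilable sketch of that control flow, with stub validate functions standing in for ttm_bo_validate and the placement helpers:

#include <errno.h>
#include <stdio.h>

static int validate_in_vram(void) { return -ENOMEM; } /* pretend VRAM is full */
static int validate_in_gtt(void)  { return 0; }

static int fault_reserve_notify(int in_vram, unsigned long offset,
                                unsigned long size, unsigned long visible_vram)
{
        int r;

        if (!in_vram)
                return 0;
        if (offset + size <= visible_vram)
                return 0;                 /* already CPU-visible, nothing to do */

        r = validate_in_vram();           /* try to move into visible VRAM */
        if (r == -ENOMEM)
                return validate_in_gtt(); /* VRAM full: fall back to GTT */
        return r;                         /* 0 on success, other errors fatal */
}

int main(void)
{
        printf("%d\n", fault_reserve_notify(1, 1 << 20, 1 << 20, 1 << 20));
        return 0;
}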
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index ee738a524639..53d6e1bb48dc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
361 struct drm_device *ddev = dev_get_drvdata(dev); 361 struct drm_device *ddev = dev_get_drvdata(dev);
362 struct radeon_device *rdev = ddev->dev_private; 362 struct radeon_device *rdev = ddev->dev_private;
363 363
364 /* Can't set profile when the card is off */
365 if ((rdev->flags & RADEON_IS_PX) &&
366 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
367 return -EINVAL;
368
364 mutex_lock(&rdev->pm.mutex); 369 mutex_lock(&rdev->pm.mutex);
365 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 370 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
366 if (strncmp("default", buf, strlen("default")) == 0) 371 if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
409 struct drm_device *ddev = dev_get_drvdata(dev); 414 struct drm_device *ddev = dev_get_drvdata(dev);
410 struct radeon_device *rdev = ddev->dev_private; 415 struct radeon_device *rdev = ddev->dev_private;
411 416
417 /* Can't set method when the card is off */
418 if ((rdev->flags & RADEON_IS_PX) &&
419 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
420 count = -EINVAL;
421 goto fail;
422 }
423
412 /* we don't support the legacy modes with dpm */ 424 /* we don't support the legacy modes with dpm */
413 if (rdev->pm.pm_method == PM_METHOD_DPM) { 425 if (rdev->pm.pm_method == PM_METHOD_DPM) {
414 count = -EINVAL; 426 count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
446 struct radeon_device *rdev = ddev->dev_private; 458 struct radeon_device *rdev = ddev->dev_private;
447 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 459 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
448 460
461 if ((rdev->flags & RADEON_IS_PX) &&
462 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
463 return snprintf(buf, PAGE_SIZE, "off\n");
464
449 return snprintf(buf, PAGE_SIZE, "%s\n", 465 return snprintf(buf, PAGE_SIZE, "%s\n",
450 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 466 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
451 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 467 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
459 struct drm_device *ddev = dev_get_drvdata(dev); 475 struct drm_device *ddev = dev_get_drvdata(dev);
460 struct radeon_device *rdev = ddev->dev_private; 476 struct radeon_device *rdev = ddev->dev_private;
461 477
478 /* Can't set dpm state when the card is off */
479 if ((rdev->flags & RADEON_IS_PX) &&
480 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
481 return -EINVAL;
482
462 mutex_lock(&rdev->pm.mutex); 483 mutex_lock(&rdev->pm.mutex);
463 if (strncmp("battery", buf, strlen("battery")) == 0) 484 if (strncmp("battery", buf, strlen("battery")) == 0)
464 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; 485 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
485 struct radeon_device *rdev = ddev->dev_private; 506 struct radeon_device *rdev = ddev->dev_private;
486 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 507 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
487 508
509 if ((rdev->flags & RADEON_IS_PX) &&
510 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
511 return snprintf(buf, PAGE_SIZE, "off\n");
512
488 return snprintf(buf, PAGE_SIZE, "%s\n", 513 return snprintf(buf, PAGE_SIZE, "%s\n",
489 (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : 514 (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
490 (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); 515 (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
500 enum radeon_dpm_forced_level level; 525 enum radeon_dpm_forced_level level;
501 int ret = 0; 526 int ret = 0;
502 527
528 /* Can't force performance level when the card is off */
529 if ((rdev->flags & RADEON_IS_PX) &&
530 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
531 return -EINVAL;
532
503 mutex_lock(&rdev->pm.mutex); 533 mutex_lock(&rdev->pm.mutex);
504 if (strncmp("low", buf, strlen("low")) == 0) { 534 if (strncmp("low", buf, strlen("low")) == 0) {
505 level = RADEON_DPM_FORCED_LEVEL_LOW; 535 level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
538 char *buf) 568 char *buf)
539{ 569{
540 struct radeon_device *rdev = dev_get_drvdata(dev); 570 struct radeon_device *rdev = dev_get_drvdata(dev);
571 struct drm_device *ddev = rdev->ddev;
541 int temp; 572 int temp;
542 573
574 /* Can't get temperature when the card is off */
575 if ((rdev->flags & RADEON_IS_PX) &&
576 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
577 return -EINVAL;
578
543 if (rdev->asic->pm.get_temperature) 579 if (rdev->asic->pm.get_temperature)
544 temp = radeon_get_temperature(rdev); 580 temp = radeon_get_temperature(rdev);
545 else 581 else
@@ -603,7 +639,6 @@ static const struct attribute_group *hwmon_groups[] = {
603static int radeon_hwmon_init(struct radeon_device *rdev) 639static int radeon_hwmon_init(struct radeon_device *rdev)
604{ 640{
605 int err = 0; 641 int err = 0;
606 struct device *hwmon_dev;
607 642
608 switch (rdev->pm.int_thermal_type) { 643 switch (rdev->pm.int_thermal_type) {
609 case THERMAL_TYPE_RV6XX: 644 case THERMAL_TYPE_RV6XX:
@@ -616,11 +651,11 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
616 case THERMAL_TYPE_KV: 651 case THERMAL_TYPE_KV:
617 if (rdev->asic->pm.get_temperature == NULL) 652 if (rdev->asic->pm.get_temperature == NULL)
618 return err; 653 return err;
619 hwmon_dev = hwmon_device_register_with_groups(rdev->dev, 654 rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
620 "radeon", rdev, 655 "radeon", rdev,
621 hwmon_groups); 656 hwmon_groups);
622 if (IS_ERR(hwmon_dev)) { 657 if (IS_ERR(rdev->pm.int_hwmon_dev)) {
623 err = PTR_ERR(hwmon_dev); 658 err = PTR_ERR(rdev->pm.int_hwmon_dev);
624 dev_err(rdev->dev, 659 dev_err(rdev->dev,
625 "Unable to register hwmon device: %d\n", err); 660 "Unable to register hwmon device: %d\n", err);
626 } 661 }
@@ -632,6 +667,12 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
632 return err; 667 return err;
633} 668}
634 669
670static void radeon_hwmon_fini(struct radeon_device *rdev)
671{
672 if (rdev->pm.int_hwmon_dev)
673 hwmon_device_unregister(rdev->pm.int_hwmon_dev);
674}
675
635static void radeon_dpm_thermal_work_handler(struct work_struct *work) 676static void radeon_dpm_thermal_work_handler(struct work_struct *work)
636{ 677{
637 struct radeon_device *rdev = 678 struct radeon_device *rdev =
@@ -1257,6 +1298,7 @@ int radeon_pm_init(struct radeon_device *rdev)
1257 case CHIP_RV670: 1298 case CHIP_RV670:
1258 case CHIP_RS780: 1299 case CHIP_RS780:
1259 case CHIP_RS880: 1300 case CHIP_RS880:
1301 case CHIP_RV770:
1260 case CHIP_BARTS: 1302 case CHIP_BARTS:
1261 case CHIP_TURKS: 1303 case CHIP_TURKS:
1262 case CHIP_CAICOS: 1304 case CHIP_CAICOS:
@@ -1273,7 +1315,6 @@ int radeon_pm_init(struct radeon_device *rdev)
1273 else 1315 else
1274 rdev->pm.pm_method = PM_METHOD_PROFILE; 1316 rdev->pm.pm_method = PM_METHOD_PROFILE;
1275 break; 1317 break;
1276 case CHIP_RV770:
1277 case CHIP_RV730: 1318 case CHIP_RV730:
1278 case CHIP_RV710: 1319 case CHIP_RV710:
1279 case CHIP_RV740: 1320 case CHIP_RV740:
@@ -1295,6 +1336,7 @@ int radeon_pm_init(struct radeon_device *rdev)
1295 case CHIP_KABINI: 1336 case CHIP_KABINI:
1296 case CHIP_KAVERI: 1337 case CHIP_KAVERI:
1297 case CHIP_HAWAII: 1338 case CHIP_HAWAII:
1339 case CHIP_MULLINS:
1298 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1340 /* DPM requires the RLC, RV770+ dGPU requires SMC */
1299 if (!rdev->rlc_fw) 1341 if (!rdev->rlc_fw)
1300 rdev->pm.pm_method = PM_METHOD_PROFILE; 1342 rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1353,6 +1395,8 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
1353 device_remove_file(rdev->dev, &dev_attr_power_method); 1395 device_remove_file(rdev->dev, &dev_attr_power_method);
1354 } 1396 }
1355 1397
1398 radeon_hwmon_fini(rdev);
1399
1356 if (rdev->pm.power_state) 1400 if (rdev->pm.power_state)
1357 kfree(rdev->pm.power_state); 1401 kfree(rdev->pm.power_state);
1358} 1402}
@@ -1372,6 +1416,8 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
1372 } 1416 }
1373 radeon_dpm_fini(rdev); 1417 radeon_dpm_fini(rdev);
1374 1418
1419 radeon_hwmon_fini(rdev);
1420
1375 if (rdev->pm.power_state) 1421 if (rdev->pm.power_state)
1376 kfree(rdev->pm.power_state); 1422 kfree(rdev->pm.power_state);
1377} 1423}
@@ -1397,12 +1443,14 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
1397 1443
1398 rdev->pm.active_crtcs = 0; 1444 rdev->pm.active_crtcs = 0;
1399 rdev->pm.active_crtc_count = 0; 1445 rdev->pm.active_crtc_count = 0;
1400 list_for_each_entry(crtc, 1446 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1401 &ddev->mode_config.crtc_list, head) { 1447 list_for_each_entry(crtc,
1402 radeon_crtc = to_radeon_crtc(crtc); 1448 &ddev->mode_config.crtc_list, head) {
1403 if (radeon_crtc->enabled) { 1449 radeon_crtc = to_radeon_crtc(crtc);
1404 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); 1450 if (radeon_crtc->enabled) {
1405 rdev->pm.active_crtc_count++; 1451 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
1452 rdev->pm.active_crtc_count++;
1453 }
1406 } 1454 }
1407 } 1455 }
1408 1456
@@ -1469,12 +1517,14 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
1469 /* update active crtc counts */ 1517 /* update active crtc counts */
1470 rdev->pm.dpm.new_active_crtcs = 0; 1518 rdev->pm.dpm.new_active_crtcs = 0;
1471 rdev->pm.dpm.new_active_crtc_count = 0; 1519 rdev->pm.dpm.new_active_crtc_count = 0;
1472 list_for_each_entry(crtc, 1520 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
1473 &ddev->mode_config.crtc_list, head) { 1521 list_for_each_entry(crtc,
1474 radeon_crtc = to_radeon_crtc(crtc); 1522 &ddev->mode_config.crtc_list, head) {
1475 if (crtc->enabled) { 1523 radeon_crtc = to_radeon_crtc(crtc);
1476 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id); 1524 if (crtc->enabled) {
1477 rdev->pm.dpm.new_active_crtc_count++; 1525 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
1526 rdev->pm.dpm.new_active_crtc_count++;
1527 }
1478 } 1528 }
1479 } 1529 }
1480 1530
@@ -1600,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
1600 struct drm_info_node *node = (struct drm_info_node *) m->private; 1650 struct drm_info_node *node = (struct drm_info_node *) m->private;
1601 struct drm_device *dev = node->minor->dev; 1651 struct drm_device *dev = node->minor->dev;
1602 struct radeon_device *rdev = dev->dev_private; 1652 struct radeon_device *rdev = dev->dev_private;
1653 struct drm_device *ddev = rdev->ddev;
1603 1654
1604 if (rdev->pm.dpm_enabled) { 1655 if ((rdev->flags & RADEON_IS_PX) &&
1656 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
1657 seq_printf(m, "PX asic powered off\n");
1658 } else if (rdev->pm.dpm_enabled) {
1605 mutex_lock(&rdev->pm.mutex); 1659 mutex_lock(&rdev->pm.mutex);
1606 if (rdev->asic->dpm.debugfs_print_current_performance_level) 1660 if (rdev->asic->dpm.debugfs_print_current_performance_level)
1607 radeon_dpm_debugfs_print_current_performance_level(rdev, m); 1661 radeon_dpm_debugfs_print_current_performance_level(rdev, m);
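Every sysfs and debugfs handler in the hunks above gains the same guard: if this is a PX board and the dGPU is currently switched off, bail out (or report "off") instead of touching the hardware. A small sketch of that guard pattern, with booleans standing in for the RADEON_IS_PX flag and switch_power_state:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* One guard, reused by every handler (sketch only, not the driver code). */
static bool card_is_reachable(bool is_px, bool power_on)
{
        return !is_px || power_on;
}

static int set_dpm_state(bool is_px, bool power_on, const char *buf)
{
        if (!card_is_reachable(is_px, power_on))
                return -EINVAL;          /* PX dGPU is powered down */
        printf("applying dpm state %s\n", buf);
        return 0;
}

int main(void)
{
        printf("%d\n", set_dpm_state(true, false, "battery")); /* -EINVAL */
        printf("%d\n", set_dpm_state(true, true, "battery"));  /* 0 */
        return 0;
}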
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index a77cd274dfc3..4e7c3269b183 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -52,14 +52,20 @@
52#define BONAIRE_RLC_UCODE_SIZE 2048 52#define BONAIRE_RLC_UCODE_SIZE 2048
53#define KB_RLC_UCODE_SIZE 2560 53#define KB_RLC_UCODE_SIZE 2560
54#define KV_RLC_UCODE_SIZE 2560 54#define KV_RLC_UCODE_SIZE 2560
55#define ML_RLC_UCODE_SIZE 2560
55 56
56/* MC */ 57/* MC */
57#define BTC_MC_UCODE_SIZE 6024 58#define BTC_MC_UCODE_SIZE 6024
58#define CAYMAN_MC_UCODE_SIZE 6037 59#define CAYMAN_MC_UCODE_SIZE 6037
59#define SI_MC_UCODE_SIZE 7769 60#define SI_MC_UCODE_SIZE 7769
61#define TAHITI_MC_UCODE_SIZE 7808
62#define PITCAIRN_MC_UCODE_SIZE 7775
63#define VERDE_MC_UCODE_SIZE 7875
60#define OLAND_MC_UCODE_SIZE 7863 64#define OLAND_MC_UCODE_SIZE 7863
61#define CIK_MC_UCODE_SIZE 7866 65#define BONAIRE_MC_UCODE_SIZE 7866
66#define BONAIRE_MC2_UCODE_SIZE 7948
62#define HAWAII_MC_UCODE_SIZE 7933 67#define HAWAII_MC_UCODE_SIZE 7933
68#define HAWAII_MC2_UCODE_SIZE 8091
63 69
64/* SDMA */ 70/* SDMA */
65#define CIK_SDMA_UCODE_SIZE 1050 71#define CIK_SDMA_UCODE_SIZE 1050
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 5748bdaeacce..1b65ae2433cd 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -99,6 +99,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
99 case CHIP_KABINI: 99 case CHIP_KABINI:
100 case CHIP_KAVERI: 100 case CHIP_KAVERI:
101 case CHIP_HAWAII: 101 case CHIP_HAWAII:
102 case CHIP_MULLINS:
102 fw_name = FIRMWARE_BONAIRE; 103 fw_name = FIRMWARE_BONAIRE;
103 break; 104 break;
104 105
@@ -465,6 +466,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
465 cmd = radeon_get_ib_value(p, p->idx) >> 1; 466 cmd = radeon_get_ib_value(p, p->idx) >> 1;
466 467
467 if (cmd < 0x4) { 468 if (cmd < 0x4) {
469 if (end <= start) {
470 DRM_ERROR("invalid reloc offset %X!\n", offset);
471 return -EINVAL;
472 }
468 if ((end - start) < buf_sizes[cmd]) { 473 if ((end - start) < buf_sizes[cmd]) {
469 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, 474 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
470 (unsigned)(end - start), buf_sizes[cmd]); 475 (unsigned)(end - start), buf_sizes[cmd]);
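The extra end <= start test above matters because the following (end - start) < buf_sizes[cmd] comparison is done on unsigned values; a wrapped subtraction would look like a huge buffer and pass the size check. A standalone sketch of the two-step validation:

#include <stdint.h>
#include <stdio.h>

/* Check end <= start first so the unsigned subtraction cannot wrap. */
static int check_reloc(uint64_t start, uint64_t end, uint64_t min_size)
{
        if (end <= start)
                return -1;              /* invalid or wrapped offset */
        if (end - start < min_size)
                return -1;              /* buffer too small */
        return 0;
}

int main(void)
{
        printf("%d\n", check_reloc(0x2000, 0x1000, 0x100)); /* rejected */
        printf("%d\n", check_reloc(0x1000, 0x2000, 0x100)); /* ok */
        return 0;
}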
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 76e9904bc537..3971d968af6c 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -66,6 +66,7 @@ int radeon_vce_init(struct radeon_device *rdev)
66 case CHIP_BONAIRE: 66 case CHIP_BONAIRE:
67 case CHIP_KAVERI: 67 case CHIP_KAVERI:
68 case CHIP_KABINI: 68 case CHIP_KABINI:
69 case CHIP_MULLINS:
69 fw_name = FIRMWARE_BONAIRE; 70 fw_name = FIRMWARE_BONAIRE;
70 break; 71 break;
71 72
@@ -442,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
442 * @p: parser context 443 * @p: parser context
443 * @lo: address of lower dword 444 * @lo: address of lower dword
444 * @hi: address of higher dword 445 * @hi: address of higher dword
446 * @size: size of checker for relocation buffer
445 * 447 *
446 * Patch relocation inside command stream with real buffer address 448 * Patch relocation inside command stream with real buffer address
447 */ 449 */
448int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) 450int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
451 unsigned size)
449{ 452{
450 struct radeon_cs_chunk *relocs_chunk; 453 struct radeon_cs_chunk *relocs_chunk;
451 uint64_t offset; 454 struct radeon_cs_reloc *reloc;
455 uint64_t start, end, offset;
452 unsigned idx; 456 unsigned idx;
453 457
454 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 458 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -461,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
461 return -EINVAL; 465 return -EINVAL;
462 } 466 }
463 467
464 offset += p->relocs_ptr[(idx / 4)]->gpu_offset; 468 reloc = p->relocs_ptr[(idx / 4)];
469 start = reloc->gpu_offset;
470 end = start + radeon_bo_size(reloc->robj);
471 start += offset;
465 472
466 p->ib.ptr[lo] = offset & 0xFFFFFFFF; 473 p->ib.ptr[lo] = start & 0xFFFFFFFF;
467 p->ib.ptr[hi] = offset >> 32; 474 p->ib.ptr[hi] = start >> 32;
475
476 if (end <= start) {
477 DRM_ERROR("invalid reloc offset %llX!\n", offset);
478 return -EINVAL;
479 }
480 if ((end - start) < size) {
481 DRM_ERROR("buffer to small (%d / %d)!\n",
482 (unsigned)(end - start), size);
483 return -EINVAL;
484 }
468 485
469 return 0; 486 return 0;
470} 487}
471 488
472/** 489/**
490 * radeon_vce_validate_handle - validate stream handle
491 *
492 * @p: parser context
493 * @handle: handle to validate
494 *
 495 * Validates the handle and returns the found session index or -EINVAL
 496 * if we don't have another free session index.
497 */
498int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
499{
500 unsigned i;
501
502 /* validate the handle */
503 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
504 if (atomic_read(&p->rdev->vce.handles[i]) == handle)
505 return i;
506 }
507
508 /* handle not found try to alloc a new one */
509 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
510 if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
511 p->rdev->vce.filp[i] = p->filp;
512 p->rdev->vce.img_size[i] = 0;
513 return i;
514 }
515 }
516
517 DRM_ERROR("No more free VCE handles!\n");
518 return -EINVAL;
519}
520
521/**
473 * radeon_vce_cs_parse - parse and validate the command stream 522 * radeon_vce_cs_parse - parse and validate the command stream
474 * 523 *
475 * @p: parser context 524 * @p: parser context
@@ -477,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
477 */ 526 */
478int radeon_vce_cs_parse(struct radeon_cs_parser *p) 527int radeon_vce_cs_parse(struct radeon_cs_parser *p)
479{ 528{
480 uint32_t handle = 0; 529 int session_idx = -1;
481 bool destroy = false; 530 bool destroyed = false;
531 uint32_t tmp, handle = 0;
532 uint32_t *size = &tmp;
482 int i, r; 533 int i, r;
483 534
484 while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { 535 while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -490,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
490 return -EINVAL; 541 return -EINVAL;
491 } 542 }
492 543
544 if (destroyed) {
545 DRM_ERROR("No other command allowed after destroy!\n");
546 return -EINVAL;
547 }
548
493 switch (cmd) { 549 switch (cmd) {
494 case 0x00000001: // session 550 case 0x00000001: // session
495 handle = radeon_get_ib_value(p, p->idx + 2); 551 handle = radeon_get_ib_value(p, p->idx + 2);
552 session_idx = radeon_vce_validate_handle(p, handle);
553 if (session_idx < 0)
554 return session_idx;
555 size = &p->rdev->vce.img_size[session_idx];
496 break; 556 break;
497 557
498 case 0x00000002: // task info 558 case 0x00000002: // task info
559 break;
560
499 case 0x01000001: // create 561 case 0x01000001: // create
562 *size = radeon_get_ib_value(p, p->idx + 8) *
563 radeon_get_ib_value(p, p->idx + 10) *
564 8 * 3 / 2;
565 break;
566
500 case 0x04000001: // config extension 567 case 0x04000001: // config extension
501 case 0x04000002: // pic control 568 case 0x04000002: // pic control
502 case 0x04000005: // rate control 569 case 0x04000005: // rate control
@@ -505,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
505 break; 572 break;
506 573
507 case 0x03000001: // encode 574 case 0x03000001: // encode
508 r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9); 575 r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
576 *size);
509 if (r) 577 if (r)
510 return r; 578 return r;
511 579
512 r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11); 580 r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
581 *size / 3);
513 if (r) 582 if (r)
514 return r; 583 return r;
515 break; 584 break;
516 585
517 case 0x02000001: // destroy 586 case 0x02000001: // destroy
518 destroy = true; 587 destroyed = true;
519 break; 588 break;
520 589
521 case 0x05000001: // context buffer 590 case 0x05000001: // context buffer
591 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
592 *size * 2);
593 if (r)
594 return r;
595 break;
596
522 case 0x05000004: // video bitstream buffer 597 case 0x05000004: // video bitstream buffer
598 tmp = radeon_get_ib_value(p, p->idx + 4);
599 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
600 tmp);
601 if (r)
602 return r;
603 break;
604
523 case 0x05000005: // feedback buffer 605 case 0x05000005: // feedback buffer
524 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2); 606 r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
607 4096);
525 if (r) 608 if (r)
526 return r; 609 return r;
527 break; 610 break;
@@ -531,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
531 return -EINVAL; 614 return -EINVAL;
532 } 615 }
533 616
617 if (session_idx == -1) {
618 DRM_ERROR("no session command at start of IB\n");
619 return -EINVAL;
620 }
621
534 p->idx += len / 4; 622 p->idx += len / 4;
535 } 623 }
536 624
537 if (destroy) { 625 if (destroyed) {
538 /* IB contains a destroy msg, free the handle */ 626 /* IB contains a destroy msg, free the handle */
539 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) 627 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
540 atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); 628 atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
541
542 return 0;
543 }
544
545 /* create or encode, validate the handle */
546 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
547 if (atomic_read(&p->rdev->vce.handles[i]) == handle)
548 return 0;
549 } 629 }
550 630
551 /* handle not found try to alloc a new one */ 631 return 0;
552 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
553 if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
554 p->rdev->vce.filp[i] = p->filp;
555 return 0;
556 }
557 }
558
559 DRM_ERROR("No more free VCE handles!\n");
560 return -EINVAL;
561} 632}
562 633
563/** 634/**
@@ -613,7 +684,7 @@ void radeon_vce_fence_emit(struct radeon_device *rdev,
613 struct radeon_fence *fence) 684 struct radeon_fence *fence)
614{ 685{
615 struct radeon_ring *ring = &rdev->ring[fence->ring]; 686 struct radeon_ring *ring = &rdev->ring[fence->ring];
616 uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr; 687 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
617 688
618 radeon_ring_write(ring, VCE_CMD_FENCE); 689 radeon_ring_write(ring, VCE_CMD_FENCE);
619 radeon_ring_write(ring, addr); 690 radeon_ring_write(ring, addr);
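radeon_vce_validate_handle above first looks for an existing session slot and otherwise claims a free one with atomic_cmpxchg, so concurrent submitters cannot grab the same slot. A userspace sketch of the same idea using C11 atomics (MAX_HANDLES and the zero-means-free convention mirror the hunk, everything else is placeholder):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16

static atomic_uint handles[MAX_HANDLES];        /* 0 means "slot free" */

/* Return an existing slot for handle, or claim a free one atomically. */
static int validate_handle(uint32_t handle)
{
        unsigned int expected;
        int i;

        for (i = 0; i < MAX_HANDLES; ++i)
                if (atomic_load(&handles[i]) == handle)
                        return i;       /* session already known */

        for (i = 0; i < MAX_HANDLES; ++i) {
                expected = 0;
                if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
                        return i;       /* claimed a free slot */
        }
        return -1;                      /* no free handles left */
}

int main(void)
{
        printf("%d %d\n", validate_handle(42), validate_handle(42)); /* 0 0 */
        return 0;
}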
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2aae6ce49d32..1f426696de36 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
130 struct list_head *head) 130 struct list_head *head)
131{ 131{
132 struct radeon_cs_reloc *list; 132 struct radeon_cs_reloc *list;
133 unsigned i, idx, size; 133 unsigned i, idx;
134 134
135 size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc); 135 list = kmalloc_array(vm->max_pde_used + 1,
136 list = kmalloc(size, GFP_KERNEL); 136 sizeof(struct radeon_cs_reloc), GFP_KERNEL);
137 if (!list) 137 if (!list)
138 return NULL; 138 return NULL;
139 139
@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
595 ndw = 64; 595 ndw = 64;
596 596
597 /* assume the worst case */ 597 /* assume the worst case */
598 ndw += vm->max_pde_used * 12; 598 ndw += vm->max_pde_used * 16;
599 599
600 /* update too big for an IB */ 600 /* update too big for an IB */
601 if (ndw > 0xfffff) 601 if (ndw > 0xfffff)
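The allocation above switches to kmalloc_array sized by max_pde_used + 1, i.e. only the page-directory entries actually in use, and gets an overflow-checked multiplication for free. A hedged userspace equivalent of that helper (malloc-based, not the kernel API):

#include <stdint.h>
#include <stdlib.h>

/* kmalloc_array-style helper: refuse n * size allocations that overflow. */
static void *alloc_array(size_t n, size_t size)
{
        if (size && n > SIZE_MAX / size)
                return NULL;
        return malloc(n * size);
}

int main(void)
{
        return alloc_array((size_t)-1, 16) == NULL ? 0 : 1; /* overflow -> NULL */
}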
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index aca8cbe8a335..bbf2e076ee45 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -86,6 +86,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
86 r = radeon_fence_emit(rdev, fence, ring->idx); 86 r = radeon_fence_emit(rdev, fence, ring->idx);
87 if (r) { 87 if (r) {
88 radeon_ring_unlock_undo(rdev, ring); 88 radeon_ring_unlock_undo(rdev, ring);
89 radeon_semaphore_free(rdev, &sem, NULL);
89 return r; 90 return r;
90 } 91 }
91 92
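The one-line fix above releases the semaphore on the radeon_fence_emit error path, which previously leaked it. A tiny sketch of the rule it enforces, using malloc/free as stand-ins for the semaphore object:

#include <errno.h>
#include <stdlib.h>

/* Every exit taken after the semaphore is created must release it. */
static int copy_dma(int emit_err)
{
        void *sem = malloc(1);          /* stands in for the semaphore */
        if (!sem)
                return -ENOMEM;

        if (emit_err) {
                free(sem);              /* the leak the hunk above plugs */
                return emit_err;
        }

        free(sem);
        return 0;
}

int main(void)
{
        return copy_dma(-5) == -5 ? 0 : 1;  /* error path still frees sem */
}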
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d589475fe9e6..22a63c98ba14 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -39,30 +39,35 @@ MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
39MODULE_FIRMWARE("radeon/TAHITI_me.bin"); 39MODULE_FIRMWARE("radeon/TAHITI_me.bin");
40MODULE_FIRMWARE("radeon/TAHITI_ce.bin"); 40MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
41MODULE_FIRMWARE("radeon/TAHITI_mc.bin"); 41MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
42MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
42MODULE_FIRMWARE("radeon/TAHITI_rlc.bin"); 43MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
43MODULE_FIRMWARE("radeon/TAHITI_smc.bin"); 44MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
44MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); 45MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
45MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); 46MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
46MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin"); 47MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
47MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin"); 48MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
49MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
48MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin"); 50MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
49MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin"); 51MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
50MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); 52MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
51MODULE_FIRMWARE("radeon/VERDE_me.bin"); 53MODULE_FIRMWARE("radeon/VERDE_me.bin");
52MODULE_FIRMWARE("radeon/VERDE_ce.bin"); 54MODULE_FIRMWARE("radeon/VERDE_ce.bin");
53MODULE_FIRMWARE("radeon/VERDE_mc.bin"); 55MODULE_FIRMWARE("radeon/VERDE_mc.bin");
56MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
54MODULE_FIRMWARE("radeon/VERDE_rlc.bin"); 57MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
55MODULE_FIRMWARE("radeon/VERDE_smc.bin"); 58MODULE_FIRMWARE("radeon/VERDE_smc.bin");
56MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); 59MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
57MODULE_FIRMWARE("radeon/OLAND_me.bin"); 60MODULE_FIRMWARE("radeon/OLAND_me.bin");
58MODULE_FIRMWARE("radeon/OLAND_ce.bin"); 61MODULE_FIRMWARE("radeon/OLAND_ce.bin");
59MODULE_FIRMWARE("radeon/OLAND_mc.bin"); 62MODULE_FIRMWARE("radeon/OLAND_mc.bin");
63MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
60MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); 64MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
61MODULE_FIRMWARE("radeon/OLAND_smc.bin"); 65MODULE_FIRMWARE("radeon/OLAND_smc.bin");
62MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); 66MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
63MODULE_FIRMWARE("radeon/HAINAN_me.bin"); 67MODULE_FIRMWARE("radeon/HAINAN_me.bin");
64MODULE_FIRMWARE("radeon/HAINAN_ce.bin"); 68MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
65MODULE_FIRMWARE("radeon/HAINAN_mc.bin"); 69MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
70MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
66MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); 71MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
67MODULE_FIRMWARE("radeon/HAINAN_smc.bin"); 72MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
68 73
@@ -1467,36 +1472,33 @@ int si_mc_load_microcode(struct radeon_device *rdev)
1467 const __be32 *fw_data; 1472 const __be32 *fw_data;
1468 u32 running, blackout = 0; 1473 u32 running, blackout = 0;
1469 u32 *io_mc_regs; 1474 u32 *io_mc_regs;
1470 int i, ucode_size, regs_size; 1475 int i, regs_size, ucode_size;
1471 1476
1472 if (!rdev->mc_fw) 1477 if (!rdev->mc_fw)
1473 return -EINVAL; 1478 return -EINVAL;
1474 1479
1480 ucode_size = rdev->mc_fw->size / 4;
1481
1475 switch (rdev->family) { 1482 switch (rdev->family) {
1476 case CHIP_TAHITI: 1483 case CHIP_TAHITI:
1477 io_mc_regs = (u32 *)&tahiti_io_mc_regs; 1484 io_mc_regs = (u32 *)&tahiti_io_mc_regs;
1478 ucode_size = SI_MC_UCODE_SIZE;
1479 regs_size = TAHITI_IO_MC_REGS_SIZE; 1485 regs_size = TAHITI_IO_MC_REGS_SIZE;
1480 break; 1486 break;
1481 case CHIP_PITCAIRN: 1487 case CHIP_PITCAIRN:
1482 io_mc_regs = (u32 *)&pitcairn_io_mc_regs; 1488 io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
1483 ucode_size = SI_MC_UCODE_SIZE;
1484 regs_size = TAHITI_IO_MC_REGS_SIZE; 1489 regs_size = TAHITI_IO_MC_REGS_SIZE;
1485 break; 1490 break;
1486 case CHIP_VERDE: 1491 case CHIP_VERDE:
1487 default: 1492 default:
1488 io_mc_regs = (u32 *)&verde_io_mc_regs; 1493 io_mc_regs = (u32 *)&verde_io_mc_regs;
1489 ucode_size = SI_MC_UCODE_SIZE;
1490 regs_size = TAHITI_IO_MC_REGS_SIZE; 1494 regs_size = TAHITI_IO_MC_REGS_SIZE;
1491 break; 1495 break;
1492 case CHIP_OLAND: 1496 case CHIP_OLAND:
1493 io_mc_regs = (u32 *)&oland_io_mc_regs; 1497 io_mc_regs = (u32 *)&oland_io_mc_regs;
1494 ucode_size = OLAND_MC_UCODE_SIZE;
1495 regs_size = TAHITI_IO_MC_REGS_SIZE; 1498 regs_size = TAHITI_IO_MC_REGS_SIZE;
1496 break; 1499 break;
1497 case CHIP_HAINAN: 1500 case CHIP_HAINAN:
1498 io_mc_regs = (u32 *)&hainan_io_mc_regs; 1501 io_mc_regs = (u32 *)&hainan_io_mc_regs;
1499 ucode_size = OLAND_MC_UCODE_SIZE;
1500 regs_size = TAHITI_IO_MC_REGS_SIZE; 1502 regs_size = TAHITI_IO_MC_REGS_SIZE;
1501 break; 1503 break;
1502 } 1504 }
@@ -1552,7 +1554,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1552 const char *chip_name; 1554 const char *chip_name;
1553 const char *rlc_chip_name; 1555 const char *rlc_chip_name;
1554 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size; 1556 size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
1555 size_t smc_req_size; 1557 size_t smc_req_size, mc2_req_size;
1556 char fw_name[30]; 1558 char fw_name[30];
1557 int err; 1559 int err;
1558 1560
@@ -1567,6 +1569,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1567 ce_req_size = SI_CE_UCODE_SIZE * 4; 1569 ce_req_size = SI_CE_UCODE_SIZE * 4;
1568 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1570 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1569 mc_req_size = SI_MC_UCODE_SIZE * 4; 1571 mc_req_size = SI_MC_UCODE_SIZE * 4;
1572 mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
1570 smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4); 1573 smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
1571 break; 1574 break;
1572 case CHIP_PITCAIRN: 1575 case CHIP_PITCAIRN:
@@ -1577,6 +1580,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1577 ce_req_size = SI_CE_UCODE_SIZE * 4; 1580 ce_req_size = SI_CE_UCODE_SIZE * 4;
1578 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1581 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1579 mc_req_size = SI_MC_UCODE_SIZE * 4; 1582 mc_req_size = SI_MC_UCODE_SIZE * 4;
1583 mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
1580 smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4); 1584 smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
1581 break; 1585 break;
1582 case CHIP_VERDE: 1586 case CHIP_VERDE:
@@ -1587,6 +1591,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1587 ce_req_size = SI_CE_UCODE_SIZE * 4; 1591 ce_req_size = SI_CE_UCODE_SIZE * 4;
1588 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1592 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1589 mc_req_size = SI_MC_UCODE_SIZE * 4; 1593 mc_req_size = SI_MC_UCODE_SIZE * 4;
1594 mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
1590 smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4); 1595 smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
1591 break; 1596 break;
1592 case CHIP_OLAND: 1597 case CHIP_OLAND:
@@ -1596,7 +1601,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1596 me_req_size = SI_PM4_UCODE_SIZE * 4; 1601 me_req_size = SI_PM4_UCODE_SIZE * 4;
1597 ce_req_size = SI_CE_UCODE_SIZE * 4; 1602 ce_req_size = SI_CE_UCODE_SIZE * 4;
1598 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1603 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1599 mc_req_size = OLAND_MC_UCODE_SIZE * 4; 1604 mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
1600 smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4); 1605 smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
1601 break; 1606 break;
1602 case CHIP_HAINAN: 1607 case CHIP_HAINAN:
@@ -1606,7 +1611,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1606 me_req_size = SI_PM4_UCODE_SIZE * 4; 1611 me_req_size = SI_PM4_UCODE_SIZE * 4;
1607 ce_req_size = SI_CE_UCODE_SIZE * 4; 1612 ce_req_size = SI_CE_UCODE_SIZE * 4;
1608 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1613 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1609 mc_req_size = OLAND_MC_UCODE_SIZE * 4; 1614 mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
1610 smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4); 1615 smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
1611 break; 1616 break;
1612 default: BUG(); 1617 default: BUG();
@@ -1659,16 +1664,22 @@ static int si_init_microcode(struct radeon_device *rdev)
1659 err = -EINVAL; 1664 err = -EINVAL;
1660 } 1665 }
1661 1666
1662 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); 1667 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
1663 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); 1668 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1664 if (err) 1669 if (err) {
1665 goto out; 1670 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
1666 if (rdev->mc_fw->size != mc_req_size) { 1671 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1672 if (err)
1673 goto out;
1674 }
1675 if ((rdev->mc_fw->size != mc_req_size) &&
1676 (rdev->mc_fw->size != mc2_req_size)) {
1667 printk(KERN_ERR 1677 printk(KERN_ERR
1668 "si_mc: Bogus length %zu in firmware \"%s\"\n", 1678 "si_mc: Bogus length %zu in firmware \"%s\"\n",
1669 rdev->mc_fw->size, fw_name); 1679 rdev->mc_fw->size, fw_name);
1670 err = -EINVAL; 1680 err = -EINVAL;
1671 } 1681 }
1682 DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
1672 1683
1673 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 1684 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
1674 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 1685 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
@@ -5769,7 +5780,6 @@ int si_irq_set(struct radeon_device *rdev)
5769 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 5780 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
5770 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; 5781 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
5771 u32 grbm_int_cntl = 0; 5782 u32 grbm_int_cntl = 0;
5772 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
5773 u32 dma_cntl, dma_cntl1; 5783 u32 dma_cntl, dma_cntl1;
5774 u32 thermal_int = 0; 5784 u32 thermal_int = 0;
5775 5785
@@ -5908,16 +5918,22 @@ int si_irq_set(struct radeon_device *rdev)
5908 } 5918 }
5909 5919
5910 if (rdev->num_crtc >= 2) { 5920 if (rdev->num_crtc >= 2) {
5911 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); 5921 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
5912 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); 5922 GRPH_PFLIP_INT_MASK);
5923 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
5924 GRPH_PFLIP_INT_MASK);
5913 } 5925 }
5914 if (rdev->num_crtc >= 4) { 5926 if (rdev->num_crtc >= 4) {
5915 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); 5927 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
5916 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); 5928 GRPH_PFLIP_INT_MASK);
5929 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
5930 GRPH_PFLIP_INT_MASK);
5917 } 5931 }
5918 if (rdev->num_crtc >= 6) { 5932 if (rdev->num_crtc >= 6) {
5919 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); 5933 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
5920 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); 5934 GRPH_PFLIP_INT_MASK);
5935 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
5936 GRPH_PFLIP_INT_MASK);
5921 } 5937 }
5922 5938
5923 if (!ASIC_IS_NODCE(rdev)) { 5939 if (!ASIC_IS_NODCE(rdev)) {
@@ -6281,6 +6297,15 @@ restart_ih:
6281 break; 6297 break;
6282 } 6298 }
6283 break; 6299 break;
6300 case 8: /* D1 page flip */
6301 case 10: /* D2 page flip */
6302 case 12: /* D3 page flip */
6303 case 14: /* D4 page flip */
6304 case 16: /* D5 page flip */
6305 case 18: /* D6 page flip */
6306 DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
6307 radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
6308 break;
6284 case 42: /* HPD hotplug */ 6309 case 42: /* HPD hotplug */
6285 switch (src_data) { 6310 switch (src_data) {
6286 case 0: 6311 case 0:
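The firmware-loading change above prefers a new <chip>_mc2.bin image, falls back to the old <chip>_mc.bin name if it is missing, and then accepts either of the two known image sizes. A userspace sketch of that try-then-fall-back pattern using fopen in place of request_firmware (paths and sizes here are illustrative only):

#include <stdio.h>

static FILE *open_mc_firmware(const char *chip, char *name, size_t len)
{
        FILE *fw;

        snprintf(name, len, "radeon/%s_mc2.bin", chip);   /* preferred image */
        fw = fopen(name, "rb");
        if (!fw) {
                snprintf(name, len, "radeon/%s_mc.bin", chip); /* legacy name */
                fw = fopen(name, "rb");
        }
        return fw;
}

static int size_ok(long size, long mc_size, long mc2_size)
{
        return size == mc_size || size == mc2_size;  /* either length is valid */
}

int main(void)
{
        char name[64];
        FILE *fw = open_mc_firmware("TAHITI", name, sizeof(name));

        printf("%s -> %s (size check helper: %d)\n", name,
               fw ? "found" : "missing", size_ok(7808 * 4, 7769 * 4, 7808 * 4));
        if (fw)
                fclose(fw);
        return 0;
}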
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index cf0fdad8c278..de0ca070122f 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -213,6 +213,7 @@ int si_copy_dma(struct radeon_device *rdev,
213 r = radeon_fence_emit(rdev, fence, ring->idx); 213 r = radeon_fence_emit(rdev, fence, ring->idx);
214 if (r) { 214 if (r) {
215 radeon_ring_unlock_undo(rdev, ring); 215 radeon_ring_unlock_undo(rdev, ring);
216 radeon_semaphore_free(rdev, &sem, NULL);
216 return r; 217 return r;
217 } 218 }
218 219
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 683532f84931..7321283602ce 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -107,8 +107,8 @@
107#define SPLL_CHG_STATUS (1 << 1) 107#define SPLL_CHG_STATUS (1 << 1)
108#define SPLL_CNTL_MODE 0x618 108#define SPLL_CNTL_MODE 0x618
109#define SPLL_SW_DIR_CONTROL (1 << 0) 109#define SPLL_SW_DIR_CONTROL (1 << 0)
110# define SPLL_REFCLK_SEL(x) ((x) << 8) 110# define SPLL_REFCLK_SEL(x) ((x) << 26)
111# define SPLL_REFCLK_SEL_MASK 0xFF00 111# define SPLL_REFCLK_SEL_MASK (3 << 26)
112 112
113#define CG_SPLL_SPREAD_SPECTRUM 0x620 113#define CG_SPLL_SPREAD_SPECTRUM 0x620
114#define SSEN (1 << 0) 114#define SSEN (1 << 0)
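The sid.h fix moves SPLL_REFCLK_SEL from an 8-bit field at bits 15:8 to the 2-bit field at bits 27:26 and keeps the setter macro and its mask in agreement. A generic sketch of deriving both the setter and the mask from one shift/width pair (values here are arbitrary, not the SPLL register layout):

#include <stdio.h>

#define FIELD_SHIFT 26
#define FIELD_WIDTH 2
#define FIELD_SET(x)  (((x) & ((1u << FIELD_WIDTH) - 1)) << FIELD_SHIFT)
#define FIELD_MASK    (((1u << FIELD_WIDTH) - 1) << FIELD_SHIFT)

int main(void)
{
        unsigned int reg = 0xffffffffu;

        /* read-modify-write: clear the field with the mask, then set it */
        reg = (reg & ~FIELD_MASK) | FIELD_SET(2);
        printf("%#x\n", reg);           /* 0xfbffffff */
        return 0;
}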
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 0a243f0e5d68..be42c8125203 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev)
83 int r; 83 int r;
84 84
85 /* raise clocks while booting up the VCPU */ 85 /* raise clocks while booting up the VCPU */
86 radeon_set_uvd_clocks(rdev, 53300, 40000); 86 if (rdev->family < CHIP_RV740)
87 radeon_set_uvd_clocks(rdev, 10000, 10000);
88 else
89 radeon_set_uvd_clocks(rdev, 53300, 40000);
87 90
88 r = uvd_v1_0_start(rdev); 91 r = uvd_v1_0_start(rdev);
89 if (r) 92 if (r)
@@ -407,7 +410,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
407 struct radeon_fence *fence = NULL; 410 struct radeon_fence *fence = NULL;
408 int r; 411 int r;
409 412
410 r = radeon_set_uvd_clocks(rdev, 53300, 40000); 413 if (rdev->family < CHIP_RV740)
414 r = radeon_set_uvd_clocks(rdev, 10000, 10000);
415 else
416 r = radeon_set_uvd_clocks(rdev, 53300, 40000);
411 if (r) { 417 if (r) {
412 DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); 418 DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
413 return r; 419 return r;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 36c717af6cf9..edb871d7d395 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -312,7 +312,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
312 struct drm_device *drm = crtc->dev; 312 struct drm_device *drm = crtc->dev;
313 struct drm_plane *plane; 313 struct drm_plane *plane;
314 314
315 list_for_each_entry(plane, &drm->mode_config.plane_list, head) { 315 drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
316 if (plane->crtc == crtc) { 316 if (plane->crtc == crtc) {
317 tegra_plane_disable(plane); 317 tegra_plane_disable(plane);
318 plane->crtc = NULL; 318 plane->crtc = NULL;
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index d536ed381fbd..005c19bd92df 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -99,55 +99,73 @@ static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer,
99static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux, 99static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
100 struct drm_dp_aux_msg *msg) 100 struct drm_dp_aux_msg *msg)
101{ 101{
102 unsigned long value = DPAUX_DP_AUXCTL_TRANSACTREQ;
103 unsigned long timeout = msecs_to_jiffies(250); 102 unsigned long timeout = msecs_to_jiffies(250);
104 struct tegra_dpaux *dpaux = to_dpaux(aux); 103 struct tegra_dpaux *dpaux = to_dpaux(aux);
105 unsigned long status; 104 unsigned long status;
106 ssize_t ret = 0; 105 ssize_t ret = 0;
106 u32 value;
107 107
108 if (msg->size < 1 || msg->size > 16) 108 /* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. */
109 if (msg->size > 16)
109 return -EINVAL; 110 return -EINVAL;
110 111
111 tegra_dpaux_writel(dpaux, msg->address, DPAUX_DP_AUXADDR); 112 /*
113 * Allow zero-sized messages only for I2C, in which case they specify
114 * address-only transactions.
115 */
116 if (msg->size < 1) {
117 switch (msg->request & ~DP_AUX_I2C_MOT) {
118 case DP_AUX_I2C_WRITE:
119 case DP_AUX_I2C_READ:
120 value = DPAUX_DP_AUXCTL_CMD_ADDRESS_ONLY;
121 break;
122
123 default:
124 return -EINVAL;
125 }
126 } else {
127 /* For non-zero-sized messages, set the CMDLEN field. */
128 value = DPAUX_DP_AUXCTL_CMDLEN(msg->size - 1);
129 }
112 130
113 switch (msg->request & ~DP_AUX_I2C_MOT) { 131 switch (msg->request & ~DP_AUX_I2C_MOT) {
114 case DP_AUX_I2C_WRITE: 132 case DP_AUX_I2C_WRITE:
115 if (msg->request & DP_AUX_I2C_MOT) 133 if (msg->request & DP_AUX_I2C_MOT)
116 value = DPAUX_DP_AUXCTL_CMD_MOT_WR; 134 value |= DPAUX_DP_AUXCTL_CMD_MOT_WR;
117 else 135 else
118 value = DPAUX_DP_AUXCTL_CMD_I2C_WR; 136 value |= DPAUX_DP_AUXCTL_CMD_I2C_WR;
119 137
120 break; 138 break;
121 139
122 case DP_AUX_I2C_READ: 140 case DP_AUX_I2C_READ:
123 if (msg->request & DP_AUX_I2C_MOT) 141 if (msg->request & DP_AUX_I2C_MOT)
124 value = DPAUX_DP_AUXCTL_CMD_MOT_RD; 142 value |= DPAUX_DP_AUXCTL_CMD_MOT_RD;
125 else 143 else
126 value = DPAUX_DP_AUXCTL_CMD_I2C_RD; 144 value |= DPAUX_DP_AUXCTL_CMD_I2C_RD;
127 145
128 break; 146 break;
129 147
130 case DP_AUX_I2C_STATUS: 148 case DP_AUX_I2C_STATUS:
131 if (msg->request & DP_AUX_I2C_MOT) 149 if (msg->request & DP_AUX_I2C_MOT)
132 value = DPAUX_DP_AUXCTL_CMD_MOT_RQ; 150 value |= DPAUX_DP_AUXCTL_CMD_MOT_RQ;
133 else 151 else
134 value = DPAUX_DP_AUXCTL_CMD_I2C_RQ; 152 value |= DPAUX_DP_AUXCTL_CMD_I2C_RQ;
135 153
136 break; 154 break;
137 155
138 case DP_AUX_NATIVE_WRITE: 156 case DP_AUX_NATIVE_WRITE:
139 value = DPAUX_DP_AUXCTL_CMD_AUX_WR; 157 value |= DPAUX_DP_AUXCTL_CMD_AUX_WR;
140 break; 158 break;
141 159
142 case DP_AUX_NATIVE_READ: 160 case DP_AUX_NATIVE_READ:
143 value = DPAUX_DP_AUXCTL_CMD_AUX_RD; 161 value |= DPAUX_DP_AUXCTL_CMD_AUX_RD;
144 break; 162 break;
145 163
146 default: 164 default:
147 return -EINVAL; 165 return -EINVAL;
148 } 166 }
149 167
150 value |= DPAUX_DP_AUXCTL_CMDLEN(msg->size - 1); 168 tegra_dpaux_writel(dpaux, msg->address, DPAUX_DP_AUXADDR);
151 tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL); 169 tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXCTL);
152 170
153 if ((msg->request & DP_AUX_I2C_READ) == 0) { 171 if ((msg->request & DP_AUX_I2C_READ) == 0) {
@@ -198,7 +216,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
198 break; 216 break;
199 } 217 }
200 218
201 if (msg->reply == DP_AUX_NATIVE_REPLY_ACK) { 219 if ((msg->size > 0) && (msg->reply == DP_AUX_NATIVE_REPLY_ACK)) {
202 if (msg->request & DP_AUX_I2C_READ) { 220 if (msg->request & DP_AUX_I2C_READ) {
203 size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK; 221 size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;
204 222
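The reworked tegra_dpaux_transfer above builds the AUXCTL word incrementally: start from either ADDRESS_ONLY (for zero-length I2C messages) or CMDLEN(size - 1), then OR in the transaction type rather than overwriting it. A sketch with placeholder bit values (not Tegra's real register layout):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CMD_ADDRESS_ONLY (1u << 8)
#define CMDLEN(x)        ((x) & 0xffu)
#define CMD_I2C_WR       (0u << 12)
#define CMD_I2C_RD       (1u << 12)

static int build_auxctl(size_t size, int is_read, uint32_t *out)
{
        uint32_t value;

        if (size > 16)
                return -EINVAL;             /* FIFO is only 16 bytes deep */
        if (size == 0)
                value = CMD_ADDRESS_ONLY;   /* address-only i2c transaction */
        else
                value = CMDLEN(size - 1);

        value |= is_read ? CMD_I2C_RD : CMD_I2C_WR;  /* OR, don't overwrite */
        *out = value;
        return 0;
}

int main(void)
{
        uint32_t v;

        build_auxctl(0, 1, &v);
        printf("%#x\n", v);                 /* 0x1100 with these placeholders */
        return 0;
}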
diff --git a/drivers/gpu/drm/tegra/dpaux.h b/drivers/gpu/drm/tegra/dpaux.h
index 4f5bf10fdff9..806e245ca787 100644
--- a/drivers/gpu/drm/tegra/dpaux.h
+++ b/drivers/gpu/drm/tegra/dpaux.h
@@ -32,6 +32,7 @@
32#define DPAUX_DP_AUXCTL_CMD_I2C_RQ (2 << 12) 32#define DPAUX_DP_AUXCTL_CMD_I2C_RQ (2 << 12)
33#define DPAUX_DP_AUXCTL_CMD_I2C_RD (1 << 12) 33#define DPAUX_DP_AUXCTL_CMD_I2C_RD (1 << 12)
34#define DPAUX_DP_AUXCTL_CMD_I2C_WR (0 << 12) 34#define DPAUX_DP_AUXCTL_CMD_I2C_WR (0 << 12)
35#define DPAUX_DP_AUXCTL_CMD_ADDRESS_ONLY (1 << 8)
35#define DPAUX_DP_AUXCTL_CMDLEN(x) ((x) & 0xff) 36#define DPAUX_DP_AUXCTL_CMDLEN(x) ((x) & 0xff)
36 37
37#define DPAUX_DP_AUXSTAT 0x31 38#define DPAUX_DP_AUXSTAT 0x31
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 931490b9cfed..87df0b3674fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
1214 SVGA3dCmdSurfaceDMA dma; 1214 SVGA3dCmdSurfaceDMA dma;
1215 } *cmd; 1215 } *cmd;
1216 int ret; 1216 int ret;
1217 SVGA3dCmdSurfaceDMASuffix *suffix;
1218 uint32_t bo_size;
1217 1219
1218 cmd = container_of(header, struct vmw_dma_cmd, header); 1220 cmd = container_of(header, struct vmw_dma_cmd, header);
1221 suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1222 header->size - sizeof(*suffix));
1223
1224 /* Make sure device and verifier stays in sync. */
1225 if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1226 DRM_ERROR("Invalid DMA suffix size.\n");
1227 return -EINVAL;
1228 }
1229
1219 ret = vmw_translate_guest_ptr(dev_priv, sw_context, 1230 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1220 &cmd->dma.guest.ptr, 1231 &cmd->dma.guest.ptr,
1221 &vmw_bo); 1232 &vmw_bo);
1222 if (unlikely(ret != 0)) 1233 if (unlikely(ret != 0))
1223 return ret; 1234 return ret;
1224 1235
1236 /* Make sure DMA doesn't cross BO boundaries. */
1237 bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1238 if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1239 DRM_ERROR("Invalid DMA offset.\n");
1240 return -EINVAL;
1241 }
1242
1243 bo_size -= cmd->dma.guest.ptr.offset;
1244 if (unlikely(suffix->maximumOffset > bo_size))
1245 suffix->maximumOffset = bo_size;
1246
1225 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1247 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1226 user_surface_converter, &cmd->dma.host.sid, 1248 user_surface_converter, &cmd->dma.host.sid,
1227 NULL); 1249 NULL);
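The vmwgfx hunk adds two defenses against a malicious or buggy guest: reject a DMA suffix whose size field does not match the struct, and clamp the guest-supplied maximumOffset so the transfer stays inside the buffer object. A simplified, compilable sketch of those checks:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct dma_suffix {
        uint32_t suffix_size;
        uint32_t maximum_offset;
};

static int check_dma(struct dma_suffix *suffix, uint64_t guest_offset,
                     uint64_t bo_size)
{
        if (suffix->suffix_size != sizeof(*suffix))
                return -EINVAL;         /* verifier and device out of sync */
        if (guest_offset > bo_size)
                return -EINVAL;         /* offset beyond the buffer object */

        bo_size -= guest_offset;
        if (suffix->maximum_offset > bo_size)
                suffix->maximum_offset = bo_size;   /* clamp, don't fail */
        return 0;
}

int main(void)
{
        struct dma_suffix s = { sizeof(s), 4096 };

        printf("%d max=%u\n", check_dma(&s, 1024, 2048), s.maximum_offset);
        return 0;                       /* prints: 0 max=1024 */
}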
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index db9017adfe2b..498b37e39058 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -47,7 +47,7 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
47 unsigned long reg; 47 unsigned long reg;
48 int i, id; 48 int i, id;
49 49
50 for (i = 0; i <= BIT_WORD(host->info->nb_pts); i++) { 50 for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
51 reg = host1x_sync_readl(host, 51 reg = host1x_sync_readl(host,
52 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i)); 52 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
53 for_each_set_bit(id, &reg, BITS_PER_LONG) { 53 for_each_set_bit(id, &reg, BITS_PER_LONG) {
@@ -64,7 +64,7 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
64{ 64{
65 u32 i; 65 u32 i;
66 66
67 for (i = 0; i <= BIT_WORD(host->info->nb_pts); ++i) { 67 for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
68 host1x_sync_writel(host, 0xffffffffu, 68 host1x_sync_writel(host, 0xffffffffu,
69 HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i)); 69 HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
70 host1x_sync_writel(host, 0xffffffffu, 70 host1x_sync_writel(host, 0xffffffffu,
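The host1x loops now iterate over DIV_ROUND_UP(nb_pts, 32) 32-bit status registers instead of i <= BIT_WORD(nb_pts), which both divided by BITS_PER_LONG and overshot by one when the count was an exact multiple. A simplified illustration of just the rounding arithmetic (it ignores the BITS_PER_LONG half of the bug):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int nb_pts = 64;       /* e.g. 64 syncpoints, 2 registers */

        printf("old bound: %u registers\n", nb_pts / 32 + 1);          /* 3 */
        printf("new bound: %u registers\n", DIV_ROUND_UP(nb_pts, 32)); /* 2 */
        return 0;
}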
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9e8064205bc7..da52279de939 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -718,6 +718,9 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
718 case HID_MAIN_ITEM_TAG_END_COLLECTION: 718 case HID_MAIN_ITEM_TAG_END_COLLECTION:
719 break; 719 break;
720 case HID_MAIN_ITEM_TAG_INPUT: 720 case HID_MAIN_ITEM_TAG_INPUT:
721 /* ignore constant inputs, they will be ignored by hid-input */
722 if (data & HID_MAIN_ITEM_CONSTANT)
723 break;
721 for (i = 0; i < parser->local.usage_index; i++) 724 for (i = 0; i < parser->local.usage_index; i++)
722 hid_scan_input_usage(parser, parser->local.usage[i]); 725 hid_scan_input_usage(parser, parser->local.usage[i]);
723 break; 726 break;
@@ -1250,7 +1253,8 @@ EXPORT_SYMBOL_GPL(hid_output_report);
1250 1253
1251static int hid_report_len(struct hid_report *report) 1254static int hid_report_len(struct hid_report *report)
1252{ 1255{
1253 return ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7; 1256 /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
1257 return ((report->size - 1) >> 3) + 1 + (report->id > 0);
1254} 1258}
1255 1259
1256/* 1260/*
@@ -1263,7 +1267,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1263 * of implement() working on 8 byte chunks 1267 * of implement() working on 8 byte chunks
1264 */ 1268 */
1265 1269
1266 int len = hid_report_len(report); 1270 int len = hid_report_len(report) + 7;
1267 1271
1268 return kmalloc(len, flags); 1272 return kmalloc(len, flags);
1269} 1273}
@@ -1821,8 +1825,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
1821 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, 1825 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
1822 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, 1826 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
1823 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, 1827 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
1824 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) },
1825 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) },
1826 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) }, 1828 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
1827 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, 1829 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
1828 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, 1830 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
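After the change above, hid_report_len returns the exact report size, rounding the bit count up to whole bytes and adding one byte when a report ID is present, while the 7 bytes of slack needed by implement()'s 8-byte chunks are added only in hid_alloc_report_buf. A sketch of that arithmetic:

#include <stdio.h>

/* Exact report length: ceil(bits / 8) plus one byte for a non-zero report ID. */
static int report_len(int size_bits, int report_id)
{
        return (size_bits - 1) / 8 + 1 + (report_id > 0);
}

/* Allocation length: the exact length plus the 8-byte-chunk slack. */
static int report_buf_len(int size_bits, int report_id)
{
        return report_len(size_bits, report_id) + 7;
}

int main(void)
{
        printf("%d %d\n", report_len(12, 1), report_buf_len(12, 1)); /* 3 10 */
        return 0;
}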
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index bd221263c739..34bb2205d2ea 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -301,6 +301,9 @@
301 301
302#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 302#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
303 303
304#define USB_VENDOR_ID_ELITEGROUP 0x03fc
305#define USB_DEVICE_ID_ELITEGROUP_05D8 0x05d8
306
304#define USB_VENDOR_ID_ELO 0x04E7 307#define USB_VENDOR_ID_ELO 0x04E7
305#define USB_DEVICE_ID_ELO_TS2515 0x0022 308#define USB_DEVICE_ID_ELO_TS2515 0x0022
306#define USB_DEVICE_ID_ELO_TS2700 0x0020 309#define USB_DEVICE_ID_ELO_TS2700 0x0020
@@ -455,7 +458,8 @@
455 458
456#define USB_VENDOR_ID_INTEL_0 0x8086 459#define USB_VENDOR_ID_INTEL_0 0x8086
457#define USB_VENDOR_ID_INTEL_1 0x8087 460#define USB_VENDOR_ID_INTEL_1 0x8087
458#define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa 461#define USB_DEVICE_ID_INTEL_HID_SENSOR_0 0x09fa
462#define USB_DEVICE_ID_INTEL_HID_SENSOR_1 0x0a04
459 463
460#define USB_VENDOR_ID_STM_0 0x0483 464#define USB_VENDOR_ID_STM_0 0x0483
461#define USB_DEVICE_ID_STM_HID_SENSOR 0x91d1 465#define USB_DEVICE_ID_STM_HID_SENSOR 0x91d1
@@ -629,8 +633,6 @@
629#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 633#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
630#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 634#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
631#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c 635#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
632#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
633#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
634 636
635#define USB_VENDOR_ID_MOJO 0x8282 637#define USB_VENDOR_ID_MOJO 0x8282
636#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 638#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -835,6 +837,10 @@
835#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 837#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
836#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 838#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
837#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 839#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
840#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
841
842#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
843#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855
838 844
839#define USB_VENDOR_ID_THINGM 0x27b8 845#define USB_VENDOR_ID_THINGM 0x27b8
840#define USB_DEVICE_ID_BLINK1 0x01ed 846#define USB_DEVICE_ID_BLINK1 0x01ed
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 6fd58175a291..8ba17a946f2a 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -274,10 +274,6 @@ static const struct hid_device_id ms_devices[] = {
274 .driver_data = MS_NOGET }, 274 .driver_data = MS_NOGET },
275 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500), 275 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
276 .driver_data = MS_DUPLICATE_USAGES }, 276 .driver_data = MS_DUPLICATE_USAGES },
277 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2),
278 .driver_data = 0 },
279 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2),
280 .driver_data = 0 },
281 277
282 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT), 278 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
283 .driver_data = MS_PRESENTER }, 279 .driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 35278e43c7a4..51e25b9407f2 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1155,6 +1155,11 @@ static const struct hid_device_id mt_devices[] = {
1155 MT_USB_DEVICE(USB_VENDOR_ID_DWAV, 1155 MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
1156 USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, 1156 USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
1157 1157
1158 /* Elitegroup panel */
1159 { .driver_data = MT_CLS_SERIAL,
1160 MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
1161 USB_DEVICE_ID_ELITEGROUP_05D8) },
1162
1158 /* Flatfrog Panels */ 1163 /* Flatfrog Panels */
1159 { .driver_data = MT_CLS_FLATFROG, 1164 { .driver_data = MT_CLS_FLATFROG,
1160 MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG, 1165 MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 5182031f7b52..be14b5690e94 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -697,14 +697,20 @@ static void sensor_hub_remove(struct hid_device *hdev)
697 697
698static const struct hid_device_id sensor_hub_devices[] = { 698static const struct hid_device_id sensor_hub_devices[] = {
699 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_0, 699 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_0,
700 USB_DEVICE_ID_INTEL_HID_SENSOR), 700 USB_DEVICE_ID_INTEL_HID_SENSOR_0),
701 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, 701 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
702 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1, 702 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1,
703 USB_DEVICE_ID_INTEL_HID_SENSOR), 703 USB_DEVICE_ID_INTEL_HID_SENSOR_0),
704 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
705 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1,
706 USB_DEVICE_ID_INTEL_HID_SENSOR_1),
704 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, 707 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
705 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0, 708 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
706 USB_DEVICE_ID_STM_HID_SENSOR), 709 USB_DEVICE_ID_STM_HID_SENSOR),
707 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, 710 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
711 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS,
712 USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA),
713 .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
708 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID, 714 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
709 HID_ANY_ID) }, 715 HID_ANY_ID) },
710 { } 716 { }
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 69204afea7a4..908de2789219 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1721,8 +1721,6 @@ static void sony_remove(struct hid_device *hdev)
1721 if (sc->quirks & SONY_LED_SUPPORT) 1721 if (sc->quirks & SONY_LED_SUPPORT)
1722 sony_leds_remove(hdev); 1722 sony_leds_remove(hdev);
1723 1723
1724 if (sc->worker_initialized)
1725 cancel_work_sync(&sc->state_worker);
1726 if (sc->quirks & SONY_BATTERY_SUPPORT) { 1724 if (sc->quirks & SONY_BATTERY_SUPPORT) {
1727 hid_hw_close(hdev); 1725 hid_hw_close(hdev);
1728 sony_battery_remove(sc); 1726 sony_battery_remove(sc);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index dbd83878ff99..8e4ddb369883 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -119,6 +119,7 @@ static const struct hid_blacklist {
119 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS }, 119 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
120 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS }, 120 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
121 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS }, 121 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
122 { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
122 123
123 { 0, 0 } 124 { 0, 0 }
124}; 125};
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index f2d7bf90c9fe..2e7801af466e 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -55,6 +55,9 @@ static __u32 vmbus_get_next_version(__u32 current_version)
55 case (VERSION_WIN8): 55 case (VERSION_WIN8):
56 return VERSION_WIN7; 56 return VERSION_WIN7;
57 57
58 case (VERSION_WIN8_1):
59 return VERSION_WIN8;
60
58 case (VERSION_WS2008): 61 case (VERSION_WS2008):
59 default: 62 default:
60 return VERSION_INVAL; 63 return VERSION_INVAL;
@@ -77,7 +80,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
77 msg->interrupt_page = virt_to_phys(vmbus_connection.int_page); 80 msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
78 msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]); 81 msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
79 msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]); 82 msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
80 if (version == VERSION_WIN8) 83 if (version == VERSION_WIN8_1)
81 msg->target_vcpu = hv_context.vp_index[smp_processor_id()]; 84 msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
82 85
83 /* 86 /*
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bc196f49ec53..4af0da96c2e2 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1053,7 +1053,7 @@ config SENSORS_PC87427
1053 1053
1054config SENSORS_NTC_THERMISTOR 1054config SENSORS_NTC_THERMISTOR
1055 tristate "NTC thermistor support" 1055 tristate "NTC thermistor support"
1056 depends on (!OF && !IIO) || (OF && IIO) 1056 depends on !OF || IIO=n || IIO
1057 help 1057 help
1058 This driver supports NTC thermistors sensor reading and its 1058 This driver supports NTC thermistors sensor reading and its
1059 interpretation. The driver can also monitor the temperature and 1059 interpretation. The driver can also monitor the temperature and
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 6d02e3b06375..d76f0b70c6e0 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -365,12 +365,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
365 if (cpu_has_tjmax(c)) 365 if (cpu_has_tjmax(c))
366 dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); 366 dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
367 } else { 367 } else {
368 val = (eax >> 16) & 0x7f; 368 val = (eax >> 16) & 0xff;
369 /* 369 /*
370 * If the TjMax is not plausible, an assumption 370 * If the TjMax is not plausible, an assumption
371 * will be used 371 * will be used
372 */ 372 */
373 if (val >= 85) { 373 if (val) {
374 dev_dbg(dev, "TjMax is %d degrees C\n", val); 374 dev_dbg(dev, "TjMax is %d degrees C\n", val);
375 return val * 1000; 375 return val * 1000;
376 } 376 }
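
The coretemp hunk above widens the TjMax field mask from 0x7f to 0xff and accepts any non-zero reading instead of requiring at least 85. A standalone sketch of that extraction (the register value is made up; this is not the kernel code):

    #include <stdio.h>
    #include <stdint.h>

    /* Pull the TjMax field out of bits 23:16 and convert to millidegrees C;
     * a zero field means the reading is unusable and a default is assumed. */
    static int tjmax_mdeg(uint32_t eax)
    {
        uint32_t val = (eax >> 16) & 0xff;  /* was "& 0x7f" before this patch */

        if (val)                            /* was "val >= 85" */
            return (int)(val * 1000);
        return -1;                          /* caller falls back to an assumed TjMax */
    }

    int main(void)
    {
        uint32_t fake_eax = 100u << 16;     /* pretend the CPU reports TjMax = 100 */

        printf("%d\n", tjmax_mdeg(fake_eax));   /* 100000 */
        return 0;
    }
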
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 90ec1173b8a1..01723f04fe45 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -163,7 +163,7 @@ static ssize_t store_hyst(struct device *dev,
163 if (retval < 0) 163 if (retval < 0)
164 goto fail; 164 goto fail;
165 165
166 hyst = val - retval * 1000; 166 hyst = retval * 1000 - val;
167 hyst = DIV_ROUND_CLOSEST(hyst, 1000); 167 hyst = DIV_ROUND_CLOSEST(hyst, 1000);
168 if (hyst < 0 || hyst > 255) { 168 if (hyst < 0 || hyst > 255) {
169 retval = -ERANGE; 169 retval = -ERANGE;
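
The store_hyst fix above swaps the operands: as far as this hunk shows, the register holds the hysteresis as degrees below the limit, so the requested point is subtracted from the limit rather than the other way around. A worked example with invented numbers (rounding macro simplified for non-negative values):

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))  /* simplified, non-negative x */

    int main(void)
    {
        long limit_deg = 85;        /* limit register, degrees C (invented) */
        long request_mdeg = 81500;  /* user writes 81.5 C as the hysteresis point */
        long hyst = limit_deg * 1000 - request_mdeg;  /* was "request - limit" before the fix */

        hyst = DIV_ROUND_CLOSEST(hyst, 1000);
        if (hyst < 0 || hyst > 255)
            printf("-ERANGE\n");
        else
            printf("register value: %ld degrees below the limit\n", hyst);  /* 4 */
        return 0;
    }
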
@@ -330,7 +330,7 @@ static int emc1403_detect(struct i2c_client *client,
330 } 330 }
331 331
332 id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG); 332 id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
333 if (id != 0x01) 333 if (id < 0x01 || id > 0x04)
334 return -ENODEV; 334 return -ENODEV;
335 335
336 return 0; 336 return 0;
@@ -355,9 +355,9 @@ static int emc1403_probe(struct i2c_client *client,
355 if (id->driver_data) 355 if (id->driver_data)
356 data->groups[1] = &emc1404_group; 356 data->groups[1] = &emc1404_group;
357 357
358 hwmon_dev = hwmon_device_register_with_groups(&client->dev, 358 hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
359 client->name, data, 359 client->name, data,
360 data->groups); 360 data->groups);
361 if (IS_ERR(hwmon_dev)) 361 if (IS_ERR(hwmon_dev))
362 return PTR_ERR(hwmon_dev); 362 return PTR_ERR(hwmon_dev);
363 363
diff --git a/drivers/hwmon/ltc2945.c b/drivers/hwmon/ltc2945.c
index c104cc32989d..c9cddf5f056b 100644
--- a/drivers/hwmon/ltc2945.c
+++ b/drivers/hwmon/ltc2945.c
@@ -1,4 +1,4 @@
1/* 1 /*
2 * Driver for Linear Technology LTC2945 I2C Power Monitor 2 * Driver for Linear Technology LTC2945 I2C Power Monitor
3 * 3 *
4 * Copyright (c) 2014 Guenter Roeck 4 * Copyright (c) 2014 Guenter Roeck
@@ -314,8 +314,8 @@ static ssize_t ltc2945_reset_history(struct device *dev,
314 reg = LTC2945_MAX_ADIN_H; 314 reg = LTC2945_MAX_ADIN_H;
315 break; 315 break;
316 default: 316 default:
317 BUG(); 317 WARN_ONCE(1, "Bad register: 0x%x\n", reg);
318 break; 318 return -EINVAL;
319 } 319 }
320 /* Reset maximum */ 320 /* Reset maximum */
321 ret = regmap_bulk_write(regmap, reg, buf_max, num_regs); 321 ret = regmap_bulk_write(regmap, reg, buf_max, num_regs);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8a17f01e8672..e76feb86a1d4 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -44,6 +44,7 @@ struct ntc_compensation {
44 unsigned int ohm; 44 unsigned int ohm;
45}; 45};
46 46
47/* Order matters, ntc_match references the entries by index */
47static const struct platform_device_id ntc_thermistor_id[] = { 48static const struct platform_device_id ntc_thermistor_id[] = {
48 { "ncp15wb473", TYPE_NCPXXWB473 }, 49 { "ncp15wb473", TYPE_NCPXXWB473 },
49 { "ncp18wb473", TYPE_NCPXXWB473 }, 50 { "ncp18wb473", TYPE_NCPXXWB473 },
@@ -141,7 +142,7 @@ struct ntc_data {
141 char name[PLATFORM_NAME_SIZE]; 142 char name[PLATFORM_NAME_SIZE];
142}; 143};
143 144
144#ifdef CONFIG_OF 145#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
145static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) 146static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
146{ 147{
147 struct iio_channel *channel = pdata->chan; 148 struct iio_channel *channel = pdata->chan;
@@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
163 164
164static const struct of_device_id ntc_match[] = { 165static const struct of_device_id ntc_match[] = {
165 { .compatible = "ntc,ncp15wb473", 166 { .compatible = "ntc,ncp15wb473",
166 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 167 .data = &ntc_thermistor_id[0] },
167 { .compatible = "ntc,ncp18wb473", 168 { .compatible = "ntc,ncp18wb473",
168 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 169 .data = &ntc_thermistor_id[1] },
169 { .compatible = "ntc,ncp21wb473", 170 { .compatible = "ntc,ncp21wb473",
170 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 171 .data = &ntc_thermistor_id[2] },
171 { .compatible = "ntc,ncp03wb473", 172 { .compatible = "ntc,ncp03wb473",
172 .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, 173 .data = &ntc_thermistor_id[3] },
173 { .compatible = "ntc,ncp15wl333", 174 { .compatible = "ntc,ncp15wl333",
174 .data = &ntc_thermistor_id[TYPE_NCPXXWL333] }, 175 .data = &ntc_thermistor_id[4] },
175 { }, 176 { },
176}; 177};
177MODULE_DEVICE_TABLE(of, ntc_match); 178MODULE_DEVICE_TABLE(of, ntc_match);
@@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
223 return NULL; 224 return NULL;
224} 225}
225 226
227#define ntc_match NULL
228
226static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata) 229static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
227{ } 230{ }
228#endif 231#endif
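
The ntc_thermistor change above ties each OF compatible to a platform_device_id entry by table index, which is why the new comment warns that order matters. A compact sketch of that parallel-table pattern, with hypothetical structure names rather than the kernel types:

    #include <stdio.h>

    struct platform_id { const char *name; int type; };
    struct of_match    { const char *compatible; const struct platform_id *data; };

    /* Order matters: the OF table below indexes straight into this array. */
    static const struct platform_id thermistor_ids[] = {
        { "ncp15wb473", 0 },
        { "ncp18wb473", 0 },
        { "ncp15wl333", 1 },
    };

    static const struct of_match thermistor_of_match[] = {
        { "ntc,ncp15wb473", &thermistor_ids[0] },
        { "ntc,ncp18wb473", &thermistor_ids[1] },
        { "ntc,ncp15wl333", &thermistor_ids[2] },
    };

    int main(void)
    {
        /* Reordering thermistor_ids without touching this table would silently
         * bind a compatible string to the wrong device entry. */
        printf("%s -> %s\n", thermistor_of_match[2].compatible,
               thermistor_of_match[2].data->name);
        return 0;
    }
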
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c
index d867e6bb2be1..8242b75d96c8 100644
--- a/drivers/hwmon/vexpress.c
+++ b/drivers/hwmon/vexpress.c
@@ -27,15 +27,15 @@
27struct vexpress_hwmon_data { 27struct vexpress_hwmon_data {
28 struct device *hwmon_dev; 28 struct device *hwmon_dev;
29 struct vexpress_config_func *func; 29 struct vexpress_config_func *func;
30 const char *name;
30}; 31};
31 32
32static ssize_t vexpress_hwmon_name_show(struct device *dev, 33static ssize_t vexpress_hwmon_name_show(struct device *dev,
33 struct device_attribute *dev_attr, char *buffer) 34 struct device_attribute *dev_attr, char *buffer)
34{ 35{
35 const char *compatible = of_get_property(dev->of_node, "compatible", 36 struct vexpress_hwmon_data *data = dev_get_drvdata(dev);
36 NULL);
37 37
38 return sprintf(buffer, "%s\n", compatible); 38 return sprintf(buffer, "%s\n", data->name);
39} 39}
40 40
41static ssize_t vexpress_hwmon_label_show(struct device *dev, 41static ssize_t vexpress_hwmon_label_show(struct device *dev,
@@ -43,9 +43,6 @@ static ssize_t vexpress_hwmon_label_show(struct device *dev,
43{ 43{
44 const char *label = of_get_property(dev->of_node, "label", NULL); 44 const char *label = of_get_property(dev->of_node, "label", NULL);
45 45
46 if (!label)
47 return -ENOENT;
48
49 return snprintf(buffer, PAGE_SIZE, "%s\n", label); 46 return snprintf(buffer, PAGE_SIZE, "%s\n", label);
50} 47}
51 48
@@ -84,6 +81,20 @@ static ssize_t vexpress_hwmon_u64_show(struct device *dev,
84 to_sensor_dev_attr(dev_attr)->index)); 81 to_sensor_dev_attr(dev_attr)->index));
85} 82}
86 83
84static umode_t vexpress_hwmon_attr_is_visible(struct kobject *kobj,
85 struct attribute *attr, int index)
86{
87 struct device *dev = kobj_to_dev(kobj);
88 struct device_attribute *dev_attr = container_of(attr,
89 struct device_attribute, attr);
90
91 if (dev_attr->show == vexpress_hwmon_label_show &&
92 !of_get_property(dev->of_node, "label", NULL))
93 return 0;
94
95 return attr->mode;
96}
97
87static DEVICE_ATTR(name, S_IRUGO, vexpress_hwmon_name_show, NULL); 98static DEVICE_ATTR(name, S_IRUGO, vexpress_hwmon_name_show, NULL);
88 99
89#define VEXPRESS_HWMON_ATTRS(_name, _label_attr, _input_attr) \ 100#define VEXPRESS_HWMON_ATTRS(_name, _label_attr, _input_attr) \
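
The new is_visible callback above hides the *_label attribute whenever the device-tree node has no "label" property, instead of failing with -ENOENT at read time. A userspace sketch of that visibility-predicate idea (the structures stand in for the sysfs ones and are not the kernel API):

    #include <stdio.h>

    struct attr { const char *name; unsigned int mode; };
    struct dev  { const char *label; };  /* stand-in for the DT "label" property */

    /* Return 0 to hide an attribute, or its normal mode to expose it. */
    static unsigned int label_is_visible(const struct dev *d, const struct attr *a)
    {
        if (!d->label)
            return 0;
        return a->mode;
    }

    int main(void)
    {
        struct attr in1_label = { "in1_label", 0444 };
        struct dev with = { "A7 core voltage" }, without = { NULL };

        printf("with label:    mode %o\n", label_is_visible(&with, &in1_label));
        printf("without label: mode %o\n", label_is_visible(&without, &in1_label));
        return 0;
    }
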
@@ -94,14 +105,27 @@ struct attribute *vexpress_hwmon_attrs_##_name[] = { \
94 NULL \ 105 NULL \
95} 106}
96 107
108struct vexpress_hwmon_type {
109 const char *name;
110 const struct attribute_group **attr_groups;
111};
112
97#if !defined(CONFIG_REGULATOR_VEXPRESS) 113#if !defined(CONFIG_REGULATOR_VEXPRESS)
98static DEVICE_ATTR(in1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); 114static DEVICE_ATTR(in1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
99static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, vexpress_hwmon_u32_show, 115static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, vexpress_hwmon_u32_show,
100 NULL, 1000); 116 NULL, 1000);
101static VEXPRESS_HWMON_ATTRS(volt, in1_label, in1_input); 117static VEXPRESS_HWMON_ATTRS(volt, in1_label, in1_input);
102static struct attribute_group vexpress_hwmon_group_volt = { 118static struct attribute_group vexpress_hwmon_group_volt = {
119 .is_visible = vexpress_hwmon_attr_is_visible,
103 .attrs = vexpress_hwmon_attrs_volt, 120 .attrs = vexpress_hwmon_attrs_volt,
104}; 121};
122static struct vexpress_hwmon_type vexpress_hwmon_volt = {
123 .name = "vexpress_volt",
124 .attr_groups = (const struct attribute_group *[]) {
125 &vexpress_hwmon_group_volt,
126 NULL,
127 },
128};
105#endif 129#endif
106 130
107static DEVICE_ATTR(curr1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); 131static DEVICE_ATTR(curr1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
@@ -109,52 +133,84 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, vexpress_hwmon_u32_show,
109 NULL, 1000); 133 NULL, 1000);
110static VEXPRESS_HWMON_ATTRS(amp, curr1_label, curr1_input); 134static VEXPRESS_HWMON_ATTRS(amp, curr1_label, curr1_input);
111static struct attribute_group vexpress_hwmon_group_amp = { 135static struct attribute_group vexpress_hwmon_group_amp = {
136 .is_visible = vexpress_hwmon_attr_is_visible,
112 .attrs = vexpress_hwmon_attrs_amp, 137 .attrs = vexpress_hwmon_attrs_amp,
113}; 138};
139static struct vexpress_hwmon_type vexpress_hwmon_amp = {
140 .name = "vexpress_amp",
141 .attr_groups = (const struct attribute_group *[]) {
142 &vexpress_hwmon_group_amp,
143 NULL
144 },
145};
114 146
115static DEVICE_ATTR(temp1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); 147static DEVICE_ATTR(temp1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
116static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, vexpress_hwmon_u32_show, 148static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, vexpress_hwmon_u32_show,
117 NULL, 1000); 149 NULL, 1000);
118static VEXPRESS_HWMON_ATTRS(temp, temp1_label, temp1_input); 150static VEXPRESS_HWMON_ATTRS(temp, temp1_label, temp1_input);
119static struct attribute_group vexpress_hwmon_group_temp = { 151static struct attribute_group vexpress_hwmon_group_temp = {
152 .is_visible = vexpress_hwmon_attr_is_visible,
120 .attrs = vexpress_hwmon_attrs_temp, 153 .attrs = vexpress_hwmon_attrs_temp,
121}; 154};
155static struct vexpress_hwmon_type vexpress_hwmon_temp = {
156 .name = "vexpress_temp",
157 .attr_groups = (const struct attribute_group *[]) {
158 &vexpress_hwmon_group_temp,
159 NULL
160 },
161};
122 162
123static DEVICE_ATTR(power1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); 163static DEVICE_ATTR(power1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
124static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, vexpress_hwmon_u32_show, 164static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, vexpress_hwmon_u32_show,
125 NULL, 1); 165 NULL, 1);
126static VEXPRESS_HWMON_ATTRS(power, power1_label, power1_input); 166static VEXPRESS_HWMON_ATTRS(power, power1_label, power1_input);
127static struct attribute_group vexpress_hwmon_group_power = { 167static struct attribute_group vexpress_hwmon_group_power = {
168 .is_visible = vexpress_hwmon_attr_is_visible,
128 .attrs = vexpress_hwmon_attrs_power, 169 .attrs = vexpress_hwmon_attrs_power,
129}; 170};
171static struct vexpress_hwmon_type vexpress_hwmon_power = {
172 .name = "vexpress_power",
173 .attr_groups = (const struct attribute_group *[]) {
174 &vexpress_hwmon_group_power,
175 NULL
176 },
177};
130 178
131static DEVICE_ATTR(energy1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); 179static DEVICE_ATTR(energy1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
132static SENSOR_DEVICE_ATTR(energy1_input, S_IRUGO, vexpress_hwmon_u64_show, 180static SENSOR_DEVICE_ATTR(energy1_input, S_IRUGO, vexpress_hwmon_u64_show,
133 NULL, 1); 181 NULL, 1);
134static VEXPRESS_HWMON_ATTRS(energy, energy1_label, energy1_input); 182static VEXPRESS_HWMON_ATTRS(energy, energy1_label, energy1_input);
135static struct attribute_group vexpress_hwmon_group_energy = { 183static struct attribute_group vexpress_hwmon_group_energy = {
184 .is_visible = vexpress_hwmon_attr_is_visible,
136 .attrs = vexpress_hwmon_attrs_energy, 185 .attrs = vexpress_hwmon_attrs_energy,
137}; 186};
187static struct vexpress_hwmon_type vexpress_hwmon_energy = {
188 .name = "vexpress_energy",
189 .attr_groups = (const struct attribute_group *[]) {
190 &vexpress_hwmon_group_energy,
191 NULL
192 },
193};
138 194
139static struct of_device_id vexpress_hwmon_of_match[] = { 195static struct of_device_id vexpress_hwmon_of_match[] = {
140#if !defined(CONFIG_REGULATOR_VEXPRESS) 196#if !defined(CONFIG_REGULATOR_VEXPRESS)
141 { 197 {
142 .compatible = "arm,vexpress-volt", 198 .compatible = "arm,vexpress-volt",
143 .data = &vexpress_hwmon_group_volt, 199 .data = &vexpress_hwmon_volt,
144 }, 200 },
145#endif 201#endif
146 { 202 {
147 .compatible = "arm,vexpress-amp", 203 .compatible = "arm,vexpress-amp",
148 .data = &vexpress_hwmon_group_amp, 204 .data = &vexpress_hwmon_amp,
149 }, { 205 }, {
150 .compatible = "arm,vexpress-temp", 206 .compatible = "arm,vexpress-temp",
151 .data = &vexpress_hwmon_group_temp, 207 .data = &vexpress_hwmon_temp,
152 }, { 208 }, {
153 .compatible = "arm,vexpress-power", 209 .compatible = "arm,vexpress-power",
154 .data = &vexpress_hwmon_group_power, 210 .data = &vexpress_hwmon_power,
155 }, { 211 }, {
156 .compatible = "arm,vexpress-energy", 212 .compatible = "arm,vexpress-energy",
157 .data = &vexpress_hwmon_group_energy, 213 .data = &vexpress_hwmon_energy,
158 }, 214 },
159 {} 215 {}
160}; 216};
@@ -165,6 +221,7 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
165 int err; 221 int err;
166 const struct of_device_id *match; 222 const struct of_device_id *match;
167 struct vexpress_hwmon_data *data; 223 struct vexpress_hwmon_data *data;
224 const struct vexpress_hwmon_type *type;
168 225
169 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 226 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
170 if (!data) 227 if (!data)
@@ -174,12 +231,14 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
174 match = of_match_device(vexpress_hwmon_of_match, &pdev->dev); 231 match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
175 if (!match) 232 if (!match)
176 return -ENODEV; 233 return -ENODEV;
234 type = match->data;
235 data->name = type->name;
177 236
178 data->func = vexpress_config_func_get_by_dev(&pdev->dev); 237 data->func = vexpress_config_func_get_by_dev(&pdev->dev);
179 if (!data->func) 238 if (!data->func)
180 return -ENODEV; 239 return -ENODEV;
181 240
182 err = sysfs_create_group(&pdev->dev.kobj, match->data); 241 err = sysfs_create_groups(&pdev->dev.kobj, type->attr_groups);
183 if (err) 242 if (err)
184 goto error; 243 goto error;
185 244
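
With the probe change above, match->data now points at a vexpress_hwmon_type bundling the hwmon name with a NULL-terminated array of attribute groups handed to sysfs_create_groups(). A small sketch of iterating such a NULL-terminated group list (hypothetical types, not the sysfs API):

    #include <stdio.h>

    struct group { const char *name; };
    struct hwmon_type {
        const char *name;
        const struct group *const *groups;  /* NULL-terminated */
    };

    static int create_groups(const struct group *const *groups)
    {
        int n = 0;

        for (; *groups; groups++, n++)
            printf("registering group '%s'\n", (*groups)->name);
        return n;
    }

    int main(void)
    {
        static const struct group volt = { "volt" };
        static const struct group *const volt_groups[] = { &volt, NULL };
        static const struct hwmon_type vexpress_volt = { "vexpress_volt", volt_groups };

        printf("%s: %d group(s)\n", vexpress_volt.name,
               create_groups(vexpress_volt.groups));
        return 0;
    }
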
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 22e92c3d3d07..3c20e4bd6dd1 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -422,6 +422,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
422 */ 422 */
423 dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR); 423 dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
424 424
425 /* enforce disabled interrupts (due to HW issues) */
426 i2c_dw_disable_int(dev);
427
425 /* Enable the adapter */ 428 /* Enable the adapter */
426 __i2c_dw_enable(dev, true); 429 __i2c_dw_enable(dev, true);
427 430
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 28cbe1b2a2ec..32c85e9ecdae 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -999,7 +999,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
999 999
1000 dev->virtbase = devm_ioremap(&adev->dev, adev->res.start, 1000 dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
1001 resource_size(&adev->res)); 1001 resource_size(&adev->res));
1002 if (IS_ERR(dev->virtbase)) { 1002 if (!dev->virtbase) {
1003 ret = -ENOMEM; 1003 ret = -ENOMEM;
1004 goto err_no_mem; 1004 goto err_no_mem;
1005 } 1005 }
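
The one-line nomadik fix above matters because devm_ioremap() reports failure with a plain NULL rather than an ERR_PTR-encoded errno, so IS_ERR() never fires. A simplified userspace illustration of the two conventions (not the kernel macros themselves):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095
    #define IS_ERR(p) ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        void *null_on_failure   = NULL;                   /* ioremap-style failure */
        void *errptr_on_failure = (void *)(long)-ENOMEM;  /* ERR_PTR-style failure */

        printf("IS_ERR(NULL)             = %d  (failure missed)\n", IS_ERR(null_on_failure));
        printf("!ptr                     = %d  (failure caught)\n", null_on_failure == NULL);
        printf("IS_ERR(ERR_PTR(-ENOMEM)) = %d\n", IS_ERR(errptr_on_failure));
        return 0;
    }
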
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 1b4cf14f1106..2a5efb5b487c 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -479,7 +479,7 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
479 int ret, idx; 479 int ret, idx;
480 480
481 ret = pm_runtime_get_sync(qup->dev); 481 ret = pm_runtime_get_sync(qup->dev);
482 if (ret) 482 if (ret < 0)
483 goto out; 483 goto out;
484 484
485 writel(1, qup->base + QUP_SW_RESET); 485 writel(1, qup->base + QUP_SW_RESET);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d4fa8eba6e9d..06d47aafbb79 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -561,6 +561,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
561 561
562 ret = -EINVAL; 562 ret = -EINVAL;
563 for (i = 0; i < num; i++) { 563 for (i = 0; i < num; i++) {
564 /* This HW can't send STOP after address phase */
565 if (msgs[i].len == 0) {
566 ret = -EOPNOTSUPP;
567 break;
568 }
569
564 /*-------------- spin lock -----------------*/ 570 /*-------------- spin lock -----------------*/
565 spin_lock_irqsave(&priv->lock, flags); 571 spin_lock_irqsave(&priv->lock, flags);
566 572
@@ -625,7 +631,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
625 631
626static u32 rcar_i2c_func(struct i2c_adapter *adap) 632static u32 rcar_i2c_func(struct i2c_adapter *adap)
627{ 633{
628 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 634 /* This HW can't do SMBUS_QUICK and NOSTART */
635 return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
629} 636}
630 637
631static const struct i2c_algorithm rcar_i2c_algo = { 638static const struct i2c_algorithm rcar_i2c_algo = {
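
The rcar functionality change above advertises SMBus emulation minus the one transfer the controller cannot generate, the zero-length QUICK command, since the hardware cannot send STOP right after the address phase. A sketch of that flag arithmetic (flag values invented here, not the real I2C_FUNC_* constants):

    #include <stdio.h>

    #define FUNC_I2C         (1u << 0)
    #define FUNC_SMBUS_QUICK (1u << 1)
    #define FUNC_SMBUS_BYTE  (1u << 2)
    #define FUNC_SMBUS_EMUL  (FUNC_SMBUS_QUICK | FUNC_SMBUS_BYTE)

    int main(void)
    {
        /* "everything SMBus emulation offers, except QUICK" */
        unsigned int func = FUNC_I2C | (FUNC_SMBUS_EMUL & ~FUNC_SMBUS_QUICK);

        printf("quick supported: %s\n", (func & FUNC_SMBUS_QUICK) ? "yes" : "no");
        printf("byte  supported: %s\n", (func & FUNC_SMBUS_BYTE) ? "yes" : "no");
        return 0;
    }
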
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index ae4491062e41..bb3a9964f7e0 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1276,10 +1276,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
1276 struct platform_device *pdev = to_platform_device(dev); 1276 struct platform_device *pdev = to_platform_device(dev);
1277 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); 1277 struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
1278 1278
1279 i2c->suspended = 0;
1280 clk_prepare_enable(i2c->clk); 1279 clk_prepare_enable(i2c->clk);
1281 s3c24xx_i2c_init(i2c); 1280 s3c24xx_i2c_init(i2c);
1282 clk_disable_unprepare(i2c->clk); 1281 clk_disable_unprepare(i2c->clk);
1282 i2c->suspended = 0;
1283 1283
1284 return 0; 1284 return 0;
1285} 1285}
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index a43220c2e3d9..4d140bbbe100 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -750,9 +750,10 @@ void intel_idle_state_table_update(void)
750 if (package_num + 1 > num_sockets) { 750 if (package_num + 1 > num_sockets) {
751 num_sockets = package_num + 1; 751 num_sockets = package_num + 1;
752 752
753 if (num_sockets > 4) 753 if (num_sockets > 4) {
754 cpuidle_state_table = ivt_cstates_8s; 754 cpuidle_state_table = ivt_cstates_8s;
755 return; 755 return;
756 }
756 } 757 }
757 } 758 }
758 759
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index d86196cfe4b4..24c28e3f93a3 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -106,7 +106,7 @@ config AT91_ADC
106 Say yes here to build support for Atmel AT91 ADC. 106 Say yes here to build support for Atmel AT91 ADC.
107 107
108config EXYNOS_ADC 108config EXYNOS_ADC
109 bool "Exynos ADC driver support" 109 tristate "Exynos ADC driver support"
110 depends on OF 110 depends on OF
111 help 111 help
112 Core support for the ADC block found in the Samsung EXYNOS series 112 Core support for the ADC block found in the Samsung EXYNOS series
@@ -114,7 +114,7 @@ config EXYNOS_ADC
114 this resource. 114 this resource.
115 115
116config LP8788_ADC 116config LP8788_ADC
117 bool "LP8788 ADC driver" 117 tristate "LP8788 ADC driver"
118 depends on MFD_LP8788 118 depends on MFD_LP8788
119 help 119 help
120 Say yes here to build support for TI LP8788 ADC. 120 Say yes here to build support for TI LP8788 ADC.
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 5b1aa027c034..89777ed9abd8 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -765,14 +765,17 @@ static int at91_adc_probe_pdata(struct at91_adc_state *st,
765 if (!pdata) 765 if (!pdata)
766 return -EINVAL; 766 return -EINVAL;
767 767
768 st->caps = (struct at91_adc_caps *)
769 platform_get_device_id(pdev)->driver_data;
770
768 st->use_external = pdata->use_external_triggers; 771 st->use_external = pdata->use_external_triggers;
769 st->vref_mv = pdata->vref; 772 st->vref_mv = pdata->vref;
770 st->channels_mask = pdata->channels_used; 773 st->channels_mask = pdata->channels_used;
771 st->num_channels = pdata->num_channels; 774 st->num_channels = st->caps->num_channels;
772 st->startup_time = pdata->startup_time; 775 st->startup_time = pdata->startup_time;
773 st->trigger_number = pdata->trigger_number; 776 st->trigger_number = pdata->trigger_number;
774 st->trigger_list = pdata->trigger_list; 777 st->trigger_list = pdata->trigger_list;
775 st->registers = pdata->registers; 778 st->registers = &st->caps->registers;
776 779
777 return 0; 780 return 0;
778} 781}
@@ -1004,8 +1007,11 @@ static int at91_adc_probe(struct platform_device *pdev)
1004 * the best converted final value between two channels selection 1007 * the best converted final value between two channels selection
1005 * The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock 1008 * The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock
1006 */ 1009 */
1007 shtim = round_up((st->sample_hold_time * adc_clk_khz / 1010 if (st->sample_hold_time > 0)
1008 1000) - 1, 1); 1011 shtim = round_up((st->sample_hold_time * adc_clk_khz / 1000)
1012 - 1, 1);
1013 else
1014 shtim = 0;
1009 1015
1010 reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask; 1016 reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask;
1011 reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask; 1017 reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask;
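
The guarded calculation above follows the comment's formula, Sample-and-Hold Time = (shtim + 1) / ADCClock, so shtim is the hold time expressed in ADC clock ticks minus one, forced to 0 when no hold time was requested (the old expression would otherwise go negative). A quick arithmetic sketch with invented numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int sample_hold_us = 10;   /* requested hold time, microseconds (invented) */
        unsigned int adc_clk_khz = 5000;    /* 5 MHz ADC clock (invented) */
        unsigned int shtim;

        if (sample_hold_us > 0)
            shtim = sample_hold_us * adc_clk_khz / 1000 - 1;  /* ticks - 1 */
        else
            shtim = 0;  /* nothing requested: no negative/wrapped intermediate */

        /* (shtim + 1) / ADCClock gives the hold time back: 50 ticks at 5 MHz = 10 us */
        printf("shtim = %u (%u ticks)\n", shtim, shtim + 1);
        return 0;
    }
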
@@ -1101,7 +1107,6 @@ static int at91_adc_remove(struct platform_device *pdev)
1101 return 0; 1107 return 0;
1102} 1108}
1103 1109
1104#ifdef CONFIG_OF
1105static struct at91_adc_caps at91sam9260_caps = { 1110static struct at91_adc_caps at91sam9260_caps = {
1106 .calc_startup_ticks = calc_startup_ticks_9260, 1111 .calc_startup_ticks = calc_startup_ticks_9260,
1107 .num_channels = 4, 1112 .num_channels = 4,
@@ -1154,11 +1159,27 @@ static const struct of_device_id at91_adc_dt_ids[] = {
1154 {}, 1159 {},
1155}; 1160};
1156MODULE_DEVICE_TABLE(of, at91_adc_dt_ids); 1161MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);
1157#endif 1162
1163static const struct platform_device_id at91_adc_ids[] = {
1164 {
1165 .name = "at91sam9260-adc",
1166 .driver_data = (unsigned long)&at91sam9260_caps,
1167 }, {
1168 .name = "at91sam9g45-adc",
1169 .driver_data = (unsigned long)&at91sam9g45_caps,
1170 }, {
1171 .name = "at91sam9x5-adc",
1172 .driver_data = (unsigned long)&at91sam9x5_caps,
1173 }, {
1174 /* terminator */
1175 }
1176};
1177MODULE_DEVICE_TABLE(platform, at91_adc_ids);
1158 1178
1159static struct platform_driver at91_adc_driver = { 1179static struct platform_driver at91_adc_driver = {
1160 .probe = at91_adc_probe, 1180 .probe = at91_adc_probe,
1161 .remove = at91_adc_remove, 1181 .remove = at91_adc_remove,
1182 .id_table = at91_adc_ids,
1162 .driver = { 1183 .driver = {
1163 .name = DRIVER_NAME, 1184 .name = DRIVER_NAME,
1164 .of_match_table = of_match_ptr(at91_adc_dt_ids), 1185 .of_match_table = of_match_ptr(at91_adc_dt_ids),
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index d25b262193a7..affa93f51789 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -344,7 +344,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
344 344
345 exynos_adc_hw_init(info); 345 exynos_adc_hw_init(info);
346 346
347 ret = of_platform_populate(np, exynos_adc_match, NULL, &pdev->dev); 347 ret = of_platform_populate(np, exynos_adc_match, NULL, &indio_dev->dev);
348 if (ret < 0) { 348 if (ret < 0) {
349 dev_err(&pdev->dev, "failed adding child nodes\n"); 349 dev_err(&pdev->dev, "failed adding child nodes\n");
350 goto err_of_populate; 350 goto err_of_populate;
@@ -353,7 +353,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
353 return 0; 353 return 0;
354 354
355err_of_populate: 355err_of_populate:
356 device_for_each_child(&pdev->dev, NULL, 356 device_for_each_child(&indio_dev->dev, NULL,
357 exynos_adc_remove_devices); 357 exynos_adc_remove_devices);
358 regulator_disable(info->vdd); 358 regulator_disable(info->vdd);
359 clk_disable_unprepare(info->clk); 359 clk_disable_unprepare(info->clk);
@@ -369,7 +369,7 @@ static int exynos_adc_remove(struct platform_device *pdev)
369 struct iio_dev *indio_dev = platform_get_drvdata(pdev); 369 struct iio_dev *indio_dev = platform_get_drvdata(pdev);
370 struct exynos_adc *info = iio_priv(indio_dev); 370 struct exynos_adc *info = iio_priv(indio_dev);
371 371
372 device_for_each_child(&pdev->dev, NULL, 372 device_for_each_child(&indio_dev->dev, NULL,
373 exynos_adc_remove_devices); 373 exynos_adc_remove_devices);
374 regulator_disable(info->vdd); 374 regulator_disable(info->vdd);
375 clk_disable_unprepare(info->clk); 375 clk_disable_unprepare(info->clk);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index cb9f96b446a5..d8ad606c7cd0 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -660,6 +660,7 @@ static int inv_mpu_probe(struct i2c_client *client,
660{ 660{
661 struct inv_mpu6050_state *st; 661 struct inv_mpu6050_state *st;
662 struct iio_dev *indio_dev; 662 struct iio_dev *indio_dev;
663 struct inv_mpu6050_platform_data *pdata;
663 int result; 664 int result;
664 665
665 if (!i2c_check_functionality(client->adapter, 666 if (!i2c_check_functionality(client->adapter,
@@ -672,8 +673,10 @@ static int inv_mpu_probe(struct i2c_client *client,
672 673
673 st = iio_priv(indio_dev); 674 st = iio_priv(indio_dev);
674 st->client = client; 675 st->client = client;
675 st->plat_data = *(struct inv_mpu6050_platform_data 676 pdata = (struct inv_mpu6050_platform_data
676 *)dev_get_platdata(&client->dev); 677 *)dev_get_platdata(&client->dev);
678 if (pdata)
679 st->plat_data = *pdata;
677 /* power is turned on inside check chip type*/ 680 /* power is turned on inside check chip type*/
678 result = inv_check_and_setup_chip(st, id); 681 result = inv_check_and_setup_chip(st, id);
679 if (result) 682 if (result)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index e108f2a9d827..e472cff6eeae 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -165,7 +165,8 @@ static ssize_t iio_scan_el_show(struct device *dev,
165 int ret; 165 int ret;
166 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 166 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
167 167
168 ret = test_bit(to_iio_dev_attr(attr)->address, 168 /* Ensure ret is 0 or 1. */
169 ret = !!test_bit(to_iio_dev_attr(attr)->address,
169 indio_dev->buffer->scan_mask); 170 indio_dev->buffer->scan_mask);
170 171
171 return sprintf(buf, "%d\n", ret); 172 return sprintf(buf, "%d\n", ret);
@@ -862,7 +863,8 @@ int iio_scan_mask_query(struct iio_dev *indio_dev,
862 if (!buffer->scan_mask) 863 if (!buffer->scan_mask)
863 return 0; 864 return 0;
864 865
865 return test_bit(bit, buffer->scan_mask); 866 /* Ensure return value is 0 or 1. */
867 return !!test_bit(bit, buffer->scan_mask);
866}; 868};
867EXPORT_SYMBOL_GPL(iio_scan_mask_query); 869EXPORT_SYMBOL_GPL(iio_scan_mask_query);
868 870
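
Both industrialio-buffer hunks above normalize the result of test_bit() with !!: as the new comments note, only zero versus non-zero is guaranteed, while the sysfs output and the callers here want exactly 0 or 1. A minimal illustration with a test_bit()-like helper (not the kernel bitops):

    #include <stdio.h>

    /* Returns the raw masked word, i.e. any non-zero value when the bit is
     * set, matching the weaker guarantee being worked around above. */
    static unsigned long test_bit_raw(int nr, const unsigned long *addr)
    {
        return addr[nr / (8 * sizeof(unsigned long))] &
               (1UL << (nr % (8 * sizeof(unsigned long))));
    }

    int main(void)
    {
        unsigned long mask[1] = { 1UL << 5 };

        printf("raw:        %lu\n", test_bit_raw(5, mask));    /* 32, not 1 */
        printf("normalized: %d\n", !!test_bit_raw(5, mask));   /* 1 */
        return 0;
    }
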
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index 47a6dbac2d0c..d976e6ce60db 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -221,6 +221,7 @@ static int cm32181_read_raw(struct iio_dev *indio_dev,
221 *val = cm32181->calibscale; 221 *val = cm32181->calibscale;
222 return IIO_VAL_INT; 222 return IIO_VAL_INT;
223 case IIO_CHAN_INFO_INT_TIME: 223 case IIO_CHAN_INFO_INT_TIME:
224 *val = 0;
224 ret = cm32181_read_als_it(cm32181, val2); 225 ret = cm32181_read_als_it(cm32181, val2);
225 return ret; 226 return ret;
226 } 227 }
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index a45e07492db3..39fc67e82138 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -652,7 +652,19 @@ static int cm36651_probe(struct i2c_client *client,
652 cm36651->client = client; 652 cm36651->client = client;
653 cm36651->ps_client = i2c_new_dummy(client->adapter, 653 cm36651->ps_client = i2c_new_dummy(client->adapter,
654 CM36651_I2C_ADDR_PS); 654 CM36651_I2C_ADDR_PS);
655 if (!cm36651->ps_client) {
656 dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
657 ret = -ENODEV;
658 goto error_disable_reg;
659 }
660
655 cm36651->ara_client = i2c_new_dummy(client->adapter, CM36651_ARA); 661 cm36651->ara_client = i2c_new_dummy(client->adapter, CM36651_ARA);
662 if (!cm36651->ara_client) {
663 dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
664 ret = -ENODEV;
665 goto error_i2c_unregister_ps;
666 }
667
656 mutex_init(&cm36651->lock); 668 mutex_init(&cm36651->lock);
657 indio_dev->dev.parent = &client->dev; 669 indio_dev->dev.parent = &client->dev;
658 indio_dev->channels = cm36651_channels; 670 indio_dev->channels = cm36651_channels;
@@ -664,7 +676,7 @@ static int cm36651_probe(struct i2c_client *client,
664 ret = cm36651_setup_reg(cm36651); 676 ret = cm36651_setup_reg(cm36651);
665 if (ret) { 677 if (ret) {
666 dev_err(&client->dev, "%s: register setup failed\n", __func__); 678 dev_err(&client->dev, "%s: register setup failed\n", __func__);
667 goto error_disable_reg; 679 goto error_i2c_unregister_ara;
668 } 680 }
669 681
670 ret = request_threaded_irq(client->irq, NULL, cm36651_irq_handler, 682 ret = request_threaded_irq(client->irq, NULL, cm36651_irq_handler,
@@ -672,7 +684,7 @@ static int cm36651_probe(struct i2c_client *client,
672 "cm36651", indio_dev); 684 "cm36651", indio_dev);
673 if (ret) { 685 if (ret) {
674 dev_err(&client->dev, "%s: request irq failed\n", __func__); 686 dev_err(&client->dev, "%s: request irq failed\n", __func__);
675 goto error_disable_reg; 687 goto error_i2c_unregister_ara;
676 } 688 }
677 689
678 ret = iio_device_register(indio_dev); 690 ret = iio_device_register(indio_dev);
@@ -685,6 +697,10 @@ static int cm36651_probe(struct i2c_client *client,
685 697
686error_free_irq: 698error_free_irq:
687 free_irq(client->irq, indio_dev); 699 free_irq(client->irq, indio_dev);
700error_i2c_unregister_ara:
701 i2c_unregister_device(cm36651->ara_client);
702error_i2c_unregister_ps:
703 i2c_unregister_device(cm36651->ps_client);
688error_disable_reg: 704error_disable_reg:
689 regulator_disable(cm36651->vled_reg); 705 regulator_disable(cm36651->vled_reg);
690 return ret; 706 return ret;
@@ -698,6 +714,8 @@ static int cm36651_remove(struct i2c_client *client)
698 iio_device_unregister(indio_dev); 714 iio_device_unregister(indio_dev);
699 regulator_disable(cm36651->vled_reg); 715 regulator_disable(cm36651->vled_reg);
700 free_irq(client->irq, indio_dev); 716 free_irq(client->irq, indio_dev);
717 i2c_unregister_device(cm36651->ps_client);
718 i2c_unregister_device(cm36651->ara_client);
701 719
702 return 0; 720 return 0;
703} 721}
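
The cm36651 changes above check both i2c_new_dummy() calls for NULL and extend the error unwinding so each successfully created dummy client is unregistered on every later failure path and in remove(). A compact userspace sketch of that goto-unwind pattern (hypothetical resources, not the I2C API):

    #include <stdio.h>
    #include <stdlib.h>

    static void *acquire(const char *what, int ok)
    {
        printf("acquire %s: %s\n", what, ok ? "ok" : "failed");
        return ok ? malloc(1) : NULL;
    }

    static int probe(int ps_ok, int ara_ok)
    {
        void *ps, *ara;

        ps = acquire("ps client", ps_ok);
        if (!ps)
            return -1;

        ara = acquire("ara client", ara_ok);
        if (!ara)
            goto err_release_ps;    /* undo only what already succeeded */

        /* the rest of probe would go here; on success both clients are kept */
        free(ara);
        free(ps);                   /* freed here only because this demo has no remove() */
        return 0;

    err_release_ps:
        free(ps);
        return -1;
    }

    int main(void)
    {
        probe(1, 0);    /* second client fails: only the first one gets released */
        return 0;
    }
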
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index d4e8983fba53..23f38cf2c5cd 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,10 +1,10 @@
1config INFINIBAND_CXGB4 1config INFINIBAND_CXGB4
2 tristate "Chelsio T4 RDMA Driver" 2 tristate "Chelsio T4/T5 RDMA Driver"
3 depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n) 3 depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
4 select GENERIC_ALLOCATOR 4 select GENERIC_ALLOCATOR
5 ---help--- 5 ---help---
6 This is an iWARP/RDMA driver for the Chelsio T4 1GbE and 6 This is an iWARP/RDMA driver for the Chelsio T4 and T5
7 10GbE adapters. 7 1GbE, 10GbE adapters and T5 40GbE adapter.
8 8
9 For general information about Chelsio and our products, visit 9 For general information about Chelsio and our products, visit
10 our website at <http://www.chelsio.com>. 10 our website at <http://www.chelsio.com>.
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 02436d5d0dab..1f863a96a480 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -173,12 +173,15 @@ static void start_ep_timer(struct c4iw_ep *ep)
173 add_timer(&ep->timer); 173 add_timer(&ep->timer);
174} 174}
175 175
176static void stop_ep_timer(struct c4iw_ep *ep) 176static int stop_ep_timer(struct c4iw_ep *ep)
177{ 177{
178 PDBG("%s ep %p stopping\n", __func__, ep); 178 PDBG("%s ep %p stopping\n", __func__, ep);
179 del_timer_sync(&ep->timer); 179 del_timer_sync(&ep->timer);
180 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) 180 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
181 c4iw_put_ep(&ep->com); 181 c4iw_put_ep(&ep->com);
182 return 0;
183 }
184 return 1;
182} 185}
183 186
184static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, 187static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
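
stop_ep_timer() now reports whether the TIMEOUT bit was already set, so callers can tell an orderly stop (return 0, reference dropped here) apart from a timer that has already fired (return 1, cleanup left to process_timeout()). The single-winner idea rests on an atomic test-and-set; a C11 sketch of that primitive (not the kernel's bitops):

    #include <stdio.h>
    #include <stdatomic.h>

    static atomic_flag timed_out = ATOMIC_FLAG_INIT;

    /* Returns 0 for the caller that claimed the flag first, 1 for everyone else. */
    static int stop_timer(void)
    {
        if (!atomic_flag_test_and_set(&timed_out)) {
            printf("stopped cleanly, dropping the reference\n");
            return 0;
        }
        printf("timer already fired, leave cleanup to the timeout path\n");
        return 1;
    }

    int main(void)
    {
        stop_timer();   /* first caller wins */
        stop_timer();   /* any later caller sees 1 */
        return 0;
    }
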
@@ -584,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep)
584 opt2 |= SACK_EN(1); 587 opt2 |= SACK_EN(1);
585 if (wscale && enable_tcp_window_scaling) 588 if (wscale && enable_tcp_window_scaling)
586 opt2 |= WND_SCALE_EN(1); 589 opt2 |= WND_SCALE_EN(1);
590 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
591 opt2 |= T5_OPT_2_VALID;
592 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
593 }
587 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 594 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
588 595
589 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 596 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -993,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
993static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) 1000static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
994{ 1001{
995 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1002 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
996 state_set(&ep->com, ABORTING); 1003 __state_set(&ep->com, ABORTING);
997 set_bit(ABORT_CONN, &ep->com.history); 1004 set_bit(ABORT_CONN, &ep->com.history);
998 return send_abort(ep, skb, gfp); 1005 return send_abort(ep, skb, gfp);
999} 1006}
@@ -1151,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
1151 return credits; 1158 return credits;
1152} 1159}
1153 1160
1154static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) 1161static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1155{ 1162{
1156 struct mpa_message *mpa; 1163 struct mpa_message *mpa;
1157 struct mpa_v2_conn_params *mpa_v2_params; 1164 struct mpa_v2_conn_params *mpa_v2_params;
@@ -1161,17 +1168,17 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1161 struct c4iw_qp_attributes attrs; 1168 struct c4iw_qp_attributes attrs;
1162 enum c4iw_qp_attr_mask mask; 1169 enum c4iw_qp_attr_mask mask;
1163 int err; 1170 int err;
1171 int disconnect = 0;
1164 1172
1165 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1173 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1166 1174
1167 /* 1175 /*
1168 * Stop mpa timer. If it expired, then the state has 1176 * Stop mpa timer. If it expired, then
1169 * changed and we bail since ep_timeout already aborted 1177 * we ignore the MPA reply. process_timeout()
1170 * the connection. 1178 * will abort the connection.
1171 */ 1179 */
1172 stop_ep_timer(ep); 1180 if (stop_ep_timer(ep))
1173 if (ep->com.state != MPA_REQ_SENT) 1181 return 0;
1174 return;
1175 1182
1176 /* 1183 /*
1177 * If we get more than the supported amount of private data 1184 * If we get more than the supported amount of private data
@@ -1193,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1193 * if we don't even have the mpa message, then bail. 1200 * if we don't even have the mpa message, then bail.
1194 */ 1201 */
1195 if (ep->mpa_pkt_len < sizeof(*mpa)) 1202 if (ep->mpa_pkt_len < sizeof(*mpa))
1196 return; 1203 return 0;
1197 mpa = (struct mpa_message *) ep->mpa_pkt; 1204 mpa = (struct mpa_message *) ep->mpa_pkt;
1198 1205
1199 /* Validate MPA header. */ 1206 /* Validate MPA header. */
@@ -1233,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1233 * We'll continue process when more data arrives. 1240 * We'll continue process when more data arrives.
1234 */ 1241 */
1235 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1242 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1236 return; 1243 return 0;
1237 1244
1238 if (mpa->flags & MPA_REJECT) { 1245 if (mpa->flags & MPA_REJECT) {
1239 err = -ECONNREFUSED; 1246 err = -ECONNREFUSED;
@@ -1335,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1335 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1342 attrs.layer_etype = LAYER_MPA | DDP_LLP;
1336 attrs.ecode = MPA_NOMATCH_RTR; 1343 attrs.ecode = MPA_NOMATCH_RTR;
1337 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1344 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1345 attrs.send_term = 1;
1338 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1346 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1339 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1347 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1340 err = -ENOMEM; 1348 err = -ENOMEM;
1349 disconnect = 1;
1341 goto out; 1350 goto out;
1342 } 1351 }
1343 1352
@@ -1353,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1353 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1362 attrs.layer_etype = LAYER_MPA | DDP_LLP;
1354 attrs.ecode = MPA_INSUFF_IRD; 1363 attrs.ecode = MPA_INSUFF_IRD;
1355 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1364 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1365 attrs.send_term = 1;
1356 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1366 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1357 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1367 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1358 err = -ENOMEM; 1368 err = -ENOMEM;
1369 disconnect = 1;
1359 goto out; 1370 goto out;
1360 } 1371 }
1361 goto out; 1372 goto out;
@@ -1364,7 +1375,7 @@ err:
1364 send_abort(ep, skb, GFP_KERNEL); 1375 send_abort(ep, skb, GFP_KERNEL);
1365out: 1376out:
1366 connect_reply_upcall(ep, err); 1377 connect_reply_upcall(ep, err);
1367 return; 1378 return disconnect;
1368} 1379}
1369 1380
1370static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) 1381static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -1375,15 +1386,12 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1375 1386
1376 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1387 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1377 1388
1378 if (ep->com.state != MPA_REQ_WAIT)
1379 return;
1380
1381 /* 1389 /*
1382 * If we get more than the supported amount of private data 1390 * If we get more than the supported amount of private data
1383 * then we must fail this connection. 1391 * then we must fail this connection.
1384 */ 1392 */
1385 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 1393 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1386 stop_ep_timer(ep); 1394 (void)stop_ep_timer(ep);
1387 abort_connection(ep, skb, GFP_KERNEL); 1395 abort_connection(ep, skb, GFP_KERNEL);
1388 return; 1396 return;
1389 } 1397 }
@@ -1413,13 +1421,13 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1413 if (mpa->revision > mpa_rev) { 1421 if (mpa->revision > mpa_rev) {
1414 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," 1422 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
1415 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1423 " Received = %d\n", __func__, mpa_rev, mpa->revision);
1416 stop_ep_timer(ep); 1424 (void)stop_ep_timer(ep);
1417 abort_connection(ep, skb, GFP_KERNEL); 1425 abort_connection(ep, skb, GFP_KERNEL);
1418 return; 1426 return;
1419 } 1427 }
1420 1428
1421 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { 1429 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
1422 stop_ep_timer(ep); 1430 (void)stop_ep_timer(ep);
1423 abort_connection(ep, skb, GFP_KERNEL); 1431 abort_connection(ep, skb, GFP_KERNEL);
1424 return; 1432 return;
1425 } 1433 }
@@ -1430,7 +1438,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1430 * Fail if there's too much private data. 1438 * Fail if there's too much private data.
1431 */ 1439 */
1432 if (plen > MPA_MAX_PRIVATE_DATA) { 1440 if (plen > MPA_MAX_PRIVATE_DATA) {
1433 stop_ep_timer(ep); 1441 (void)stop_ep_timer(ep);
1434 abort_connection(ep, skb, GFP_KERNEL); 1442 abort_connection(ep, skb, GFP_KERNEL);
1435 return; 1443 return;
1436 } 1444 }
@@ -1439,7 +1447,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1439 * If plen does not account for pkt size 1447 * If plen does not account for pkt size
1440 */ 1448 */
1441 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1449 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1442 stop_ep_timer(ep); 1450 (void)stop_ep_timer(ep);
1443 abort_connection(ep, skb, GFP_KERNEL); 1451 abort_connection(ep, skb, GFP_KERNEL);
1444 return; 1452 return;
1445 } 1453 }
@@ -1496,18 +1504,24 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1496 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1504 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1497 ep->mpa_attr.p2p_type); 1505 ep->mpa_attr.p2p_type);
1498 1506
1499 __state_set(&ep->com, MPA_REQ_RCVD); 1507 /*
1500 stop_ep_timer(ep); 1508 * If the endpoint timer already expired, then we ignore
1501 1509 * the start request. process_timeout() will abort
1502 /* drive upcall */ 1510 * the connection.
1503 mutex_lock(&ep->parent_ep->com.mutex); 1511 */
1504 if (ep->parent_ep->com.state != DEAD) { 1512 if (!stop_ep_timer(ep)) {
1505 if (connect_request_upcall(ep)) 1513 __state_set(&ep->com, MPA_REQ_RCVD);
1514
1515 /* drive upcall */
1516 mutex_lock(&ep->parent_ep->com.mutex);
1517 if (ep->parent_ep->com.state != DEAD) {
1518 if (connect_request_upcall(ep))
1519 abort_connection(ep, skb, GFP_KERNEL);
1520 } else {
1506 abort_connection(ep, skb, GFP_KERNEL); 1521 abort_connection(ep, skb, GFP_KERNEL);
1507 } else { 1522 }
1508 abort_connection(ep, skb, GFP_KERNEL); 1523 mutex_unlock(&ep->parent_ep->com.mutex);
1509 } 1524 }
1510 mutex_unlock(&ep->parent_ep->com.mutex);
1511 return; 1525 return;
1512} 1526}
1513 1527
@@ -1519,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1519 unsigned int tid = GET_TID(hdr); 1533 unsigned int tid = GET_TID(hdr);
1520 struct tid_info *t = dev->rdev.lldi.tids; 1534 struct tid_info *t = dev->rdev.lldi.tids;
1521 __u8 status = hdr->status; 1535 __u8 status = hdr->status;
1536 int disconnect = 0;
1522 1537
1523 ep = lookup_tid(t, tid); 1538 ep = lookup_tid(t, tid);
1524 if (!ep) 1539 if (!ep)
@@ -1534,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1534 switch (ep->com.state) { 1549 switch (ep->com.state) {
1535 case MPA_REQ_SENT: 1550 case MPA_REQ_SENT:
1536 ep->rcv_seq += dlen; 1551 ep->rcv_seq += dlen;
1537 process_mpa_reply(ep, skb); 1552 disconnect = process_mpa_reply(ep, skb);
1538 break; 1553 break;
1539 case MPA_REQ_WAIT: 1554 case MPA_REQ_WAIT:
1540 ep->rcv_seq += dlen; 1555 ep->rcv_seq += dlen;
@@ -1550,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1550 ep->com.state, ep->hwtid, status); 1565 ep->com.state, ep->hwtid, status);
1551 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1566 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1552 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1567 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1553 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1568 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1569 disconnect = 1;
1554 break; 1570 break;
1555 } 1571 }
1556 default: 1572 default:
1557 break; 1573 break;
1558 } 1574 }
1559 mutex_unlock(&ep->com.mutex); 1575 mutex_unlock(&ep->com.mutex);
1576 if (disconnect)
1577 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1560 return 0; 1578 return 0;
1561} 1579}
1562 1580
@@ -2004,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2004 if (tcph->ece && tcph->cwr) 2022 if (tcph->ece && tcph->cwr)
2005 opt2 |= CCTRL_ECN(1); 2023 opt2 |= CCTRL_ECN(1);
2006 } 2024 }
2025 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
2026 opt2 |= T5_OPT_2_VALID;
2027 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
2028 }
2007 2029
2008 rpl = cplhdr(skb); 2030 rpl = cplhdr(skb);
2009 INIT_TP_WR(rpl, ep->hwtid); 2031 INIT_TP_WR(rpl, ep->hwtid);
@@ -2265,7 +2287,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
2265 disconnect = 0; 2287 disconnect = 0;
2266 break; 2288 break;
2267 case MORIBUND: 2289 case MORIBUND:
2268 stop_ep_timer(ep); 2290 (void)stop_ep_timer(ep);
2269 if (ep->com.cm_id && ep->com.qp) { 2291 if (ep->com.cm_id && ep->com.qp) {
2270 attrs.next_state = C4IW_QP_STATE_IDLE; 2292 attrs.next_state = C4IW_QP_STATE_IDLE;
2271 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2293 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -2325,10 +2347,10 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
2325 case CONNECTING: 2347 case CONNECTING:
2326 break; 2348 break;
2327 case MPA_REQ_WAIT: 2349 case MPA_REQ_WAIT:
2328 stop_ep_timer(ep); 2350 (void)stop_ep_timer(ep);
2329 break; 2351 break;
2330 case MPA_REQ_SENT: 2352 case MPA_REQ_SENT:
2331 stop_ep_timer(ep); 2353 (void)stop_ep_timer(ep);
2332 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) 2354 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
2333 connect_reply_upcall(ep, -ECONNRESET); 2355 connect_reply_upcall(ep, -ECONNRESET);
2334 else { 2356 else {
@@ -2433,7 +2455,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2433 __state_set(&ep->com, MORIBUND); 2455 __state_set(&ep->com, MORIBUND);
2434 break; 2456 break;
2435 case MORIBUND: 2457 case MORIBUND:
2436 stop_ep_timer(ep); 2458 (void)stop_ep_timer(ep);
2437 if ((ep->com.cm_id) && (ep->com.qp)) { 2459 if ((ep->com.cm_id) && (ep->com.qp)) {
2438 attrs.next_state = C4IW_QP_STATE_IDLE; 2460 attrs.next_state = C4IW_QP_STATE_IDLE;
2439 c4iw_modify_qp(ep->com.qp->rhp, 2461 c4iw_modify_qp(ep->com.qp->rhp,
@@ -3028,7 +3050,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
3028 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 3050 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
3029 close = 1; 3051 close = 1;
3030 if (abrupt) { 3052 if (abrupt) {
3031 stop_ep_timer(ep); 3053 (void)stop_ep_timer(ep);
3032 ep->com.state = ABORTING; 3054 ep->com.state = ABORTING;
3033 } else 3055 } else
3034 ep->com.state = MORIBUND; 3056 ep->com.state = MORIBUND;
@@ -3462,14 +3484,24 @@ static void process_timeout(struct c4iw_ep *ep)
3462 __state_set(&ep->com, ABORTING); 3484 __state_set(&ep->com, ABORTING);
3463 close_complete_upcall(ep, -ETIMEDOUT); 3485 close_complete_upcall(ep, -ETIMEDOUT);
3464 break; 3486 break;
3487 case ABORTING:
3488 case DEAD:
3489
3490 /*
3491 * These states are expected if the ep timed out at the same
3492 * time as another thread was calling stop_ep_timer().
3493 * So we silently do nothing for these states.
3494 */
3495 abort = 0;
3496 break;
3465 default: 3497 default:
3466 WARN(1, "%s unexpected state ep %p tid %u state %u\n", 3498 WARN(1, "%s unexpected state ep %p tid %u state %u\n",
3467 __func__, ep, ep->hwtid, ep->com.state); 3499 __func__, ep, ep->hwtid, ep->com.state);
3468 abort = 0; 3500 abort = 0;
3469 } 3501 }
3470 mutex_unlock(&ep->com.mutex);
3471 if (abort) 3502 if (abort)
3472 abort_connection(ep, NULL, GFP_KERNEL); 3503 abort_connection(ep, NULL, GFP_KERNEL);
3504 mutex_unlock(&ep->com.mutex);
3473 c4iw_put_ep(&ep->com); 3505 c4iw_put_ep(&ep->com);
3474} 3506}
3475 3507
@@ -3483,6 +3515,8 @@ static void process_timedout_eps(void)
3483 3515
3484 tmp = timeout_list.next; 3516 tmp = timeout_list.next;
3485 list_del(tmp); 3517 list_del(tmp);
3518 tmp->next = NULL;
3519 tmp->prev = NULL;
3486 spin_unlock_irq(&timeout_lock); 3520 spin_unlock_irq(&timeout_lock);
3487 ep = list_entry(tmp, struct c4iw_ep, entry); 3521 ep = list_entry(tmp, struct c4iw_ep, entry);
3488 process_timeout(ep); 3522 process_timeout(ep);
@@ -3499,6 +3533,7 @@ static void process_work(struct work_struct *work)
3499 unsigned int opcode; 3533 unsigned int opcode;
3500 int ret; 3534 int ret;
3501 3535
3536 process_timedout_eps();
3502 while ((skb = skb_dequeue(&rxq))) { 3537 while ((skb = skb_dequeue(&rxq))) {
3503 rpl = cplhdr(skb); 3538 rpl = cplhdr(skb);
3504 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 3539 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
@@ -3508,8 +3543,8 @@ static void process_work(struct work_struct *work)
3508 ret = work_handlers[opcode](dev, skb); 3543 ret = work_handlers[opcode](dev, skb);
3509 if (!ret) 3544 if (!ret)
3510 kfree_skb(skb); 3545 kfree_skb(skb);
3546 process_timedout_eps();
3511 } 3547 }
3512 process_timedout_eps();
3513} 3548}
3514 3549
3515static DECLARE_WORK(skb_work, process_work); 3550static DECLARE_WORK(skb_work, process_work);
@@ -3521,8 +3556,13 @@ static void ep_timeout(unsigned long arg)
3521 3556
3522 spin_lock(&timeout_lock); 3557 spin_lock(&timeout_lock);
3523 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 3558 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
3524 list_add_tail(&ep->entry, &timeout_list); 3559 /*
3525 kickit = 1; 3560 * Only insert if it is not already on the list.
3561 */
3562 if (!ep->entry.next) {
3563 list_add_tail(&ep->entry, &timeout_list);
3564 kickit = 1;
3565 }
3526 } 3566 }
3527 spin_unlock(&timeout_lock); 3567 spin_unlock(&timeout_lock);
3528 if (kickit) 3568 if (kickit)
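
A minimal userspace sketch of the off-list marker idiom relied on in the hunks above: ep_timeout() only queues an endpoint whose entry.next is NULL, and process_timedout_eps() re-NULLs the pointers after list_del(), so a racing timeout cannot queue the same endpoint twice. The *_sketch helpers and the main() harness are illustrative stand-ins, not kernel APIs.

    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next, *prev; };

    static struct node timeout_list = { &timeout_list, &timeout_list };

    static void list_add_tail_sketch(struct node *n, struct node *head)
    {
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    static void list_del_sketch(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->next = NULL;    /* mark "not on the list", as the patch does */
        n->prev = NULL;
    }

    static void queue_timeout(struct node *n)
    {
        if (!n->next)      /* only insert if not already queued */
            list_add_tail_sketch(n, &timeout_list);
    }

    int main(void)
    {
        struct node ep = { NULL, NULL };

        queue_timeout(&ep);
        queue_timeout(&ep);    /* second attempt is a no-op */
        printf("queued once: %d\n", timeout_list.next == &ep &&
                                    timeout_list.prev == &ep);
        list_del_sketch(&ep);
        printf("off list again: %d\n", ep.next == NULL);
        return 0;
    }
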
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ce468e542428..cfaa56ada189 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -235,27 +235,21 @@ int c4iw_flush_sq(struct c4iw_qp *qhp)
235 struct t4_cq *cq = &chp->cq; 235 struct t4_cq *cq = &chp->cq;
236 int idx; 236 int idx;
237 struct t4_swsqe *swsqe; 237 struct t4_swsqe *swsqe;
238 int error = (qhp->attr.state != C4IW_QP_STATE_CLOSING &&
239 qhp->attr.state != C4IW_QP_STATE_IDLE);
240 238
241 if (wq->sq.flush_cidx == -1) 239 if (wq->sq.flush_cidx == -1)
242 wq->sq.flush_cidx = wq->sq.cidx; 240 wq->sq.flush_cidx = wq->sq.cidx;
243 idx = wq->sq.flush_cidx; 241 idx = wq->sq.flush_cidx;
244 BUG_ON(idx >= wq->sq.size); 242 BUG_ON(idx >= wq->sq.size);
245 while (idx != wq->sq.pidx) { 243 while (idx != wq->sq.pidx) {
246 if (error) { 244 swsqe = &wq->sq.sw_sq[idx];
247 swsqe = &wq->sq.sw_sq[idx]; 245 BUG_ON(swsqe->flushed);
248 BUG_ON(swsqe->flushed); 246 swsqe->flushed = 1;
249 swsqe->flushed = 1; 247 insert_sq_cqe(wq, cq, swsqe);
250 insert_sq_cqe(wq, cq, swsqe); 248 if (wq->sq.oldest_read == swsqe) {
251 if (wq->sq.oldest_read == swsqe) { 249 BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
252 BUG_ON(swsqe->opcode != FW_RI_READ_REQ); 250 advance_oldest_read(wq);
253 advance_oldest_read(wq);
254 }
255 flushed++;
256 } else {
257 t4_sq_consume(wq);
258 } 251 }
252 flushed++;
259 if (++idx == wq->sq.size) 253 if (++idx == wq->sq.size)
260 idx = 0; 254 idx = 0;
261 } 255 }
@@ -678,7 +672,7 @@ skip_cqe:
678static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) 672static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
679{ 673{
680 struct c4iw_qp *qhp = NULL; 674 struct c4iw_qp *qhp = NULL;
681 struct t4_cqe cqe = {0, 0}, *rd_cqe; 675 struct t4_cqe uninitialized_var(cqe), *rd_cqe;
682 struct t4_wq *wq; 676 struct t4_wq *wq;
683 u32 credit = 0; 677 u32 credit = 0;
684 u8 cqe_flushed; 678 u8 cqe_flushed;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 9489a388376c..f4fa50a609e2 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -682,7 +682,10 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
682 idr_destroy(&ctx->dev->hwtid_idr); 682 idr_destroy(&ctx->dev->hwtid_idr);
683 idr_destroy(&ctx->dev->stid_idr); 683 idr_destroy(&ctx->dev->stid_idr);
684 idr_destroy(&ctx->dev->atid_idr); 684 idr_destroy(&ctx->dev->atid_idr);
685 iounmap(ctx->dev->rdev.oc_mw_kva); 685 if (ctx->dev->rdev.bar2_kva)
686 iounmap(ctx->dev->rdev.bar2_kva);
687 if (ctx->dev->rdev.oc_mw_kva)
688 iounmap(ctx->dev->rdev.oc_mw_kva);
686 ib_dealloc_device(&ctx->dev->ibdev); 689 ib_dealloc_device(&ctx->dev->ibdev);
687 ctx->dev = NULL; 690 ctx->dev = NULL;
688} 691}
@@ -722,11 +725,31 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
722 } 725 }
723 devp->rdev.lldi = *infop; 726 devp->rdev.lldi = *infop;
724 727
725 devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) + 728 /*
726 (pci_resource_len(devp->rdev.lldi.pdev, 2) - 729 * For T5 devices, we map all of BAR2 with WC.
727 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size)); 730 * For T4 devices with onchip qp mem, we map only that part
728 devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, 731 * of BAR2 with WC.
729 devp->rdev.lldi.vr->ocq.size); 732 */
733 devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
734 if (is_t5(devp->rdev.lldi.adapter_type)) {
735 devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
736 pci_resource_len(devp->rdev.lldi.pdev, 2));
737 if (!devp->rdev.bar2_kva) {
738 pr_err(MOD "Unable to ioremap BAR2\n");
739 return ERR_PTR(-EINVAL);
740 }
741 } else if (ocqp_supported(infop)) {
742 devp->rdev.oc_mw_pa =
743 pci_resource_start(devp->rdev.lldi.pdev, 2) +
744 pci_resource_len(devp->rdev.lldi.pdev, 2) -
745 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
746 devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
747 devp->rdev.lldi.vr->ocq.size);
748 if (!devp->rdev.oc_mw_kva) {
749 pr_err(MOD "Unable to ioremap onchip mem\n");
750 return ERR_PTR(-EINVAL);
751 }
752 }
730 753
731 PDBG(KERN_INFO MOD "ocq memory: " 754 PDBG(KERN_INFO MOD "ocq memory: "
732 "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", 755 "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
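
A sketch of the address arithmetic only, under assumed sizes: T5 maps the whole of BAR2 with write-combining, while T4 with on-chip queue memory maps just the OCQ window at the end of the BAR. ioremap_wc() and the pci_resource_*() helpers are kernel APIs and are not modelled here; all numbers below are made up.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t roundup_pow_of_two_u64(uint64_t x)
    {
        uint64_t r = 1;
        while (r < x)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        uint64_t bar2_start = 0xd0000000ULL;   /* hypothetical BAR2 base   */
        uint64_t bar2_len   = 64ULL << 20;     /* hypothetical BAR2 length */
        uint64_t ocq_size   = 3ULL << 20;      /* hypothetical OCQ size    */

        /* T5: map the whole BAR with write-combining. */
        printf("T5 WC mapping: base 0x%llx len 0x%llx\n",
               (unsigned long long)bar2_start, (unsigned long long)bar2_len);

        /* T4 with on-chip QP memory: map only the OCQ window at the end. */
        uint64_t oc_mw_pa = bar2_start + bar2_len -
                            roundup_pow_of_two_u64(ocq_size);
        printf("T4 OCQ window: base 0x%llx len 0x%llx\n",
               (unsigned long long)oc_mw_pa, (unsigned long long)ocq_size);
        return 0;
    }
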
@@ -1003,9 +1026,11 @@ static int enable_qp_db(int id, void *p, void *data)
1003static void resume_rc_qp(struct c4iw_qp *qp) 1026static void resume_rc_qp(struct c4iw_qp *qp)
1004{ 1027{
1005 spin_lock(&qp->lock); 1028 spin_lock(&qp->lock);
1006 t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc); 1029 t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc,
1030 is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
1007 qp->wq.sq.wq_pidx_inc = 0; 1031 qp->wq.sq.wq_pidx_inc = 0;
1008 t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc); 1032 t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc,
1033 is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
1009 qp->wq.rq.wq_pidx_inc = 0; 1034 qp->wq.rq.wq_pidx_inc = 0;
1010 spin_unlock(&qp->lock); 1035 spin_unlock(&qp->lock);
1011} 1036}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index e872203c5424..7474b490760a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -149,6 +149,8 @@ struct c4iw_rdev {
149 struct gen_pool *ocqp_pool; 149 struct gen_pool *ocqp_pool;
150 u32 flags; 150 u32 flags;
151 struct cxgb4_lld_info lldi; 151 struct cxgb4_lld_info lldi;
152 unsigned long bar2_pa;
153 void __iomem *bar2_kva;
152 unsigned long oc_mw_pa; 154 unsigned long oc_mw_pa;
153 void __iomem *oc_mw_kva; 155 void __iomem *oc_mw_kva;
154 struct c4iw_stats stats; 156 struct c4iw_stats stats;
@@ -433,6 +435,7 @@ struct c4iw_qp_attributes {
433 u8 ecode; 435 u8 ecode;
434 u16 sq_db_inc; 436 u16 sq_db_inc;
435 u16 rq_db_inc; 437 u16 rq_db_inc;
438 u8 send_term;
436}; 439};
437 440
438struct c4iw_qp { 441struct c4iw_qp {
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index f9ca072a99ed..ec7a2988a703 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -259,8 +259,12 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
259 259
260 if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) { 260 if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
261 stag_idx = c4iw_get_resource(&rdev->resource.tpt_table); 261 stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
262 if (!stag_idx) 262 if (!stag_idx) {
263 mutex_lock(&rdev->stats.lock);
264 rdev->stats.stag.fail++;
265 mutex_unlock(&rdev->stats.lock);
263 return -ENOMEM; 266 return -ENOMEM;
267 }
264 mutex_lock(&rdev->stats.lock); 268 mutex_lock(&rdev->stats.lock);
265 rdev->stats.stag.cur += 32; 269 rdev->stats.stag.cur += 32;
266 if (rdev->stats.stag.cur > rdev->stats.stag.max) 270 if (rdev->stats.stag.cur > rdev->stats.stag.max)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 79429256023a..a94a3e12c349 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -328,7 +328,7 @@ static int c4iw_query_device(struct ib_device *ibdev,
328 props->max_mr = c4iw_num_stags(&dev->rdev); 328 props->max_mr = c4iw_num_stags(&dev->rdev);
329 props->max_pd = T4_MAX_NUM_PD; 329 props->max_pd = T4_MAX_NUM_PD;
330 props->local_ca_ack_delay = 0; 330 props->local_ca_ack_delay = 0;
331 props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH; 331 props->max_fast_reg_page_list_len = t4_max_fr_depth(use_dsgl);
332 332
333 return 0; 333 return 0;
334} 334}
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cb76eb5eee1f..086f62f5dc9e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -212,13 +212,23 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
212 212
213 wq->db = rdev->lldi.db_reg; 213 wq->db = rdev->lldi.db_reg;
214 wq->gts = rdev->lldi.gts_reg; 214 wq->gts = rdev->lldi.gts_reg;
215 if (user) { 215 if (user || is_t5(rdev->lldi.adapter_type)) {
216 wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) + 216 u32 off;
217 (wq->sq.qid << rdev->qpshift); 217
218 wq->sq.udb &= PAGE_MASK; 218 off = (wq->sq.qid << rdev->qpshift) & PAGE_MASK;
219 wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) + 219 if (user) {
220 (wq->rq.qid << rdev->qpshift); 220 wq->sq.udb = (u64 __iomem *)(rdev->bar2_pa + off);
221 wq->rq.udb &= PAGE_MASK; 221 } else {
222 off += 128 * (wq->sq.qid & rdev->qpmask) + 8;
223 wq->sq.udb = (u64 __iomem *)(rdev->bar2_kva + off);
224 }
225 off = (wq->rq.qid << rdev->qpshift) & PAGE_MASK;
226 if (user) {
227 wq->rq.udb = (u64 __iomem *)(rdev->bar2_pa + off);
228 } else {
229 off += 128 * (wq->rq.qid & rdev->qpmask) + 8;
230 wq->rq.udb = (u64 __iomem *)(rdev->bar2_kva + off);
231 }
222 } 232 }
223 wq->rdev = rdev; 233 wq->rdev = rdev;
224 wq->rq.msn = 1; 234 wq->rq.msn = 1;
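
A sketch of the doorbell-offset math in the hunk above, assuming 4 KB pages: user queues take the page-aligned offset into BAR2, while kernel queues additionally index a 128-byte per-qid slot and write 8 bytes into it. qpshift/qpmask come from the lower-level driver in the real code, so the constants below are purely illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_MASK_SKETCH (~0xfffUL)    /* assumes 4 KB pages */

    static unsigned long udb_offset(uint32_t qid, int qpshift, uint32_t qpmask,
                                    int kernel_mode)
    {
        unsigned long off = ((unsigned long)qid << qpshift) & PAGE_MASK_SKETCH;

        /* Kernel doorbells land 8 bytes into a 128-byte per-qid slot. */
        if (kernel_mode)
            off += 128 * (qid & qpmask) + 8;
        return off;
    }

    int main(void)
    {
        uint32_t qid = 1029;      /* hypothetical queue id */
        int qpshift = 12;         /* hypothetical */
        uint32_t qpmask = 0x1f;   /* hypothetical */

        printf("user   udb off 0x%lx\n", udb_offset(qid, qpshift, qpmask, 0));
        printf("kernel udb off 0x%lx\n", udb_offset(qid, qpshift, qpmask, 1));
        return 0;
    }
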
@@ -299,9 +309,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
299 if (ret) 309 if (ret)
300 goto free_dma; 310 goto free_dma;
301 311
302 PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n", 312 PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%lx rqudb 0x%lx\n",
303 __func__, wq->sq.qid, wq->rq.qid, wq->db, 313 __func__, wq->sq.qid, wq->rq.qid, wq->db,
304 (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb); 314 (__force unsigned long) wq->sq.udb,
315 (__force unsigned long) wq->rq.udb);
305 316
306 return 0; 317 return 0;
307free_dma: 318free_dma:
@@ -425,6 +436,8 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
425 default: 436 default:
426 return -EINVAL; 437 return -EINVAL;
427 } 438 }
439 wqe->send.r3 = 0;
440 wqe->send.r4 = 0;
428 441
429 plen = 0; 442 plen = 0;
430 if (wr->num_sge) { 443 if (wr->num_sge) {
@@ -555,7 +568,8 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
555 int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32); 568 int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
556 int rem; 569 int rem;
557 570
558 if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH) 571 if (wr->wr.fast_reg.page_list_len >
572 t4_max_fr_depth(use_dsgl))
559 return -EINVAL; 573 return -EINVAL;
560 574
561 wqe->fr.qpbinde_to_dcacpu = 0; 575 wqe->fr.qpbinde_to_dcacpu = 0;
@@ -650,9 +664,10 @@ static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
650 664
651 spin_lock_irqsave(&qhp->rhp->lock, flags); 665 spin_lock_irqsave(&qhp->rhp->lock, flags);
652 spin_lock(&qhp->lock); 666 spin_lock(&qhp->lock);
653 if (qhp->rhp->db_state == NORMAL) { 667 if (qhp->rhp->db_state == NORMAL)
654 t4_ring_sq_db(&qhp->wq, inc); 668 t4_ring_sq_db(&qhp->wq, inc,
655 } else { 669 is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
670 else {
656 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); 671 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
657 qhp->wq.sq.wq_pidx_inc += inc; 672 qhp->wq.sq.wq_pidx_inc += inc;
658 } 673 }
@@ -667,9 +682,10 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
667 682
668 spin_lock_irqsave(&qhp->rhp->lock, flags); 683 spin_lock_irqsave(&qhp->rhp->lock, flags);
669 spin_lock(&qhp->lock); 684 spin_lock(&qhp->lock);
670 if (qhp->rhp->db_state == NORMAL) { 685 if (qhp->rhp->db_state == NORMAL)
671 t4_ring_rq_db(&qhp->wq, inc); 686 t4_ring_rq_db(&qhp->wq, inc,
672 } else { 687 is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
688 else {
673 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); 689 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
674 qhp->wq.rq.wq_pidx_inc += inc; 690 qhp->wq.rq.wq_pidx_inc += inc;
675 } 691 }
@@ -686,7 +702,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
686 enum fw_wr_opcodes fw_opcode = 0; 702 enum fw_wr_opcodes fw_opcode = 0;
687 enum fw_ri_wr_flags fw_flags; 703 enum fw_ri_wr_flags fw_flags;
688 struct c4iw_qp *qhp; 704 struct c4iw_qp *qhp;
689 union t4_wr *wqe; 705 union t4_wr *wqe = NULL;
690 u32 num_wrs; 706 u32 num_wrs;
691 struct t4_swsqe *swsqe; 707 struct t4_swsqe *swsqe;
692 unsigned long flag; 708 unsigned long flag;
@@ -792,7 +808,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
792 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); 808 idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
793 } 809 }
794 if (!qhp->rhp->rdev.status_page->db_off) { 810 if (!qhp->rhp->rdev.status_page->db_off) {
795 t4_ring_sq_db(&qhp->wq, idx); 811 t4_ring_sq_db(&qhp->wq, idx,
812 is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
796 spin_unlock_irqrestore(&qhp->lock, flag); 813 spin_unlock_irqrestore(&qhp->lock, flag);
797 } else { 814 } else {
798 spin_unlock_irqrestore(&qhp->lock, flag); 815 spin_unlock_irqrestore(&qhp->lock, flag);
@@ -806,7 +823,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
806{ 823{
807 int err = 0; 824 int err = 0;
808 struct c4iw_qp *qhp; 825 struct c4iw_qp *qhp;
809 union t4_recv_wr *wqe; 826 union t4_recv_wr *wqe = NULL;
810 u32 num_wrs; 827 u32 num_wrs;
811 u8 len16 = 0; 828 u8 len16 = 0;
812 unsigned long flag; 829 unsigned long flag;
@@ -858,7 +875,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
858 num_wrs--; 875 num_wrs--;
859 } 876 }
860 if (!qhp->rhp->rdev.status_page->db_off) { 877 if (!qhp->rhp->rdev.status_page->db_off) {
861 t4_ring_rq_db(&qhp->wq, idx); 878 t4_ring_rq_db(&qhp->wq, idx,
879 is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
862 spin_unlock_irqrestore(&qhp->lock, flag); 880 spin_unlock_irqrestore(&qhp->lock, flag);
863 } else { 881 } else {
864 spin_unlock_irqrestore(&qhp->lock, flag); 882 spin_unlock_irqrestore(&qhp->lock, flag);
@@ -1352,6 +1370,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1352 switch (attrs->next_state) { 1370 switch (attrs->next_state) {
1353 case C4IW_QP_STATE_CLOSING: 1371 case C4IW_QP_STATE_CLOSING:
1354 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); 1372 BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1373 t4_set_wq_in_error(&qhp->wq);
1355 set_state(qhp, C4IW_QP_STATE_CLOSING); 1374 set_state(qhp, C4IW_QP_STATE_CLOSING);
1356 ep = qhp->ep; 1375 ep = qhp->ep;
1357 if (!internal) { 1376 if (!internal) {
@@ -1359,30 +1378,30 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1359 disconnect = 1; 1378 disconnect = 1;
1360 c4iw_get_ep(&qhp->ep->com); 1379 c4iw_get_ep(&qhp->ep->com);
1361 } 1380 }
1362 t4_set_wq_in_error(&qhp->wq);
1363 ret = rdma_fini(rhp, qhp, ep); 1381 ret = rdma_fini(rhp, qhp, ep);
1364 if (ret) 1382 if (ret)
1365 goto err; 1383 goto err;
1366 break; 1384 break;
1367 case C4IW_QP_STATE_TERMINATE: 1385 case C4IW_QP_STATE_TERMINATE:
1386 t4_set_wq_in_error(&qhp->wq);
1368 set_state(qhp, C4IW_QP_STATE_TERMINATE); 1387 set_state(qhp, C4IW_QP_STATE_TERMINATE);
1369 qhp->attr.layer_etype = attrs->layer_etype; 1388 qhp->attr.layer_etype = attrs->layer_etype;
1370 qhp->attr.ecode = attrs->ecode; 1389 qhp->attr.ecode = attrs->ecode;
1371 t4_set_wq_in_error(&qhp->wq);
1372 ep = qhp->ep; 1390 ep = qhp->ep;
1373 disconnect = 1; 1391 if (!internal) {
1374 if (!internal) 1392 c4iw_get_ep(&qhp->ep->com);
1375 terminate = 1; 1393 terminate = 1;
1376 else { 1394 disconnect = 1;
1395 } else {
1396 terminate = qhp->attr.send_term;
1377 ret = rdma_fini(rhp, qhp, ep); 1397 ret = rdma_fini(rhp, qhp, ep);
1378 if (ret) 1398 if (ret)
1379 goto err; 1399 goto err;
1380 } 1400 }
1381 c4iw_get_ep(&qhp->ep->com);
1382 break; 1401 break;
1383 case C4IW_QP_STATE_ERROR: 1402 case C4IW_QP_STATE_ERROR:
1384 set_state(qhp, C4IW_QP_STATE_ERROR);
1385 t4_set_wq_in_error(&qhp->wq); 1403 t4_set_wq_in_error(&qhp->wq);
1404 set_state(qhp, C4IW_QP_STATE_ERROR);
1386 if (!internal) { 1405 if (!internal) {
1387 abort = 1; 1406 abort = 1;
1388 disconnect = 1; 1407 disconnect = 1;
@@ -1677,11 +1696,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1677 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); 1696 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1678 insert_mmap(ucontext, mm2); 1697 insert_mmap(ucontext, mm2);
1679 mm3->key = uresp.sq_db_gts_key; 1698 mm3->key = uresp.sq_db_gts_key;
1680 mm3->addr = qhp->wq.sq.udb; 1699 mm3->addr = (__force unsigned long) qhp->wq.sq.udb;
1681 mm3->len = PAGE_SIZE; 1700 mm3->len = PAGE_SIZE;
1682 insert_mmap(ucontext, mm3); 1701 insert_mmap(ucontext, mm3);
1683 mm4->key = uresp.rq_db_gts_key; 1702 mm4->key = uresp.rq_db_gts_key;
1684 mm4->addr = qhp->wq.rq.udb; 1703 mm4->addr = (__force unsigned long) qhp->wq.rq.udb;
1685 mm4->len = PAGE_SIZE; 1704 mm4->len = PAGE_SIZE;
1686 insert_mmap(ucontext, mm4); 1705 insert_mmap(ucontext, mm4);
1687 if (mm5) { 1706 if (mm5) {
@@ -1758,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1758 /* 1777 /*
1759 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for 1778 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
1760 * ringing the queue db when we're in DB_FULL mode. 1779 * ringing the queue db when we're in DB_FULL mode.
1780 * Only allow this on T4 devices.
1761 */ 1781 */
1762 attrs.sq_db_inc = attr->sq_psn; 1782 attrs.sq_db_inc = attr->sq_psn;
1763 attrs.rq_db_inc = attr->rq_psn; 1783 attrs.rq_db_inc = attr->rq_psn;
1764 mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0; 1784 mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
1765 mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0; 1785 mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
1786 if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
1787 (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
1788 return -EINVAL;
1766 1789
1767 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); 1790 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
1768} 1791}
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index cdef4d7fb6d8..67df71a7012e 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -179,8 +179,12 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
179 kfree(entry); 179 kfree(entry);
180 } else { 180 } else {
181 qid = c4iw_get_resource(&rdev->resource.qid_table); 181 qid = c4iw_get_resource(&rdev->resource.qid_table);
182 if (!qid) 182 if (!qid) {
183 mutex_lock(&rdev->stats.lock);
184 rdev->stats.qid.fail++;
185 mutex_unlock(&rdev->stats.lock);
183 goto out; 186 goto out;
187 }
184 mutex_lock(&rdev->stats.lock); 188 mutex_lock(&rdev->stats.lock);
185 rdev->stats.qid.cur += rdev->qpmask + 1; 189 rdev->stats.qid.cur += rdev->qpmask + 1;
186 mutex_unlock(&rdev->stats.lock); 190 mutex_unlock(&rdev->stats.lock);
@@ -322,8 +326,8 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
322 unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); 326 unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
323 PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); 327 PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
324 if (!addr) 328 if (!addr)
325 printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n", 329 pr_warn_ratelimited(MOD "%s: Out of RQT memory\n",
326 pci_name(rdev->lldi.pdev)); 330 pci_name(rdev->lldi.pdev));
327 mutex_lock(&rdev->stats.lock); 331 mutex_lock(&rdev->stats.lock);
328 if (addr) { 332 if (addr) {
329 rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT); 333 rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index eeca8b1e6376..2178f3198410 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -84,7 +84,14 @@ struct t4_status_page {
84 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) 84 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
85#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \ 85#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
86 sizeof(struct fw_ri_immd)) & ~31UL) 86 sizeof(struct fw_ri_immd)) & ~31UL)
87#define T4_MAX_FR_DEPTH (1024 / sizeof(u64)) 87#define T4_MAX_FR_IMMD_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
88#define T4_MAX_FR_DSGL 1024
89#define T4_MAX_FR_DSGL_DEPTH (T4_MAX_FR_DSGL / sizeof(u64))
90
91static inline int t4_max_fr_depth(int use_dsgl)
92{
93 return use_dsgl ? T4_MAX_FR_DSGL_DEPTH : T4_MAX_FR_IMMD_DEPTH;
94}
88 95
89#define T4_RQ_NUM_SLOTS 2 96#define T4_RQ_NUM_SLOTS 2
90#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS) 97#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
@@ -292,7 +299,7 @@ struct t4_sq {
292 unsigned long phys_addr; 299 unsigned long phys_addr;
293 struct t4_swsqe *sw_sq; 300 struct t4_swsqe *sw_sq;
294 struct t4_swsqe *oldest_read; 301 struct t4_swsqe *oldest_read;
295 u64 udb; 302 u64 __iomem *udb;
296 size_t memsize; 303 size_t memsize;
297 u32 qid; 304 u32 qid;
298 u16 in_use; 305 u16 in_use;
@@ -314,7 +321,7 @@ struct t4_rq {
314 dma_addr_t dma_addr; 321 dma_addr_t dma_addr;
315 DEFINE_DMA_UNMAP_ADDR(mapping); 322 DEFINE_DMA_UNMAP_ADDR(mapping);
316 struct t4_swrqe *sw_rq; 323 struct t4_swrqe *sw_rq;
317 u64 udb; 324 u64 __iomem *udb;
318 size_t memsize; 325 size_t memsize;
319 u32 qid; 326 u32 qid;
320 u32 msn; 327 u32 msn;
@@ -435,15 +442,67 @@ static inline u16 t4_sq_wq_size(struct t4_wq *wq)
435 return wq->sq.size * T4_SQ_NUM_SLOTS; 442 return wq->sq.size * T4_SQ_NUM_SLOTS;
436} 443}
437 444
438static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc) 445/* This function copies a 64-byte coalesced work request to memory
446 * mapped BAR2 space. For coalesced WRs, the SGE fetches data
447 * from the FIFO instead of from the host.
448 */
449static inline void pio_copy(u64 __iomem *dst, u64 *src)
450{
451 int count = 8;
452
453 while (count) {
454 writeq(*src, dst);
455 src++;
456 dst++;
457 count--;
458 }
459}
460
461static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
462 union t4_wr *wqe)
439{ 463{
464
465 /* Flush host queue memory writes. */
440 wmb(); 466 wmb();
467 if (t5) {
468 if (inc == 1 && wqe) {
469 PDBG("%s: WC wq->sq.pidx = %d\n",
470 __func__, wq->sq.pidx);
471 pio_copy(wq->sq.udb + 7, (void *)wqe);
472 } else {
473 PDBG("%s: DB wq->sq.pidx = %d\n",
474 __func__, wq->sq.pidx);
475 writel(PIDX_T5(inc), wq->sq.udb);
476 }
477
478 /* Flush user doorbell area writes. */
479 wmb();
480 return;
481 }
441 writel(QID(wq->sq.qid) | PIDX(inc), wq->db); 482 writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
442} 483}
443 484
444static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc) 485static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
486 union t4_recv_wr *wqe)
445{ 487{
488
489 /* Flush host queue memory writes. */
446 wmb(); 490 wmb();
491 if (t5) {
492 if (inc == 1 && wqe) {
493 PDBG("%s: WC wq->rq.pidx = %d\n",
494 __func__, wq->rq.pidx);
495 pio_copy(wq->rq.udb + 7, (void *)wqe);
496 } else {
497 PDBG("%s: DB wq->rq.pidx = %d\n",
498 __func__, wq->rq.pidx);
499 writel(PIDX_T5(inc), wq->rq.udb);
500 }
501
502 /* Flush user doorbell area writes. */
503 wmb();
504 return;
505 }
447 writel(QID(wq->rq.qid) | PIDX(inc), wq->db); 506 writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
448} 507}
449 508
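
A userspace model of pio_copy(): one 64-byte work request is pushed as eight 64-bit stores so the write-combining mapping can emit a single coalesced write. writeq() is a kernel MMIO accessor; a plain store through a volatile pointer stands in for it here.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static void pio_copy_sketch(volatile uint64_t *dst, const uint64_t *src)
    {
        for (int count = 8; count; count--)
            *dst++ = *src++;    /* writeq(*src, dst) in the real code */
    }

    int main(void)
    {
        uint64_t wqe[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };  /* fake 64-byte WR   */
        uint64_t bar2_slot[8] = { 0 };                 /* fake doorbell area */

        pio_copy_sketch(bar2_slot, wqe);
        printf("copied: %d\n", memcmp(bar2_slot, wqe, sizeof(wqe)) == 0);
        return 0;
    }
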
@@ -568,6 +627,9 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
568 printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid); 627 printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
569 BUG_ON(1); 628 BUG_ON(1);
570 } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { 629 } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
630
631 /* Ensure CQE is flushed to memory */
632 rmb();
571 *cqe = &cq->queue[cq->cidx]; 633 *cqe = &cq->queue[cq->cidx];
572 ret = 0; 634 ret = 0;
573 } else 635 } else
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index dc193c292671..6121ca08fe58 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -836,4 +836,18 @@ struct ulptx_idata {
836#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) 836#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
837#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) 837#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
838 838
839enum { /* TCP congestion control algorithms */
840 CONG_ALG_RENO,
841 CONG_ALG_TAHOE,
842 CONG_ALG_NEWRENO,
843 CONG_ALG_HIGHSPEED
844};
845
846#define S_CONG_CNTRL 14
847#define M_CONG_CNTRL 0x3
848#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
849#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
850
851#define T5_OPT_2_VALID (1 << 31)
852
839#endif /* _T4FW_RI_API_H_ */ 853#endif /* _T4FW_RI_API_H_ */
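
A sketch of how the new CONG_CNTRL field packs into opt2, as used in accept_cr() earlier in this patch. The shift, mask, and T5_OPT_2_VALID values are taken from the hunk above; the harness around them is illustrative.

    #include <stdint.h>
    #include <stdio.h>

    enum { CONG_ALG_RENO, CONG_ALG_TAHOE, CONG_ALG_NEWRENO, CONG_ALG_HIGHSPEED };

    #define S_CONG_CNTRL    14
    #define M_CONG_CNTRL    0x3
    #define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
    #define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
    #define T5_OPT_2_VALID  (1U << 31)

    int main(void)
    {
        uint32_t opt2 = 0;

        opt2 |= T5_OPT_2_VALID;
        opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);

        printf("opt2 = 0x%08x, alg = %u\n", opt2, G_CONG_CNTRL(opt2));
        return 0;
    }
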
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1b6dbe156a37..199c7896f081 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -48,6 +48,7 @@
48 48
49#include <linux/mlx4/driver.h> 49#include <linux/mlx4/driver.h>
50#include <linux/mlx4/cmd.h> 50#include <linux/mlx4/cmd.h>
51#include <linux/mlx4/qp.h>
51 52
52#include "mlx4_ib.h" 53#include "mlx4_ib.h"
53#include "user.h" 54#include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
1614} 1615}
1615#endif 1616#endif
1616 1617
1618#define MLX4_IB_INVALID_MAC ((u64)-1)
1619static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
1620 struct net_device *dev,
1621 int port)
1622{
1623 u64 new_smac = 0;
1624 u64 release_mac = MLX4_IB_INVALID_MAC;
1625 struct mlx4_ib_qp *qp;
1626
1627 read_lock(&dev_base_lock);
1628 new_smac = mlx4_mac_to_u64(dev->dev_addr);
1629 read_unlock(&dev_base_lock);
1630
1631 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
1632 qp = ibdev->qp1_proxy[port - 1];
1633 if (qp) {
1634 int new_smac_index;
1635 u64 old_smac = qp->pri.smac;
1636 struct mlx4_update_qp_params update_params;
1637
1638 if (new_smac == old_smac)
1639 goto unlock;
1640
1641 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
1642
1643 if (new_smac_index < 0)
1644 goto unlock;
1645
1646 update_params.smac_index = new_smac_index;
1647 if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
1648 &update_params)) {
1649 release_mac = new_smac;
1650 goto unlock;
1651 }
1652
1653 qp->pri.smac = new_smac;
1654 qp->pri.smac_index = new_smac_index;
1655
1656 release_mac = old_smac;
1657 }
1658
1659unlock:
1660 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
1661 if (release_mac != MLX4_IB_INVALID_MAC)
1662 mlx4_unregister_mac(ibdev->dev, port, release_mac);
1663}
1664
1617static void mlx4_ib_get_dev_addr(struct net_device *dev, 1665static void mlx4_ib_get_dev_addr(struct net_device *dev,
1618 struct mlx4_ib_dev *ibdev, u8 port) 1666 struct mlx4_ib_dev *ibdev, u8 port)
1619{ 1667{
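
A sketch of the MAC update ordering used in mlx4_ib_update_qps() above: register the new MAC before touching the QP, and release a MAC only once the QP is known not to reference it (the old one on success, the new one on failure). register_mac()/update_qp_smac() below are hypothetical stand-ins for mlx4_register_mac()/mlx4_update_qp().

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_MAC ((uint64_t)-1)

    /* Hypothetical stand-ins; always succeed in this sketch. */
    static int register_mac(uint64_t mac) { (void)mac; return 0; }
    static int update_qp_smac(int idx)    { (void)idx; return 0; }

    static void update_smac(uint64_t *qp_smac, uint64_t new_smac)
    {
        uint64_t release = INVALID_MAC;

        if (new_smac == *qp_smac)
            return;
        if (register_mac(new_smac) < 0)
            return;
        if (update_qp_smac(0)) {
            release = new_smac;        /* QP still uses the old MAC */
        } else {
            release = *qp_smac;        /* QP switched; old MAC is free */
            *qp_smac = new_smac;
        }
        if (release != INVALID_MAC)
            printf("release mac 0x%llx\n", (unsigned long long)release);
    }

    int main(void)
    {
        uint64_t smac = 0x001122334455ULL;

        update_smac(&smac, 0x0011223344aaULL);
        return 0;
    }
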
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
1689 return 0; 1737 return 0;
1690} 1738}
1691 1739
1692static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) 1740static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
1741 struct net_device *dev,
1742 unsigned long event)
1743
1693{ 1744{
1694 struct mlx4_ib_iboe *iboe; 1745 struct mlx4_ib_iboe *iboe;
1746 int update_qps_port = -1;
1695 int port; 1747 int port;
1696 1748
1697 iboe = &ibdev->iboe; 1749 iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
1719 } 1771 }
1720 curr_master = iboe->masters[port - 1]; 1772 curr_master = iboe->masters[port - 1];
1721 1773
1774 if (dev == iboe->netdevs[port - 1] &&
1775 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
1776 event == NETDEV_UP || event == NETDEV_CHANGE))
1777 update_qps_port = port;
1778
1722 if (curr_netdev) { 1779 if (curr_netdev) {
1723 port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? 1780 port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
1724 IB_PORT_ACTIVE : IB_PORT_DOWN; 1781 IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
1752 } 1809 }
1753 1810
1754 spin_unlock(&iboe->lock); 1811 spin_unlock(&iboe->lock);
1812
1813 if (update_qps_port > 0)
1814 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
1755} 1815}
1756 1816
1757static int mlx4_ib_netdev_event(struct notifier_block *this, 1817static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
1764 return NOTIFY_DONE; 1824 return NOTIFY_DONE;
1765 1825
1766 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); 1826 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1767 mlx4_ib_scan_netdevs(ibdev); 1827 mlx4_ib_scan_netdevs(ibdev, dev, event);
1768 1828
1769 return NOTIFY_DONE; 1829 return NOTIFY_DONE;
1770} 1830}
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2043 goto err_map; 2103 goto err_map;
2044 2104
2045 for (i = 0; i < ibdev->num_ports; ++i) { 2105 for (i = 0; i < ibdev->num_ports; ++i) {
2106 mutex_init(&ibdev->qp1_proxy_lock[i]);
2046 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == 2107 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2047 IB_LINK_LAYER_ETHERNET) { 2108 IB_LINK_LAYER_ETHERNET) {
2048 err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); 2109 err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
2126 for (i = 1 ; i <= ibdev->num_ports ; ++i) 2187 for (i = 1 ; i <= ibdev->num_ports ; ++i)
2127 reset_gid_table(ibdev, i); 2188 reset_gid_table(ibdev, i);
2128 rtnl_lock(); 2189 rtnl_lock();
2129 mlx4_ib_scan_netdevs(ibdev); 2190 mlx4_ib_scan_netdevs(ibdev, NULL, 0);
2130 rtnl_unlock(); 2191 rtnl_unlock();
2131 mlx4_ib_init_gid_table(ibdev); 2192 mlx4_ib_init_gid_table(ibdev);
2132 } 2193 }
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f589522fddfd..66b0b7dbd9f4 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
522 int steer_qpn_count; 522 int steer_qpn_count;
523 int steer_qpn_base; 523 int steer_qpn_base;
524 int steering_support; 524 int steering_support;
525 struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
526 /* lock when destroying qp1_proxy and getting netdev events */
527 struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
525}; 528};
526 529
527struct ib_event_work { 530struct ib_event_work {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 41308af4163c..dc57482ae7af 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
1132 if (is_qp0(dev, mqp)) 1132 if (is_qp0(dev, mqp))
1133 mlx4_CLOSE_PORT(dev->dev, mqp->port); 1133 mlx4_CLOSE_PORT(dev->dev, mqp->port);
1134 1134
1135 if (dev->qp1_proxy[mqp->port - 1] == mqp) {
1136 mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
1137 dev->qp1_proxy[mqp->port - 1] = NULL;
1138 mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
1139 }
1140
1135 pd = get_pd(mqp); 1141 pd = get_pd(mqp);
1136 destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); 1142 destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
1137 1143
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1646 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); 1652 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
1647 if (err) 1653 if (err)
1648 return -EINVAL; 1654 return -EINVAL;
1655 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
1656 dev->qp1_proxy[qp->port - 1] = qp;
1649 } 1657 }
1650 } 1658 }
1651 } 1659 }
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index fa6dc870adae..364d4b6937f5 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -282,6 +282,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
282 props->sig_guard_cap = IB_GUARD_T10DIF_CRC | 282 props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
283 IB_GUARD_T10DIF_CSUM; 283 IB_GUARD_T10DIF_CSUM;
284 } 284 }
285 if (flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)
286 props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
285 287
286 props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 288 props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
287 0xffffff; 289 0xffffff;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ae788d27b93f..dc930ed21eca 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -807,6 +807,15 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
807 spin_lock_init(&qp->sq.lock); 807 spin_lock_init(&qp->sq.lock);
808 spin_lock_init(&qp->rq.lock); 808 spin_lock_init(&qp->rq.lock);
809 809
810 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
811 if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
812 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
813 return -EINVAL;
814 } else {
815 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
816 }
817 }
818
810 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 819 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
811 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 820 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
812 821
@@ -878,6 +887,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
878 if (qp->wq_sig) 887 if (qp->wq_sig)
879 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG); 888 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
880 889
890 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
891 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
892
881 if (qp->scat_cqe && is_connected(init_attr->qp_type)) { 893 if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
882 int rcqe_sz; 894 int rcqe_sz;
883 int scqe_sz; 895 int scqe_sz;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 87897b95666d..ded76c101dde 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -858,13 +858,9 @@ static int mthca_enable_msi_x(struct mthca_dev *mdev)
858 entries[1].entry = 1; 858 entries[1].entry = 1;
859 entries[2].entry = 2; 859 entries[2].entry = 2;
860 860
861 err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries)); 861 err = pci_enable_msix_exact(mdev->pdev, entries, ARRAY_SIZE(entries));
862 if (err) { 862 if (err)
863 if (err > 0)
864 mthca_info(mdev, "Only %d MSI-X vectors available, "
865 "not using MSI-X\n", err);
866 return err; 863 return err;
867 }
868 864
869 mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector; 865 mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
870 mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector; 866 mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index c8d9c4ab142b..61a0046efb76 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -197,46 +197,47 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
197 struct qib_msix_entry *qib_msix_entry) 197 struct qib_msix_entry *qib_msix_entry)
198{ 198{
199 int ret; 199 int ret;
200 u32 tabsize = 0; 200 int nvec = *msixcnt;
201 u16 msix_flags;
202 struct msix_entry *msix_entry; 201 struct msix_entry *msix_entry;
203 int i; 202 int i;
204 203
204 ret = pci_msix_vec_count(dd->pcidev);
205 if (ret < 0)
206 goto do_intx;
207
208 nvec = min(nvec, ret);
209
205 /* We can't pass qib_msix_entry array to qib_msix_setup 210 /* We can't pass qib_msix_entry array to qib_msix_setup
206 * so use a dummy msix_entry array and copy the allocated 211 * so use a dummy msix_entry array and copy the allocated
207 * irq back to the qib_msix_entry array. */ 212 * irq back to the qib_msix_entry array. */
208 msix_entry = kmalloc(*msixcnt * sizeof(*msix_entry), GFP_KERNEL); 213 msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL);
209 if (!msix_entry) { 214 if (!msix_entry)
210 ret = -ENOMEM;
211 goto do_intx; 215 goto do_intx;
212 } 216
213 for (i = 0; i < *msixcnt; i++) 217 for (i = 0; i < nvec; i++)
214 msix_entry[i] = qib_msix_entry[i].msix; 218 msix_entry[i] = qib_msix_entry[i].msix;
215 219
216 pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags); 220 ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec);
217 tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE); 221 if (ret < 0)
218 if (tabsize > *msixcnt) 222 goto free_msix_entry;
219 tabsize = *msixcnt; 223 else
220 ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize); 224 nvec = ret;
221 if (ret > 0) { 225
222 tabsize = ret; 226 for (i = 0; i < nvec; i++)
223 ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
224 }
225do_intx:
226 if (ret) {
227 qib_dev_err(dd,
228 "pci_enable_msix %d vectors failed: %d, falling back to INTx\n",
229 tabsize, ret);
230 tabsize = 0;
231 }
232 for (i = 0; i < tabsize; i++)
233 qib_msix_entry[i].msix = msix_entry[i]; 227 qib_msix_entry[i].msix = msix_entry[i];
228
234 kfree(msix_entry); 229 kfree(msix_entry);
235 *msixcnt = tabsize; 230 *msixcnt = nvec;
231 return;
236 232
237 if (ret) 233free_msix_entry:
238 qib_enable_intx(dd->pcidev); 234 kfree(msix_entry);
239 235
236do_intx:
237 qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, "
238 "falling back to INTx\n", nvec, ret);
239 *msixcnt = 0;
240 qib_enable_intx(dd->pcidev);
240} 241}
241 242
242/** 243/**
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index c98fdb185931..a1710465faaf 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -28,6 +28,7 @@
28#include <target/target_core_base.h> 28#include <target/target_core_base.h>
29#include <target/target_core_fabric.h> 29#include <target/target_core_fabric.h>
30#include <target/iscsi/iscsi_transport.h> 30#include <target/iscsi/iscsi_transport.h>
31#include <linux/semaphore.h>
31 32
32#include "isert_proto.h" 33#include "isert_proto.h"
33#include "ib_isert.h" 34#include "ib_isert.h"
@@ -561,7 +562,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
561 struct isert_device *device; 562 struct isert_device *device;
562 struct ib_device *ib_dev = cma_id->device; 563 struct ib_device *ib_dev = cma_id->device;
563 int ret = 0; 564 int ret = 0;
564 u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; 565 u8 pi_support;
566
567 spin_lock_bh(&np->np_thread_lock);
568 if (!np->enabled) {
569 spin_unlock_bh(&np->np_thread_lock);
570 pr_debug("iscsi_np is not enabled, reject connect request\n");
571 return rdma_reject(cma_id, NULL, 0);
572 }
573 spin_unlock_bh(&np->np_thread_lock);
565 574
566 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", 575 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
567 cma_id, cma_id->context); 576 cma_id, cma_id->context);
@@ -652,6 +661,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
652 goto out_mr; 661 goto out_mr;
653 } 662 }
654 663
664 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
655 if (pi_support && !device->pi_capable) { 665 if (pi_support && !device->pi_capable) {
656 pr_err("Protection information requested but not supported\n"); 666 pr_err("Protection information requested but not supported\n");
657 ret = -EINVAL; 667 ret = -EINVAL;
@@ -663,11 +673,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
663 goto out_conn_dev; 673 goto out_conn_dev;
664 674
665 mutex_lock(&isert_np->np_accept_mutex); 675 mutex_lock(&isert_np->np_accept_mutex);
666 list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node); 676 list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
667 mutex_unlock(&isert_np->np_accept_mutex); 677 mutex_unlock(&isert_np->np_accept_mutex);
668 678
669 pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np); 679 pr_debug("isert_connect_request() up np_sem np: %p\n", np);
670 wake_up(&isert_np->np_accept_wq); 680 up(&isert_np->np_sem);
671 return 0; 681 return 0;
672 682
673out_conn_dev: 683out_conn_dev:
@@ -2999,7 +3009,7 @@ isert_setup_np(struct iscsi_np *np,
2999 pr_err("Unable to allocate struct isert_np\n"); 3009 pr_err("Unable to allocate struct isert_np\n");
3000 return -ENOMEM; 3010 return -ENOMEM;
3001 } 3011 }
3002 init_waitqueue_head(&isert_np->np_accept_wq); 3012 sema_init(&isert_np->np_sem, 0);
3003 mutex_init(&isert_np->np_accept_mutex); 3013 mutex_init(&isert_np->np_accept_mutex);
3004 INIT_LIST_HEAD(&isert_np->np_accept_list); 3014 INIT_LIST_HEAD(&isert_np->np_accept_list);
3005 init_completion(&isert_np->np_login_comp); 3015 init_completion(&isert_np->np_login_comp);
@@ -3048,18 +3058,6 @@ out:
3048} 3058}
3049 3059
3050static int 3060static int
3051isert_check_accept_queue(struct isert_np *isert_np)
3052{
3053 int empty;
3054
3055 mutex_lock(&isert_np->np_accept_mutex);
3056 empty = list_empty(&isert_np->np_accept_list);
3057 mutex_unlock(&isert_np->np_accept_mutex);
3058
3059 return empty;
3060}
3061
3062static int
3063isert_rdma_accept(struct isert_conn *isert_conn) 3061isert_rdma_accept(struct isert_conn *isert_conn)
3064{ 3062{
3065 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; 3063 struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
@@ -3151,16 +3149,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
3151 int max_accept = 0, ret; 3149 int max_accept = 0, ret;
3152 3150
3153accept_wait: 3151accept_wait:
3154 ret = wait_event_interruptible(isert_np->np_accept_wq, 3152 ret = down_interruptible(&isert_np->np_sem);
3155 !isert_check_accept_queue(isert_np) ||
3156 np->np_thread_state == ISCSI_NP_THREAD_RESET);
3157 if (max_accept > 5) 3153 if (max_accept > 5)
3158 return -ENODEV; 3154 return -ENODEV;
3159 3155
3160 spin_lock_bh(&np->np_thread_lock); 3156 spin_lock_bh(&np->np_thread_lock);
3161 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 3157 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
3162 spin_unlock_bh(&np->np_thread_lock); 3158 spin_unlock_bh(&np->np_thread_lock);
3163 pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); 3159 pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
3164 return -ENODEV; 3160 return -ENODEV;
3165 } 3161 }
3166 spin_unlock_bh(&np->np_thread_lock); 3162 spin_unlock_bh(&np->np_thread_lock);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 4c072ae34c01..da6612e68000 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -182,7 +182,7 @@ struct isert_device {
182}; 182};
183 183
184struct isert_np { 184struct isert_np {
185 wait_queue_head_t np_accept_wq; 185 struct semaphore np_sem;
186 struct rdma_cm_id *np_cm_id; 186 struct rdma_cm_id *np_cm_id;
187 struct mutex np_accept_mutex; 187 struct mutex np_accept_mutex;
188 struct list_head np_accept_list; 188 struct list_head np_accept_list;
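
A userspace analogue of the waitqueue-to-semaphore conversion above, using POSIX semaphores: each queued connect request does one post (up() in the kernel code) and the accept path does one wait (down_interruptible()), so no separate list-empty predicate is needed.

    #include <semaphore.h>
    #include <stdio.h>

    int main(void)
    {
        sem_t np_sem;

        sem_init(&np_sem, 0, 0);

        /* Producer side: a connect request was queued. */
        sem_post(&np_sem);

        /* Consumer side: accept path blocks until at least one is queued. */
        sem_wait(&np_sem);
        printf("accepted one queued connection\n");

        sem_destroy(&np_sem);
        return 0;
    }
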
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 76842d7dc2e3..ffc7ad3a2c88 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -71,7 +71,7 @@ config KEYBOARD_ATKBD
71 default y 71 default y
72 select SERIO 72 select SERIO
73 select SERIO_LIBPS2 73 select SERIO_LIBPS2
74 select SERIO_I8042 if X86 74 select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
75 select SERIO_GSCPS2 if GSC 75 select SERIO_GSCPS2 if GSC
76 help 76 help
77 Say Y here if you want to use a standard AT or PS/2 keyboard. Usually 77 Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 2626773ff29b..2dd1d0dd4f7d 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -243,6 +243,12 @@ static void (*atkbd_platform_fixup)(struct atkbd *, const void *data);
243static void *atkbd_platform_fixup_data; 243static void *atkbd_platform_fixup_data;
244static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int); 244static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int);
245 245
246/*
247 * Certain keyboards do not like ATKBD_CMD_RESET_DIS and stop responding
248 * to many commands until a full reset (ATKBD_CMD_RESET_BAT) is performed.
249 */
250static bool atkbd_skip_deactivate;
251
246static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, 252static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
247 ssize_t (*handler)(struct atkbd *, char *)); 253 ssize_t (*handler)(struct atkbd *, char *));
248static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count, 254static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
@@ -768,7 +774,8 @@ static int atkbd_probe(struct atkbd *atkbd)
768 * Make sure nothing is coming from the keyboard and disturbs our 774 * Make sure nothing is coming from the keyboard and disturbs our
769 * internal state. 775 * internal state.
770 */ 776 */
771 atkbd_deactivate(atkbd); 777 if (!atkbd_skip_deactivate)
778 atkbd_deactivate(atkbd);
772 779
773 return 0; 780 return 0;
774} 781}
@@ -1638,6 +1645,12 @@ static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id)
1638 return 1; 1645 return 1;
1639} 1646}
1640 1647
1648static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id)
1649{
1650 atkbd_skip_deactivate = true;
1651 return 1;
1652}
1653
1641static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = { 1654static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
1642 { 1655 {
1643 .matches = { 1656 .matches = {
@@ -1775,6 +1788,20 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
1775 .callback = atkbd_setup_scancode_fixup, 1788 .callback = atkbd_setup_scancode_fixup,
1776 .driver_data = atkbd_oqo_01plus_scancode_fixup, 1789 .driver_data = atkbd_oqo_01plus_scancode_fixup,
1777 }, 1790 },
1791 {
1792 .matches = {
1793 DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
1794 DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
1795 },
1796 .callback = atkbd_deactivate_fixup,
1797 },
1798 {
1799 .matches = {
1800 DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
1801 DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
1802 },
1803 .callback = atkbd_deactivate_fixup,
1804 },
1778 { } 1805 { }
1779}; 1806};
1780 1807
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index d8241ba0afa0..a15063bea700 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -111,6 +111,8 @@ struct pxa27x_keypad {
111 unsigned short keycodes[MAX_KEYPAD_KEYS]; 111 unsigned short keycodes[MAX_KEYPAD_KEYS];
112 int rotary_rel_code[2]; 112 int rotary_rel_code[2];
113 113
114 unsigned int row_shift;
115
114 /* state row bits of each column scan */ 116 /* state row bits of each column scan */
115 uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS]; 117 uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
116 uint32_t direct_key_state; 118 uint32_t direct_key_state;
@@ -467,7 +469,8 @@ scan:
467 if ((bits_changed & (1 << row)) == 0) 469 if ((bits_changed & (1 << row)) == 0)
468 continue; 470 continue;
469 471
470 code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT); 472 code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
473
471 input_event(input_dev, EV_MSC, MSC_SCAN, code); 474 input_event(input_dev, EV_MSC, MSC_SCAN, code);
472 input_report_key(input_dev, keypad->keycodes[code], 475 input_report_key(input_dev, keypad->keycodes[code],
473 new_state[col] & (1 << row)); 476 new_state[col] & (1 << row));
@@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
802 goto failed_put_clk; 805 goto failed_put_clk;
803 } 806 }
804 807
808 keypad->row_shift = get_count_order(pdata->matrix_key_cols);
809
805 if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) || 810 if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) ||
806 (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) { 811 (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) {
807 input_dev->evbit[0] |= BIT_MASK(EV_REL); 812 input_dev->evbit[0] |= BIT_MASK(EV_REL);
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 55c15304ddbc..4e491c1762cf 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -392,6 +392,13 @@ static const struct of_device_id tca8418_dt_ids[] = {
392 { } 392 { }
393}; 393};
394MODULE_DEVICE_TABLE(of, tca8418_dt_ids); 394MODULE_DEVICE_TABLE(of, tca8418_dt_ids);
395
396/*
397 * The device tree based i2c loader looks for
398 * "i2c:" + second_component_of(property("compatible"))
399 * and therefore we need an alias to be found.
400 */
401MODULE_ALIAS("i2c:tca8418");
395#endif 402#endif
396 403
397static struct i2c_driver tca8418_keypad_driver = { 404static struct i2c_driver tca8418_keypad_driver = {
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 52d3a9b28f0b..b36831c828d3 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -70,6 +70,7 @@
70#define BMA150_CFG_5_REG 0x11 70#define BMA150_CFG_5_REG 0x11
71 71
72#define BMA150_CHIP_ID 2 72#define BMA150_CHIP_ID 2
73#define BMA180_CHIP_ID 3
73#define BMA150_CHIP_ID_REG BMA150_DATA_0_REG 74#define BMA150_CHIP_ID_REG BMA150_DATA_0_REG
74 75
75#define BMA150_ACC_X_LSB_REG BMA150_DATA_2_REG 76#define BMA150_ACC_X_LSB_REG BMA150_DATA_2_REG
@@ -539,7 +540,7 @@ static int bma150_probe(struct i2c_client *client,
539 } 540 }
540 541
541 chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG); 542 chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG);
542 if (chip_id != BMA150_CHIP_ID) { 543 if (chip_id != BMA150_CHIP_ID && chip_id != BMA180_CHIP_ID) {
543 dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id); 544 dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id);
544 return -EINVAL; 545 return -EINVAL;
545 } 546 }
@@ -643,6 +644,7 @@ static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);
643 644
644static const struct i2c_device_id bma150_id[] = { 645static const struct i2c_device_id bma150_id[] = {
645 { "bma150", 0 }, 646 { "bma150", 0 },
647 { "bma180", 0 },
646 { "smb380", 0 }, 648 { "smb380", 0 },
647 { "bma023", 0 }, 649 { "bma023", 0 },
648 { } 650 { }
diff --git a/drivers/input/misc/da9055_onkey.c b/drivers/input/misc/da9055_onkey.c
index 4b11ede34950..4765799fef74 100644
--- a/drivers/input/misc/da9055_onkey.c
+++ b/drivers/input/misc/da9055_onkey.c
@@ -109,7 +109,6 @@ static int da9055_onkey_probe(struct platform_device *pdev)
109 109
110 INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work); 110 INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work);
111 111
112 irq = regmap_irq_get_virq(da9055->irq_data, irq);
113 err = request_threaded_irq(irq, NULL, da9055_onkey_irq, 112 err = request_threaded_irq(irq, NULL, da9055_onkey_irq,
114 IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 113 IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
115 "ONKEY", onkey); 114 "ONKEY", onkey);
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 08ead2aaede5..20c80f543d5e 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -169,6 +169,7 @@ static int soc_button_pnp_probe(struct pnp_dev *pdev,
169 soc_button_remove(pdev); 169 soc_button_remove(pdev);
170 return error; 170 return error;
171 } 171 }
172 continue;
172 } 173 }
173 174
174 priv->children[i] = pd; 175 priv->children[i] = pd;
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index effa9c5f2c5c..6b8441f7bc32 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -17,7 +17,7 @@ config MOUSE_PS2
17 default y 17 default y
18 select SERIO 18 select SERIO
19 select SERIO_LIBPS2 19 select SERIO_LIBPS2
20 select SERIO_I8042 if X86 20 select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
21 select SERIO_GSCPS2 if GSC 21 select SERIO_GSCPS2 if GSC
22 help 22 help
23 Say Y here if you have a PS/2 mouse connected to your system. This 23 Say Y here if you have a PS/2 mouse connected to your system. This
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index ef1cf52f8bb9..b96e978a37b7 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/dmi.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/input.h> 17#include <linux/input.h>
@@ -831,7 +832,11 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
831 break; 832 break;
832 833
833 case 3: 834 case 3:
834 etd->reg_10 = 0x0b; 835 if (etd->set_hw_resolution)
836 etd->reg_10 = 0x0b;
837 else
838 etd->reg_10 = 0x03;
839
835 if (elantech_write_reg(psmouse, 0x10, etd->reg_10)) 840 if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
836 rc = -1; 841 rc = -1;
837 842
@@ -1331,6 +1336,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
1331} 1336}
1332 1337
1333/* 1338/*
1339 * Some hw_version 3 models go into an error state when we try to set bit 3 of r10
1340 */
1341static const struct dmi_system_id no_hw_res_dmi_table[] = {
1342#if defined(CONFIG_DMI) && defined(CONFIG_X86)
1343 {
1344 /* Gigabyte U2442 */
1345 .matches = {
1346 DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
1347 DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
1348 },
1349 },
1350#endif
1351 { }
1352};
1353
1354/*
1334 * determine hardware version and set some properties according to it. 1355 * determine hardware version and set some properties according to it.
1335 */ 1356 */
1336static int elantech_set_properties(struct elantech_data *etd) 1357static int elantech_set_properties(struct elantech_data *etd)
@@ -1353,6 +1374,7 @@ static int elantech_set_properties(struct elantech_data *etd)
1353 case 6: 1374 case 6:
1354 case 7: 1375 case 7:
1355 case 8: 1376 case 8:
1377 case 9:
1356 etd->hw_version = 4; 1378 etd->hw_version = 4;
1357 break; 1379 break;
1358 default: 1380 default:
@@ -1389,6 +1411,9 @@ static int elantech_set_properties(struct elantech_data *etd)
1389 */ 1411 */
1390 etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000); 1412 etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
1391 1413
1414 /* Enable real hardware resolution on hw_version 3 ? */
1415 etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
1416
1392 return 0; 1417 return 0;
1393} 1418}
1394 1419
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index 036a04abaef7..9e0e2a1f340d 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -130,6 +130,7 @@ struct elantech_data {
130 bool jumpy_cursor; 130 bool jumpy_cursor;
131 bool reports_pressure; 131 bool reports_pressure;
132 bool crc_enabled; 132 bool crc_enabled;
133 bool set_hw_resolution;
133 unsigned char hw_version; 134 unsigned char hw_version;
134 unsigned int fw_version; 135 unsigned int fw_version;
135 unsigned int single_finger_reports; 136 unsigned int single_finger_reports;
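
Aside, not part of the diff above: the Elantech change keys its quirk off DMI vendor/product strings, and the same strings the kernel compares with DMI_MATCH() are exported to userspace by the dmi-id driver. A minimal, hedged sketch of checking a host against an entry like the Gigabyte U2442 one from userspace — the /sys/class/dmi/id paths assume CONFIG_DMI and the dmi-id support are enabled:

#include <stdio.h>
#include <string.h>

/* Read one DMI identification string exported by the dmi-id driver. */
static int read_dmi(const char *name, char *buf, size_t len)
{
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/class/dmi/id/%s", name);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (!fgets(buf, (int)len, f)) {
                fclose(f);
                return -1;
        }
        buf[strcspn(buf, "\n")] = '\0';
        fclose(f);
        return 0;
}

int main(void)
{
        char vendor[64], product[64];

        if (read_dmi("sys_vendor", vendor, sizeof(vendor)) ||
            read_dmi("product_name", product, sizeof(product)))
                return 1;

        /* DMI_MATCH() uses substring matching, mirrored here with strstr(). */
        if (strstr(vendor, "GIGABYTE") && strstr(product, "U2442"))
                printf("would match the no-hw-resolution quirk entry\n");
        else
                printf("%s / %s: no quirk\n", vendor, product);
        return 0;
}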
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index d8d49d10f9bb..c5ec703c727e 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -117,6 +117,81 @@ void synaptics_reset(struct psmouse *psmouse)
117} 117}
118 118
119#ifdef CONFIG_MOUSE_PS2_SYNAPTICS 119#ifdef CONFIG_MOUSE_PS2_SYNAPTICS
120struct min_max_quirk {
121 const char * const *pnp_ids;
122 int x_min, x_max, y_min, y_max;
123};
124
125static const struct min_max_quirk min_max_pnpid_table[] = {
126 {
127 (const char * const []){"LEN0033", NULL},
128 1024, 5052, 2258, 4832
129 },
130 {
131 (const char * const []){"LEN0035", "LEN0042", NULL},
132 1232, 5710, 1156, 4696
133 },
134 {
135 (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
136 1024, 5112, 2024, 4832
137 },
138 {
139 (const char * const []){"LEN2001", NULL},
140 1024, 5022, 2508, 4832
141 },
142 { }
143};
144
145/* This list has been kindly provided by Synaptics. */
146static const char * const topbuttonpad_pnp_ids[] = {
147 "LEN0017",
148 "LEN0018",
149 "LEN0019",
150 "LEN0023",
151 "LEN002A",
152 "LEN002B",
153 "LEN002C",
154 "LEN002D",
155 "LEN002E",
156 "LEN0033", /* Helix */
157 "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
158 "LEN0035", /* X240 */
159 "LEN0036", /* T440 */
160 "LEN0037",
161 "LEN0038",
162 "LEN0041",
163 "LEN0042", /* Yoga */
164 "LEN0045",
165 "LEN0046",
166 "LEN0047",
167 "LEN0048",
168 "LEN0049",
169 "LEN2000",
170 "LEN2001", /* Edge E431 */
171 "LEN2002",
172 "LEN2003",
173 "LEN2004", /* L440 */
174 "LEN2005",
175 "LEN2006",
176 "LEN2007",
177 "LEN2008",
178 "LEN2009",
179 "LEN200A",
180 "LEN200B",
181 NULL
182};
183
184static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
185{
186 int i;
187
188 if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
189 for (i = 0; ids[i]; i++)
190 if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
191 return true;
192
193 return false;
194}
120 195
121/***************************************************************************** 196/*****************************************************************************
122 * Synaptics communications functions 197 * Synaptics communications functions
@@ -266,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse)
266 * Resolution is left zero if touchpad does not support the query 341 * Resolution is left zero if touchpad does not support the query
267 */ 342 */
268 343
269static const int *quirk_min_max;
270
271static int synaptics_resolution(struct psmouse *psmouse) 344static int synaptics_resolution(struct psmouse *psmouse)
272{ 345{
273 struct synaptics_data *priv = psmouse->private; 346 struct synaptics_data *priv = psmouse->private;
274 unsigned char resp[3]; 347 unsigned char resp[3];
348 int i;
275 349
276 if (quirk_min_max) { 350 for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
277 priv->x_min = quirk_min_max[0]; 351 if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
278 priv->x_max = quirk_min_max[1]; 352 priv->x_min = min_max_pnpid_table[i].x_min;
279 priv->y_min = quirk_min_max[2]; 353 priv->x_max = min_max_pnpid_table[i].x_max;
280 priv->y_max = quirk_min_max[3]; 354 priv->y_min = min_max_pnpid_table[i].y_min;
281 return 0; 355 priv->y_max = min_max_pnpid_table[i].y_max;
282 } 356 return 0;
357 }
283 358
284 if (SYN_ID_MAJOR(priv->identity) < 4) 359 if (SYN_ID_MAJOR(priv->identity) < 4)
285 return 0; 360 return 0;
@@ -1255,8 +1330,10 @@ static void set_abs_position_params(struct input_dev *dev,
1255 input_abs_set_res(dev, y_code, priv->y_res); 1330 input_abs_set_res(dev, y_code, priv->y_res);
1256} 1331}
1257 1332
1258static void set_input_params(struct input_dev *dev, struct synaptics_data *priv) 1333static void set_input_params(struct psmouse *psmouse,
1334 struct synaptics_data *priv)
1259{ 1335{
1336 struct input_dev *dev = psmouse->dev;
1260 int i; 1337 int i;
1261 1338
1262 /* Things that apply to both modes */ 1339 /* Things that apply to both modes */
@@ -1325,6 +1402,8 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
1325 1402
1326 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { 1403 if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
1327 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); 1404 __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
1405 if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
1406 __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
1328 /* Clickpads report only left button */ 1407 /* Clickpads report only left button */
1329 __clear_bit(BTN_RIGHT, dev->keybit); 1408 __clear_bit(BTN_RIGHT, dev->keybit);
1330 __clear_bit(BTN_MIDDLE, dev->keybit); 1409 __clear_bit(BTN_MIDDLE, dev->keybit);
@@ -1496,54 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
1496 { } 1575 { }
1497}; 1576};
1498 1577
1499static const struct dmi_system_id min_max_dmi_table[] __initconst = {
1500#if defined(CONFIG_DMI)
1501 {
1502 /* Lenovo ThinkPad Helix */
1503 .matches = {
1504 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1505 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
1506 },
1507 .driver_data = (int []){1024, 5052, 2258, 4832},
1508 },
1509 {
1510 /* Lenovo ThinkPad X240 */
1511 .matches = {
1512 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1513 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
1514 },
1515 .driver_data = (int []){1232, 5710, 1156, 4696},
1516 },
1517 {
1518 /* Lenovo ThinkPad T440s */
1519 .matches = {
1520 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1521 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
1522 },
1523 .driver_data = (int []){1024, 5112, 2024, 4832},
1524 },
1525 {
1526 /* Lenovo ThinkPad T540p */
1527 .matches = {
1528 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1529 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
1530 },
1531 .driver_data = (int []){1024, 5056, 2058, 4832},
1532 },
1533#endif
1534 { }
1535};
1536
1537void __init synaptics_module_init(void) 1578void __init synaptics_module_init(void)
1538{ 1579{
1539 const struct dmi_system_id *min_max_dmi;
1540
1541 impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); 1580 impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
1542 broken_olpc_ec = dmi_check_system(olpc_dmi_table); 1581 broken_olpc_ec = dmi_check_system(olpc_dmi_table);
1543
1544 min_max_dmi = dmi_first_match(min_max_dmi_table);
1545 if (min_max_dmi)
1546 quirk_min_max = min_max_dmi->driver_data;
1547} 1582}
1548 1583
1549static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) 1584static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
@@ -1593,7 +1628,7 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
1593 priv->capabilities, priv->ext_cap, priv->ext_cap_0c, 1628 priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
1594 priv->board_id, priv->firmware_id); 1629 priv->board_id, priv->firmware_id);
1595 1630
1596 set_input_params(psmouse->dev, priv); 1631 set_input_params(psmouse, priv);
1597 1632
1598 /* 1633 /*
1599 * Encode touchpad model so that it can be used to set 1634 * Encode touchpad model so that it can be used to set
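
For reference, the INPUT_PROP_TOPBUTTONPAD property set in the hunk above is visible to userspace through the evdev EVIOCGPROP ioctl, which is how X drivers and libinput decide whether to emulate the top software buttons. A minimal sketch, assuming userspace input headers new enough to define INPUT_PROP_TOPBUTTONPAD and a hypothetical /dev/input/event5 node for the touchpad:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

#define LONG_BITS (8 * sizeof(unsigned long))

/* Test one property bit in the bitmap returned by EVIOCGPROP. */
static int prop_set(const unsigned long *bits, unsigned int prop)
{
        return !!(bits[prop / LONG_BITS] & (1UL << (prop % LONG_BITS)));
}

int main(void)
{
        unsigned long props[INPUT_PROP_MAX / LONG_BITS + 1];
        int fd = open("/dev/input/event5", O_RDONLY); /* hypothetical node */

        if (fd < 0)
                return 1;

        memset(props, 0, sizeof(props));
        if (ioctl(fd, EVIOCGPROP(sizeof(props)), props) < 0) {
                close(fd);
                return 1;
        }

        printf("buttonpad=%d topbuttonpad=%d\n",
               prop_set(props, INPUT_PROP_BUTTONPAD),
               prop_set(props, INPUT_PROP_TOPBUTTONPAD));
        close(fd);
        return 0;
}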
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 762b08432de0..8b748d99b934 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io)
79 writeb(divisor, KMICLKDIV); 79 writeb(divisor, KMICLKDIV);
80 writeb(KMICR_EN, KMICR); 80 writeb(KMICR_EN, KMICR);
81 81
82 ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi); 82 ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050",
83 kmi);
83 if (ret) { 84 if (ret) {
84 printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq); 85 printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq);
85 writeb(0, KMICR); 86 writeb(0, KMICR);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 0ec9abbe31fe..381b20d4c561 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -702,6 +702,17 @@ static int i8042_pnp_aux_irq;
702static char i8042_pnp_kbd_name[32]; 702static char i8042_pnp_kbd_name[32];
703static char i8042_pnp_aux_name[32]; 703static char i8042_pnp_aux_name[32];
704 704
705static void i8042_pnp_id_to_string(struct pnp_id *id, char *dst, int dst_size)
706{
707 strlcpy(dst, "PNP:", dst_size);
708
709 while (id) {
710 strlcat(dst, " ", dst_size);
711 strlcat(dst, id->id, dst_size);
712 id = id->next;
713 }
714}
715
705static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *did) 716static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *did)
706{ 717{
707 if (pnp_port_valid(dev, 0) && pnp_port_len(dev, 0) == 1) 718 if (pnp_port_valid(dev, 0) && pnp_port_len(dev, 0) == 1)
@@ -718,6 +729,8 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
718 strlcat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name)); 729 strlcat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name));
719 strlcat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name)); 730 strlcat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name));
720 } 731 }
732 i8042_pnp_id_to_string(dev->id, i8042_kbd_firmware_id,
733 sizeof(i8042_kbd_firmware_id));
721 734
722 /* Keyboard ports are always supposed to be wakeup-enabled */ 735 /* Keyboard ports are always supposed to be wakeup-enabled */
723 device_set_wakeup_enable(&dev->dev, true); 736 device_set_wakeup_enable(&dev->dev, true);
@@ -742,6 +755,8 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
742 strlcat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name)); 755 strlcat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name));
743 strlcat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name)); 756 strlcat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name));
744 } 757 }
758 i8042_pnp_id_to_string(dev->id, i8042_aux_firmware_id,
759 sizeof(i8042_aux_firmware_id));
745 760
746 i8042_pnp_aux_devices++; 761 i8042_pnp_aux_devices++;
747 return 0; 762 return 0;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 020053fa5aaa..3807c3e971cc 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -87,6 +87,8 @@ MODULE_PARM_DESC(debug, "Turn i8042 debugging mode on and off");
87#endif 87#endif
88 88
89static bool i8042_bypass_aux_irq_test; 89static bool i8042_bypass_aux_irq_test;
90static char i8042_kbd_firmware_id[128];
91static char i8042_aux_firmware_id[128];
90 92
91#include "i8042.h" 93#include "i8042.h"
92 94
@@ -1218,6 +1220,8 @@ static int __init i8042_create_kbd_port(void)
1218 serio->dev.parent = &i8042_platform_device->dev; 1220 serio->dev.parent = &i8042_platform_device->dev;
1219 strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name)); 1221 strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
1220 strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys)); 1222 strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
1223 strlcpy(serio->firmware_id, i8042_kbd_firmware_id,
1224 sizeof(serio->firmware_id));
1221 1225
1222 port->serio = serio; 1226 port->serio = serio;
1223 port->irq = I8042_KBD_IRQ; 1227 port->irq = I8042_KBD_IRQ;
@@ -1244,6 +1248,8 @@ static int __init i8042_create_aux_port(int idx)
1244 if (idx < 0) { 1248 if (idx < 0) {
1245 strlcpy(serio->name, "i8042 AUX port", sizeof(serio->name)); 1249 strlcpy(serio->name, "i8042 AUX port", sizeof(serio->name));
1246 strlcpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys)); 1250 strlcpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys));
1251 strlcpy(serio->firmware_id, i8042_aux_firmware_id,
1252 sizeof(serio->firmware_id));
1247 serio->close = i8042_port_close; 1253 serio->close = i8042_port_close;
1248 } else { 1254 } else {
1249 snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx); 1255 snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx);
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 8f4c4ab04bc2..b29134de983b 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -451,6 +451,13 @@ static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *
451 return retval; 451 return retval;
452} 452}
453 453
454static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
455{
456 struct serio *serio = to_serio_port(dev);
457
458 return sprintf(buf, "%s\n", serio->firmware_id);
459}
460
454static DEVICE_ATTR_RO(type); 461static DEVICE_ATTR_RO(type);
455static DEVICE_ATTR_RO(proto); 462static DEVICE_ATTR_RO(proto);
456static DEVICE_ATTR_RO(id); 463static DEVICE_ATTR_RO(id);
@@ -473,12 +480,14 @@ static DEVICE_ATTR_RO(modalias);
473static DEVICE_ATTR_WO(drvctl); 480static DEVICE_ATTR_WO(drvctl);
474static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL); 481static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
475static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode); 482static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
483static DEVICE_ATTR_RO(firmware_id);
476 484
477static struct attribute *serio_device_attrs[] = { 485static struct attribute *serio_device_attrs[] = {
478 &dev_attr_modalias.attr, 486 &dev_attr_modalias.attr,
479 &dev_attr_description.attr, 487 &dev_attr_description.attr,
480 &dev_attr_drvctl.attr, 488 &dev_attr_drvctl.attr,
481 &dev_attr_bind_mode.attr, 489 &dev_attr_bind_mode.attr,
490 &dev_attr_firmware_id.attr,
482 NULL 491 NULL
483}; 492};
484 493
@@ -921,9 +930,14 @@ static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
921 SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto); 930 SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
922 SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id); 931 SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
923 SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra); 932 SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);
933
924 SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X", 934 SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
925 serio->id.type, serio->id.proto, serio->id.id, serio->id.extra); 935 serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
926 936
937 if (serio->firmware_id[0])
938 SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
939 serio->firmware_id);
940
927 return 0; 941 return 0;
928} 942}
929#undef SERIO_ADD_UEVENT_VAR 943#undef SERIO_ADD_UEVENT_VAR
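
With the hooks above in place, the firmware id collected by the i8042 PNP probe is exposed both in the port's uevent (SERIO_FIRMWARE_ID=...) and as a sysfs attribute on the serio device. A minimal sketch of reading it back, assuming a serio1 device under /sys/bus/serio/devices (the actual port number varies per machine):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char id[128];
        FILE *f = fopen("/sys/bus/serio/devices/serio1/firmware_id", "r");

        if (!f) {
                perror("firmware_id");
                return 1;
        }
        if (fgets(id, sizeof(id), f)) {
                id[strcspn(id, "\n")] = '\0';
                printf("firmware id: \"%s\"\n", id);
        }
        fclose(f);
        return 0;
}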
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index b16ebef5b911..611fc3905d00 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -22,23 +22,18 @@
22#define HID_USAGE_PAGE_DIGITIZER 0x0d 22#define HID_USAGE_PAGE_DIGITIZER 0x0d
23#define HID_USAGE_PAGE_DESKTOP 0x01 23#define HID_USAGE_PAGE_DESKTOP 0x01
24#define HID_USAGE 0x09 24#define HID_USAGE 0x09
25#define HID_USAGE_X 0x30 25#define HID_USAGE_X ((HID_USAGE_PAGE_DESKTOP << 16) | 0x30)
26#define HID_USAGE_Y 0x31 26#define HID_USAGE_Y ((HID_USAGE_PAGE_DESKTOP << 16) | 0x31)
27#define HID_USAGE_X_TILT 0x3d 27#define HID_USAGE_PRESSURE ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x30)
28#define HID_USAGE_Y_TILT 0x3e 28#define HID_USAGE_X_TILT ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x3d)
29#define HID_USAGE_FINGER 0x22 29#define HID_USAGE_Y_TILT ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x3e)
30#define HID_USAGE_STYLUS 0x20 30#define HID_USAGE_FINGER ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x22)
31#define HID_USAGE_CONTACTMAX 0x55 31#define HID_USAGE_STYLUS ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x20)
32#define HID_USAGE_CONTACTMAX ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x55)
32#define HID_COLLECTION 0xa1 33#define HID_COLLECTION 0xa1
33#define HID_COLLECTION_LOGICAL 0x02 34#define HID_COLLECTION_LOGICAL 0x02
34#define HID_COLLECTION_END 0xc0 35#define HID_COLLECTION_END 0xc0
35 36
36enum {
37 WCM_UNDEFINED = 0,
38 WCM_DESKTOP,
39 WCM_DIGITIZER,
40};
41
42struct hid_descriptor { 37struct hid_descriptor {
43 struct usb_descriptor_header header; 38 struct usb_descriptor_header header;
44 __le16 bcdHID; 39 __le16 bcdHID;
@@ -305,7 +300,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
305 char limit = 0; 300 char limit = 0;
306 /* result has to be defined as int for some devices */ 301 /* result has to be defined as int for some devices */
307 int result = 0, touch_max = 0; 302 int result = 0, touch_max = 0;
308 int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0; 303 int i = 0, page = 0, finger = 0, pen = 0;
309 unsigned char *report; 304 unsigned char *report;
310 305
311 report = kzalloc(hid_desc->wDescriptorLength, GFP_KERNEL); 306 report = kzalloc(hid_desc->wDescriptorLength, GFP_KERNEL);
@@ -332,134 +327,121 @@ static int wacom_parse_hid(struct usb_interface *intf,
332 327
333 switch (report[i]) { 328 switch (report[i]) {
334 case HID_USAGE_PAGE: 329 case HID_USAGE_PAGE:
335 switch (report[i + 1]) { 330 page = report[i + 1];
336 case HID_USAGE_PAGE_DIGITIZER: 331 i++;
337 usage = WCM_DIGITIZER;
338 i++;
339 break;
340
341 case HID_USAGE_PAGE_DESKTOP:
342 usage = WCM_DESKTOP;
343 i++;
344 break;
345 }
346 break; 332 break;
347 333
348 case HID_USAGE: 334 case HID_USAGE:
349 switch (report[i + 1]) { 335 switch (page << 16 | report[i + 1]) {
350 case HID_USAGE_X: 336 case HID_USAGE_X:
351 if (usage == WCM_DESKTOP) { 337 if (finger) {
352 if (finger) { 338 features->device_type = BTN_TOOL_FINGER;
353 features->device_type = BTN_TOOL_FINGER; 339 /* touch device at least supports one touch point */
354 /* touch device at least supports one touch point */ 340 touch_max = 1;
355 touch_max = 1; 341 switch (features->type) {
356 switch (features->type) { 342 case TABLETPC2FG:
357 case TABLETPC2FG: 343 features->pktlen = WACOM_PKGLEN_TPC2FG;
358 features->pktlen = WACOM_PKGLEN_TPC2FG; 344 break;
359 break; 345
360 346 case MTSCREEN:
361 case MTSCREEN: 347 case WACOM_24HDT:
362 case WACOM_24HDT: 348 features->pktlen = WACOM_PKGLEN_MTOUCH;
363 features->pktlen = WACOM_PKGLEN_MTOUCH; 349 break;
364 break; 350
365 351 case MTTPC:
366 case MTTPC: 352 features->pktlen = WACOM_PKGLEN_MTTPC;
367 features->pktlen = WACOM_PKGLEN_MTTPC; 353 break;
368 break; 354
369 355 case BAMBOO_PT:
370 case BAMBOO_PT: 356 features->pktlen = WACOM_PKGLEN_BBTOUCH;
371 features->pktlen = WACOM_PKGLEN_BBTOUCH; 357 break;
372 break; 358
373 359 default:
374 default: 360 features->pktlen = WACOM_PKGLEN_GRAPHIRE;
375 features->pktlen = WACOM_PKGLEN_GRAPHIRE; 361 break;
376 break; 362 }
377 } 363
378 364 switch (features->type) {
379 switch (features->type) { 365 case BAMBOO_PT:
380 case BAMBOO_PT: 366 features->x_phy =
381 features->x_phy = 367 get_unaligned_le16(&report[i + 5]);
382 get_unaligned_le16(&report[i + 5]); 368 features->x_max =
383 features->x_max = 369 get_unaligned_le16(&report[i + 8]);
384 get_unaligned_le16(&report[i + 8]); 370 i += 15;
385 i += 15; 371 break;
386 break; 372
387 373 case WACOM_24HDT:
388 case WACOM_24HDT:
389 features->x_max =
390 get_unaligned_le16(&report[i + 3]);
391 features->x_phy =
392 get_unaligned_le16(&report[i + 8]);
393 features->unit = report[i - 1];
394 features->unitExpo = report[i - 3];
395 i += 12;
396 break;
397
398 default:
399 features->x_max =
400 get_unaligned_le16(&report[i + 3]);
401 features->x_phy =
402 get_unaligned_le16(&report[i + 6]);
403 features->unit = report[i + 9];
404 features->unitExpo = report[i + 11];
405 i += 12;
406 break;
407 }
408 } else if (pen) {
409 /* penabled only accepts exact bytes of data */
410 if (features->type >= TABLETPC)
411 features->pktlen = WACOM_PKGLEN_GRAPHIRE;
412 features->device_type = BTN_TOOL_PEN;
413 features->x_max = 374 features->x_max =
414 get_unaligned_le16(&report[i + 3]); 375 get_unaligned_le16(&report[i + 3]);
415 i += 4; 376 features->x_phy =
377 get_unaligned_le16(&report[i + 8]);
378 features->unit = report[i - 1];
379 features->unitExpo = report[i - 3];
380 i += 12;
381 break;
382
383 default:
384 features->x_max =
385 get_unaligned_le16(&report[i + 3]);
386 features->x_phy =
387 get_unaligned_le16(&report[i + 6]);
388 features->unit = report[i + 9];
389 features->unitExpo = report[i + 11];
390 i += 12;
391 break;
416 } 392 }
393 } else if (pen) {
394 /* penabled only accepts exact bytes of data */
395 if (features->type >= TABLETPC)
396 features->pktlen = WACOM_PKGLEN_GRAPHIRE;
397 features->device_type = BTN_TOOL_PEN;
398 features->x_max =
399 get_unaligned_le16(&report[i + 3]);
400 i += 4;
417 } 401 }
418 break; 402 break;
419 403
420 case HID_USAGE_Y: 404 case HID_USAGE_Y:
421 if (usage == WCM_DESKTOP) { 405 if (finger) {
422 if (finger) { 406 switch (features->type) {
423 switch (features->type) { 407 case TABLETPC2FG:
424 case TABLETPC2FG: 408 case MTSCREEN:
425 case MTSCREEN: 409 case MTTPC:
426 case MTTPC: 410 features->y_max =
427 features->y_max = 411 get_unaligned_le16(&report[i + 3]);
428 get_unaligned_le16(&report[i + 3]); 412 features->y_phy =
429 features->y_phy = 413 get_unaligned_le16(&report[i + 6]);
430 get_unaligned_le16(&report[i + 6]); 414 i += 7;
431 i += 7; 415 break;
432 break; 416
433 417 case WACOM_24HDT:
434 case WACOM_24HDT: 418 features->y_max =
435 features->y_max = 419 get_unaligned_le16(&report[i + 3]);
436 get_unaligned_le16(&report[i + 3]); 420 features->y_phy =
437 features->y_phy = 421 get_unaligned_le16(&report[i - 2]);
438 get_unaligned_le16(&report[i - 2]); 422 i += 7;
439 i += 7; 423 break;
440 break; 424
441 425 case BAMBOO_PT:
442 case BAMBOO_PT: 426 features->y_phy =
443 features->y_phy = 427 get_unaligned_le16(&report[i + 3]);
444 get_unaligned_le16(&report[i + 3]); 428 features->y_max =
445 features->y_max = 429 get_unaligned_le16(&report[i + 6]);
446 get_unaligned_le16(&report[i + 6]); 430 i += 12;
447 i += 12; 431 break;
448 break; 432
449 433 default:
450 default:
451 features->y_max =
452 features->x_max;
453 features->y_phy =
454 get_unaligned_le16(&report[i + 3]);
455 i += 4;
456 break;
457 }
458 } else if (pen) {
459 features->y_max = 434 features->y_max =
435 features->x_max;
436 features->y_phy =
460 get_unaligned_le16(&report[i + 3]); 437 get_unaligned_le16(&report[i + 3]);
461 i += 4; 438 i += 4;
439 break;
462 } 440 }
441 } else if (pen) {
442 features->y_max =
443 get_unaligned_le16(&report[i + 3]);
444 i += 4;
463 } 445 }
464 break; 446 break;
465 447
@@ -484,12 +466,20 @@ static int wacom_parse_hid(struct usb_interface *intf,
484 wacom_retrieve_report_data(intf, features); 466 wacom_retrieve_report_data(intf, features);
485 i++; 467 i++;
486 break; 468 break;
469
470 case HID_USAGE_PRESSURE:
471 if (pen) {
472 features->pressure_max =
473 get_unaligned_le16(&report[i + 3]);
474 i += 4;
475 }
476 break;
487 } 477 }
488 break; 478 break;
489 479
490 case HID_COLLECTION_END: 480 case HID_COLLECTION_END:
491 /* reset UsagePage and Finger */ 481 /* reset UsagePage and Finger */
492 finger = usage = 0; 482 finger = page = 0;
493 break; 483 break;
494 484
495 case HID_COLLECTION: 485 case HID_COLLECTION:
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 05f371df6c40..4822c57a3756 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -178,10 +178,9 @@ static int wacom_ptu_irq(struct wacom_wac *wacom)
178 178
179static int wacom_dtu_irq(struct wacom_wac *wacom) 179static int wacom_dtu_irq(struct wacom_wac *wacom)
180{ 180{
181 struct wacom_features *features = &wacom->features; 181 unsigned char *data = wacom->data;
182 char *data = wacom->data;
183 struct input_dev *input = wacom->input; 182 struct input_dev *input = wacom->input;
184 int prox = data[1] & 0x20, pressure; 183 int prox = data[1] & 0x20;
185 184
186 dev_dbg(input->dev.parent, 185 dev_dbg(input->dev.parent,
187 "%s: received report #%d", __func__, data[0]); 186 "%s: received report #%d", __func__, data[0]);
@@ -198,10 +197,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
198 input_report_key(input, BTN_STYLUS2, data[1] & 0x10); 197 input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
199 input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2])); 198 input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
200 input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4])); 199 input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
201 pressure = ((data[7] & 0x01) << 8) | data[6]; 200 input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x01) << 8) | data[6]);
202 if (pressure < 0)
203 pressure = features->pressure_max + pressure + 1;
204 input_report_abs(input, ABS_PRESSURE, pressure);
205 input_report_key(input, BTN_TOUCH, data[1] & 0x05); 201 input_report_key(input, BTN_TOUCH, data[1] & 0x05);
206 if (!prox) /* out-prox */ 202 if (!prox) /* out-prox */
207 wacom->id[0] = 0; 203 wacom->id[0] = 0;
@@ -906,7 +902,7 @@ static int int_dist(int x1, int y1, int x2, int y2)
906static int wacom_24hdt_irq(struct wacom_wac *wacom) 902static int wacom_24hdt_irq(struct wacom_wac *wacom)
907{ 903{
908 struct input_dev *input = wacom->input; 904 struct input_dev *input = wacom->input;
909 char *data = wacom->data; 905 unsigned char *data = wacom->data;
910 int i; 906 int i;
911 int current_num_contacts = data[61]; 907 int current_num_contacts = data[61];
912 int contacts_to_send = 0; 908 int contacts_to_send = 0;
@@ -959,7 +955,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
959static int wacom_mt_touch(struct wacom_wac *wacom) 955static int wacom_mt_touch(struct wacom_wac *wacom)
960{ 956{
961 struct input_dev *input = wacom->input; 957 struct input_dev *input = wacom->input;
962 char *data = wacom->data; 958 unsigned char *data = wacom->data;
963 int i; 959 int i;
964 int current_num_contacts = data[2]; 960 int current_num_contacts = data[2];
965 int contacts_to_send = 0; 961 int contacts_to_send = 0;
@@ -1038,7 +1034,7 @@ static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
1038 1034
1039static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len) 1035static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
1040{ 1036{
1041 char *data = wacom->data; 1037 unsigned char *data = wacom->data;
1042 struct input_dev *input = wacom->input; 1038 struct input_dev *input = wacom->input;
1043 bool prox; 1039 bool prox;
1044 int x = 0, y = 0; 1040 int x = 0, y = 0;
@@ -1074,10 +1070,8 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
1074 1070
1075static int wacom_tpc_pen(struct wacom_wac *wacom) 1071static int wacom_tpc_pen(struct wacom_wac *wacom)
1076{ 1072{
1077 struct wacom_features *features = &wacom->features; 1073 unsigned char *data = wacom->data;
1078 char *data = wacom->data;
1079 struct input_dev *input = wacom->input; 1074 struct input_dev *input = wacom->input;
1080 int pressure;
1081 bool prox = data[1] & 0x20; 1075 bool prox = data[1] & 0x20;
1082 1076
1083 if (!wacom->shared->stylus_in_proximity) /* first in prox */ 1077 if (!wacom->shared->stylus_in_proximity) /* first in prox */
@@ -1093,10 +1087,7 @@ static int wacom_tpc_pen(struct wacom_wac *wacom)
1093 input_report_key(input, BTN_STYLUS2, data[1] & 0x10); 1087 input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
1094 input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2])); 1088 input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
1095 input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4])); 1089 input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
1096 pressure = ((data[7] & 0x01) << 8) | data[6]; 1090 input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x03) << 8) | data[6]);
1097 if (pressure < 0)
1098 pressure = features->pressure_max + pressure + 1;
1099 input_report_abs(input, ABS_PRESSURE, pressure);
1100 input_report_key(input, BTN_TOUCH, data[1] & 0x05); 1091 input_report_key(input, BTN_TOUCH, data[1] & 0x05);
1101 input_report_key(input, wacom->tool[0], prox); 1092 input_report_key(input, wacom->tool[0], prox);
1102 return 1; 1093 return 1;
@@ -1107,7 +1098,7 @@ static int wacom_tpc_pen(struct wacom_wac *wacom)
1107 1098
1108static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len) 1099static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
1109{ 1100{
1110 char *data = wacom->data; 1101 unsigned char *data = wacom->data;
1111 1102
1112 dev_dbg(wacom->input->dev.parent, 1103 dev_dbg(wacom->input->dev.parent,
1113 "%s: received report #%d\n", __func__, data[0]); 1104 "%s: received report #%d\n", __func__, data[0]);
@@ -1838,7 +1829,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1838 case DTU: 1829 case DTU:
1839 if (features->type == DTUS) { 1830 if (features->type == DTUS) {
1840 input_set_capability(input_dev, EV_MSC, MSC_SERIAL); 1831 input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
1841 for (i = 0; i < 3; i++) 1832 for (i = 0; i < 4; i++)
1842 __set_bit(BTN_0 + i, input_dev->keybit); 1833 __set_bit(BTN_0 + i, input_dev->keybit);
1843 } 1834 }
1844 __set_bit(BTN_TOOL_PEN, input_dev->keybit); 1835 __set_bit(BTN_TOOL_PEN, input_dev->keybit);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 68edc9db2c64..b845e9370871 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -640,7 +640,7 @@ config TOUCHSCREEN_WM9713
640 640
641config TOUCHSCREEN_WM97XX_ATMEL 641config TOUCHSCREEN_WM97XX_ATMEL
642 tristate "WM97xx Atmel accelerated touch" 642 tristate "WM97xx Atmel accelerated touch"
643 depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91) 643 depends on TOUCHSCREEN_WM97XX && AVR32
644 help 644 help
645 Say Y here for support for streaming mode with WM97xx touchscreens 645 Say Y here for support for streaming mode with WM97xx touchscreens
646 on Atmel AT91 or AVR32 systems with an AC97C module. 646 on Atmel AT91 or AVR32 systems with an AC97C module.
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 45a06e495ed2..7f8aa981500d 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -425,7 +425,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command)
425name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \ 425name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \
426{ \ 426{ \
427 struct ads7846 *ts = dev_get_drvdata(dev); \ 427 struct ads7846 *ts = dev_get_drvdata(dev); \
428 ssize_t v = ads7846_read12_ser(dev, \ 428 ssize_t v = ads7846_read12_ser(&ts->spi->dev, \
429 READ_12BIT_SER(var)); \ 429 READ_12BIT_SER(var)); \
430 if (v < 0) \ 430 if (v < 0) \
431 return v; \ 431 return v; \
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c949520bd196..57068e8035b5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3999,7 +3999,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
3999 iommu_flush_dte(iommu, devid); 3999 iommu_flush_dte(iommu, devid);
4000 if (devid != alias) { 4000 if (devid != alias) {
4001 irq_lookup_table[alias] = table; 4001 irq_lookup_table[alias] = table;
4002 set_dte_irq_entry(devid, table); 4002 set_dte_irq_entry(alias, table);
4003 iommu_flush_dte(iommu, alias); 4003 iommu_flush_dte(iommu, alias);
4004 } 4004 }
4005 4005
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index b76c58dbe30c..0e08545d7298 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -788,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
788 * per device. But we can enable the exclusion range per 788 * per device. But we can enable the exclusion range per
789 * device. This is done here 789 * device. This is done here
790 */ 790 */
791 set_dev_entry_bit(m->devid, DEV_ENTRY_EX); 791 set_dev_entry_bit(devid, DEV_ENTRY_EX);
792 iommu->exclusion_start = m->range_start; 792 iommu->exclusion_start = m->range_start;
793 iommu->exclusion_length = m->range_length; 793 iommu->exclusion_length = m->range_length;
794 } 794 }
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 5208828792e6..203b2e6a91cf 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -504,8 +504,10 @@ static void do_fault(struct work_struct *work)
504 504
505 write = !!(fault->flags & PPR_FAULT_WRITE); 505 write = !!(fault->flags & PPR_FAULT_WRITE);
506 506
507 down_read(&fault->state->mm->mmap_sem);
507 npages = get_user_pages(fault->state->task, fault->state->mm, 508 npages = get_user_pages(fault->state->task, fault->state->mm,
508 fault->address, 1, write, 0, &page, NULL); 509 fault->address, 1, write, 0, &page, NULL);
510 up_read(&fault->state->mm->mmap_sem);
509 511
510 if (npages == 1) { 512 if (npages == 1) {
511 put_page(page); 513 put_page(page);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8b89e33a89fe..647c3c7fd742 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1381,7 +1381,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
1381 1381
1382 do { 1382 do {
1383 next = pmd_addr_end(addr, end); 1383 next = pmd_addr_end(addr, end);
1384 ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn, 1384 ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
1385 prot, stage); 1385 prot, stage);
1386 phys += next - addr; 1386 phys += next - addr;
1387 } while (pmd++, addr = next, addr < end); 1387 } while (pmd++, addr = next, addr < end);
@@ -1499,7 +1499,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1499 1499
1500 ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); 1500 ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
1501 arm_smmu_tlb_inv_context(&smmu_domain->root_cfg); 1501 arm_smmu_tlb_inv_context(&smmu_domain->root_cfg);
1502 return ret ? ret : size; 1502 return ret ? 0 : size;
1503} 1503}
1504 1504
1505static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, 1505static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index f445c10df8df..39f8b717fe84 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -152,7 +152,8 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
152 info->seg = pci_domain_nr(dev->bus); 152 info->seg = pci_domain_nr(dev->bus);
153 info->level = level; 153 info->level = level;
154 if (event == BUS_NOTIFY_ADD_DEVICE) { 154 if (event == BUS_NOTIFY_ADD_DEVICE) {
155 for (tmp = dev, level--; tmp; tmp = tmp->bus->self) { 155 for (tmp = dev; tmp; tmp = tmp->bus->self) {
156 level--;
156 info->path[level].device = PCI_SLOT(tmp->devfn); 157 info->path[level].device = PCI_SLOT(tmp->devfn);
157 info->path[level].function = PCI_FUNC(tmp->devfn); 158 info->path[level].function = PCI_FUNC(tmp->devfn);
158 if (pci_is_root_bus(tmp->bus)) 159 if (pci_is_root_bus(tmp->bus))
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 69fa7da5e48b..f256ffc02e29 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1009,11 +1009,13 @@ static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1009 if (level == 1) 1009 if (level == 1)
1010 return freelist; 1010 return freelist;
1011 1011
1012 for (pte = page_address(pg); !first_pte_in_page(pte); pte++) { 1012 pte = page_address(pg);
1013 do {
1013 if (dma_pte_present(pte) && !dma_pte_superpage(pte)) 1014 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1014 freelist = dma_pte_list_pagetables(domain, level - 1, 1015 freelist = dma_pte_list_pagetables(domain, level - 1,
1015 pte, freelist); 1016 pte, freelist);
1016 } 1017 pte++;
1018 } while (!first_pte_in_page(pte));
1017 1019
1018 return freelist; 1020 return freelist;
1019} 1021}
@@ -2235,7 +2237,9 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2235 bridge_devfn = dev_tmp->devfn; 2237 bridge_devfn = dev_tmp->devfn;
2236 } 2238 }
2237 spin_lock_irqsave(&device_domain_lock, flags); 2239 spin_lock_irqsave(&device_domain_lock, flags);
2238 info = dmar_search_domain_by_dev_info(segment, bus, devfn); 2240 info = dmar_search_domain_by_dev_info(segment,
2241 bridge_bus,
2242 bridge_devfn);
2239 if (info) { 2243 if (info) {
2240 iommu = info->iommu; 2244 iommu = info->iommu;
2241 domain = info->domain; 2245 domain = info->domain;
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 41be897df8d5..3899ba7821c5 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -41,6 +41,7 @@
41#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) 41#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
42#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) 42#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
43#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) 43#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
44#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
44 45
45#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) 46#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
46#define ARMADA_375_PPI_CAUSE (0x10) 47#define ARMADA_375_PPI_CAUSE (0x10)
@@ -132,8 +133,7 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
132 struct msi_desc *desc) 133 struct msi_desc *desc)
133{ 134{
134 struct msi_msg msg; 135 struct msi_msg msg;
135 irq_hw_number_t hwirq; 136 int virq, hwirq;
136 int virq;
137 137
138 hwirq = armada_370_xp_alloc_msi(); 138 hwirq = armada_370_xp_alloc_msi();
139 if (hwirq < 0) 139 if (hwirq < 0)
@@ -159,8 +159,19 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
159 unsigned int irq) 159 unsigned int irq)
160{ 160{
161 struct irq_data *d = irq_get_irq_data(irq); 161 struct irq_data *d = irq_get_irq_data(irq);
162 unsigned long hwirq = d->hwirq;
163
162 irq_dispose_mapping(irq); 164 irq_dispose_mapping(irq);
163 armada_370_xp_free_msi(d->hwirq); 165 armada_370_xp_free_msi(hwirq);
166}
167
168static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
169 int nvec, int type)
170{
171 /* We support MSI, but not MSI-X */
172 if (type == PCI_CAP_ID_MSI)
173 return 0;
174 return -EINVAL;
164} 175}
165 176
166static struct irq_chip armada_370_xp_msi_irq_chip = { 177static struct irq_chip armada_370_xp_msi_irq_chip = {
@@ -201,6 +212,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
201 212
202 msi_chip->setup_irq = armada_370_xp_setup_msi_irq; 213 msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
203 msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; 214 msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
215 msi_chip->check_device = armada_370_xp_check_msi_device;
204 msi_chip->of_node = node; 216 msi_chip->of_node = node;
205 217
206 armada_370_xp_msi_domain = 218 armada_370_xp_msi_domain =
@@ -244,35 +256,18 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
244static int armada_xp_set_affinity(struct irq_data *d, 256static int armada_xp_set_affinity(struct irq_data *d,
245 const struct cpumask *mask_val, bool force) 257 const struct cpumask *mask_val, bool force)
246{ 258{
247 unsigned long reg;
248 unsigned long new_mask = 0;
249 unsigned long online_mask = 0;
250 unsigned long count = 0;
251 irq_hw_number_t hwirq = irqd_to_hwirq(d); 259 irq_hw_number_t hwirq = irqd_to_hwirq(d);
260 unsigned long reg, mask;
252 int cpu; 261 int cpu;
253 262
254 for_each_cpu(cpu, mask_val) { 263 /* Select a single core from the affinity mask which is online */
255 new_mask |= 1 << cpu_logical_map(cpu); 264 cpu = cpumask_any_and(mask_val, cpu_online_mask);
256 count++; 265 mask = 1UL << cpu_logical_map(cpu);
257 }
258
259 /*
260 * Forbid mutlicore interrupt affinity
261 * This is required since the MPIC HW doesn't limit
262 * several CPUs from acknowledging the same interrupt.
263 */
264 if (count > 1)
265 return -EINVAL;
266
267 for_each_cpu(cpu, cpu_online_mask)
268 online_mask |= 1 << cpu_logical_map(cpu);
269 266
270 raw_spin_lock(&irq_controller_lock); 267 raw_spin_lock(&irq_controller_lock);
271
272 reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); 268 reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
273 reg = (reg & (~online_mask)) | new_mask; 269 reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
274 writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); 270 writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
275
276 raw_spin_unlock(&irq_controller_lock); 271 raw_spin_unlock(&irq_controller_lock);
277 272
278 return 0; 273 return 0;
@@ -494,15 +489,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
494 489
495#ifdef CONFIG_SMP 490#ifdef CONFIG_SMP
496 armada_xp_mpic_smp_cpu_init(); 491 armada_xp_mpic_smp_cpu_init();
497
498 /*
499 * Set the default affinity from all CPUs to the boot cpu.
500 * This is required since the MPIC doesn't limit several CPUs
501 * from acknowledging the same interrupt.
502 */
503 cpumask_clear(irq_default_affinity);
504 cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
505
506#endif 492#endif
507 493
508 armada_370_xp_msi_init(node, main_int_res.start); 494 armada_370_xp_msi_init(node, main_int_res.start);
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index fc817d28d1fe..3d15d16a7088 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -107,7 +107,7 @@ static int __init crossbar_of_init(struct device_node *node)
107 int i, size, max, reserved = 0, entry; 107 int i, size, max, reserved = 0, entry;
108 const __be32 *irqsr; 108 const __be32 *irqsr;
109 109
110 cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL); 110 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
111 111
112 if (!cb) 112 if (!cb)
113 return -ENOMEM; 113 return -ENOMEM;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4300b6606f5e..57d165e026f4 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -246,10 +246,14 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
246 bool force) 246 bool force)
247{ 247{
248 void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); 248 void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
249 unsigned int shift = (gic_irq(d) % 4) * 8; 249 unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
250 unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
251 u32 val, mask, bit; 250 u32 val, mask, bit;
252 251
252 if (!force)
253 cpu = cpumask_any_and(mask_val, cpu_online_mask);
254 else
255 cpu = cpumask_first(mask_val);
256
253 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) 257 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
254 return -EINVAL; 258 return -EINVAL;
255 259
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 37dab0b472cd..7d35287f9e90 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -24,6 +24,7 @@
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/irq.h> 26#include <linux/irq.h>
27#include <linux/irqchip/chained_irq.h>
27#include <linux/irqdomain.h> 28#include <linux/irqdomain.h>
28#include <linux/of.h> 29#include <linux/of.h>
29#include <linux/of_address.h> 30#include <linux/of_address.h>
@@ -228,12 +229,17 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
228static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc) 229static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc)
229{ 230{
230 u32 stat, hwirq; 231 u32 stat, hwirq;
232 struct irq_chip *host_chip = irq_desc_get_chip(desc);
231 struct vic_device *vic = irq_desc_get_handler_data(desc); 233 struct vic_device *vic = irq_desc_get_handler_data(desc);
232 234
235 chained_irq_enter(host_chip, desc);
236
233 while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) { 237 while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
234 hwirq = ffs(stat) - 1; 238 hwirq = ffs(stat) - 1;
235 generic_handle_irq(irq_find_mapping(vic->domain, hwirq)); 239 generic_handle_irq(irq_find_mapping(vic->domain, hwirq));
236 } 240 }
241
242 chained_irq_exit(host_chip, desc);
237} 243}
238 244
239/* 245/*
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 8527743b5cef..3fdda3a40269 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -5,7 +5,7 @@
5 * Viresh Kumar <viresh.linux@gmail.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * Copyright (C) 2012 ST Microelectronics 7 * Copyright (C) 2012 ST Microelectronics
8 * Shiraz Hashim <shiraz.hashim@st.com> 8 * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
9 * 9 *
10 * This file is licensed under the terms of the GNU General Public 10 * This file is licensed under the terms of the GNU General Public
11 * License version 2. This program is licensed "as is" without any 11 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c
index 51dae9167238..96d1df05044f 100644
--- a/drivers/isdn/hisax/icc.c
+++ b/drivers/isdn/hisax/icc.c
@@ -425,7 +425,7 @@ afterXPR:
425 if (cs->debug & L1_DEB_MONITOR) 425 if (cs->debug & L1_DEB_MONITOR)
426 debugl1(cs, "ICC %02x -> MOX1", cs->dc.icc.mon_tx[cs->dc.icc.mon_txp - 1]); 426 debugl1(cs, "ICC %02x -> MOX1", cs->dc.icc.mon_tx[cs->dc.icc.mon_txp - 1]);
427 } 427 }
428 AfterMOX1: 428 AfterMOX1: ;
429#endif 429#endif
430 } 430 }
431 } 431 }
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 53d487f0c79d..6a7447c304ac 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1155,7 +1155,7 @@ icn_command(isdn_ctrl *c, icn_card *card)
1155 ulong a; 1155 ulong a;
1156 ulong flags; 1156 ulong flags;
1157 int i; 1157 int i;
1158 char cbuf[60]; 1158 char cbuf[80];
1159 isdn_ctrl cmd; 1159 isdn_ctrl cmd;
1160 icn_cdef cdef; 1160 icn_cdef cdef;
1161 char __user *arg; 1161 char __user *arg;
@@ -1309,7 +1309,6 @@ icn_command(isdn_ctrl *c, icn_card *card)
1309 break; 1309 break;
1310 if ((c->arg & 255) < ICN_BCH) { 1310 if ((c->arg & 255) < ICN_BCH) {
1311 char *p; 1311 char *p;
1312 char dial[50];
1313 char dcode[4]; 1312 char dcode[4];
1314 1313
1315 a = c->arg; 1314 a = c->arg;
@@ -1321,10 +1320,10 @@ icn_command(isdn_ctrl *c, icn_card *card)
1321 } else 1320 } else
1322 /* Normal Dial */ 1321 /* Normal Dial */
1323 strcpy(dcode, "CAL"); 1322 strcpy(dcode, "CAL");
1324 strcpy(dial, p); 1323 snprintf(cbuf, sizeof(cbuf),
1325 sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1), 1324 "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
1326 dcode, dial, c->parm.setup.si1, 1325 dcode, p, c->parm.setup.si1,
1327 c->parm.setup.si2, c->parm.setup.eazmsn); 1326 c->parm.setup.si2, c->parm.setup.eazmsn);
1328 i = icn_writecmd(cbuf, strlen(cbuf), 0, card); 1327 i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
1329 } 1328 }
1330 break; 1329 break;
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index d1278b5f3028..004926955263 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -141,6 +141,7 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
141 default: 141 default:
142 pr_err("Invalid chameleon descriptor type 0x%x\n", 142 pr_err("Invalid chameleon descriptor type 0x%x\n",
143 dtype); 143 dtype);
144 kfree(header);
144 return -EINVAL; 145 return -EINVAL;
145 } 146 }
146 num_cells++; 147 num_cells++;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1bf4a71919ec..5f054c44b485 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
2178 ti->num_discard_bios = 1; 2178 ti->num_discard_bios = 1;
2179 ti->discards_supported = true; 2179 ti->discards_supported = true;
2180 ti->discard_zeroes_data_unsupported = true; 2180 ti->discard_zeroes_data_unsupported = true;
2181 /* Discard bios must be split on a block boundary */
2182 ti->split_discard_bios = true;
2181 2183
2182 cache->features = ca->features; 2184 cache->features = ca->features;
2183 ti->per_bio_data_size = get_per_bio_data_size(cache); 2185 ti->per_bio_data_size = get_per_bio_data_size(cache);
@@ -2488,6 +2490,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
2488 2490
2489 } else { 2491 } else {
2490 inc_hit_counter(cache, bio); 2492 inc_hit_counter(cache, bio);
2493 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2491 2494
2492 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && 2495 if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
2493 !is_dirty(cache, lookup_result.cblock)) 2496 !is_dirty(cache, lookup_result.cblock))
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 784695d22fde..53b213226c01 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -19,7 +19,6 @@
19#include <linux/crypto.h> 19#include <linux/crypto.h>
20#include <linux/workqueue.h> 20#include <linux/workqueue.h>
21#include <linux/backing-dev.h> 21#include <linux/backing-dev.h>
22#include <linux/percpu.h>
23#include <linux/atomic.h> 22#include <linux/atomic.h>
24#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
25#include <asm/page.h> 24#include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
43 struct bvec_iter iter_out; 42 struct bvec_iter iter_out;
44 sector_t cc_sector; 43 sector_t cc_sector;
45 atomic_t cc_pending; 44 atomic_t cc_pending;
45 struct ablkcipher_request *req;
46}; 46};
47 47
48/* 48/*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
111enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; 111enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
112 112
113/* 113/*
114 * Duplicated per-CPU state for cipher. 114 * The fields in here must be read only after initialization.
115 */
116struct crypt_cpu {
117 struct ablkcipher_request *req;
118};
119
120/*
121 * The fields in here must be read only after initialization,
122 * changing state should be in crypt_cpu.
123 */ 115 */
124struct crypt_config { 116struct crypt_config {
125 struct dm_dev *dev; 117 struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
150 sector_t iv_offset; 142 sector_t iv_offset;
151 unsigned int iv_size; 143 unsigned int iv_size;
152 144
153 /*
154 * Duplicated per cpu state. Access through
155 * per_cpu_ptr() only.
156 */
157 struct crypt_cpu __percpu *cpu;
158
159 /* ESSIV: struct crypto_cipher *essiv_tfm */ 145 /* ESSIV: struct crypto_cipher *essiv_tfm */
160 void *iv_private; 146 void *iv_private;
161 struct crypto_ablkcipher **tfms; 147 struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
192static void kcryptd_queue_crypt(struct dm_crypt_io *io); 178static void kcryptd_queue_crypt(struct dm_crypt_io *io);
193static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); 179static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
194 180
195static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
196{
197 return this_cpu_ptr(cc->cpu);
198}
199
200/* 181/*
201 * Use this to access cipher attributes that are the same for each CPU. 182 * Use this to access cipher attributes that are the same for each CPU.
202 */ 183 */
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
903static void crypt_alloc_req(struct crypt_config *cc, 884static void crypt_alloc_req(struct crypt_config *cc,
904 struct convert_context *ctx) 885 struct convert_context *ctx)
905{ 886{
906 struct crypt_cpu *this_cc = this_crypt_config(cc);
907 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); 887 unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
908 888
909 if (!this_cc->req) 889 if (!ctx->req)
910 this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); 890 ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
911 891
912 ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]); 892 ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
913 ablkcipher_request_set_callback(this_cc->req, 893 ablkcipher_request_set_callback(ctx->req,
914 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, 894 CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
915 kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); 895 kcryptd_async_done, dmreq_of_req(cc, ctx->req));
916} 896}
917 897
918/* 898/*
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
921static int crypt_convert(struct crypt_config *cc, 901static int crypt_convert(struct crypt_config *cc,
922 struct convert_context *ctx) 902 struct convert_context *ctx)
923{ 903{
924 struct crypt_cpu *this_cc = this_crypt_config(cc);
925 int r; 904 int r;
926 905
927 atomic_set(&ctx->cc_pending, 1); 906 atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
932 911
933 atomic_inc(&ctx->cc_pending); 912 atomic_inc(&ctx->cc_pending);
934 913
935 r = crypt_convert_block(cc, ctx, this_cc->req); 914 r = crypt_convert_block(cc, ctx, ctx->req);
936 915
937 switch (r) { 916 switch (r) {
938 /* async */ 917 /* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
941 reinit_completion(&ctx->restart); 920 reinit_completion(&ctx->restart);
942 /* fall through*/ 921 /* fall through*/
943 case -EINPROGRESS: 922 case -EINPROGRESS:
944 this_cc->req = NULL; 923 ctx->req = NULL;
945 ctx->cc_sector++; 924 ctx->cc_sector++;
946 continue; 925 continue;
947 926
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
1040 io->sector = sector; 1019 io->sector = sector;
1041 io->error = 0; 1020 io->error = 0;
1042 io->base_io = NULL; 1021 io->base_io = NULL;
1022 io->ctx.req = NULL;
1043 atomic_set(&io->io_pending, 0); 1023 atomic_set(&io->io_pending, 0);
1044 1024
1045 return io; 1025 return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
1065 if (!atomic_dec_and_test(&io->io_pending)) 1045 if (!atomic_dec_and_test(&io->io_pending))
1066 return; 1046 return;
1067 1047
1048 if (io->ctx.req)
1049 mempool_free(io->ctx.req, cc->req_pool);
1068 mempool_free(io, cc->io_pool); 1050 mempool_free(io, cc->io_pool);
1069 1051
1070 if (likely(!base_io)) 1052 if (likely(!base_io))
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
1492static void crypt_dtr(struct dm_target *ti) 1474static void crypt_dtr(struct dm_target *ti)
1493{ 1475{
1494 struct crypt_config *cc = ti->private; 1476 struct crypt_config *cc = ti->private;
1495 struct crypt_cpu *cpu_cc;
1496 int cpu;
1497 1477
1498 ti->private = NULL; 1478 ti->private = NULL;
1499 1479
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
1505 if (cc->crypt_queue) 1485 if (cc->crypt_queue)
1506 destroy_workqueue(cc->crypt_queue); 1486 destroy_workqueue(cc->crypt_queue);
1507 1487
1508 if (cc->cpu)
1509 for_each_possible_cpu(cpu) {
1510 cpu_cc = per_cpu_ptr(cc->cpu, cpu);
1511 if (cpu_cc->req)
1512 mempool_free(cpu_cc->req, cc->req_pool);
1513 }
1514
1515 crypt_free_tfms(cc); 1488 crypt_free_tfms(cc);
1516 1489
1517 if (cc->bs) 1490 if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
1530 if (cc->dev) 1503 if (cc->dev)
1531 dm_put_device(ti, cc->dev); 1504 dm_put_device(ti, cc->dev);
1532 1505
1533 if (cc->cpu)
1534 free_percpu(cc->cpu);
1535
1536 kzfree(cc->cipher); 1506 kzfree(cc->cipher);
1537 kzfree(cc->cipher_string); 1507 kzfree(cc->cipher_string);
1538 1508
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
1588 if (tmp) 1558 if (tmp)
1589 DMWARN("Ignoring unexpected additional cipher options"); 1559 DMWARN("Ignoring unexpected additional cipher options");
1590 1560
1591 cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
1592 __alignof__(struct crypt_cpu));
1593 if (!cc->cpu) {
1594 ti->error = "Cannot allocate per cpu state";
1595 goto bad_mem;
1596 }
1597
1598 /* 1561 /*
1599 * For compatibility with the original dm-crypt mapping format, if 1562 * For compatibility with the original dm-crypt mapping format, if
1600 * only the cipher name is supplied, use cbc-plain. 1563 * only the cipher name is supplied, use cbc-plain.
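
Editor's note: the dm-crypt hunks above replace the per-CPU request cache (struct crypt_cpu) with a request pointer held in the per-I/O convert_context, allocated on demand and released when the last reference to the I/O is dropped. A minimal sketch of that lazy-allocate, free-with-the-owner pattern, using hypothetical names (my_cfg, my_io, and their mempools), could look like this:

#include <linux/mempool.h>
#include <linux/gfp.h>
#include <linux/atomic.h>

struct my_cfg {
	mempool_t *req_pool;	/* pool for the per-I/O request */
	mempool_t *io_pool;	/* pool for struct my_io itself */
};

struct my_io {
	struct my_cfg *cfg;
	void *req;		/* allocated lazily, owned by this I/O */
	atomic_t pending;
};

static void my_io_alloc_req(struct my_io *io)
{
	if (!io->req)
		io->req = mempool_alloc(io->cfg->req_pool, GFP_NOIO);
}

static void my_io_put(struct my_io *io)
{
	struct my_cfg *cfg = io->cfg;

	if (!atomic_dec_and_test(&io->pending))
		return;
	if (io->req)
		mempool_free(io->req, cfg->req_pool);
	mempool_free(io, cfg->io_pool);
}

Tying the request to the I/O rather than to the CPU keeps it valid across work-queue handoffs and CPU migration, which is the point of the change.
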
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aa009e865871..ebfa411d1a7d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
445 else 445 else
446 m->saved_queue_if_no_path = queue_if_no_path; 446 m->saved_queue_if_no_path = queue_if_no_path;
447 m->queue_if_no_path = queue_if_no_path; 447 m->queue_if_no_path = queue_if_no_path;
448 if (!m->queue_if_no_path)
449 dm_table_run_md_queue_async(m->ti->table);
450
451 spin_unlock_irqrestore(&m->lock, flags); 448 spin_unlock_irqrestore(&m->lock, flags);
452 449
450 if (!queue_if_no_path)
451 dm_table_run_md_queue_async(m->ti->table);
452
453 return 0; 453 return 0;
454} 454}
455 455
@@ -954,7 +954,7 @@ out:
954 */ 954 */
955static int reinstate_path(struct pgpath *pgpath) 955static int reinstate_path(struct pgpath *pgpath)
956{ 956{
957 int r = 0; 957 int r = 0, run_queue = 0;
958 unsigned long flags; 958 unsigned long flags;
959 struct multipath *m = pgpath->pg->m; 959 struct multipath *m = pgpath->pg->m;
960 960
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
978 978
979 if (!m->nr_valid_paths++) { 979 if (!m->nr_valid_paths++) {
980 m->current_pgpath = NULL; 980 m->current_pgpath = NULL;
981 dm_table_run_md_queue_async(m->ti->table); 981 run_queue = 1;
982 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { 982 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
983 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) 983 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
984 m->pg_init_in_progress++; 984 m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
991 991
992out: 992out:
993 spin_unlock_irqrestore(&m->lock, flags); 993 spin_unlock_irqrestore(&m->lock, flags);
994 if (run_queue)
995 dm_table_run_md_queue_async(m->ti->table);
994 996
995 return r; 997 return r;
996} 998}
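
Editor's note: both dm-mpath hunks move the dm_table_run_md_queue_async() call out from under m->lock; the decision is recorded in a local variable while the spinlock is held and acted on only after the lock is released. A hedged sketch of that pattern, with hypothetical names (my_mpath, my_run_queue):

#include <linux/spinlock.h>

struct my_mpath {
	spinlock_t lock;
	unsigned nr_valid_paths;
};

void my_run_queue(struct my_mpath *m);	/* may sleep or take other locks */

static int my_reinstate_path(struct my_mpath *m)
{
	unsigned long flags;
	int run_queue = 0;

	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths++)
		run_queue = 1;		/* decide while the state is stable */
	spin_unlock_irqrestore(&m->lock, flags);

	if (run_queue)
		my_run_queue(m);	/* act with no spinlock held */

	return 0;
}
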
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 53728be84dee..242ac2ea5f29 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,6 +27,9 @@
27#define MAPPING_POOL_SIZE 1024 27#define MAPPING_POOL_SIZE 1024
28#define PRISON_CELLS 1024 28#define PRISON_CELLS 1024
29#define COMMIT_PERIOD HZ 29#define COMMIT_PERIOD HZ
30#define NO_SPACE_TIMEOUT_SECS 60
31
32static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
30 33
31DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, 34DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
32 "A percentage of time allocated for copy on write"); 35 "A percentage of time allocated for copy on write");
@@ -175,6 +178,7 @@ struct pool {
175 struct workqueue_struct *wq; 178 struct workqueue_struct *wq;
176 struct work_struct worker; 179 struct work_struct worker;
177 struct delayed_work waker; 180 struct delayed_work waker;
181 struct delayed_work no_space_timeout;
178 182
179 unsigned long last_commit_jiffies; 183 unsigned long last_commit_jiffies;
180 unsigned ref_count; 184 unsigned ref_count;
@@ -232,6 +236,13 @@ struct thin_c {
232 struct bio_list deferred_bio_list; 236 struct bio_list deferred_bio_list;
233 struct bio_list retry_on_resume_list; 237 struct bio_list retry_on_resume_list;
234 struct rb_root sort_bio_list; /* sorted list of deferred bios */ 238 struct rb_root sort_bio_list; /* sorted list of deferred bios */
239
240 /*
241 * Ensures the thin is not destroyed until the worker has finished
242 * iterating the active_thins list.
243 */
244 atomic_t refcount;
245 struct completion can_destroy;
235}; 246};
236 247
237/*----------------------------------------------------------------*/ 248/*----------------------------------------------------------------*/
@@ -928,7 +939,7 @@ static int commit(struct pool *pool)
928{ 939{
929 int r; 940 int r;
930 941
931 if (get_pool_mode(pool) != PM_WRITE) 942 if (get_pool_mode(pool) >= PM_READ_ONLY)
932 return -EINVAL; 943 return -EINVAL;
933 944
934 r = dm_pool_commit_metadata(pool->pmd); 945 r = dm_pool_commit_metadata(pool->pmd);
@@ -1486,6 +1497,45 @@ static void process_thin_deferred_bios(struct thin_c *tc)
1486 blk_finish_plug(&plug); 1497 blk_finish_plug(&plug);
1487} 1498}
1488 1499
1500static void thin_get(struct thin_c *tc);
1501static void thin_put(struct thin_c *tc);
1502
1503/*
1504 * We can't hold rcu_read_lock() around code that can block. So we
1505 * find a thin with the rcu lock held; bump a refcount; then drop
1506 * the lock.
1507 */
1508static struct thin_c *get_first_thin(struct pool *pool)
1509{
1510 struct thin_c *tc = NULL;
1511
1512 rcu_read_lock();
1513 if (!list_empty(&pool->active_thins)) {
1514 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
1515 thin_get(tc);
1516 }
1517 rcu_read_unlock();
1518
1519 return tc;
1520}
1521
1522static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
1523{
1524 struct thin_c *old_tc = tc;
1525
1526 rcu_read_lock();
1527 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
1528 thin_get(tc);
1529 thin_put(old_tc);
1530 rcu_read_unlock();
1531 return tc;
1532 }
1533 thin_put(old_tc);
1534 rcu_read_unlock();
1535
1536 return NULL;
1537}
1538
1489static void process_deferred_bios(struct pool *pool) 1539static void process_deferred_bios(struct pool *pool)
1490{ 1540{
1491 unsigned long flags; 1541 unsigned long flags;
@@ -1493,10 +1543,11 @@ static void process_deferred_bios(struct pool *pool)
1493 struct bio_list bios; 1543 struct bio_list bios;
1494 struct thin_c *tc; 1544 struct thin_c *tc;
1495 1545
1496 rcu_read_lock(); 1546 tc = get_first_thin(pool);
1497 list_for_each_entry_rcu(tc, &pool->active_thins, list) 1547 while (tc) {
1498 process_thin_deferred_bios(tc); 1548 process_thin_deferred_bios(tc);
1499 rcu_read_unlock(); 1549 tc = get_next_thin(pool, tc);
1550 }
1500 1551
1501 /* 1552 /*
1502 * If there are any deferred flush bios, we must commit 1553 * If there are any deferred flush bios, we must commit
@@ -1543,6 +1594,20 @@ static void do_waker(struct work_struct *ws)
1543 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); 1594 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1544} 1595}
1545 1596
1597/*
1598 * We're holding onto IO to allow userland time to react. After the
1599 * timeout either the pool will have been resized (and thus back in
1600 * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
1601 */
1602static void do_no_space_timeout(struct work_struct *ws)
1603{
1604 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
1605 no_space_timeout);
1606
1607 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
1608 set_pool_mode(pool, PM_READ_ONLY);
1609}
1610
1546/*----------------------------------------------------------------*/ 1611/*----------------------------------------------------------------*/
1547 1612
1548struct noflush_work { 1613struct noflush_work {
@@ -1578,7 +1643,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
1578{ 1643{
1579 struct noflush_work w; 1644 struct noflush_work w;
1580 1645
1581 INIT_WORK(&w.worker, fn); 1646 INIT_WORK_ONSTACK(&w.worker, fn);
1582 w.tc = tc; 1647 w.tc = tc;
1583 atomic_set(&w.complete, 0); 1648 atomic_set(&w.complete, 0);
1584 init_waitqueue_head(&w.wait); 1649 init_waitqueue_head(&w.wait);
@@ -1607,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1607 struct pool_c *pt = pool->ti->private; 1672 struct pool_c *pt = pool->ti->private;
1608 bool needs_check = dm_pool_metadata_needs_check(pool->pmd); 1673 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
1609 enum pool_mode old_mode = get_pool_mode(pool); 1674 enum pool_mode old_mode = get_pool_mode(pool);
1675 unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
1610 1676
1611 /* 1677 /*
1612 * Never allow the pool to transition to PM_WRITE mode if user 1678 * Never allow the pool to transition to PM_WRITE mode if user
@@ -1668,6 +1734,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1668 pool->process_discard = process_discard; 1734 pool->process_discard = process_discard;
1669 pool->process_prepared_mapping = process_prepared_mapping; 1735 pool->process_prepared_mapping = process_prepared_mapping;
1670 pool->process_prepared_discard = process_prepared_discard_passdown; 1736 pool->process_prepared_discard = process_prepared_discard_passdown;
1737
1738 if (!pool->pf.error_if_no_space && no_space_timeout)
1739 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
1671 break; 1740 break;
1672 1741
1673 case PM_WRITE: 1742 case PM_WRITE:
@@ -2053,6 +2122,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
2053 2122
2054 INIT_WORK(&pool->worker, do_worker); 2123 INIT_WORK(&pool->worker, do_worker);
2055 INIT_DELAYED_WORK(&pool->waker, do_waker); 2124 INIT_DELAYED_WORK(&pool->waker, do_waker);
2125 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
2056 spin_lock_init(&pool->lock); 2126 spin_lock_init(&pool->lock);
2057 bio_list_init(&pool->deferred_flush_bios); 2127 bio_list_init(&pool->deferred_flush_bios);
2058 INIT_LIST_HEAD(&pool->prepared_mappings); 2128 INIT_LIST_HEAD(&pool->prepared_mappings);
@@ -2615,6 +2685,7 @@ static void pool_postsuspend(struct dm_target *ti)
2615 struct pool *pool = pt->pool; 2685 struct pool *pool = pt->pool;
2616 2686
2617 cancel_delayed_work(&pool->waker); 2687 cancel_delayed_work(&pool->waker);
2688 cancel_delayed_work(&pool->no_space_timeout);
2618 flush_workqueue(pool->wq); 2689 flush_workqueue(pool->wq);
2619 (void) commit(pool); 2690 (void) commit(pool);
2620} 2691}
@@ -3061,11 +3132,25 @@ static struct target_type pool_target = {
3061/*---------------------------------------------------------------- 3132/*----------------------------------------------------------------
3062 * Thin target methods 3133 * Thin target methods
3063 *--------------------------------------------------------------*/ 3134 *--------------------------------------------------------------*/
3135static void thin_get(struct thin_c *tc)
3136{
3137 atomic_inc(&tc->refcount);
3138}
3139
3140static void thin_put(struct thin_c *tc)
3141{
3142 if (atomic_dec_and_test(&tc->refcount))
3143 complete(&tc->can_destroy);
3144}
3145
3064static void thin_dtr(struct dm_target *ti) 3146static void thin_dtr(struct dm_target *ti)
3065{ 3147{
3066 struct thin_c *tc = ti->private; 3148 struct thin_c *tc = ti->private;
3067 unsigned long flags; 3149 unsigned long flags;
3068 3150
3151 thin_put(tc);
3152 wait_for_completion(&tc->can_destroy);
3153
3069 spin_lock_irqsave(&tc->pool->lock, flags); 3154 spin_lock_irqsave(&tc->pool->lock, flags);
3070 list_del_rcu(&tc->list); 3155 list_del_rcu(&tc->list);
3071 spin_unlock_irqrestore(&tc->pool->lock, flags); 3156 spin_unlock_irqrestore(&tc->pool->lock, flags);
@@ -3101,6 +3186,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3101 struct thin_c *tc; 3186 struct thin_c *tc;
3102 struct dm_dev *pool_dev, *origin_dev; 3187 struct dm_dev *pool_dev, *origin_dev;
3103 struct mapped_device *pool_md; 3188 struct mapped_device *pool_md;
3189 unsigned long flags;
3104 3190
3105 mutex_lock(&dm_thin_pool_table.mutex); 3191 mutex_lock(&dm_thin_pool_table.mutex);
3106 3192
@@ -3191,9 +3277,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3191 3277
3192 mutex_unlock(&dm_thin_pool_table.mutex); 3278 mutex_unlock(&dm_thin_pool_table.mutex);
3193 3279
3194 spin_lock(&tc->pool->lock); 3280 atomic_set(&tc->refcount, 1);
3281 init_completion(&tc->can_destroy);
3282
3283 spin_lock_irqsave(&tc->pool->lock, flags);
3195 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); 3284 list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
3196 spin_unlock(&tc->pool->lock); 3285 spin_unlock_irqrestore(&tc->pool->lock, flags);
3197 /* 3286 /*
3198 * This synchronize_rcu() call is needed here otherwise we risk a 3287 * This synchronize_rcu() call is needed here otherwise we risk a
3199 * wake_worker() call finding no bios to process (because the newly 3288 * wake_worker() call finding no bios to process (because the newly
@@ -3422,6 +3511,9 @@ static void dm_thin_exit(void)
3422module_init(dm_thin_init); 3511module_init(dm_thin_init);
3423module_exit(dm_thin_exit); 3512module_exit(dm_thin_exit);
3424 3513
3514module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
3515MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
3516
3425MODULE_DESCRIPTION(DM_NAME " thin provisioning target"); 3517MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
3426MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 3518MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3427MODULE_LICENSE("GPL"); 3519MODULE_LICENSE("GPL");
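
Editor's note: the largest dm-thin change pairs an RCU-protected list walk with a per-element refcount and a completion, so the worker can drop rcu_read_lock() (and block) while still preventing the element from being destroyed under it; thin_dtr() then drops the initial reference and waits on the completion. A rough sketch of that lifetime scheme, with hypothetical names and the element assumed to start life with refcount 1 and an initialized completion:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct my_elem {
	struct list_head list;		/* on an RCU-protected list */
	atomic_t refcount;
	struct completion can_destroy;
};

static void my_get(struct my_elem *e)
{
	atomic_inc(&e->refcount);
}

static void my_put(struct my_elem *e)
{
	if (atomic_dec_and_test(&e->refcount))
		complete(&e->can_destroy);
}

/* Pin the first element under rcu_read_lock(), then drop the lock so
 * the caller may block while using it. */
static struct my_elem *my_get_first(struct list_head *head)
{
	struct my_elem *e = NULL;

	rcu_read_lock();
	if (!list_empty(head)) {
		e = list_entry_rcu(head->next, struct my_elem, list);
		my_get(e);
	}
	rcu_read_unlock();
	return e;
}

/* Destruction: drop the reference taken at creation time and wait
 * until every iterator has done the same. */
static void my_destroy(struct my_elem *e)
{
	my_put(e);
	wait_for_completion(&e->can_destroy);
	kfree(e);			/* assumes it was already unlinked */
}

The no_space_timeout additions are independent of this: a delayed work item is queued when the pool enters PM_OUT_OF_DATA_SPACE and demotes it to read-only if user space never resizes the data device within the (module-parameter controlled) timeout.
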
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 796007a5e0e1..7a7bab8947ae 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -330,15 +330,17 @@ test_block_hash:
330 return r; 330 return r;
331 } 331 }
332 } 332 }
333
334 todo = 1 << v->data_dev_block_bits; 333 todo = 1 << v->data_dev_block_bits;
335 while (io->iter.bi_size) { 334 do {
336 u8 *page; 335 u8 *page;
336 unsigned len;
337 struct bio_vec bv = bio_iter_iovec(bio, io->iter); 337 struct bio_vec bv = bio_iter_iovec(bio, io->iter);
338 338
339 page = kmap_atomic(bv.bv_page); 339 page = kmap_atomic(bv.bv_page);
340 r = crypto_shash_update(desc, page + bv.bv_offset, 340 len = bv.bv_len;
341 bv.bv_len); 341 if (likely(len >= todo))
342 len = todo;
343 r = crypto_shash_update(desc, page + bv.bv_offset, len);
342 kunmap_atomic(page); 344 kunmap_atomic(page);
343 345
344 if (r < 0) { 346 if (r < 0) {
@@ -346,8 +348,9 @@ test_block_hash:
346 return r; 348 return r;
347 } 349 }
348 350
349 bio_advance_iter(bio, &io->iter, bv.bv_len); 351 bio_advance_iter(bio, &io->iter, len);
350 } 352 todo -= len;
353 } while (todo);
351 354
352 if (!v->version) { 355 if (!v->version) {
353 r = crypto_shash_update(desc, v->salt, v->salt_size); 356 r = crypto_shash_update(desc, v->salt, v->salt_size);
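
Editor's note: the dm-verity hunk bounds each crypto_shash_update() to the bytes remaining in the current data block instead of trusting bv_len, and loops until exactly one block has been hashed. A minimal sketch of that clamped walk, using the same kernel APIs but a hypothetical helper name:

#include <linux/bio.h>
#include <linux/highmem.h>
#include <crypto/hash.h>

static int my_hash_one_block(struct shash_desc *desc, struct bio *bio,
			     struct bvec_iter *iter, unsigned block_bytes)
{
	unsigned todo = block_bytes;
	int r;

	do {
		struct bio_vec bv = bio_iter_iovec(bio, *iter);
		unsigned len = min(bv.bv_len, todo);	/* never cross the block */
		u8 *page = kmap_atomic(bv.bv_page);

		r = crypto_shash_update(desc, page + bv.bv_offset, len);
		kunmap_atomic(page);
		if (r < 0)
			return r;

		bio_advance_iter(bio, iter, len);
		todo -= len;
	} while (todo);

	return 0;
}
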
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8fda38d23e38..237b7e0ddc7a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8516,7 +8516,8 @@ static int md_notify_reboot(struct notifier_block *this,
8516 if (mddev_trylock(mddev)) { 8516 if (mddev_trylock(mddev)) {
8517 if (mddev->pers) 8517 if (mddev->pers)
8518 __md_stop_writes(mddev); 8518 __md_stop_writes(mddev);
8519 mddev->safemode = 2; 8519 if (mddev->persistent)
8520 mddev->safemode = 2;
8520 mddev_unlock(mddev); 8521 mddev_unlock(mddev);
8521 } 8522 }
8522 need_delay = 1; 8523 need_delay = 1;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 33fc408e5eac..cb882aae9e20 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1172,6 +1172,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
1172 int max_sectors; 1172 int max_sectors;
1173 int sectors; 1173 int sectors;
1174 1174
1175 /*
1176 * Register the new request and wait if the reconstruction
1177 * thread has put up a bar for new requests.
1178 * Continue immediately if no resync is active currently.
1179 */
1180 wait_barrier(conf);
1181
1175 sectors = bio_sectors(bio); 1182 sectors = bio_sectors(bio);
1176 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1183 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1177 bio->bi_iter.bi_sector < conf->reshape_progress && 1184 bio->bi_iter.bi_sector < conf->reshape_progress &&
@@ -1552,12 +1559,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
1552 1559
1553 md_write_start(mddev, bio); 1560 md_write_start(mddev, bio);
1554 1561
1555 /*
1556 * Register the new request and wait if the reconstruction
1557 * thread has put up a bar for new requests.
1558 * Continue immediately if no resync is active currently.
1559 */
1560 wait_barrier(conf);
1561 1562
1562 do { 1563 do {
1563 1564
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 25247a852912..ad1b9bea446e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4370,8 +4370,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
4370 sh->group = NULL; 4370 sh->group = NULL;
4371 } 4371 }
4372 list_del_init(&sh->lru); 4372 list_del_init(&sh->lru);
4373 atomic_inc(&sh->count); 4373 BUG_ON(atomic_inc_return(&sh->count) != 1);
4374 BUG_ON(atomic_read(&sh->count) != 1);
4375 return sh; 4374 return sh;
4376} 4375}
4377 4376
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index e8a1ce204036..cdd7c1b7259b 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1109,7 +1109,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
1109 * windows that fall outside that. 1109 * windows that fall outside that.
1110 */ 1110 */
1111 for (i = 0; i < n_win_sizes; i++) { 1111 for (i = 0; i < n_win_sizes; i++) {
1112 struct ov7670_win_size *win = &info->devtype->win_sizes[index]; 1112 struct ov7670_win_size *win = &info->devtype->win_sizes[i];
1113 if (info->min_width && win->width < info->min_width) 1113 if (info->min_width && win->width < info->min_width)
1114 continue; 1114 continue;
1115 if (info->min_height && win->height < info->min_height) 1115 if (info->min_height && win->height < info->min_height)
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index a4459301b5f8..ee0f57e01b56 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1616,7 +1616,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
1616 if (ret < 0) 1616 if (ret < 0)
1617 return -EINVAL; 1617 return -EINVAL;
1618 1618
1619 node_ep = v4l2_of_get_next_endpoint(node, NULL); 1619 node_ep = of_graph_get_next_endpoint(node, NULL);
1620 if (!node_ep) { 1620 if (!node_ep) {
1621 dev_warn(dev, "no endpoint defined for node: %s\n", 1621 dev_warn(dev, "no endpoint defined for node: %s\n",
1622 node->full_name); 1622 node->full_name);
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index d5a7a135f75d..703560fa5e73 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev,
93 struct media_entity *ent; 93 struct media_entity *ent;
94 struct media_entity_desc u_ent; 94 struct media_entity_desc u_ent;
95 95
96 memset(&u_ent, 0, sizeof(u_ent));
96 if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id))) 97 if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
97 return -EFAULT; 98 return -EFAULT;
98 99
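
Editor's note: the one-line media-device.c fix zeroes the on-stack descriptor before it is partially filled and later copied back to user space; without the memset, unset fields and padding bytes would leak kernel stack contents. The general shape of the rule, shown with a hypothetical ioctl payload:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct my_desc {
	__u32 id;
	__u32 flags;
	char  name[32];
};

static long my_enum_ioctl(struct my_desc __user *uptr)
{
	struct my_desc d;

	memset(&d, 0, sizeof(d));	/* clear unset fields and padding */
	if (copy_from_user(&d.id, &uptr->id, sizeof(d.id)))
		return -EFAULT;

	/* ... look up the object for d.id and fill in the known fields ... */

	if (copy_to_user(uptr, &d, sizeof(d)))
		return -EFAULT;
	return 0;
}
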
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index c137abfa0c54..20f1655e6d75 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -56,7 +56,7 @@ config VIDEO_VIU
56 56
57config VIDEO_TIMBERDALE 57config VIDEO_TIMBERDALE
58 tristate "Support for timberdale Video In/LogiWIN" 58 tristate "Support for timberdale Video In/LogiWIN"
59 depends on VIDEO_V4L2 && I2C && DMADEVICES 59 depends on MFD_TIMBERDALE && VIDEO_V4L2 && I2C && DMADEVICES
60 select DMA_ENGINE 60 select DMA_ENGINE
61 select TIMB_DMA 61 select TIMB_DMA
62 select VIDEO_ADV7180 62 select VIDEO_ADV7180
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index b4f12d00be05..656708252962 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -372,18 +372,32 @@ static int vpbe_stop_streaming(struct vb2_queue *vq)
372{ 372{
373 struct vpbe_fh *fh = vb2_get_drv_priv(vq); 373 struct vpbe_fh *fh = vb2_get_drv_priv(vq);
374 struct vpbe_layer *layer = fh->layer; 374 struct vpbe_layer *layer = fh->layer;
375 struct vpbe_display *disp = fh->disp_dev;
376 unsigned long flags;
375 377
376 if (!vb2_is_streaming(vq)) 378 if (!vb2_is_streaming(vq))
377 return 0; 379 return 0;
378 380
379 /* release all active buffers */ 381 /* release all active buffers */
382 spin_lock_irqsave(&disp->dma_queue_lock, flags);
383 if (layer->cur_frm == layer->next_frm) {
384 vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR);
385 } else {
386 if (layer->cur_frm != NULL)
387 vb2_buffer_done(&layer->cur_frm->vb,
388 VB2_BUF_STATE_ERROR);
389 if (layer->next_frm != NULL)
390 vb2_buffer_done(&layer->next_frm->vb,
391 VB2_BUF_STATE_ERROR);
392 }
393
380 while (!list_empty(&layer->dma_queue)) { 394 while (!list_empty(&layer->dma_queue)) {
381 layer->next_frm = list_entry(layer->dma_queue.next, 395 layer->next_frm = list_entry(layer->dma_queue.next,
382 struct vpbe_disp_buffer, list); 396 struct vpbe_disp_buffer, list);
383 list_del(&layer->next_frm->list); 397 list_del(&layer->next_frm->list);
384 vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR); 398 vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
385 } 399 }
386 400 spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
387 return 0; 401 return 0;
388} 402}
389 403
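
Editor's note: the vpbe_display stop_streaming change (and the matching vpif ones further down) hands every buffer the driver still owns back to videobuf2 as errored, including the frame(s) currently latched in cur_frm/next_frm, and does so under the DMA-queue lock. In generic form, with my_layer and my_buf as placeholders for the driver's own types:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <media/videobuf2-core.h>

struct my_buf {
	struct vb2_buffer vb;
	struct list_head list;
};

struct my_layer {
	spinlock_t queue_lock;
	struct list_head dma_queue;
	struct my_buf *cur_frm;		/* frame currently scanned out */
	struct my_buf *next_frm;	/* frame latched for the next vsync */
};

static void my_return_all_buffers(struct my_layer *layer)
{
	unsigned long flags;

	spin_lock_irqsave(&layer->queue_lock, flags);

	/* frame(s) already handed to the hardware; when cur == next it is
	 * one buffer and must be completed only once */
	if (layer->cur_frm)
		vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR);
	if (layer->next_frm && layer->next_frm != layer->cur_frm)
		vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);

	/* buffers still queued and waiting */
	while (!list_empty(&layer->dma_queue)) {
		struct my_buf *buf = list_first_entry(&layer->dma_queue,
						      struct my_buf, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&layer->queue_lock, flags);
}
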
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index d762246eabf5..0379cb9f9a9c 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -734,6 +734,8 @@ static int vpfe_release(struct file *file)
734 } 734 }
735 vpfe_dev->io_usrs = 0; 735 vpfe_dev->io_usrs = 0;
736 vpfe_dev->numbuffers = config_params.numbuffers; 736 vpfe_dev->numbuffers = config_params.numbuffers;
737 videobuf_stop(&vpfe_dev->buffer_queue);
738 videobuf_mmap_free(&vpfe_dev->buffer_queue);
737 } 739 }
738 740
739 /* Decrement device usrs counter */ 741 /* Decrement device usrs counter */
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 756da78bac23..8dea0b84a3ad 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -358,8 +358,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
358 358
359 common = &ch->common[VPIF_VIDEO_INDEX]; 359 common = &ch->common[VPIF_VIDEO_INDEX];
360 360
361 /* Disable channel as per its device type and channel id */
362 if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
363 enable_channel0(0);
364 channel0_intr_enable(0);
365 }
366 if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
367 (2 == common->started)) {
368 enable_channel1(0);
369 channel1_intr_enable(0);
370 }
371 common->started = 0;
372
361 /* release all active buffers */ 373 /* release all active buffers */
362 spin_lock_irqsave(&common->irqlock, flags); 374 spin_lock_irqsave(&common->irqlock, flags);
375 if (common->cur_frm == common->next_frm) {
376 vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
377 } else {
378 if (common->cur_frm != NULL)
379 vb2_buffer_done(&common->cur_frm->vb,
380 VB2_BUF_STATE_ERROR);
381 if (common->next_frm != NULL)
382 vb2_buffer_done(&common->next_frm->vb,
383 VB2_BUF_STATE_ERROR);
384 }
385
363 while (!list_empty(&common->dma_queue)) { 386 while (!list_empty(&common->dma_queue)) {
364 common->next_frm = list_entry(common->dma_queue.next, 387 common->next_frm = list_entry(common->dma_queue.next,
365 struct vpif_cap_buffer, list); 388 struct vpif_cap_buffer, list);
@@ -933,17 +956,6 @@ static int vpif_release(struct file *filep)
933 if (fh->io_allowed[VPIF_VIDEO_INDEX]) { 956 if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
934 /* Reset io_usrs member of channel object */ 957 /* Reset io_usrs member of channel object */
935 common->io_usrs = 0; 958 common->io_usrs = 0;
936 /* Disable channel as per its device type and channel id */
937 if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
938 enable_channel0(0);
939 channel0_intr_enable(0);
940 }
941 if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
942 (2 == common->started)) {
943 enable_channel1(0);
944 channel1_intr_enable(0);
945 }
946 common->started = 0;
947 /* Free buffers allocated */ 959 /* Free buffers allocated */
948 vb2_queue_release(&common->buffer_queue); 960 vb2_queue_release(&common->buffer_queue);
949 vb2_dma_contig_cleanup_ctx(common->alloc_ctx); 961 vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 0ac841e35aa4..aed41edd0501 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -320,8 +320,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
320 320
321 common = &ch->common[VPIF_VIDEO_INDEX]; 321 common = &ch->common[VPIF_VIDEO_INDEX];
322 322
323 /* Disable channel */
324 if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
325 enable_channel2(0);
326 channel2_intr_enable(0);
327 }
328 if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
329 (2 == common->started)) {
330 enable_channel3(0);
331 channel3_intr_enable(0);
332 }
333 common->started = 0;
334
323 /* release all active buffers */ 335 /* release all active buffers */
324 spin_lock_irqsave(&common->irqlock, flags); 336 spin_lock_irqsave(&common->irqlock, flags);
337 if (common->cur_frm == common->next_frm) {
338 vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
339 } else {
340 if (common->cur_frm != NULL)
341 vb2_buffer_done(&common->cur_frm->vb,
342 VB2_BUF_STATE_ERROR);
343 if (common->next_frm != NULL)
344 vb2_buffer_done(&common->next_frm->vb,
345 VB2_BUF_STATE_ERROR);
346 }
347
325 while (!list_empty(&common->dma_queue)) { 348 while (!list_empty(&common->dma_queue)) {
326 common->next_frm = list_entry(common->dma_queue.next, 349 common->next_frm = list_entry(common->dma_queue.next,
327 struct vpif_disp_buffer, list); 350 struct vpif_disp_buffer, list);
@@ -773,18 +796,6 @@ static int vpif_release(struct file *filep)
773 if (fh->io_allowed[VPIF_VIDEO_INDEX]) { 796 if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
774 /* Reset io_usrs member of channel object */ 797 /* Reset io_usrs member of channel object */
775 common->io_usrs = 0; 798 common->io_usrs = 0;
776 /* Disable channel */
777 if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
778 enable_channel2(0);
779 channel2_intr_enable(0);
780 }
781 if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
782 (2 == common->started)) {
783 enable_channel3(0);
784 channel3_intr_enable(0);
785 }
786 common->started = 0;
787
788 /* Free buffers allocated */ 799 /* Free buffers allocated */
789 vb2_queue_release(&common->buffer_queue); 800 vb2_queue_release(&common->buffer_queue);
790 vb2_dma_contig_cleanup_ctx(common->alloc_ctx); 801 vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index da2fc86cc524..25dbf5b05a96 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -122,7 +122,7 @@ static struct fimc_fmt fimc_formats[] = {
122 }, { 122 }, {
123 .name = "YUV 4:2:2 planar, Y/Cb/Cr", 123 .name = "YUV 4:2:2 planar, Y/Cb/Cr",
124 .fourcc = V4L2_PIX_FMT_YUV422P, 124 .fourcc = V4L2_PIX_FMT_YUV422P,
125 .depth = { 12 }, 125 .depth = { 16 },
126 .color = FIMC_FMT_YCBYCR422, 126 .color = FIMC_FMT_YCBYCR422,
127 .memplanes = 1, 127 .memplanes = 1,
128 .colplanes = 3, 128 .colplanes = 3,
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 3aecaf465094..f0c9c42867de 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe)
195 195
196 f_ref = 2UL * priv->cfg->clock / r_val; 196 f_ref = 2UL * priv->cfg->clock / r_val;
197 n_val = div_u64_rem(f_vco, f_ref, &k_val); 197 n_val = div_u64_rem(f_vco, f_ref, &k_val);
198 k_val_reg = 1UL * k_val * (1 << 20) / f_ref; 198 k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref);
199 199
200 ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff)); 200 ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff));
201 if (ret < 0) 201 if (ret < 0)
@@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe)
348 if (ret < 0) 348 if (ret < 0)
349 goto err; 349 goto err;
350 350
351 ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \ 351 ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock *
352 fc2580_if_filter_lut[i].mul / 1000000000); 352 fc2580_if_filter_lut[i].mul, 1000000000));
353 if (ret < 0) 353 if (ret < 0)
354 goto err; 354 goto err;
355 355
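
Editor's note: both fc2580 hunks replace a plain '/' on a 64-bit product with div_u64(); the kernel provides no libgcc 64-bit division helper on 32-bit architectures, so open-coded u64 division either fails to link or silently truncates if the product is computed in 32 bits. A small sketch of the rule, with hypothetical names:

#include <linux/types.h>
#include <linux/math64.h>

/* k_val * 2^20 can exceed 32 bits, so widen first and divide with
 * div_u64(), which works on both 32- and 64-bit builds. */
static u32 my_scale_to_reg(u32 k_val, u32 f_ref)
{
	u64 num = (u64)k_val << 20;

	return (u32)div_u64(num, f_ref);
}
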
diff --git a/drivers/media/tuners/fc2580_priv.h b/drivers/media/tuners/fc2580_priv.h
index be38a9e637e0..646c99452136 100644
--- a/drivers/media/tuners/fc2580_priv.h
+++ b/drivers/media/tuners/fc2580_priv.h
@@ -22,6 +22,7 @@
22#define FC2580_PRIV_H 22#define FC2580_PRIV_H
23 23
24#include "fc2580.h" 24#include "fc2580.h"
25#include <linux/math64.h>
25 26
26struct fc2580_reg_val { 27struct fc2580_reg_val {
27 u8 reg; 28 u8 reg;
diff --git a/drivers/media/usb/dvb-usb-v2/Makefile b/drivers/media/usb/dvb-usb-v2/Makefile
index 7407b8338ccf..bc38f03394cd 100644
--- a/drivers/media/usb/dvb-usb-v2/Makefile
+++ b/drivers/media/usb/dvb-usb-v2/Makefile
@@ -41,4 +41,3 @@ ccflags-y += -I$(srctree)/drivers/media/dvb-core
41ccflags-y += -I$(srctree)/drivers/media/dvb-frontends 41ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
42ccflags-y += -I$(srctree)/drivers/media/tuners 42ccflags-y += -I$(srctree)/drivers/media/tuners
43ccflags-y += -I$(srctree)/drivers/media/common 43ccflags-y += -I$(srctree)/drivers/media/common
44ccflags-y += -I$(srctree)/drivers/staging/media/rtl2832u_sdr
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 61d196e8b3ab..dcbd392e6efc 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -24,7 +24,6 @@
24 24
25#include "rtl2830.h" 25#include "rtl2830.h"
26#include "rtl2832.h" 26#include "rtl2832.h"
27#include "rtl2832_sdr.h"
28 27
29#include "qt1010.h" 28#include "qt1010.h"
30#include "mt2060.h" 29#include "mt2060.h"
@@ -36,6 +35,45 @@
36#include "tua9001.h" 35#include "tua9001.h"
37#include "r820t.h" 36#include "r820t.h"
38 37
38/*
 39 * The RTL2832_SDR module is in staging. This logic is added in order to
 40 * avoid a hard dependency on the drivers/staging/ directory, as we want to
 41 * compile the mainline driver even when the whole staging directory is missing.
42 */
43#include <media/v4l2-subdev.h>
44
45#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR)
46struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
47 struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
48 struct v4l2_subdev *sd);
49#else
50static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
51 struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
52 struct v4l2_subdev *sd)
53{
54 return NULL;
55}
56#endif
57
58#ifdef CONFIG_MEDIA_ATTACH
59#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
60 void *__r = NULL; \
61 typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
62 if (__a) { \
63 __r = (void *) __a(ARGS); \
64 if (__r == NULL) \
65 symbol_put(FUNCTION); \
66 } \
67 __r; \
68})
69
70#else
71#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
72 FUNCTION(ARGS); \
73})
74
75#endif
76
39static int rtl28xxu_disable_rc; 77static int rtl28xxu_disable_rc;
40module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644); 78module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644);
41MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller"); 79MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller");
@@ -908,7 +946,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
908 adap->fe[0]->ops.tuner_ops.get_rf_strength; 946 adap->fe[0]->ops.tuner_ops.get_rf_strength;
909 947
910 /* attach SDR */ 948 /* attach SDR */
911 dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, 949 dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
912 &rtl28xxu_rtl2832_fc0012_config, NULL); 950 &rtl28xxu_rtl2832_fc0012_config, NULL);
913 break; 951 break;
914 case TUNER_RTL2832_FC0013: 952 case TUNER_RTL2832_FC0013:
@@ -920,7 +958,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
920 adap->fe[0]->ops.tuner_ops.get_rf_strength; 958 adap->fe[0]->ops.tuner_ops.get_rf_strength;
921 959
922 /* attach SDR */ 960 /* attach SDR */
923 dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, 961 dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
924 &rtl28xxu_rtl2832_fc0013_config, NULL); 962 &rtl28xxu_rtl2832_fc0013_config, NULL);
925 break; 963 break;
926 case TUNER_RTL2832_E4000: { 964 case TUNER_RTL2832_E4000: {
@@ -951,7 +989,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
951 i2c_set_adapdata(i2c_adap_internal, d); 989 i2c_set_adapdata(i2c_adap_internal, d);
952 990
953 /* attach SDR */ 991 /* attach SDR */
954 dvb_attach(rtl2832_sdr_attach, adap->fe[0], 992 dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0],
955 i2c_adap_internal, 993 i2c_adap_internal,
956 &rtl28xxu_rtl2832_e4000_config, sd); 994 &rtl28xxu_rtl2832_e4000_config, sd);
957 } 995 }
@@ -982,7 +1020,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
982 adap->fe[0]->ops.tuner_ops.get_rf_strength; 1020 adap->fe[0]->ops.tuner_ops.get_rf_strength;
983 1021
984 /* attach SDR */ 1022 /* attach SDR */
985 dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, 1023 dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
986 &rtl28xxu_rtl2832_r820t_config, NULL); 1024 &rtl28xxu_rtl2832_r820t_config, NULL);
987 break; 1025 break;
988 case TUNER_RTL2832_R828D: 1026 case TUNER_RTL2832_R828D:
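
Editor's note: the rtl28xxu change removes the hard include/link dependency on the staging rtl2832_sdr module; the attach function is declared locally, stubbed out with IS_ENABLED() when the staging driver is not configured, and resolved at run time through symbol_request() via the dvb_attach_sdr() wrapper shown above. The build-time half of that trick, reduced to hypothetical names (CONFIG_MY_SDR, my_sdr_attach):

#include <linux/kconfig.h>

struct dvb_frontend;
struct i2c_adapter;

/*
 * When the optional module is not built, callers transparently get a
 * stub that returns NULL and simply skip the SDR part.
 */
#if IS_ENABLED(CONFIG_MY_SDR)
struct dvb_frontend *my_sdr_attach(struct dvb_frontend *fe,
				   struct i2c_adapter *i2c);
#else
static inline struct dvb_frontend *my_sdr_attach(struct dvb_frontend *fe,
						 struct i2c_adapter *i2c)
{
	return NULL;
}
#endif

The runtime half (symbol_request()/symbol_put() under CONFIG_MEDIA_ATTACH) appears verbatim in the hunk above, so the staging module is only pinned while an attach actually succeeded.
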
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 7277dbd2afcd..ecbcb39feb71 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -1430,10 +1430,8 @@ static const struct usb_device_id device_table[] = {
1430 {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)}, 1430 {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
1431 {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)}, 1431 {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
1432 {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)}, 1432 {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
1433#if !IS_ENABLED(CONFIG_USB_SN9C102)
1434 {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)}, 1433 {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
1435 {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)}, 1434 {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
1436#endif
1437 {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */ 1435 {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */
1438 {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)}, 1436 {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
1439 {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)}, 1437 {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 04b2daf567be..7e2411c36419 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
178 178
179static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 179static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
180{ 180{
181 if (get_user(kp->type, &up->type))
182 return -EFAULT;
183
181 switch (kp->type) { 184 switch (kp->type) {
182 case V4L2_BUF_TYPE_VIDEO_CAPTURE: 185 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
183 case V4L2_BUF_TYPE_VIDEO_OUTPUT: 186 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
204 207
205static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) 208static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
206{ 209{
207 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) || 210 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
208 get_user(kp->type, &up->type)) 211 return -EFAULT;
209 return -EFAULT;
210 return __get_v4l2_format32(kp, up); 212 return __get_v4l2_format32(kp, up);
211} 213}
212 214
213static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) 215static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
214{ 216{
215 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || 217 if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
216 copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt))) 218 copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
217 return -EFAULT; 219 return -EFAULT;
218 return __get_v4l2_format32(&kp->format, &up->format); 220 return __get_v4l2_format32(&kp->format, &up->format);
219} 221}
220 222
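
Editor's note: the compat32 change moves the get_user() of the type field into __get_v4l2_format32() and shrinks the copy_from_user() in get_v4l2_create32() to stop before the embedded format, so the tagged union is always translated by the type-aware helper rather than copied blindly. The pattern in isolation, with made-up structures and identical 32/64-bit layouts assumed to keep the sketch short:

#include <linux/types.h>
#include <linux/uaccess.h>

enum { MY_TYPE_A = 1, MY_TYPE_B = 2 };

struct my_fmt {
	__u32 type;
	union {
		__u32 a[4];
		__u32 b[8];
	} u;
};

static int get_my_fmt32(struct my_fmt *kp, struct my_fmt __user *up)
{
	/* read the discriminant first ... */
	if (get_user(kp->type, &up->type))
		return -EFAULT;

	/* ... then copy only the member it selects */
	switch (kp->type) {
	case MY_TYPE_A:
		return copy_from_user(&kp->u.a, &up->u.a,
				      sizeof(kp->u.a)) ? -EFAULT : 0;
	case MY_TYPE_B:
		return copy_from_user(&kp->u.b, &up->u.b,
				      sizeof(kp->u.b)) ? -EFAULT : 0;
	default:
		return -EINVAL;
	}
}
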
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index 110c03627051..b59a17fb7c3e 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -108,8 +108,19 @@ static int devbus_set_timing_params(struct devbus *devbus,
108 node->full_name); 108 node->full_name);
109 return err; 109 return err;
110 } 110 }
111 /* Convert bit width to byte width */ 111
112 r.bus_width /= 8; 112 /*
113 * The bus width is encoded into the register as 0 for 8 bits,
114 * and 1 for 16 bits, so we do the necessary conversion here.
115 */
116 if (r.bus_width == 8)
117 r.bus_width = 0;
118 else if (r.bus_width == 16)
119 r.bus_width = 1;
120 else {
121 dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width);
122 return -EINVAL;
123 }
113 124
114 err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps", 125 err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
115 &r.badr_skew); 126 &r.badr_skew);
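
Editor's note: the mvebu-devbus fix stops dividing the devicetree bus width by 8 and instead maps the only two legal values onto the register encoding, rejecting everything else. As a standalone helper (hypothetical name):

#include <linux/device.h>
#include <linux/errno.h>

/* Map a devicetree bus width in bits (8 or 16) to the 0/1 register
 * encoding; anything else is a DT bug and is reported, not written. */
static int my_bus_width_to_reg(struct device *dev, u32 bus_width, u32 *reg)
{
	switch (bus_width) {
	case 8:
		*reg = 0;
		return 0;
	case 16:
		*reg = 1;
		return 0;
	default:
		dev_err(dev, "invalid bus width %u\n", bus_width);
		return -EINVAL;
	}
}
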
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index c9de3d598ea5..1d15735f9ef9 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -338,28 +338,58 @@ int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
338 int num_sg, bool read, int timeout) 338 int num_sg, bool read, int timeout)
339{ 339{
340 struct completion trans_done; 340 struct completion trans_done;
341 int err = 0, count; 341 u8 dir;
342 int err = 0, i, count;
342 long timeleft; 343 long timeleft;
343 unsigned long flags; 344 unsigned long flags;
345 struct scatterlist *sg;
346 enum dma_data_direction dma_dir;
347 u32 val;
348 dma_addr_t addr;
349 unsigned int len;
350
351 dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg);
352
353 /* don't transfer data during abort processing */
354 if (pcr->remove_pci)
355 return -EINVAL;
356
357 if ((sglist == NULL) || (num_sg <= 0))
358 return -EINVAL;
344 359
345 count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read); 360 if (read) {
361 dir = DEVICE_TO_HOST;
362 dma_dir = DMA_FROM_DEVICE;
363 } else {
364 dir = HOST_TO_DEVICE;
365 dma_dir = DMA_TO_DEVICE;
366 }
367
368 count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
346 if (count < 1) { 369 if (count < 1) {
347 dev_err(&(pcr->pci->dev), "scatterlist map failed\n"); 370 dev_err(&(pcr->pci->dev), "scatterlist map failed\n");
348 return -EINVAL; 371 return -EINVAL;
349 } 372 }
350 dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count); 373 dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count);
351 374
375 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
376 pcr->sgi = 0;
377 for_each_sg(sglist, sg, count, i) {
378 addr = sg_dma_address(sg);
379 len = sg_dma_len(sg);
380 rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
381 }
352 382
353 spin_lock_irqsave(&pcr->lock, flags); 383 spin_lock_irqsave(&pcr->lock, flags);
354 384
355 pcr->done = &trans_done; 385 pcr->done = &trans_done;
356 pcr->trans_result = TRANS_NOT_READY; 386 pcr->trans_result = TRANS_NOT_READY;
357 init_completion(&trans_done); 387 init_completion(&trans_done);
388 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
389 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
358 390
359 spin_unlock_irqrestore(&pcr->lock, flags); 391 spin_unlock_irqrestore(&pcr->lock, flags);
360 392
361 rtsx_pci_dma_transfer(pcr, sglist, count, read);
362
363 timeleft = wait_for_completion_interruptible_timeout( 393 timeleft = wait_for_completion_interruptible_timeout(
364 &trans_done, msecs_to_jiffies(timeout)); 394 &trans_done, msecs_to_jiffies(timeout));
365 if (timeleft <= 0) { 395 if (timeleft <= 0) {
@@ -383,7 +413,7 @@ out:
383 pcr->done = NULL; 413 pcr->done = NULL;
384 spin_unlock_irqrestore(&pcr->lock, flags); 414 spin_unlock_irqrestore(&pcr->lock, flags);
385 415
386 rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read); 416 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
387 417
388 if ((err < 0) && (err != -ENODEV)) 418 if ((err < 0) && (err != -ENODEV))
389 rtsx_pci_stop_cmd(pcr); 419 rtsx_pci_stop_cmd(pcr);
@@ -395,73 +425,6 @@ out:
395} 425}
396EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data); 426EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
397 427
398int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
399 int num_sg, bool read)
400{
401 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
402
403 if (pcr->remove_pci)
404 return -EINVAL;
405
406 if ((sglist == NULL) || num_sg < 1)
407 return -EINVAL;
408
409 return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
410}
411EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
412
413int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
414 int num_sg, bool read)
415{
416 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
417
418 if (pcr->remove_pci)
419 return -EINVAL;
420
421 if (sglist == NULL || num_sg < 1)
422 return -EINVAL;
423
424 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
425 return num_sg;
426}
427EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
428
429int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
430 int sg_count, bool read)
431{
432 struct scatterlist *sg;
433 dma_addr_t addr;
434 unsigned int len;
435 int i;
436 u32 val;
437 u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
438 unsigned long flags;
439
440 if (pcr->remove_pci)
441 return -EINVAL;
442
443 if ((sglist == NULL) || (sg_count < 1))
444 return -EINVAL;
445
446 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
447 pcr->sgi = 0;
448 for_each_sg(sglist, sg, sg_count, i) {
449 addr = sg_dma_address(sg);
450 len = sg_dma_len(sg);
451 rtsx_pci_add_sg_tbl(pcr, addr, len, i == sg_count - 1);
452 }
453
454 spin_lock_irqsave(&pcr->lock, flags);
455
456 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
457 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
458
459 spin_unlock_irqrestore(&pcr->lock, flags);
460
461 return 0;
462}
463EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
464
465int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len) 428int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
466{ 429{
467 int err; 430 int err;
@@ -873,8 +836,6 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
873 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR); 836 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
874 /* Clear interrupt flag */ 837 /* Clear interrupt flag */
875 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg); 838 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
876 dev_dbg(&pcr->pci->dev, "=========== BIPR 0x%8x ==========\n", int_reg);
877
878 if ((int_reg & pcr->bier) == 0) { 839 if ((int_reg & pcr->bier) == 0) {
879 spin_unlock(&pcr->lock); 840 spin_unlock(&pcr->lock);
880 return IRQ_NONE; 841 return IRQ_NONE;
@@ -905,28 +866,17 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
905 } 866 }
906 867
907 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) { 868 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
908 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) 869 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
909 pcr->trans_result = TRANS_RESULT_FAIL; 870 pcr->trans_result = TRANS_RESULT_FAIL;
910 else if (int_reg & TRANS_OK_INT) 871 if (pcr->done)
872 complete(pcr->done);
873 } else if (int_reg & TRANS_OK_INT) {
911 pcr->trans_result = TRANS_RESULT_OK; 874 pcr->trans_result = TRANS_RESULT_OK;
912 875 if (pcr->done)
913 if (pcr->done) 876 complete(pcr->done);
914 complete(pcr->done);
915
916 if (int_reg & SD_EXIST) {
917 struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD];
918 if (slot && slot->done_transfer)
919 slot->done_transfer(slot->p_dev);
920 }
921
922 if (int_reg & MS_EXIST) {
923 struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD];
924 if (slot && slot->done_transfer)
925 slot->done_transfer(slot->p_dev);
926 } 877 }
927 } 878 }
928 879
929
930 if (pcr->card_inserted || pcr->card_removed) 880 if (pcr->card_inserted || pcr->card_removed)
931 schedule_delayed_work(&pcr->carddet_work, 881 schedule_delayed_work(&pcr->carddet_work,
932 msecs_to_jiffies(200)); 882 msecs_to_jiffies(200));
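
Editor's note: the rtsx_pcr revert folds scatter-gather mapping, descriptor-table setup and the doorbell writes back into rtsx_pci_transfer_data(), so one completion covers the whole operation and the split helper exports go away. The overall map / program / wait / unmap shape, with hypothetical device types and the IRQ-side locking around d->done elided for brevity:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_dev {
	struct device *dev;
	struct completion *done;	/* signalled from the IRQ handler */
};

void my_program_sg(struct my_dev *d, struct scatterlist *sgl,
		   int count, bool read);	/* fill table, ring doorbell */

static int my_transfer(struct my_dev *d, struct scatterlist *sgl,
		       int num_sg, bool read, int timeout_ms)
{
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	DECLARE_COMPLETION_ONSTACK(done);
	int count, err = 0;

	count = dma_map_sg(d->dev, sgl, num_sg, dir);
	if (count < 1)
		return -EINVAL;

	d->done = &done;
	my_program_sg(d, sgl, count, read);

	/* timed out or interrupted: treat both as a failed transfer */
	if (wait_for_completion_interruptible_timeout(&done,
			msecs_to_jiffies(timeout_ms)) <= 0)
		err = -ETIMEDOUT;

	d->done = NULL;
	dma_unmap_sg(d->dev, sgl, num_sg, dir);
	return err;
}
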
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 1cb74085e410..8baff0effc7d 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -300,8 +300,8 @@ config SGI_GRU_DEBUG
300 depends on SGI_GRU 300 depends on SGI_GRU
301 default n 301 default n
302 ---help--- 302 ---help---
303 This option enables addition debugging code for the SGI GRU driver. If 303 This option enables additional debugging code for the SGI GRU driver.
304 you are unsure, say N. 304 If you are unsure, say N.
305 305
306config APDS9802ALS 306config APDS9802ALS
307 tristate "Medfield Avago APDS9802 ALS Sensor module" 307 tristate "Medfield Avago APDS9802 ALS Sensor module"
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 5e4dbd21f89a..0e608a288603 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -337,6 +337,44 @@ enum genwqe_requ_state {
337}; 337};
338 338
339/** 339/**
340 * struct genwqe_sgl - Scatter gather list describing user-space memory
341 * @sgl: scatter gather list needs to be 128 byte aligned
342 * @sgl_dma_addr: dma address of sgl
343 * @sgl_size: size of area used for sgl
344 * @user_addr: user-space address of memory area
345 * @user_size: size of user-space memory area
346 * @page: buffer for partial pages if needed
 347 * @page_dma_addr: dma address of the partial pages
348 */
349struct genwqe_sgl {
350 dma_addr_t sgl_dma_addr;
351 struct sg_entry *sgl;
352 size_t sgl_size; /* size of sgl */
353
354 void __user *user_addr; /* user-space base-address */
355 size_t user_size; /* size of memory area */
356
357 unsigned long nr_pages;
358 unsigned long fpage_offs;
359 size_t fpage_size;
360 size_t lpage_size;
361
362 void *fpage;
363 dma_addr_t fpage_dma_addr;
364
365 void *lpage;
366 dma_addr_t lpage_dma_addr;
367};
368
369int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
370 void __user *user_addr, size_t user_size);
371
372int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
373 dma_addr_t *dma_list);
374
375int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl);
376
377/**
340 * struct ddcb_requ - Kernel internal representation of the DDCB request 378 * struct ddcb_requ - Kernel internal representation of the DDCB request
341 * @cmd: User space representation of the DDCB execution request 379 * @cmd: User space representation of the DDCB execution request
342 */ 380 */
@@ -347,9 +385,7 @@ struct ddcb_requ {
347 struct ddcb_queue *queue; /* associated queue */ 385 struct ddcb_queue *queue; /* associated queue */
348 386
349 struct dma_mapping dma_mappings[DDCB_FIXUPS]; 387 struct dma_mapping dma_mappings[DDCB_FIXUPS];
350 struct sg_entry *sgl[DDCB_FIXUPS]; 388 struct genwqe_sgl sgls[DDCB_FIXUPS];
351 dma_addr_t sgl_dma_addr[DDCB_FIXUPS];
352 size_t sgl_size[DDCB_FIXUPS];
353 389
354 /* kernel/user shared content */ 390 /* kernel/user shared content */
355 struct genwqe_ddcb_cmd cmd; /* ddcb_no for this request */ 391 struct genwqe_ddcb_cmd cmd; /* ddcb_no for this request */
@@ -453,22 +489,6 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m,
453int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m, 489int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
454 struct ddcb_requ *req); 490 struct ddcb_requ *req);
455 491
456struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages,
457 dma_addr_t *dma_addr, size_t *sgl_size);
458
459void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
460 dma_addr_t dma_addr, size_t size);
461
462int genwqe_setup_sgl(struct genwqe_dev *cd,
463 unsigned long offs,
464 unsigned long size,
465 struct sg_entry *sgl, /* genwqe sgl */
466 dma_addr_t dma_addr, size_t sgl_size,
467 dma_addr_t *dma_list, int page_offs, int num_pages);
468
469int genwqe_check_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list,
470 int size);
471
472static inline bool dma_mapping_used(struct dma_mapping *m) 492static inline bool dma_mapping_used(struct dma_mapping *m)
473{ 493{
474 if (!m) 494 if (!m)
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 6f1acc0ccf88..c8046db2d5a2 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -305,6 +305,8 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
305 break; 305 break;
306 306
307 new = (old | DDCB_NEXT_BE32); 307 new = (old | DDCB_NEXT_BE32);
308
309 wmb();
308 icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new); 310 icrc_hsi_shi = cmpxchg(&prev_ddcb->icrc_hsi_shi_32, old, new);
309 311
310 if (icrc_hsi_shi == old) 312 if (icrc_hsi_shi == old)
@@ -314,6 +316,8 @@ static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue,
314 /* Queue must be re-started by updating QUEUE_OFFSET */ 316 /* Queue must be re-started by updating QUEUE_OFFSET */
315 ddcb_mark_tapped(pddcb); 317 ddcb_mark_tapped(pddcb);
316 num = (u64)ddcb_no << 8; 318 num = (u64)ddcb_no << 8;
319
320 wmb();
317 __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */ 321 __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */
318 322
319 return RET_DDCB_TAPPED; 323 return RET_DDCB_TAPPED;
@@ -1306,7 +1310,7 @@ static int queue_wake_up_all(struct genwqe_dev *cd)
1306 */ 1310 */
1307int genwqe_finish_queue(struct genwqe_dev *cd) 1311int genwqe_finish_queue(struct genwqe_dev *cd)
1308{ 1312{
1309 int i, rc, in_flight; 1313 int i, rc = 0, in_flight;
1310 int waitmax = genwqe_ddcb_software_timeout; 1314 int waitmax = genwqe_ddcb_software_timeout;
1311 struct pci_dev *pci_dev = cd->pci_dev; 1315 struct pci_dev *pci_dev = cd->pci_dev;
1312 struct ddcb_queue *queue = &cd->queue; 1316 struct ddcb_queue *queue = &cd->queue;
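
Editor's note: the two wmb() additions in card_ddcb.c order the CPU's writes to the DDCB descriptor ahead of the operation that publishes it, namely the cmpxchg() that chains it to the previous descriptor and the MMIO queue-offset write that taps the hardware. The doorbell half of that rule in miniature (my_desc and my_ring_doorbell are hypothetical):

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

struct my_desc {
	u64 addr;
	u32 len;
	u32 ready;
};

void my_ring_doorbell(void __iomem *mmio, u64 slot);	/* MMIO write */

static void my_submit(void __iomem *mmio, struct my_desc *desc,
		      u64 addr, u32 len, u64 slot)
{
	desc->addr  = addr;
	desc->len   = len;
	desc->ready = 1;

	/* Descriptor contents must be globally visible before the
	 * device is told to fetch it. */
	wmb();
	my_ring_doorbell(mmio, slot);
}
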
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 2c2c9cc75231..1d2f163a1906 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -531,7 +531,9 @@ static int do_flash_update(struct genwqe_file *cfile,
531 case '1': 531 case '1':
532 cmdopts = 0x1C; 532 cmdopts = 0x1C;
533 break; /* download/erase_first/part_1 */ 533 break; /* download/erase_first/part_1 */
534 case 'v': /* cmdopts = 0x0c (VPD) */ 534 case 'v':
535 cmdopts = 0x0C;
536 break; /* download/erase_first/vpd */
535 default: 537 default:
536 return -EINVAL; 538 return -EINVAL;
537 } 539 }
@@ -665,6 +667,8 @@ static int do_flash_read(struct genwqe_file *cfile,
665 cmdopts = 0x1A; 667 cmdopts = 0x1A;
666 break; /* upload/part_1 */ 668 break; /* upload/part_1 */
667 case 'v': 669 case 'v':
670 cmdopts = 0x0A;
671 break; /* upload/vpd */
668 default: 672 default:
669 return -EINVAL; 673 return -EINVAL;
670 } 674 }
@@ -836,15 +840,8 @@ static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
836 __genwqe_del_mapping(cfile, dma_map); 840 __genwqe_del_mapping(cfile, dma_map);
837 genwqe_user_vunmap(cd, dma_map, req); 841 genwqe_user_vunmap(cd, dma_map, req);
838 } 842 }
839 if (req->sgl[i] != NULL) { 843 if (req->sgls[i].sgl != NULL)
840 genwqe_free_sgl(cd, req->sgl[i], 844 genwqe_free_sync_sgl(cd, &req->sgls[i]);
841 req->sgl_dma_addr[i],
842 req->sgl_size[i]);
843 req->sgl[i] = NULL;
844 req->sgl_dma_addr[i] = 0x0;
845 req->sgl_size[i] = 0;
846 }
847
848 } 845 }
849 return 0; 846 return 0;
850} 847}
@@ -913,7 +910,7 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
913 910
914 case ATS_TYPE_SGL_RDWR: 911 case ATS_TYPE_SGL_RDWR:
915 case ATS_TYPE_SGL_RD: { 912 case ATS_TYPE_SGL_RD: {
916 int page_offs, nr_pages, offs; 913 int page_offs;
917 914
918 u_addr = be64_to_cpu(*((__be64 *) 915 u_addr = be64_to_cpu(*((__be64 *)
919 &cmd->asiv[asiv_offs])); 916 &cmd->asiv[asiv_offs]));
@@ -951,27 +948,18 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
951 page_offs = 0; 948 page_offs = 0;
952 } 949 }
953 950
954 offs = offset_in_page(u_addr);
955 nr_pages = DIV_ROUND_UP(offs + u_size, PAGE_SIZE);
956
957 /* create genwqe style scatter gather list */ 951 /* create genwqe style scatter gather list */
958 req->sgl[i] = genwqe_alloc_sgl(cd, m->nr_pages, 952 rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
959 &req->sgl_dma_addr[i], 953 (void __user *)u_addr,
960 &req->sgl_size[i]); 954 u_size);
961 if (req->sgl[i] == NULL) { 955 if (rc != 0)
962 rc = -ENOMEM;
963 goto err_out; 956 goto err_out;
964 } 957
965 genwqe_setup_sgl(cd, offs, u_size, 958 genwqe_setup_sgl(cd, &req->sgls[i],
966 req->sgl[i], 959 &m->dma_list[page_offs]);
967 req->sgl_dma_addr[i],
968 req->sgl_size[i],
969 m->dma_list,
970 page_offs,
971 nr_pages);
972 960
973 *((__be64 *)&cmd->asiv[asiv_offs]) = 961 *((__be64 *)&cmd->asiv[asiv_offs]) =
974 cpu_to_be64(req->sgl_dma_addr[i]); 962 cpu_to_be64(req->sgls[i].sgl_dma_addr);
975 963
976 break; 964 break;
977 } 965 }
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 6b1a6ef9f1a8..d049d271699c 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -275,67 +275,107 @@ static int genwqe_sgl_size(int num_pages)
275 return roundup(len, PAGE_SIZE); 275 return roundup(len, PAGE_SIZE);
276} 276}
277 277
278struct sg_entry *genwqe_alloc_sgl(struct genwqe_dev *cd, int num_pages, 278/**
279 dma_addr_t *dma_addr, size_t *sgl_size) 279 * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
280 *
281 * Allocates memory for sgl and overlapping pages. Pages which might
282 * overlap other user-space memory blocks are being cached for DMAs,
 283 283 * such that we do not run into synchronization issues. Data is copied
284 * from user-space into the cached pages.
285 */
286int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
287 void __user *user_addr, size_t user_size)
280{ 288{
289 int rc;
281 struct pci_dev *pci_dev = cd->pci_dev; 290 struct pci_dev *pci_dev = cd->pci_dev;
282 struct sg_entry *sgl;
283 291
284 *sgl_size = genwqe_sgl_size(num_pages); 292 sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
285 if (get_order(*sgl_size) > MAX_ORDER) { 293 sgl->fpage_size = min_t(size_t, PAGE_SIZE-sgl->fpage_offs, user_size);
294 sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
295 sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;
296
297 dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld "
298 "fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n",
299 __func__, user_addr, user_size, sgl->nr_pages,
300 sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);
301
302 sgl->user_addr = user_addr;
303 sgl->user_size = user_size;
304 sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
305
306 if (get_order(sgl->sgl_size) > MAX_ORDER) {
286 dev_err(&pci_dev->dev, 307 dev_err(&pci_dev->dev,
287 "[%s] err: too much memory requested!\n", __func__); 308 "[%s] err: too much memory requested!\n", __func__);
288 return NULL; 309 return -ENOMEM;
289 } 310 }
290 311
291 sgl = __genwqe_alloc_consistent(cd, *sgl_size, dma_addr); 312 sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
292 if (sgl == NULL) { 313 &sgl->sgl_dma_addr);
314 if (sgl->sgl == NULL) {
293 dev_err(&pci_dev->dev, 315 dev_err(&pci_dev->dev,
294 "[%s] err: no memory available!\n", __func__); 316 "[%s] err: no memory available!\n", __func__);
295 return NULL; 317 return -ENOMEM;
296 } 318 }
297 319
298 return sgl; 320 /* Only use buffering on incomplete pages */
321 if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
322 sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
323 &sgl->fpage_dma_addr);
324 if (sgl->fpage == NULL)
325 goto err_out;
326
327 /* Sync with user memory */
328 if (copy_from_user(sgl->fpage + sgl->fpage_offs,
329 user_addr, sgl->fpage_size)) {
330 rc = -EFAULT;
331 goto err_out;
332 }
333 }
334 if (sgl->lpage_size != 0) {
335 sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
336 &sgl->lpage_dma_addr);
337 if (sgl->lpage == NULL)
338 goto err_out1;
339
340 /* Sync with user memory */
341 if (copy_from_user(sgl->lpage, user_addr + user_size -
342 sgl->lpage_size, sgl->lpage_size)) {
343 rc = -EFAULT;
344 goto err_out1;
345 }
346 }
347 return 0;
348
349 err_out1:
350 __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
351 sgl->fpage_dma_addr);
352 err_out:
353 __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
354 sgl->sgl_dma_addr);
355 return -ENOMEM;
299} 356}
300 357
301int genwqe_setup_sgl(struct genwqe_dev *cd, 358int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
302 unsigned long offs, 359 dma_addr_t *dma_list)
303 unsigned long size,
304 struct sg_entry *sgl,
305 dma_addr_t dma_addr, size_t sgl_size,
306 dma_addr_t *dma_list, int page_offs, int num_pages)
307{ 360{
308 int i = 0, j = 0, p; 361 int i = 0, j = 0, p;
309 unsigned long dma_offs, map_offs; 362 unsigned long dma_offs, map_offs;
310 struct pci_dev *pci_dev = cd->pci_dev;
311 dma_addr_t prev_daddr = 0; 363 dma_addr_t prev_daddr = 0;
312 struct sg_entry *s, *last_s = NULL; 364 struct sg_entry *s, *last_s = NULL;
313 365 size_t size = sgl->user_size;
314 /* sanity checks */
315 if (offs > PAGE_SIZE) {
316 dev_err(&pci_dev->dev,
317 "[%s] too large start offs %08lx\n", __func__, offs);
318 return -EFAULT;
319 }
320 if (sgl_size < genwqe_sgl_size(num_pages)) {
321 dev_err(&pci_dev->dev,
322 "[%s] sgl_size too small %08lx for %d pages\n",
323 __func__, sgl_size, num_pages);
324 return -EFAULT;
325 }
326 366
327 dma_offs = 128; /* next block if needed/dma_offset */ 367 dma_offs = 128; /* next block if needed/dma_offset */
328 map_offs = offs; /* offset in first page */ 368 map_offs = sgl->fpage_offs; /* offset in first page */
329 369
330 s = &sgl[0]; /* first set of 8 entries */ 370 s = &sgl->sgl[0]; /* first set of 8 entries */
331 p = 0; /* page */ 371 p = 0; /* page */
332 while (p < num_pages) { 372 while (p < sgl->nr_pages) {
333 dma_addr_t daddr; 373 dma_addr_t daddr;
334 unsigned int size_to_map; 374 unsigned int size_to_map;
335 375
336 /* always write the chaining entry, cleanup is done later */ 376 /* always write the chaining entry, cleanup is done later */
337 j = 0; 377 j = 0;
338 s[j].target_addr = cpu_to_be64(dma_addr + dma_offs); 378 s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);
339 s[j].len = cpu_to_be32(128); 379 s[j].len = cpu_to_be32(128);
340 s[j].flags = cpu_to_be32(SG_CHAINED); 380 s[j].flags = cpu_to_be32(SG_CHAINED);
341 j++; 381 j++;
@@ -343,7 +383,17 @@ int genwqe_setup_sgl(struct genwqe_dev *cd,
343 while (j < 8) { 383 while (j < 8) {
344 /* DMA mapping for requested page, offs, size */ 384 /* DMA mapping for requested page, offs, size */
345 size_to_map = min(size, PAGE_SIZE - map_offs); 385 size_to_map = min(size, PAGE_SIZE - map_offs);
346 daddr = dma_list[page_offs + p] + map_offs; 386
387 if ((p == 0) && (sgl->fpage != NULL)) {
388 daddr = sgl->fpage_dma_addr + map_offs;
389
390 } else if ((p == sgl->nr_pages - 1) &&
391 (sgl->lpage != NULL)) {
392 daddr = sgl->lpage_dma_addr;
393 } else {
394 daddr = dma_list[p] + map_offs;
395 }
396
347 size -= size_to_map; 397 size -= size_to_map;
348 map_offs = 0; 398 map_offs = 0;
349 399
@@ -358,7 +408,7 @@ int genwqe_setup_sgl(struct genwqe_dev *cd,
358 size_to_map); 408 size_to_map);
359 409
360 p++; /* process next page */ 410 p++; /* process next page */
361 if (p == num_pages) 411 if (p == sgl->nr_pages)
362 goto fixup; /* nothing to do */ 412 goto fixup; /* nothing to do */
363 413
364 prev_daddr = daddr + size_to_map; 414 prev_daddr = daddr + size_to_map;
@@ -374,7 +424,7 @@ int genwqe_setup_sgl(struct genwqe_dev *cd,
374 j++; 424 j++;
375 425
376 p++; /* process next page */ 426 p++; /* process next page */
377 if (p == num_pages) 427 if (p == sgl->nr_pages)
378 goto fixup; /* nothing to do */ 428 goto fixup; /* nothing to do */
379 } 429 }
380 dma_offs += 128; 430 dma_offs += 128;
@@ -395,10 +445,50 @@ int genwqe_setup_sgl(struct genwqe_dev *cd,
395 return 0; 445 return 0;
396} 446}
397 447
398void genwqe_free_sgl(struct genwqe_dev *cd, struct sg_entry *sg_list, 448/**
399 dma_addr_t dma_addr, size_t size) 449 * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
450 *
451 * After the DMA transfer has been completed we free the memory for
452 * the sgl and the cached pages. Data is being transferred from cached
453 * pages into user-space buffers.
454 */
455int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
400{ 456{
401 __genwqe_free_consistent(cd, size, sg_list, dma_addr); 457 int rc;
458 struct pci_dev *pci_dev = cd->pci_dev;
459
460 if (sgl->fpage) {
461 if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
462 sgl->fpage_size)) {
463 dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n",
464 __func__);
465 rc = -EFAULT;
466 }
467 __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
468 sgl->fpage_dma_addr);
469 sgl->fpage = NULL;
470 sgl->fpage_dma_addr = 0;
471 }
472 if (sgl->lpage) {
473 if (copy_to_user(sgl->user_addr + sgl->user_size -
474 sgl->lpage_size, sgl->lpage,
475 sgl->lpage_size)) {
476 dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n",
477 __func__);
478 rc = -EFAULT;
479 }
480 __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
481 sgl->lpage_dma_addr);
482 sgl->lpage = NULL;
483 sgl->lpage_dma_addr = 0;
484 }
485 __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
486 sgl->sgl_dma_addr);
487
488 sgl->sgl = NULL;
489 sgl->sgl_dma_addr = 0x0;
490 sgl->sgl_size = 0;
491 return rc;
402} 492}
403 493
404/** 494/**
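
The kernel-doc comments above describe how genwqe_alloc_sync_sgl() caches a partially used first and last page and how genwqe_free_sync_sgl() copies that cached data back to user space after the DMA has completed. The bookkeeping (fpage_offs, fpage_size, nr_pages, lpage_size) is plain arithmetic; the following stand-alone C model (hypothetical struct sgl_layout and helper names, not driver code) reproduces it so the corner cases can be checked outside the kernel:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-in for the bookkeeping fields of struct genwqe_sgl. */
struct sgl_layout {
	unsigned long fpage_offs; /* offset of the buffer inside its first page */
	unsigned long fpage_size; /* bytes of the buffer living in the first page */
	unsigned long lpage_size; /* bytes spilling into a trailing partial page */
	unsigned long nr_pages;   /* pages touched by the buffer */
};

/* Mirrors the arithmetic at the top of genwqe_alloc_sync_sgl(). */
static void sgl_layout_init(struct sgl_layout *s, unsigned long user_addr,
			    unsigned long user_size)
{
	s->fpage_offs = user_addr & (PAGE_SIZE - 1);	/* offset_in_page() */
	s->fpage_size = PAGE_SIZE - s->fpage_offs;
	if (s->fpage_size > user_size)			/* min_t() */
		s->fpage_size = user_size;
	s->nr_pages = (s->fpage_offs + user_size + PAGE_SIZE - 1) / PAGE_SIZE;
	s->lpage_size = (user_size - s->fpage_size) % PAGE_SIZE;
}

int main(void)
{
	struct sgl_layout s;

	/* 10000 bytes starting 100 bytes into a page touch 3 pages:
	 * 3996 in the first, 4096 in the middle, 1908 in the last.
	 * First and last page are incomplete, so both would be cached. */
	sgl_layout_init(&s, 0x1000 + 100, 10000);
	assert(s.fpage_offs == 100 && s.fpage_size == 3996);
	assert(s.nr_pages == 3 && s.lpage_size == 1908);

	/* A page-aligned, page-sized buffer needs no caching at all:
	 * fpage_size == PAGE_SIZE fails the "incomplete page" test above. */
	sgl_layout_init(&s, 0x2000, PAGE_SIZE);
	assert(s.fpage_offs == 0 && s.fpage_size == PAGE_SIZE);
	assert(s.nr_pages == 1 && s.lpage_size == 0);

	printf("sgl layout checks passed\n");
	return 0;
}
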
diff --git a/drivers/misc/genwqe/genwqe_driver.h b/drivers/misc/genwqe/genwqe_driver.h
index 46e916b36c70..cd5263163a6e 100644
--- a/drivers/misc/genwqe/genwqe_driver.h
+++ b/drivers/misc/genwqe/genwqe_driver.h
@@ -36,7 +36,7 @@
36#include <asm/byteorder.h> 36#include <asm/byteorder.h>
37#include <linux/genwqe/genwqe_card.h> 37#include <linux/genwqe/genwqe_card.h>
38 38
39#define DRV_VERS_STRING "2.0.0" 39#define DRV_VERS_STRING "2.0.15"
40 40
41/* 41/*
42 * Static minor number assignment, until we decide/implement 42 * Static minor number assignment, until we decide/implement
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 66f411a6e8ea..cabc04383685 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -115,6 +115,11 @@
115#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */ 115#define MEI_DEV_ID_LPT_HR 0x8CBA /* Lynx Point H Refresh */
116 116
117#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */ 117#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
118
119/* Host Firmware Status Registers in PCI Config Space */
120#define PCI_CFG_HFS_1 0x40
121#define PCI_CFG_HFS_2 0x48
122
118/* 123/*
119 * MEI HW Section 124 * MEI HW Section
120 */ 125 */
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 29b5af8efb71..4e3cba6da3f5 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -455,8 +455,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
455 455
456 cl->status = 0; 456 cl->status = 0;
457 list_del(&cb->list); 457 list_del(&cb->list);
458 if (MEI_WRITING == cl->writing_state && 458 if (cb->fop_type == MEI_FOP_WRITE &&
459 cb->fop_type == MEI_FOP_WRITE &&
460 cl != &dev->iamthif_cl) { 459 cl != &dev->iamthif_cl) {
461 cl_dbg(dev, cl, "MEI WRITE COMPLETE\n"); 460 cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
462 cl->writing_state = MEI_WRITE_COMPLETE; 461 cl->writing_state = MEI_WRITE_COMPLETE;
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index b35594dbf52f..147413145c97 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -644,8 +644,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
644 goto out; 644 goto out;
645 } 645 }
646 646
647 if (MEI_WRITE_COMPLETE == cl->writing_state) 647 mask |= (POLLIN | POLLRDNORM);
648 mask |= (POLLIN | POLLRDNORM);
649 648
650out: 649out:
651 mutex_unlock(&dev->device_lock); 650 mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 1c8fd3a3e135..95889e2e31ff 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -97,15 +97,31 @@ static bool mei_me_quirk_probe(struct pci_dev *pdev,
97 const struct pci_device_id *ent) 97 const struct pci_device_id *ent)
98{ 98{
99 u32 reg; 99 u32 reg;
100 if (ent->device == MEI_DEV_ID_PBG_1) { 100 /* Cougar Point || Patsburg */
101 pci_read_config_dword(pdev, 0x48, &reg); 101 if (ent->device == MEI_DEV_ID_CPT_1 ||
102 /* make sure that bit 9 is up and bit 10 is down */ 102 ent->device == MEI_DEV_ID_PBG_1) {
103 if ((reg & 0x600) == 0x200) { 103 pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
104 dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); 104 /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
105 return false; 105 if ((reg & 0x600) == 0x200)
106 } 106 goto no_mei;
107 } 107 }
108
109 /* Lynx Point */
110 if (ent->device == MEI_DEV_ID_LPT_H ||
111 ent->device == MEI_DEV_ID_LPT_W ||
112 ent->device == MEI_DEV_ID_LPT_HR) {
113 /* Read ME FW Status check for SPS Firmware */
114 pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
115 /* if bits [19:16] = 15, running SPS Firmware */
116 if ((reg & 0xf0000) == 0xf0000)
117 goto no_mei;
118 }
119
108 return true; 120 return true;
121
122no_mei:
123 dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
124 return false;
109} 125}
110/** 126/**
111 * mei_probe - Device Initialization Routine 127 * mei_probe - Device Initialization Routine
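
The quirk above encodes two firmware checks: on Cougar Point and Patsburg the HFS_2 word must not have bit 9 (NM) set while bit 10 (DM) is clear, and on Lynx Point firmware-status bits [19:16] equal to 0xF mean the part runs SPS firmware and exposes no usable MEI. A small stand-alone model of the two bit tests (hypothetical helper names; plain C, not driver code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Cougar Point / Patsburg, PCI_CFG_HFS_2: the combination "bit 9 (NM) set,
 * bit 10 (DM) clear" marks the ME interface as unusable. */
static bool hfs2_me_usable(uint32_t reg)
{
	return (reg & 0x600) != 0x200;
}

/* Lynx Point, PCI_CFG_HFS_1: firmware-status bits [19:16] equal to 0xF
 * mean the device runs SPS firmware, so MEI should not bind. */
static bool hfs1_running_sps(uint32_t reg)
{
	return (reg & 0xf0000) == 0xf0000;
}

int main(void)
{
	/* bit 9 up, bit 10 down -> quirk reports "no valid ME interface" */
	assert(!hfs2_me_usable(1u << 9));
	/* both bits up (or any other combination) -> device is accepted */
	assert(hfs2_me_usable((1u << 9) | (1u << 10)));

	/* bits [19:16] = 0xF -> SPS firmware, skip the device */
	assert(hfs1_running_sps(0x000f0000));
	assert(!hfs1_running_sps(0x00050000));

	printf("quirk bit checks behave as expected\n");
	return 0;
}
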
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 5fb994f9a653..0b9ded13a3ae 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -31,28 +31,14 @@
31#include <linux/mfd/rtsx_pci.h> 31#include <linux/mfd/rtsx_pci.h>
32#include <asm/unaligned.h> 32#include <asm/unaligned.h>
33 33
34struct realtek_next {
35 unsigned int sg_count;
36 s32 cookie;
37};
38
39struct realtek_pci_sdmmc { 34struct realtek_pci_sdmmc {
40 struct platform_device *pdev; 35 struct platform_device *pdev;
41 struct rtsx_pcr *pcr; 36 struct rtsx_pcr *pcr;
42 struct mmc_host *mmc; 37 struct mmc_host *mmc;
43 struct mmc_request *mrq; 38 struct mmc_request *mrq;
44 struct mmc_command *cmd; 39
45 struct mmc_data *data; 40 struct mutex host_mutex;
46 41
47 spinlock_t lock;
48 struct timer_list timer;
49 struct tasklet_struct cmd_tasklet;
50 struct tasklet_struct data_tasklet;
51 struct tasklet_struct finish_tasklet;
52
53 u8 rsp_type;
54 u8 rsp_len;
55 int sg_count;
56 u8 ssc_depth; 42 u8 ssc_depth;
57 unsigned int clock; 43 unsigned int clock;
58 bool vpclk; 44 bool vpclk;
@@ -62,13 +48,8 @@ struct realtek_pci_sdmmc {
62 int power_state; 48 int power_state;
63#define SDMMC_POWER_ON 1 49#define SDMMC_POWER_ON 1
64#define SDMMC_POWER_OFF 0 50#define SDMMC_POWER_OFF 0
65
66 struct realtek_next next_data;
67}; 51};
68 52
69static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
70 struct mmc_request *mrq);
71
72static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host) 53static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host)
73{ 54{
74 return &(host->pdev->dev); 55 return &(host->pdev->dev);
@@ -105,95 +86,6 @@ static void sd_print_debug_regs(struct realtek_pci_sdmmc *host)
105#define sd_print_debug_regs(host) 86#define sd_print_debug_regs(host)
106#endif /* DEBUG */ 87#endif /* DEBUG */
107 88
108static void sd_isr_done_transfer(struct platform_device *pdev)
109{
110 struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
111
112 spin_lock(&host->lock);
113 if (host->cmd)
114 tasklet_schedule(&host->cmd_tasklet);
115 if (host->data)
116 tasklet_schedule(&host->data_tasklet);
117 spin_unlock(&host->lock);
118}
119
120static void sd_request_timeout(unsigned long host_addr)
121{
122 struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
123 unsigned long flags;
124
125 spin_lock_irqsave(&host->lock, flags);
126
127 if (!host->mrq) {
128 dev_err(sdmmc_dev(host), "error: no request exist\n");
129 goto out;
130 }
131
132 if (host->cmd)
133 host->cmd->error = -ETIMEDOUT;
134 if (host->data)
135 host->data->error = -ETIMEDOUT;
136
137 dev_dbg(sdmmc_dev(host), "timeout for request\n");
138
139out:
140 tasklet_schedule(&host->finish_tasklet);
141 spin_unlock_irqrestore(&host->lock, flags);
142}
143
144static void sd_finish_request(unsigned long host_addr)
145{
146 struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
147 struct rtsx_pcr *pcr = host->pcr;
148 struct mmc_request *mrq;
149 struct mmc_command *cmd;
150 struct mmc_data *data;
151 unsigned long flags;
152 bool any_error;
153
154 spin_lock_irqsave(&host->lock, flags);
155
156 del_timer(&host->timer);
157 mrq = host->mrq;
158 if (!mrq) {
159 dev_err(sdmmc_dev(host), "error: no request need finish\n");
160 goto out;
161 }
162
163 cmd = mrq->cmd;
164 data = mrq->data;
165
166 any_error = (mrq->sbc && mrq->sbc->error) ||
167 (mrq->stop && mrq->stop->error) ||
168 (cmd && cmd->error) || (data && data->error);
169
170 if (any_error) {
171 rtsx_pci_stop_cmd(pcr);
172 sd_clear_error(host);
173 }
174
175 if (data) {
176 if (any_error)
177 data->bytes_xfered = 0;
178 else
179 data->bytes_xfered = data->blocks * data->blksz;
180
181 if (!data->host_cookie)
182 rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len,
183 data->flags & MMC_DATA_READ);
184
185 }
186
187 host->mrq = NULL;
188 host->cmd = NULL;
189 host->data = NULL;
190
191out:
192 spin_unlock_irqrestore(&host->lock, flags);
193 mutex_unlock(&pcr->pcr_mutex);
194 mmc_request_done(host->mmc, mrq);
195}
196
197static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt, 89static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
198 u8 *buf, int buf_len, int timeout) 90 u8 *buf, int buf_len, int timeout)
199{ 91{
@@ -311,7 +203,8 @@ static int sd_write_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
311 return 0; 203 return 0;
312} 204}
313 205
314static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd) 206static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
207 struct mmc_command *cmd)
315{ 208{
316 struct rtsx_pcr *pcr = host->pcr; 209 struct rtsx_pcr *pcr = host->pcr;
317 u8 cmd_idx = (u8)cmd->opcode; 210 u8 cmd_idx = (u8)cmd->opcode;
@@ -319,14 +212,11 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
319 int err = 0; 212 int err = 0;
320 int timeout = 100; 213 int timeout = 100;
321 int i; 214 int i;
215 u8 *ptr;
216 int stat_idx = 0;
322 u8 rsp_type; 217 u8 rsp_type;
323 int rsp_len = 5; 218 int rsp_len = 5;
324 unsigned long flags; 219 bool clock_toggled = false;
325
326 if (host->cmd)
327 dev_err(sdmmc_dev(host), "error: cmd already exist\n");
328
329 host->cmd = cmd;
330 220
331 dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", 221 dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
332 __func__, cmd_idx, arg); 222 __func__, cmd_idx, arg);
@@ -361,8 +251,6 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
361 err = -EINVAL; 251 err = -EINVAL;
362 goto out; 252 goto out;
363 } 253 }
364 host->rsp_type = rsp_type;
365 host->rsp_len = rsp_len;
366 254
367 if (rsp_type == SD_RSP_TYPE_R1b) 255 if (rsp_type == SD_RSP_TYPE_R1b)
368 timeout = 3000; 256 timeout = 3000;
@@ -372,6 +260,8 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
372 0xFF, SD_CLK_TOGGLE_EN); 260 0xFF, SD_CLK_TOGGLE_EN);
373 if (err < 0) 261 if (err < 0)
374 goto out; 262 goto out;
263
264 clock_toggled = true;
375 } 265 }
376 266
377 rtsx_pci_init_cmd(pcr); 267 rtsx_pci_init_cmd(pcr);
@@ -395,60 +285,25 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
395 /* Read data from ping-pong buffer */ 285 /* Read data from ping-pong buffer */
396 for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++) 286 for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++)
397 rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); 287 rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
288 stat_idx = 16;
398 } else if (rsp_type != SD_RSP_TYPE_R0) { 289 } else if (rsp_type != SD_RSP_TYPE_R0) {
399 /* Read data from SD_CMDx registers */ 290 /* Read data from SD_CMDx registers */
400 for (i = SD_CMD0; i <= SD_CMD4; i++) 291 for (i = SD_CMD0; i <= SD_CMD4; i++)
401 rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); 292 rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
293 stat_idx = 5;
402 } 294 }
403 295
404 rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0); 296 rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0);
405 297
406 mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout)); 298 err = rtsx_pci_send_cmd(pcr, timeout);
407 299 if (err < 0) {
408 spin_lock_irqsave(&pcr->lock, flags); 300 sd_print_debug_regs(host);
409 pcr->trans_result = TRANS_NOT_READY; 301 sd_clear_error(host);
410 rtsx_pci_send_cmd_no_wait(pcr); 302 dev_dbg(sdmmc_dev(host),
411 spin_unlock_irqrestore(&pcr->lock, flags); 303 "rtsx_pci_send_cmd error (err = %d)\n", err);
412
413 return;
414
415out:
416 cmd->error = err;
417 tasklet_schedule(&host->finish_tasklet);
418}
419
420static void sd_get_rsp(unsigned long host_addr)
421{
422 struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
423 struct rtsx_pcr *pcr = host->pcr;
424 struct mmc_command *cmd;
425 int i, err = 0, stat_idx;
426 u8 *ptr, rsp_type;
427 unsigned long flags;
428
429 spin_lock_irqsave(&host->lock, flags);
430
431 cmd = host->cmd;
432 host->cmd = NULL;
433
434 if (!cmd) {
435 dev_err(sdmmc_dev(host), "error: cmd not exist\n");
436 goto out; 304 goto out;
437 } 305 }
438 306
439 spin_lock(&pcr->lock);
440 if (pcr->trans_result == TRANS_NO_DEVICE)
441 err = -ENODEV;
442 else if (pcr->trans_result != TRANS_RESULT_OK)
443 err = -EINVAL;
444 spin_unlock(&pcr->lock);
445
446 if (err < 0)
447 goto out;
448
449 rsp_type = host->rsp_type;
450 stat_idx = host->rsp_len;
451
452 if (rsp_type == SD_RSP_TYPE_R0) { 307 if (rsp_type == SD_RSP_TYPE_R0) {
453 err = 0; 308 err = 0;
454 goto out; 309 goto out;
@@ -485,106 +340,26 @@ static void sd_get_rsp(unsigned long host_addr)
485 cmd->resp[0]); 340 cmd->resp[0]);
486 } 341 }
487 342
488 if (cmd == host->mrq->sbc) {
489 sd_send_cmd(host, host->mrq->cmd);
490 spin_unlock_irqrestore(&host->lock, flags);
491 return;
492 }
493
494 if (cmd == host->mrq->stop)
495 goto out;
496
497 if (cmd->data) {
498 sd_start_multi_rw(host, host->mrq);
499 spin_unlock_irqrestore(&host->lock, flags);
500 return;
501 }
502
503out: 343out:
504 cmd->error = err; 344 cmd->error = err;
505 345
506 tasklet_schedule(&host->finish_tasklet); 346 if (err && clock_toggled)
507 spin_unlock_irqrestore(&host->lock, flags); 347 rtsx_pci_write_register(pcr, SD_BUS_STAT,
508} 348 SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
509
510static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host,
511 struct mmc_data *data, struct realtek_next *next)
512{
513 struct rtsx_pcr *pcr = host->pcr;
514 int read = data->flags & MMC_DATA_READ;
515 int sg_count = 0;
516
517 if (!next && data->host_cookie &&
518 data->host_cookie != host->next_data.cookie) {
519 dev_err(sdmmc_dev(host),
520 "error: invalid cookie data[%d] host[%d]\n",
521 data->host_cookie, host->next_data.cookie);
522 data->host_cookie = 0;
523 }
524
525 if (next || (!next && data->host_cookie != host->next_data.cookie))
526 sg_count = rtsx_pci_dma_map_sg(pcr,
527 data->sg, data->sg_len, read);
528 else
529 sg_count = host->next_data.sg_count;
530
531 if (next) {
532 next->sg_count = sg_count;
533 if (++next->cookie < 0)
534 next->cookie = 1;
535 data->host_cookie = next->cookie;
536 }
537
538 return sg_count;
539}
540
541static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
542 bool is_first_req)
543{
544 struct realtek_pci_sdmmc *host = mmc_priv(mmc);
545 struct mmc_data *data = mrq->data;
546
547 if (data->host_cookie) {
548 dev_err(sdmmc_dev(host),
549 "error: descard already cookie data[%d]\n",
550 data->host_cookie);
551 data->host_cookie = 0;
552 }
553
554 dev_dbg(sdmmc_dev(host), "dma sg prepared: %d\n",
555 sd_pre_dma_transfer(host, data, &host->next_data));
556}
557
558static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
559 int err)
560{
561 struct realtek_pci_sdmmc *host = mmc_priv(mmc);
562 struct rtsx_pcr *pcr = host->pcr;
563 struct mmc_data *data = mrq->data;
564 int read = data->flags & MMC_DATA_READ;
565
566 rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read);
567 data->host_cookie = 0;
568} 349}
569 350
570static int sd_start_multi_rw(struct realtek_pci_sdmmc *host, 351static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
571 struct mmc_request *mrq)
572{ 352{
573 struct rtsx_pcr *pcr = host->pcr; 353 struct rtsx_pcr *pcr = host->pcr;
574 struct mmc_host *mmc = host->mmc; 354 struct mmc_host *mmc = host->mmc;
575 struct mmc_card *card = mmc->card; 355 struct mmc_card *card = mmc->card;
576 struct mmc_data *data = mrq->data; 356 struct mmc_data *data = mrq->data;
577 int uhs = mmc_card_uhs(card); 357 int uhs = mmc_card_uhs(card);
578 int read = data->flags & MMC_DATA_READ; 358 int read = (data->flags & MMC_DATA_READ) ? 1 : 0;
579 u8 cfg2, trans_mode; 359 u8 cfg2, trans_mode;
580 int err; 360 int err;
581 size_t data_len = data->blksz * data->blocks; 361 size_t data_len = data->blksz * data->blocks;
582 362
583 if (host->data)
584 dev_err(sdmmc_dev(host), "error: data already exist\n");
585
586 host->data = data;
587
588 if (read) { 363 if (read) {
589 cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | 364 cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
590 SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0; 365 SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0;
@@ -635,54 +410,15 @@ static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
635 rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, 410 rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
636 SD_TRANSFER_END, SD_TRANSFER_END); 411 SD_TRANSFER_END, SD_TRANSFER_END);
637 412
638 mod_timer(&host->timer, jiffies + 10 * HZ);
639 rtsx_pci_send_cmd_no_wait(pcr); 413 rtsx_pci_send_cmd_no_wait(pcr);
640 414
641 err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, read); 415 err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000);
642 if (err < 0) {
643 data->error = err;
644 tasklet_schedule(&host->finish_tasklet);
645 }
646 return 0;
647}
648
649static void sd_finish_multi_rw(unsigned long host_addr)
650{
651 struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
652 struct rtsx_pcr *pcr = host->pcr;
653 struct mmc_data *data;
654 int err = 0;
655 unsigned long flags;
656
657 spin_lock_irqsave(&host->lock, flags);
658
659 if (!host->data) {
660 dev_err(sdmmc_dev(host), "error: no data exist\n");
661 goto out;
662 }
663
664 data = host->data;
665 host->data = NULL;
666
667 if (pcr->trans_result == TRANS_NO_DEVICE)
668 err = -ENODEV;
669 else if (pcr->trans_result != TRANS_RESULT_OK)
670 err = -EINVAL;
671
672 if (err < 0) { 416 if (err < 0) {
673 data->error = err; 417 sd_clear_error(host);
674 goto out; 418 return err;
675 }
676
677 if (!host->mrq->sbc && data->stop) {
678 sd_send_cmd(host, data->stop);
679 spin_unlock_irqrestore(&host->lock, flags);
680 return;
681 } 419 }
682 420
683out: 421 return 0;
684 tasklet_schedule(&host->finish_tasklet);
685 spin_unlock_irqrestore(&host->lock, flags);
686} 422}
687 423
688static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) 424static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
@@ -901,13 +637,6 @@ static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode)
901 return 0; 637 return 0;
902} 638}
903 639
904static inline bool sd_use_muti_rw(struct mmc_command *cmd)
905{
906 return mmc_op_multi(cmd->opcode) ||
907 (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
908 (cmd->opcode == MMC_WRITE_BLOCK);
909}
910
911static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 640static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
912{ 641{
913 struct realtek_pci_sdmmc *host = mmc_priv(mmc); 642 struct realtek_pci_sdmmc *host = mmc_priv(mmc);
@@ -916,14 +645,6 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
916 struct mmc_data *data = mrq->data; 645 struct mmc_data *data = mrq->data;
917 unsigned int data_size = 0; 646 unsigned int data_size = 0;
918 int err; 647 int err;
919 unsigned long flags;
920
921 mutex_lock(&pcr->pcr_mutex);
922 spin_lock_irqsave(&host->lock, flags);
923
924 if (host->mrq)
925 dev_err(sdmmc_dev(host), "error: request already exist\n");
926 host->mrq = mrq;
927 648
928 if (host->eject) { 649 if (host->eject) {
929 cmd->error = -ENOMEDIUM; 650 cmd->error = -ENOMEDIUM;
@@ -936,6 +657,8 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
936 goto finish; 657 goto finish;
937 } 658 }
938 659
660 mutex_lock(&pcr->pcr_mutex);
661
939 rtsx_pci_start_run(pcr); 662 rtsx_pci_start_run(pcr);
940 663
941 rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth, 664 rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth,
@@ -944,28 +667,46 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
944 rtsx_pci_write_register(pcr, CARD_SHARE_MODE, 667 rtsx_pci_write_register(pcr, CARD_SHARE_MODE,
945 CARD_SHARE_MASK, CARD_SHARE_48_SD); 668 CARD_SHARE_MASK, CARD_SHARE_48_SD);
946 669
670 mutex_lock(&host->host_mutex);
671 host->mrq = mrq;
672 mutex_unlock(&host->host_mutex);
673
947 if (mrq->data) 674 if (mrq->data)
948 data_size = data->blocks * data->blksz; 675 data_size = data->blocks * data->blksz;
949 676
950 if (sd_use_muti_rw(cmd)) 677 if (!data_size || mmc_op_multi(cmd->opcode) ||
951 host->sg_count = sd_pre_dma_transfer(host, data, NULL); 678 (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
679 (cmd->opcode == MMC_WRITE_BLOCK)) {
680 sd_send_cmd_get_rsp(host, cmd);
952 681
953 if (!data_size || sd_use_muti_rw(cmd)) { 682 if (!cmd->error && data_size) {
954 if (mrq->sbc) 683 sd_rw_multi(host, mrq);
955 sd_send_cmd(host, mrq->sbc); 684
956 else 685 if (mmc_op_multi(cmd->opcode) && mrq->stop)
957 sd_send_cmd(host, cmd); 686 sd_send_cmd_get_rsp(host, mrq->stop);
958 spin_unlock_irqrestore(&host->lock, flags); 687 }
959 } else { 688 } else {
960 spin_unlock_irqrestore(&host->lock, flags);
961 sd_normal_rw(host, mrq); 689 sd_normal_rw(host, mrq);
962 tasklet_schedule(&host->finish_tasklet);
963 } 690 }
964 return; 691
692 if (mrq->data) {
693 if (cmd->error || data->error)
694 data->bytes_xfered = 0;
695 else
696 data->bytes_xfered = data->blocks * data->blksz;
697 }
698
699 mutex_unlock(&pcr->pcr_mutex);
965 700
966finish: 701finish:
967 tasklet_schedule(&host->finish_tasklet); 702 if (cmd->error)
968 spin_unlock_irqrestore(&host->lock, flags); 703 dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error);
704
705 mutex_lock(&host->host_mutex);
706 host->mrq = NULL;
707 mutex_unlock(&host->host_mutex);
708
709 mmc_request_done(mmc, mrq);
969} 710}
970 711
971static int sd_set_bus_width(struct realtek_pci_sdmmc *host, 712static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
@@ -1400,8 +1141,6 @@ out:
1400} 1141}
1401 1142
1402static const struct mmc_host_ops realtek_pci_sdmmc_ops = { 1143static const struct mmc_host_ops realtek_pci_sdmmc_ops = {
1403 .pre_req = sdmmc_pre_req,
1404 .post_req = sdmmc_post_req,
1405 .request = sdmmc_request, 1144 .request = sdmmc_request,
1406 .set_ios = sdmmc_set_ios, 1145 .set_ios = sdmmc_set_ios,
1407 .get_ro = sdmmc_get_ro, 1146 .get_ro = sdmmc_get_ro,
@@ -1465,7 +1204,6 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
1465 struct realtek_pci_sdmmc *host; 1204 struct realtek_pci_sdmmc *host;
1466 struct rtsx_pcr *pcr; 1205 struct rtsx_pcr *pcr;
1467 struct pcr_handle *handle = pdev->dev.platform_data; 1206 struct pcr_handle *handle = pdev->dev.platform_data;
1468 unsigned long host_addr;
1469 1207
1470 if (!handle) 1208 if (!handle)
1471 return -ENXIO; 1209 return -ENXIO;
@@ -1489,15 +1227,8 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
1489 pcr->slots[RTSX_SD_CARD].p_dev = pdev; 1227 pcr->slots[RTSX_SD_CARD].p_dev = pdev;
1490 pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event; 1228 pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event;
1491 1229
1492 host_addr = (unsigned long)host; 1230 mutex_init(&host->host_mutex);
1493 host->next_data.cookie = 1;
1494 setup_timer(&host->timer, sd_request_timeout, host_addr);
1495 tasklet_init(&host->cmd_tasklet, sd_get_rsp, host_addr);
1496 tasklet_init(&host->data_tasklet, sd_finish_multi_rw, host_addr);
1497 tasklet_init(&host->finish_tasklet, sd_finish_request, host_addr);
1498 spin_lock_init(&host->lock);
1499 1231
1500 pcr->slots[RTSX_SD_CARD].done_transfer = sd_isr_done_transfer;
1501 realtek_init_host(host); 1232 realtek_init_host(host);
1502 1233
1503 mmc_add_host(mmc); 1234 mmc_add_host(mmc);
@@ -1510,8 +1241,6 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
1510 struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); 1241 struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
1511 struct rtsx_pcr *pcr; 1242 struct rtsx_pcr *pcr;
1512 struct mmc_host *mmc; 1243 struct mmc_host *mmc;
1513 struct mmc_request *mrq;
1514 unsigned long flags;
1515 1244
1516 if (!host) 1245 if (!host)
1517 return 0; 1246 return 0;
@@ -1519,33 +1248,22 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
1519 pcr = host->pcr; 1248 pcr = host->pcr;
1520 pcr->slots[RTSX_SD_CARD].p_dev = NULL; 1249 pcr->slots[RTSX_SD_CARD].p_dev = NULL;
1521 pcr->slots[RTSX_SD_CARD].card_event = NULL; 1250 pcr->slots[RTSX_SD_CARD].card_event = NULL;
1522 pcr->slots[RTSX_SD_CARD].done_transfer = NULL;
1523 mmc = host->mmc; 1251 mmc = host->mmc;
1524 mrq = host->mrq;
1525 1252
1526 spin_lock_irqsave(&host->lock, flags); 1253 mutex_lock(&host->host_mutex);
1527 if (host->mrq) { 1254 if (host->mrq) {
1528 dev_dbg(&(pdev->dev), 1255 dev_dbg(&(pdev->dev),
1529 "%s: Controller removed during transfer\n", 1256 "%s: Controller removed during transfer\n",
1530 mmc_hostname(mmc)); 1257 mmc_hostname(mmc));
1531 1258
1532 if (mrq->sbc) 1259 rtsx_pci_complete_unfinished_transfer(pcr);
1533 mrq->sbc->error = -ENOMEDIUM;
1534 if (mrq->cmd)
1535 mrq->cmd->error = -ENOMEDIUM;
1536 if (mrq->stop)
1537 mrq->stop->error = -ENOMEDIUM;
1538 if (mrq->data)
1539 mrq->data->error = -ENOMEDIUM;
1540 1260
1541 tasklet_schedule(&host->finish_tasklet); 1261 host->mrq->cmd->error = -ENOMEDIUM;
1262 if (host->mrq->stop)
1263 host->mrq->stop->error = -ENOMEDIUM;
1264 mmc_request_done(mmc, host->mrq);
1542 } 1265 }
1543 spin_unlock_irqrestore(&host->lock, flags); 1266 mutex_unlock(&host->host_mutex);
1544
1545 del_timer_sync(&host->timer);
1546 tasklet_kill(&host->cmd_tasklet);
1547 tasklet_kill(&host->data_tasklet);
1548 tasklet_kill(&host->finish_tasklet);
1549 1267
1550 mmc_remove_host(mmc); 1268 mmc_remove_host(mmc);
1551 host->eject = true; 1269 host->eject = true;
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 363da96e6891..c4176b0f382d 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -6,7 +6,7 @@
6 * 6 *
7 * Copyright © 2010 STMicroelectronics. 7 * Copyright © 2010 STMicroelectronics.
8 * Ashish Priyadarshi 8 * Ashish Priyadarshi
9 * Shiraz Hashim <shiraz.hashim@st.com> 9 * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
10 * 10 *
11 * This file is licensed under the terms of the GNU General Public 11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any 12 * License version 2. This program is licensed "as is" without any
@@ -1089,5 +1089,5 @@ static struct platform_driver spear_smi_driver = {
1089module_platform_driver(spear_smi_driver); 1089module_platform_driver(spear_smi_driver);
1090 1090
1091MODULE_LICENSE("GPL"); 1091MODULE_LICENSE("GPL");
1092MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>"); 1092MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.linux.kernel@gmail.com>");
1093MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips"); 1093MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips");
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 4615d79fc93f..b922c8efcf40 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -523,6 +523,7 @@ static struct nand_ecclayout hwecc4_2048 = {
523#if defined(CONFIG_OF) 523#if defined(CONFIG_OF)
524static const struct of_device_id davinci_nand_of_match[] = { 524static const struct of_device_id davinci_nand_of_match[] = {
525 {.compatible = "ti,davinci-nand", }, 525 {.compatible = "ti,davinci-nand", },
526 {.compatible = "ti,keystone-nand", },
526 {}, 527 {},
527}; 528};
528MODULE_DEVICE_TABLE(of, davinci_nand_of_match); 529MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
@@ -581,6 +582,11 @@ static struct davinci_nand_pdata
581 of_property_read_bool(pdev->dev.of_node, 582 of_property_read_bool(pdev->dev.of_node,
582 "ti,davinci-nand-use-bbt")) 583 "ti,davinci-nand-use-bbt"))
583 pdata->bbt_options = NAND_BBT_USE_FLASH; 584 pdata->bbt_options = NAND_BBT_USE_FLASH;
585
586 if (of_device_is_compatible(pdev->dev.of_node,
587 "ti,keystone-nand")) {
588 pdata->options |= NAND_NO_SUBPAGE_WRITE;
589 }
584 } 590 }
585 591
586 return dev_get_platdata(&pdev->dev); 592 return dev_get_platdata(&pdev->dev);
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 7ff473c871a9..8d659e6a1b4c 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -431,7 +431,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
431 * Create one workqueue per volume (per registered block device). 431 * Create one workqueue per volume (per registered block device).
432 * Remember workqueues are cheap, they're not threads. 432 * Remember workqueues are cheap, they're not threads.
433 */ 433 */
434 dev->wq = alloc_workqueue(gd->disk_name, 0, 0); 434 dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
435 if (!dev->wq) 435 if (!dev->wq)
436 goto out_free_queue; 436 goto out_free_queue;
437 INIT_WORK(&dev->work, ubiblock_do_work); 437 INIT_WORK(&dev->work, ubiblock_do_work);
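
The one-line alloc_workqueue() change above is a format-string fix: the workqueue name argument is printf-style, so passing gd->disk_name directly would let a '%' inside the name be interpreted as a conversion, while routing it through "%s" treats it as plain data. The same hazard can be shown with any printf-family function:

#include <stdio.h>

int main(void)
{
	/* A name that happens to contain a '%' sequence. */
	const char *name = "ubiblock0_0%d";
	char buf[64];

	/* Unsafe: using the name itself as the format string would make any
	 * '%d' in it consume a nonexistent argument (undefined behaviour):
	 *
	 *	snprintf(buf, sizeof(buf), name);
	 */

	/* Safe: the name is only data for a fixed "%s" format, which is what
	 * alloc_workqueue("%s", 0, 0, gd->disk_name) does in the hunk above. */
	snprintf(buf, sizeof(buf), "%s", name);
	printf("workqueue name: %s\n", buf);
	return 0;
}
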
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 02317c1c0238..0f3425dac910 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -671,6 +671,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
671 671
672 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); 672 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
673 self_check_in_wl_tree(ubi, e, &ubi->free); 673 self_check_in_wl_tree(ubi, e, &ubi->free);
674 ubi->free_count--;
675 ubi_assert(ubi->free_count >= 0);
674 rb_erase(&e->u.rb, &ubi->free); 676 rb_erase(&e->u.rb, &ubi->free);
675 677
676 return e; 678 return e;
@@ -684,6 +686,9 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
684 peb = __wl_get_peb(ubi); 686 peb = __wl_get_peb(ubi);
685 spin_unlock(&ubi->wl_lock); 687 spin_unlock(&ubi->wl_lock);
686 688
689 if (peb < 0)
690 return peb;
691
687 err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset, 692 err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
688 ubi->peb_size - ubi->vid_hdr_aloffset); 693 ubi->peb_size - ubi->vid_hdr_aloffset);
689 if (err) { 694 if (err) {
@@ -1068,6 +1073,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1068 1073
1069 /* Give the unused PEB back */ 1074 /* Give the unused PEB back */
1070 wl_tree_add(e2, &ubi->free); 1075 wl_tree_add(e2, &ubi->free);
1076 ubi->free_count++;
1071 goto out_cancel; 1077 goto out_cancel;
1072 } 1078 }
1073 self_check_in_wl_tree(ubi, e1, &ubi->used); 1079 self_check_in_wl_tree(ubi, e1, &ubi->used);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9f69e818b000..93580a47cc54 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
82} 82}
83 83
84/* Forward declaration */ 84/* Forward declaration */
85static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); 85static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
86 bool strict_match);
86static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp); 87static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
87static void rlb_src_unlink(struct bonding *bond, u32 index); 88static void rlb_src_unlink(struct bonding *bond, u32 index);
88static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, 89static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
459 460
460 bond->alb_info.rlb_promisc_timeout_counter = 0; 461 bond->alb_info.rlb_promisc_timeout_counter = 0;
461 462
462 alb_send_learning_packets(bond->curr_active_slave, addr); 463 alb_send_learning_packets(bond->curr_active_slave, addr, true);
463} 464}
464 465
465/* slave being removed should not be active at this point 466/* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
995/*********************** tlb/rlb shared functions *********************/ 996/*********************** tlb/rlb shared functions *********************/
996 997
997static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], 998static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
998 u16 vid) 999 __be16 vlan_proto, u16 vid)
999{ 1000{
1000 struct learning_pkt pkt; 1001 struct learning_pkt pkt;
1001 struct sk_buff *skb; 1002 struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
1021 skb->dev = slave->dev; 1022 skb->dev = slave->dev;
1022 1023
1023 if (vid) { 1024 if (vid) {
1024 skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid); 1025 skb = vlan_put_tag(skb, vlan_proto, vid);
1025 if (!skb) { 1026 if (!skb) {
1026 pr_err("%s: Error: failed to insert VLAN tag\n", 1027 pr_err("%s: Error: failed to insert VLAN tag\n",
1027 slave->bond->dev->name); 1028 slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
1032 dev_queue_xmit(skb); 1033 dev_queue_xmit(skb);
1033} 1034}
1034 1035
1035 1036static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
1036static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) 1037 bool strict_match)
1037{ 1038{
1038 struct bonding *bond = bond_get_bond_by_slave(slave); 1039 struct bonding *bond = bond_get_bond_by_slave(slave);
1039 struct net_device *upper; 1040 struct net_device *upper;
1040 struct list_head *iter; 1041 struct list_head *iter;
1041 1042
1042 /* send untagged */ 1043 /* send untagged */
1043 alb_send_lp_vid(slave, mac_addr, 0); 1044 alb_send_lp_vid(slave, mac_addr, 0, 0);
1044 1045
1045 /* loop through vlans and send one packet for each */ 1046 /* loop through vlans and send one packet for each */
1046 rcu_read_lock(); 1047 rcu_read_lock();
1047 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { 1048 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
1048 if (upper->priv_flags & IFF_802_1Q_VLAN) 1049 if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
1049 alb_send_lp_vid(slave, mac_addr, 1050 if (strict_match &&
1050 vlan_dev_vlan_id(upper)); 1051 ether_addr_equal_64bits(mac_addr,
1052 upper->dev_addr)) {
1053 alb_send_lp_vid(slave, mac_addr,
1054 vlan_dev_vlan_proto(upper),
1055 vlan_dev_vlan_id(upper));
1056 } else if (!strict_match) {
1057 alb_send_lp_vid(slave, upper->dev_addr,
1058 vlan_dev_vlan_proto(upper),
1059 vlan_dev_vlan_id(upper));
1060 }
1061 }
1051 } 1062 }
1052 rcu_read_unlock(); 1063 rcu_read_unlock();
1053} 1064}
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
1107 1118
1108 /* fasten the change in the switch */ 1119 /* fasten the change in the switch */
1109 if (SLAVE_IS_OK(slave1)) { 1120 if (SLAVE_IS_OK(slave1)) {
1110 alb_send_learning_packets(slave1, slave1->dev->dev_addr); 1121 alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
1111 if (bond->alb_info.rlb_enabled) { 1122 if (bond->alb_info.rlb_enabled) {
1112 /* inform the clients that the mac address 1123 /* inform the clients that the mac address
1113 * has changed 1124 * has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
1119 } 1130 }
1120 1131
1121 if (SLAVE_IS_OK(slave2)) { 1132 if (SLAVE_IS_OK(slave2)) {
1122 alb_send_learning_packets(slave2, slave2->dev->dev_addr); 1133 alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
1123 if (bond->alb_info.rlb_enabled) { 1134 if (bond->alb_info.rlb_enabled) {
1124 /* inform the clients that the mac address 1135 /* inform the clients that the mac address
1125 * has changed 1136 * has changed
@@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work)
1490 1501
1491 /* send learning packets */ 1502 /* send learning packets */
1492 if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) { 1503 if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
1504 bool strict_match;
1505
1493 /* change of curr_active_slave involves swapping of mac addresses. 1506 /* change of curr_active_slave involves swapping of mac addresses.
1494 * in order to avoid this swapping from happening while 1507 * in order to avoid this swapping from happening while
1495 * sending the learning packets, the curr_slave_lock must be held for 1508 * sending the learning packets, the curr_slave_lock must be held for
@@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work)
1497 */ 1510 */
1498 read_lock(&bond->curr_slave_lock); 1511 read_lock(&bond->curr_slave_lock);
1499 1512
1500 bond_for_each_slave_rcu(bond, slave, iter) 1513 bond_for_each_slave_rcu(bond, slave, iter) {
1501 alb_send_learning_packets(slave, slave->dev->dev_addr); 1514 /* If updating current_active, use all currently
1515 * user mac addresses (!strict_match). Otherwise, only
1516 * use mac of the slave device.
1517 */
1518 strict_match = (slave != bond->curr_active_slave);
1519 alb_send_learning_packets(slave, slave->dev->dev_addr,
1520 strict_match);
1521 }
1502 1522
1503 read_unlock(&bond->curr_slave_lock); 1523 read_unlock(&bond->curr_slave_lock);
1504 1524
@@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
1721 } else { 1741 } else {
1722 /* set the new_slave to the bond mac address */ 1742 /* set the new_slave to the bond mac address */
1723 alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr); 1743 alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
1724 alb_send_learning_packets(new_slave, bond->dev->dev_addr); 1744 alb_send_learning_packets(new_slave, bond->dev->dev_addr,
1745 false);
1725 } 1746 }
1726 1747
1727 write_lock_bh(&bond->curr_slave_lock); 1748 write_lock_bh(&bond->curr_slave_lock);
@@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
1764 alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr); 1785 alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
1765 1786
1766 read_lock(&bond->lock); 1787 read_lock(&bond->lock);
1767 alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); 1788 alb_send_learning_packets(bond->curr_active_slave,
1789 bond_dev->dev_addr, false);
1768 if (bond->alb_info.rlb_enabled) { 1790 if (bond->alb_info.rlb_enabled) {
1769 /* inform clients mac address has changed */ 1791 /* inform clients mac address has changed */
1770 rlb_req_update_slave_clients(bond, bond->curr_active_slave); 1792 rlb_req_update_slave_clients(bond, bond->curr_active_slave);
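
alb_send_learning_packets() now takes a strict_match flag: for slaves other than the current active one it announces the slave MAC only on VLAN uppers that actually carry that address, while for the active slave it announces every VLAN device's own address. A compact user-space model of that per-VLAN decision (hypothetical struct vlan_upper and learning_addr(), not driver code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical, minimal picture of one VLAN device stacked on the bond. */
struct vlan_upper {
	const char *name;
	unsigned char dev_addr[6];
	unsigned short vid;
};

/* Models the per-VLAN decision in alb_send_learning_packets():
 * strict matching announces mac_addr only where the VLAN device carries
 * that very address; loose matching announces each VLAN's own address.
 * Returns the source address for the learning packet, or NULL to skip. */
static const unsigned char *learning_addr(const struct vlan_upper *u,
					  const unsigned char mac_addr[6],
					  bool strict_match)
{
	if (strict_match)
		return memcmp(mac_addr, u->dev_addr, 6) == 0 ? mac_addr : NULL;
	return u->dev_addr;
}

int main(void)
{
	const unsigned char slave_mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };
	struct vlan_upper same  = { "bond0.10", { 0x02, 0, 0, 0, 0, 0x01 }, 10 };
	struct vlan_upper other = { "bond0.20", { 0x02, 0, 0, 0, 0, 0x02 }, 20 };

	/* Non-active slave (strict): only the matching VLAN is announced. */
	printf("strict vid 10: %s\n", learning_addr(&same, slave_mac, true) ? "send" : "skip");
	printf("strict vid 20: %s\n", learning_addr(&other, slave_mac, true) ? "send" : "skip");

	/* Active slave (loose): every VLAN is announced with its own address. */
	printf("loose  vid 20: %s\n", learning_addr(&other, slave_mac, false) ? "send" : "skip");
	return 0;
}
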
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 69aff72c8957..d3a67896d435 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2126 */ 2126 */
2127static void bond_arp_send(struct net_device *slave_dev, int arp_op, 2127static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2128 __be32 dest_ip, __be32 src_ip, 2128 __be32 dest_ip, __be32 src_ip,
2129 struct bond_vlan_tag *inner, 2129 struct bond_vlan_tag *tags)
2130 struct bond_vlan_tag *outer)
2131{ 2130{
2132 struct sk_buff *skb; 2131 struct sk_buff *skb;
2132 int i;
2133 2133
2134 pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n", 2134 pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
2135 arp_op, slave_dev->name, &dest_ip, &src_ip); 2135 arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2141 net_err_ratelimited("ARP packet allocation failed\n"); 2141 net_err_ratelimited("ARP packet allocation failed\n");
2142 return; 2142 return;
2143 } 2143 }
2144 if (outer->vlan_id) {
2145 if (inner->vlan_id) {
2146 pr_debug("inner tag: proto %X vid %X\n",
2147 ntohs(inner->vlan_proto), inner->vlan_id);
2148 skb = __vlan_put_tag(skb, inner->vlan_proto,
2149 inner->vlan_id);
2150 if (!skb) {
2151 net_err_ratelimited("failed to insert inner VLAN tag\n");
2152 return;
2153 }
2154 }
2155 2144
2156 pr_debug("outer reg: proto %X vid %X\n", 2145 /* Go through all the tags backwards and add them to the packet */
2157 ntohs(outer->vlan_proto), outer->vlan_id); 2146 for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
2158 skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id); 2147 if (!tags[i].vlan_id)
2148 continue;
2149
2150 pr_debug("inner tag: proto %X vid %X\n",
2151 ntohs(tags[i].vlan_proto), tags[i].vlan_id);
2152 skb = __vlan_put_tag(skb, tags[i].vlan_proto,
2153 tags[i].vlan_id);
2154 if (!skb) {
2155 net_err_ratelimited("failed to insert inner VLAN tag\n");
2156 return;
2157 }
2158 }
2159 /* Set the outer tag */
2160 if (tags[0].vlan_id) {
2161 pr_debug("outer tag: proto %X vid %X\n",
2162 ntohs(tags[0].vlan_proto), tags[0].vlan_id);
2163 skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
2159 if (!skb) { 2164 if (!skb) {
2160 net_err_ratelimited("failed to insert outer VLAN tag\n"); 2165 net_err_ratelimited("failed to insert outer VLAN tag\n");
2161 return; 2166 return;
@@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
2164 arp_xmit(skb); 2169 arp_xmit(skb);
2165} 2170}
2166 2171
2172/* Validate the device path between the @start_dev and the @end_dev.
2173 * The path is valid if the @end_dev is reachable through device
2174 * stacking.
2175 * When the path is validated, collect any vlan information in the
2176 * path.
2177 */
2178static bool bond_verify_device_path(struct net_device *start_dev,
2179 struct net_device *end_dev,
2180 struct bond_vlan_tag *tags)
2181{
2182 struct net_device *upper;
2183 struct list_head *iter;
2184 int idx;
2185
2186 if (start_dev == end_dev)
2187 return true;
2188
2189 netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2190 if (bond_verify_device_path(upper, end_dev, tags)) {
2191 if (is_vlan_dev(upper)) {
2192 idx = vlan_get_encap_level(upper);
2193 if (idx >= BOND_MAX_VLAN_ENCAP)
2194 return false;
2195
2196 tags[idx].vlan_proto =
2197 vlan_dev_vlan_proto(upper);
2198 tags[idx].vlan_id = vlan_dev_vlan_id(upper);
2199 }
2200 return true;
2201 }
2202 }
2203
2204 return false;
2205}
2167 2206
2168static void bond_arp_send_all(struct bonding *bond, struct slave *slave) 2207static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2169{ 2208{
2170 struct net_device *upper, *vlan_upper;
2171 struct list_head *iter, *vlan_iter;
2172 struct rtable *rt; 2209 struct rtable *rt;
2173 struct bond_vlan_tag inner, outer; 2210 struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
2174 __be32 *targets = bond->params.arp_targets, addr; 2211 __be32 *targets = bond->params.arp_targets, addr;
2175 int i; 2212 int i;
2213 bool ret;
2176 2214
2177 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { 2215 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2178 pr_debug("basa: target %pI4\n", &targets[i]); 2216 pr_debug("basa: target %pI4\n", &targets[i]);
2179 inner.vlan_proto = 0; 2217 memset(tags, 0, sizeof(tags));
2180 inner.vlan_id = 0;
2181 outer.vlan_proto = 0;
2182 outer.vlan_id = 0;
2183 2218
2184 /* Find out through which dev should the packet go */ 2219 /* Find out through which dev should the packet go */
2185 rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 2220 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2192 net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", 2227 net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
2193 bond->dev->name, 2228 bond->dev->name,
2194 &targets[i]); 2229 &targets[i]);
2195 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer); 2230 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2231 0, tags);
2196 continue; 2232 continue;
2197 } 2233 }
2198 2234
@@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2201 goto found; 2237 goto found;
2202 2238
2203 rcu_read_lock(); 2239 rcu_read_lock();
2204 /* first we search only for vlan devices. for every vlan 2240 ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
2205 * found we verify its upper dev list, searching for the
2206 * rt->dst.dev. If found we save the tag of the vlan and
2207 * proceed to send the packet.
2208 */
2209 netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
2210 vlan_iter) {
2211 if (!is_vlan_dev(vlan_upper))
2212 continue;
2213
2214 if (vlan_upper == rt->dst.dev) {
2215 outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
2216 outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
2217 rcu_read_unlock();
2218 goto found;
2219 }
2220 netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
2221 iter) {
2222 if (upper == rt->dst.dev) {
2223 /* If the upper dev is a vlan dev too,
2224 * set the vlan tag to inner tag.
2225 */
2226 if (is_vlan_dev(upper)) {
2227 inner.vlan_proto = vlan_dev_vlan_proto(upper);
2228 inner.vlan_id = vlan_dev_vlan_id(upper);
2229 }
2230 outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
2231 outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
2232 rcu_read_unlock();
2233 goto found;
2234 }
2235 }
2236 }
2237
2238 /* if the device we're looking for is not on top of any of
2239 * our upper vlans, then just search for any dev that
2240 * matches, and in case it's a vlan - save the id
2241 */
2242 netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
2243 if (upper == rt->dst.dev) {
2244 rcu_read_unlock();
2245 goto found;
2246 }
2247 }
2248 rcu_read_unlock(); 2241 rcu_read_unlock();
2249 2242
2243 if (ret)
2244 goto found;
2245
2250 /* Not our device - skip */ 2246 /* Not our device - skip */
2251 pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", 2247 pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2252 bond->dev->name, &targets[i], 2248 bond->dev->name, &targets[i],
@@ -2259,7 +2255,7 @@ found:
2259 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); 2255 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
2260 ip_rt_put(rt); 2256 ip_rt_put(rt);
2261 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2257 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2262 addr, &inner, &outer); 2258 addr, tags);
2263 } 2259 }
2264} 2260}
2265 2261
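
bond_verify_device_path() replaces the two hand-rolled VLAN searches with a recursive walk: descend from the bond through the upper devices until the route's output device is found, then record each VLAN's protocol and ID on the way back up, indexed by its encapsulation level, with tags[0] consumed by bond_arp_send() as the outer tag. The sketch below models that walk in plain C (hypothetical struct net_dev; it assumes level 0 is the VLAN closest to the bond, matching how tags[0] is used above):

#include <stdbool.h>
#include <stdio.h>

#define MAX_VLAN_ENCAP 2	/* mirrors BOND_MAX_VLAN_ENCAP */

struct vlan_tag { unsigned short proto, vid; };

/* Hypothetical, minimal stand-in for a net_device and its upper devices. */
struct net_dev {
	const char *name;
	bool is_vlan;
	int encap_level;		/* 0 = VLAN closest to the bond */
	unsigned short proto, vid;
	struct net_dev *uppers[4];
	int n_uppers;
};

/* Depth-first search from start to end; on success, every VLAN device on
 * the way back up stores its tag at tags[encap_level]. */
static bool verify_device_path(struct net_dev *start, struct net_dev *end,
			       struct vlan_tag tags[MAX_VLAN_ENCAP])
{
	if (start == end)
		return true;

	for (int i = 0; i < start->n_uppers; i++) {
		struct net_dev *upper = start->uppers[i];

		if (verify_device_path(upper, end, tags)) {
			if (upper->is_vlan) {
				if (upper->encap_level >= MAX_VLAN_ENCAP)
					return false;	/* nested too deeply */
				tags[upper->encap_level].proto = upper->proto;
				tags[upper->encap_level].vid = upper->vid;
			}
			return true;
		}
	}
	return false;
}

int main(void)
{
	/* bond0 -> bond0.10 (outer tag) -> bond0.10.20 (inner tag) */
	struct net_dev inner = { "bond0.10.20", true, 1, 0x8100, 20, { NULL }, 0 };
	struct net_dev outer = { "bond0.10",    true, 0, 0x8100, 10, { &inner }, 1 };
	struct net_dev bond  = { "bond0",      false, 0, 0, 0, { &outer }, 1 };
	struct vlan_tag tags[MAX_VLAN_ENCAP] = { { 0, 0 }, { 0, 0 } };

	if (verify_device_path(&bond, &inner, tags))
		printf("outer vid %u, inner vid %u\n",
		       (unsigned)tags[0].vid, (unsigned)tags[1].vid);
	return 0;
}
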
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 724e30fa20b9..832070298446 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
125static const struct bond_opt_value bond_intmax_tbl[] = { 125static const struct bond_opt_value bond_intmax_tbl[] = {
126 { "off", 0, BOND_VALFLAG_DEFAULT}, 126 { "off", 0, BOND_VALFLAG_DEFAULT},
127 { "maxval", INT_MAX, BOND_VALFLAG_MAX}, 127 { "maxval", INT_MAX, BOND_VALFLAG_MAX},
128 { NULL, -1, 0}
128}; 129};
129 130
130static const struct bond_opt_value bond_lacp_rate_tbl[] = { 131static const struct bond_opt_value bond_lacp_rate_tbl[] = {
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 0e8b268da0a0..5f6babcfc26e 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -534,7 +534,7 @@ static ssize_t bonding_show_min_links(struct device *d,
534{ 534{
535 struct bonding *bond = to_bond(d); 535 struct bonding *bond = to_bond(d);
536 536
537 return sprintf(buf, "%d\n", bond->params.min_links); 537 return sprintf(buf, "%u\n", bond->params.min_links);
538} 538}
539 539
540static ssize_t bonding_store_min_links(struct device *d, 540static ssize_t bonding_store_min_links(struct device *d,
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b8bdd0acc8f3..00bea320e3b5 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -36,6 +36,7 @@
36 36
37#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" 37#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
38 38
39#define BOND_MAX_VLAN_ENCAP 2
39#define BOND_MAX_ARP_TARGETS 16 40#define BOND_MAX_ARP_TARGETS 16
40 41
41#define BOND_DEFAULT_MIIMON 100 42#define BOND_DEFAULT_MIIMON 100
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a5c8dcfa8357..95e04e2002da 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -60,6 +60,8 @@
60#define CONTROL_IE BIT(1) 60#define CONTROL_IE BIT(1)
61#define CONTROL_INIT BIT(0) 61#define CONTROL_INIT BIT(0)
62 62
63#define CONTROL_IRQMSK (CONTROL_EIE | CONTROL_IE | CONTROL_SIE)
64
63/* test register */ 65/* test register */
64#define TEST_RX BIT(7) 66#define TEST_RX BIT(7)
65#define TEST_TX1 BIT(6) 67#define TEST_TX1 BIT(6)
@@ -108,11 +110,14 @@
108#define IF_COMM_CONTROL BIT(4) 110#define IF_COMM_CONTROL BIT(4)
109#define IF_COMM_CLR_INT_PND BIT(3) 111#define IF_COMM_CLR_INT_PND BIT(3)
110#define IF_COMM_TXRQST BIT(2) 112#define IF_COMM_TXRQST BIT(2)
113#define IF_COMM_CLR_NEWDAT IF_COMM_TXRQST
111#define IF_COMM_DATAA BIT(1) 114#define IF_COMM_DATAA BIT(1)
112#define IF_COMM_DATAB BIT(0) 115#define IF_COMM_DATAB BIT(0)
113#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \ 116
114 IF_COMM_CONTROL | IF_COMM_TXRQST | \ 117/* TX buffer setup */
115 IF_COMM_DATAA | IF_COMM_DATAB) 118#define IF_COMM_TX (IF_COMM_ARB | IF_COMM_CONTROL | \
119 IF_COMM_TXRQST | \
120 IF_COMM_DATAA | IF_COMM_DATAB)
116 121
117/* For the low buffers we clear the interrupt bit, but keep newdat */ 122/* For the low buffers we clear the interrupt bit, but keep newdat */
118#define IF_COMM_RCV_LOW (IF_COMM_MASK | IF_COMM_ARB | \ 123#define IF_COMM_RCV_LOW (IF_COMM_MASK | IF_COMM_ARB | \
@@ -120,12 +125,19 @@
120 IF_COMM_DATAA | IF_COMM_DATAB) 125 IF_COMM_DATAA | IF_COMM_DATAB)
121 126
122/* For the high buffers we clear the interrupt bit and newdat */ 127/* For the high buffers we clear the interrupt bit and newdat */
123#define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_TXRQST) 128#define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)
129
130
131/* Receive setup of message objects */
132#define IF_COMM_RCV_SETUP (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)
133
134/* Invalidation of message objects */
135#define IF_COMM_INVAL (IF_COMM_ARB | IF_COMM_CONTROL)
124 136
125/* IFx arbitration */ 137/* IFx arbitration */
126#define IF_ARB_MSGVAL BIT(15) 138#define IF_ARB_MSGVAL BIT(31)
127#define IF_ARB_MSGXTD BIT(14) 139#define IF_ARB_MSGXTD BIT(30)
128#define IF_ARB_TRANSMIT BIT(13) 140#define IF_ARB_TRANSMIT BIT(29)
129 141
130/* IFx message control */ 142/* IFx message control */
131#define IF_MCONT_NEWDAT BIT(15) 143#define IF_MCONT_NEWDAT BIT(15)
@@ -139,19 +151,17 @@
139#define IF_MCONT_EOB BIT(7) 151#define IF_MCONT_EOB BIT(7)
140#define IF_MCONT_DLC_MASK 0xf 152#define IF_MCONT_DLC_MASK 0xf
141 153
154#define IF_MCONT_RCV (IF_MCONT_RXIE | IF_MCONT_UMASK)
155#define IF_MCONT_RCV_EOB (IF_MCONT_RCV | IF_MCONT_EOB)
156
157#define IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB)
158
142/* 159/*
143 * Use IF1 for RX and IF2 for TX 160 * Use IF1 for RX and IF2 for TX
144 */ 161 */
145#define IF_RX 0 162#define IF_RX 0
146#define IF_TX 1 163#define IF_TX 1
147 164
148/* status interrupt */
149#define STATUS_INTERRUPT 0x8000
150
151/* global interrupt masks */
152#define ENABLE_ALL_INTERRUPTS 1
153#define DISABLE_ALL_INTERRUPTS 0
154
155/* minimum timeout for checking BUSY status */ 165/* minimum timeout for checking BUSY status */
156#define MIN_TIMEOUT_VALUE 6 166#define MIN_TIMEOUT_VALUE 6
157 167
@@ -171,6 +181,7 @@ enum c_can_lec_type {
171 LEC_BIT0_ERROR, 181 LEC_BIT0_ERROR,
172 LEC_CRC_ERROR, 182 LEC_CRC_ERROR,
173 LEC_UNUSED, 183 LEC_UNUSED,
184 LEC_MASK = LEC_UNUSED,
174}; 185};
175 186
176/* 187/*
@@ -226,143 +237,115 @@ static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
226 priv->raminit(priv, enable); 237 priv->raminit(priv, enable);
227} 238}
228 239
229static inline int get_tx_next_msg_obj(const struct c_can_priv *priv) 240static void c_can_irq_control(struct c_can_priv *priv, bool enable)
230{
231 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
232 C_CAN_MSG_OBJ_TX_FIRST;
233}
234
235static inline int get_tx_echo_msg_obj(int txecho)
236{
237 return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
238}
239
240static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
241{
242 u32 val = priv->read_reg(priv, index);
243 val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
244 return val;
245}
246
247static void c_can_enable_all_interrupts(struct c_can_priv *priv,
248 int enable)
249{ 241{
250 unsigned int cntrl_save = priv->read_reg(priv, 242 u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;
251 C_CAN_CTRL_REG);
252 243
253 if (enable) 244 if (enable)
254 cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE); 245 ctrl |= CONTROL_IRQMSK;
255 else
256 cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
257 246
258 priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save); 247 priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
259} 248}
260 249
261static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface) 250static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj)
262{ 251{
263 int count = MIN_TIMEOUT_VALUE; 252 struct c_can_priv *priv = netdev_priv(dev);
253 int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);
264 254
265 while (count && priv->read_reg(priv, 255 priv->write_reg(priv, reg + 1, cmd);
266 C_CAN_IFACE(COMREQ_REG, iface)) & 256 priv->write_reg(priv, reg, obj);
267 IF_COMR_BUSY) { 257
268 count--; 258 for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
259 if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
260 return;
269 udelay(1); 261 udelay(1);
270 } 262 }
263 netdev_err(dev, "Updating object timed out\n");
271 264
272 if (!count) 265}
273 return 1;
274 266
275 return 0; 267static inline void c_can_object_get(struct net_device *dev, int iface,
268 u32 obj, u32 cmd)
269{
270 c_can_obj_update(dev, iface, cmd, obj);
276} 271}
277 272
278static inline void c_can_object_get(struct net_device *dev, 273static inline void c_can_object_put(struct net_device *dev, int iface,
279 int iface, int objno, int mask) 274 u32 obj, u32 cmd)
280{ 275{
281 struct c_can_priv *priv = netdev_priv(dev); 276 c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
277}
282 278
283 /* 279/*
284 * As per specs, after writting the message object number in the 280 * Note: According to documentation clearing TXIE while MSGVAL is set
285 * IF command request register the transfer b/w interface 281 * is not allowed, but works nicely on C/DCAN. And that lowers the I/O
286 * register and message RAM must be complete in 6 CAN-CLK 282 * load significantly.
287 * period. 283 */
288 */ 284static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj)
289 priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface), 285{
290 IFX_WRITE_LOW_16BIT(mask)); 286 struct c_can_priv *priv = netdev_priv(dev);
291 priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
292 IFX_WRITE_LOW_16BIT(objno));
293 287
294 if (c_can_msg_obj_is_busy(priv, iface)) 288 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
295 netdev_err(dev, "timed out in object get\n"); 289 c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
296} 290}
297 291
298static inline void c_can_object_put(struct net_device *dev, 292static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
299 int iface, int objno, int mask)
300{ 293{
301 struct c_can_priv *priv = netdev_priv(dev); 294 struct c_can_priv *priv = netdev_priv(dev);
302 295
303 /* 296 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
304 * As per specs, after writting the message object number in the 297 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
305 * IF command request register the transfer b/w interface 298 c_can_inval_tx_object(dev, iface, obj);
306 * register and message RAM must be complete in 6 CAN-CLK
307 * period.
308 */
309 priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
310 (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
311 priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
312 IFX_WRITE_LOW_16BIT(objno));
313
314 if (c_can_msg_obj_is_busy(priv, iface))
315 netdev_err(dev, "timed out in object put\n");
316} 299}
317 300
318static void c_can_write_msg_object(struct net_device *dev, 301static void c_can_setup_tx_object(struct net_device *dev, int iface,
319 int iface, struct can_frame *frame, int objno) 302 struct can_frame *frame, int idx)
320{ 303{
321 int i;
322 u16 flags = 0;
323 unsigned int id;
324 struct c_can_priv *priv = netdev_priv(dev); 304 struct c_can_priv *priv = netdev_priv(dev);
325 305 u16 ctrl = IF_MCONT_TX | frame->can_dlc;
326 if (!(frame->can_id & CAN_RTR_FLAG)) 306 bool rtr = frame->can_id & CAN_RTR_FLAG;
327 flags |= IF_ARB_TRANSMIT; 307 u32 arb = IF_ARB_MSGVAL;
308 int i;
328 309
329 if (frame->can_id & CAN_EFF_FLAG) { 310 if (frame->can_id & CAN_EFF_FLAG) {
330 id = frame->can_id & CAN_EFF_MASK; 311 arb |= frame->can_id & CAN_EFF_MASK;
331 flags |= IF_ARB_MSGXTD; 312 arb |= IF_ARB_MSGXTD;
332 } else 313 } else {
333 id = ((frame->can_id & CAN_SFF_MASK) << 18); 314 arb |= (frame->can_id & CAN_SFF_MASK) << 18;
315 }
316
317 if (!rtr)
318 arb |= IF_ARB_TRANSMIT;
319
320 /*
321 * If we change the DIR bit, we need to invalidate the buffer
322 * first, i.e. clear the MSGVAL flag in the arbiter.
323 */
324 if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
325 u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
326
327 c_can_inval_msg_object(dev, iface, obj);
328 change_bit(idx, &priv->tx_dir);
329 }
334 330
335 flags |= IF_ARB_MSGVAL; 331 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
332 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16);
336 333
337 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 334 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
338 IFX_WRITE_LOW_16BIT(id));
339 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
340 IFX_WRITE_HIGH_16BIT(id));
341 335
342 for (i = 0; i < frame->can_dlc; i += 2) { 336 for (i = 0; i < frame->can_dlc; i += 2) {
343 priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2, 337 priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
344 frame->data[i] | (frame->data[i + 1] << 8)); 338 frame->data[i] | (frame->data[i + 1] << 8));
345 } 339 }
346
347 /* enable interrupt for this message object */
348 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
349 IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
350 frame->can_dlc);
351 c_can_object_put(dev, iface, objno, IF_COMM_ALL);
352} 340}
353 341
354static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev, 342static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
355 int iface, 343 int iface)
356 int ctrl_mask)
357{ 344{
358 int i; 345 int i;
359 struct c_can_priv *priv = netdev_priv(dev);
360 346
361 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) { 347 for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
362 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 348 c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
363 ctrl_mask & ~IF_MCONT_NEWDAT);
364 c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
365 }
366} 349}
367 350
368static int c_can_handle_lost_msg_obj(struct net_device *dev, 351static int c_can_handle_lost_msg_obj(struct net_device *dev,
@@ -377,6 +360,9 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev,
377 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl); 360 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
378 c_can_object_put(dev, iface, objno, IF_COMM_CONTROL); 361 c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
379 362
363 stats->rx_errors++;
364 stats->rx_over_errors++;
365
380 /* create an error msg */ 366 /* create an error msg */
381 skb = alloc_can_err_skb(dev, &frame); 367 skb = alloc_can_err_skb(dev, &frame);
382 if (unlikely(!skb)) 368 if (unlikely(!skb))
@@ -384,22 +370,18 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev,
384 370
385 frame->can_id |= CAN_ERR_CRTL; 371 frame->can_id |= CAN_ERR_CRTL;
386 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 372 frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
387 stats->rx_errors++;
388 stats->rx_over_errors++;
389 373
390 netif_receive_skb(skb); 374 netif_receive_skb(skb);
391 return 1; 375 return 1;
392} 376}
393 377
394static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl) 378static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
395{ 379{
396 u16 flags, data;
397 int i;
398 unsigned int val;
399 struct c_can_priv *priv = netdev_priv(dev);
400 struct net_device_stats *stats = &dev->stats; 380 struct net_device_stats *stats = &dev->stats;
401 struct sk_buff *skb; 381 struct c_can_priv *priv = netdev_priv(dev);
402 struct can_frame *frame; 382 struct can_frame *frame;
383 struct sk_buff *skb;
384 u32 arb, data;
403 385
404 skb = alloc_can_skb(dev, &frame); 386 skb = alloc_can_skb(dev, &frame);
405 if (!skb) { 387 if (!skb) {
@@ -409,115 +391,82 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
409 391
410 frame->can_dlc = get_can_dlc(ctrl & 0x0F); 392 frame->can_dlc = get_can_dlc(ctrl & 0x0F);
411 393
412 flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)); 394 arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface));
413 val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) | 395 arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;
414 (flags << 16);
415 396
416 if (flags & IF_ARB_MSGXTD) 397 if (arb & IF_ARB_MSGXTD)
417 frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG; 398 frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
418 else 399 else
419 frame->can_id = (val >> 18) & CAN_SFF_MASK; 400 frame->can_id = (arb >> 18) & CAN_SFF_MASK;
420 401
421 if (flags & IF_ARB_TRANSMIT) 402 if (arb & IF_ARB_TRANSMIT) {
422 frame->can_id |= CAN_RTR_FLAG; 403 frame->can_id |= CAN_RTR_FLAG;
423 else { 404 } else {
424 for (i = 0; i < frame->can_dlc; i += 2) { 405 int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
425 data = priv->read_reg(priv, 406
426 C_CAN_IFACE(DATA1_REG, iface) + i / 2); 407 for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
408 data = priv->read_reg(priv, dreg);
427 frame->data[i] = data; 409 frame->data[i] = data;
428 frame->data[i + 1] = data >> 8; 410 frame->data[i + 1] = data >> 8;
429 } 411 }
430 } 412 }
431 413
432 netif_receive_skb(skb);
433
434 stats->rx_packets++; 414 stats->rx_packets++;
435 stats->rx_bytes += frame->can_dlc; 415 stats->rx_bytes += frame->can_dlc;
416
417 netif_receive_skb(skb);
436 return 0; 418 return 0;
437} 419}
438 420
439static void c_can_setup_receive_object(struct net_device *dev, int iface, 421static void c_can_setup_receive_object(struct net_device *dev, int iface,
440 int objno, unsigned int mask, 422 u32 obj, u32 mask, u32 id, u32 mcont)
441 unsigned int id, unsigned int mcont)
442{ 423{
443 struct c_can_priv *priv = netdev_priv(dev); 424 struct c_can_priv *priv = netdev_priv(dev);
444 425
445 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), 426 mask |= BIT(29);
446 IFX_WRITE_LOW_16BIT(mask)); 427 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
447 428 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);
448 /* According to C_CAN documentation, the reserved bit
449 * in IFx_MASK2 register is fixed 1
450 */
451 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
452 IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
453 429
454 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 430 id |= IF_ARB_MSGVAL;
455 IFX_WRITE_LOW_16BIT(id)); 431 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id);
456 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 432 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);
457 (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
458 433
459 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont); 434 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
460 c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST); 435 c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
461
462 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
463 c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
464}
465
466static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
467{
468 struct c_can_priv *priv = netdev_priv(dev);
469
470 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
471 priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
472 priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
473
474 c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
475
476 netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
477 c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
478}
479
480static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
481{
482 int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
483
484 /*
485 * as transmission request register's bit n-1 corresponds to
486 * message object n, we need to handle the same properly.
487 */
488 if (val & (1 << (objno - 1)))
489 return 1;
490
491 return 0;
492} 436}
493 437
494static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, 438static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
495 struct net_device *dev) 439 struct net_device *dev)
496{ 440{
497 u32 msg_obj_no;
498 struct c_can_priv *priv = netdev_priv(dev);
499 struct can_frame *frame = (struct can_frame *)skb->data; 441 struct can_frame *frame = (struct can_frame *)skb->data;
442 struct c_can_priv *priv = netdev_priv(dev);
443 u32 idx, obj;
500 444
501 if (can_dropped_invalid_skb(dev, skb)) 445 if (can_dropped_invalid_skb(dev, skb))
502 return NETDEV_TX_OK; 446 return NETDEV_TX_OK;
503
504 spin_lock_bh(&priv->xmit_lock);
505 msg_obj_no = get_tx_next_msg_obj(priv);
506
507 /* prepare message object for transmission */
508 c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
509 priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
510 can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
511
512 /* 447 /*
513 * we have to stop the queue in case of a wrap around or 448 * This is not a FIFO. C/D_CAN sends out the buffers
514 * if the next TX message object is still in use 449 * prioritized. The lowest buffer number wins.
515 */ 450 */
516 priv->tx_next++; 451 idx = fls(atomic_read(&priv->tx_active));
517 if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) || 452 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
518 (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0) 453
454 /* If this is the last buffer, stop the xmit queue */
455 if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
519 netif_stop_queue(dev); 456 netif_stop_queue(dev);
520 spin_unlock_bh(&priv->xmit_lock); 457 /*
458 * Store the message in the interface so we can call
459 * can_put_echo_skb(). We must do this before we enable
460 * transmit as we might race against do_tx().
461 */
462 c_can_setup_tx_object(dev, IF_TX, frame, idx);
463 priv->dlc[idx] = frame->can_dlc;
464 can_put_echo_skb(skb, dev, idx);
465
466 /* Update the active bits */
467 atomic_add((1 << idx), &priv->tx_active);
468 /* Start transmission */
469 c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);
521 470
522 return NETDEV_TX_OK; 471 return NETDEV_TX_OK;
523} 472}
@@ -594,11 +543,10 @@ static void c_can_configure_msg_objects(struct net_device *dev)
594 543
595 /* setup receive message objects */ 544 /* setup receive message objects */
596 for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++) 545 for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
597 c_can_setup_receive_object(dev, IF_RX, i, 0, 0, 546 c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);
598 (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
599 547
600 c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0, 548 c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
601 IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK); 549 IF_MCONT_RCV_EOB);
602} 550}
603 551
604/* 552/*
@@ -612,30 +560,22 @@ static int c_can_chip_config(struct net_device *dev)
612 struct c_can_priv *priv = netdev_priv(dev); 560 struct c_can_priv *priv = netdev_priv(dev);
613 561
614 /* enable automatic retransmission */ 562 /* enable automatic retransmission */
615 priv->write_reg(priv, C_CAN_CTRL_REG, 563 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
616 CONTROL_ENABLE_AR);
617 564
618 if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) && 565 if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
619 (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) { 566 (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
620 /* loopback + silent mode : useful for hot self-test */ 567 /* loopback + silent mode : useful for hot self-test */
621 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | 568 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
622 CONTROL_SIE | CONTROL_IE | CONTROL_TEST); 569 priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
623 priv->write_reg(priv, C_CAN_TEST_REG,
624 TEST_LBACK | TEST_SILENT);
625 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { 570 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
626 /* loopback mode : useful for self-test function */ 571 /* loopback mode : useful for self-test function */
627 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | 572 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
628 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
629 priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK); 573 priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
630 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) { 574 } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
631 /* silent mode : bus-monitoring mode */ 575 /* silent mode : bus-monitoring mode */
632 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | 576 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
633 CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
634 priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT); 577 priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
635 } else 578 }
636 /* normal mode*/
637 priv->write_reg(priv, C_CAN_CTRL_REG,
638 CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
639 579
640 /* configure message objects */ 580 /* configure message objects */
641 c_can_configure_msg_objects(dev); 581 c_can_configure_msg_objects(dev);
@@ -643,6 +583,11 @@ static int c_can_chip_config(struct net_device *dev)
643 /* set a `lec` value so that we can check for updates later */ 583 /* set a `lec` value so that we can check for updates later */
644 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); 584 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
645 585
586 /* Clear all internal status */
587 atomic_set(&priv->tx_active, 0);
588 priv->rxmasked = 0;
589 priv->tx_dir = 0;
590
646 /* set bittiming params */ 591 /* set bittiming params */
647 return c_can_set_bittiming(dev); 592 return c_can_set_bittiming(dev);
648} 593}
@@ -657,13 +602,11 @@ static int c_can_start(struct net_device *dev)
657 if (err) 602 if (err)
658 return err; 603 return err;
659 604
660 priv->can.state = CAN_STATE_ERROR_ACTIVE; 605 /* Setup the command for new messages */
661 606 priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
662 /* reset tx helper pointers */ 607 IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
663 priv->tx_next = priv->tx_echo = 0;
664 608
665 /* enable status change, error and module interrupts */ 609 priv->can.state = CAN_STATE_ERROR_ACTIVE;
666 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
667 610
668 return 0; 611 return 0;
669} 612}
@@ -672,15 +615,13 @@ static void c_can_stop(struct net_device *dev)
672{ 615{
673 struct c_can_priv *priv = netdev_priv(dev); 616 struct c_can_priv *priv = netdev_priv(dev);
674 617
675 /* disable all interrupts */ 618 c_can_irq_control(priv, false);
676 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
677
678 /* set the state as STOPPED */
679 priv->can.state = CAN_STATE_STOPPED; 619 priv->can.state = CAN_STATE_STOPPED;
680} 620}
681 621
682static int c_can_set_mode(struct net_device *dev, enum can_mode mode) 622static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
683{ 623{
624 struct c_can_priv *priv = netdev_priv(dev);
684 int err; 625 int err;
685 626
686 switch (mode) { 627 switch (mode) {
@@ -689,6 +630,7 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
689 if (err) 630 if (err)
690 return err; 631 return err;
691 netif_wake_queue(dev); 632 netif_wake_queue(dev);
633 c_can_irq_control(priv, true);
692 break; 634 break;
693 default: 635 default:
694 return -EOPNOTSUPP; 636 return -EOPNOTSUPP;
@@ -724,42 +666,29 @@ static int c_can_get_berr_counter(const struct net_device *dev,
724 return err; 666 return err;
725} 667}
726 668
727/*
728 * priv->tx_echo holds the number of the oldest can_frame put for
729 * transmission into the hardware, but not yet ACKed by the CAN tx
730 * complete IRQ.
731 *
732 * We iterate from priv->tx_echo to priv->tx_next and check if the
733 * packet has been transmitted, echo it back to the CAN framework.
734 * If we discover a not yet transmitted packet, stop looking for more.
735 */
736static void c_can_do_tx(struct net_device *dev) 669static void c_can_do_tx(struct net_device *dev)
737{ 670{
738 struct c_can_priv *priv = netdev_priv(dev); 671 struct c_can_priv *priv = netdev_priv(dev);
739 struct net_device_stats *stats = &dev->stats; 672 struct net_device_stats *stats = &dev->stats;
740 u32 val, obj, pkts = 0, bytes = 0; 673 u32 idx, obj, pkts = 0, bytes = 0, pend, clr;
741
742 spin_lock_bh(&priv->xmit_lock);
743
744 for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
745 obj = get_tx_echo_msg_obj(priv->tx_echo);
746 val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
747 674
748 if (val & (1 << (obj - 1))) 675 clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
749 break;
750 676
751 can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST); 677 while ((idx = ffs(pend))) {
752 bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST]; 678 idx--;
679 pend &= ~(1 << idx);
680 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
681 c_can_inval_tx_object(dev, IF_RX, obj);
682 can_get_echo_skb(dev, idx);
683 bytes += priv->dlc[idx];
753 pkts++; 684 pkts++;
754 c_can_inval_msg_object(dev, IF_TX, obj);
755 } 685 }
756 686
757 /* restart queue if wrap-up or if queue stalled on last pkt */ 687 /* Clear the bits in the tx_active mask */
758 if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) || 688 atomic_sub(clr, &priv->tx_active);
759 ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
760 netif_wake_queue(dev);
761 689
762 spin_unlock_bh(&priv->xmit_lock); 690 if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
691 netif_wake_queue(dev);
763 692
764 if (pkts) { 693 if (pkts) {
765 stats->tx_bytes += bytes; 694 stats->tx_bytes += bytes;
@@ -800,18 +729,28 @@ static u32 c_can_adjust_pending(u32 pend)
800 return pend & ~((1 << lasts) - 1); 729 return pend & ~((1 << lasts) - 1);
801} 730}
802 731
732static inline void c_can_rx_object_get(struct net_device *dev,
733 struct c_can_priv *priv, u32 obj)
734{
735 c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
736}
737
738static inline void c_can_rx_finalize(struct net_device *dev,
739 struct c_can_priv *priv, u32 obj)
740{
741 if (priv->type != BOSCH_D_CAN)
742 c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
743}
744
803static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, 745static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
804 u32 pend, int quota) 746 u32 pend, int quota)
805{ 747{
806 u32 pkts = 0, ctrl, obj, mcmd; 748 u32 pkts = 0, ctrl, obj;
807 749
808 while ((obj = ffs(pend)) && quota > 0) { 750 while ((obj = ffs(pend)) && quota > 0) {
809 pend &= ~BIT(obj - 1); 751 pend &= ~BIT(obj - 1);
810 752
811 mcmd = obj < C_CAN_MSG_RX_LOW_LAST ? 753 c_can_rx_object_get(dev, priv, obj);
812 IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
813
814 c_can_object_get(dev, IF_RX, obj, mcmd);
815 ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX)); 754 ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
816 755
817 if (ctrl & IF_MCONT_MSGLST) { 756 if (ctrl & IF_MCONT_MSGLST) {
@@ -833,9 +772,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
833 /* read the data from the message object */ 772 /* read the data from the message object */
834 c_can_read_msg_object(dev, IF_RX, ctrl); 773 c_can_read_msg_object(dev, IF_RX, ctrl);
835 774
836 if (obj == C_CAN_MSG_RX_LOW_LAST) 775 c_can_rx_finalize(dev, priv, obj);
837 /* activate all lower message objects */
838 c_can_activate_all_lower_rx_msg_obj(dev, IF_RX, ctrl);
839 776
840 pkts++; 777 pkts++;
841 quota--; 778 quota--;
@@ -844,6 +781,13 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
844 return pkts; 781 return pkts;
845} 782}
846 783
784static inline u32 c_can_get_pending(struct c_can_priv *priv)
785{
786 u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
787
788 return pend;
789}
790
847/* 791/*
848 * theory of operation: 792 * theory of operation:
849 * 793 *
@@ -853,18 +797,9 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
853 * has arrived. To work-around this issue, we keep two groups of message 797 * has arrived. To work-around this issue, we keep two groups of message
854 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. 798 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
855 * 799 *
856 * To ensure in-order frame reception we use the following 800 * We clear the newdat bit right away.
857 * approach while re-activating a message object to receive further 801 *
858 * frames: 802 * This can result in packet reordering when the readout is slow.
859 * - if the current message object number is lower than
860 * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
861 * the INTPND bit.
862 * - if the current message object number is equal to
863 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
864 * receive message objects.
865 * - if the current message object number is greater than
866 * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
867 * only this message object.
868 */ 803 */
869static int c_can_do_rx_poll(struct net_device *dev, int quota) 804static int c_can_do_rx_poll(struct net_device *dev, int quota)
870{ 805{
@@ -880,7 +815,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
880 815
881 while (quota > 0) { 816 while (quota > 0) {
882 if (!pend) { 817 if (!pend) {
883 pend = priv->read_reg(priv, C_CAN_INTPND1_REG); 818 pend = c_can_get_pending(priv);
884 if (!pend) 819 if (!pend)
885 break; 820 break;
886 /* 821 /*
@@ -905,12 +840,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
905 return pkts; 840 return pkts;
906} 841}
907 842
908static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
909{
910 return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
911 (priv->current_status & LEC_UNUSED);
912}
913
914static int c_can_handle_state_change(struct net_device *dev, 843static int c_can_handle_state_change(struct net_device *dev,
915 enum c_can_bus_error_types error_type) 844 enum c_can_bus_error_types error_type)
916{ 845{
@@ -922,6 +851,26 @@ static int c_can_handle_state_change(struct net_device *dev,
922 struct sk_buff *skb; 851 struct sk_buff *skb;
923 struct can_berr_counter bec; 852 struct can_berr_counter bec;
924 853
854 switch (error_type) {
855 case C_CAN_ERROR_WARNING:
856 /* error warning state */
857 priv->can.can_stats.error_warning++;
858 priv->can.state = CAN_STATE_ERROR_WARNING;
859 break;
860 case C_CAN_ERROR_PASSIVE:
861 /* error passive state */
862 priv->can.can_stats.error_passive++;
863 priv->can.state = CAN_STATE_ERROR_PASSIVE;
864 break;
865 case C_CAN_BUS_OFF:
866 /* bus-off state */
867 priv->can.state = CAN_STATE_BUS_OFF;
868 can_bus_off(dev);
869 break;
870 default:
871 break;
872 }
873
925 /* propagate the error condition to the CAN stack */ 874 /* propagate the error condition to the CAN stack */
926 skb = alloc_can_err_skb(dev, &cf); 875 skb = alloc_can_err_skb(dev, &cf);
927 if (unlikely(!skb)) 876 if (unlikely(!skb))
@@ -935,8 +884,6 @@ static int c_can_handle_state_change(struct net_device *dev,
935 switch (error_type) { 884 switch (error_type) {
936 case C_CAN_ERROR_WARNING: 885 case C_CAN_ERROR_WARNING:
937 /* error warning state */ 886 /* error warning state */
938 priv->can.can_stats.error_warning++;
939 priv->can.state = CAN_STATE_ERROR_WARNING;
940 cf->can_id |= CAN_ERR_CRTL; 887 cf->can_id |= CAN_ERR_CRTL;
941 cf->data[1] = (bec.txerr > bec.rxerr) ? 888 cf->data[1] = (bec.txerr > bec.rxerr) ?
942 CAN_ERR_CRTL_TX_WARNING : 889 CAN_ERR_CRTL_TX_WARNING :
@@ -947,8 +894,6 @@ static int c_can_handle_state_change(struct net_device *dev,
947 break; 894 break;
948 case C_CAN_ERROR_PASSIVE: 895 case C_CAN_ERROR_PASSIVE:
949 /* error passive state */ 896 /* error passive state */
950 priv->can.can_stats.error_passive++;
951 priv->can.state = CAN_STATE_ERROR_PASSIVE;
952 cf->can_id |= CAN_ERR_CRTL; 897 cf->can_id |= CAN_ERR_CRTL;
953 if (rx_err_passive) 898 if (rx_err_passive)
954 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; 899 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
@@ -960,22 +905,16 @@ static int c_can_handle_state_change(struct net_device *dev,
960 break; 905 break;
961 case C_CAN_BUS_OFF: 906 case C_CAN_BUS_OFF:
962 /* bus-off state */ 907 /* bus-off state */
963 priv->can.state = CAN_STATE_BUS_OFF;
964 cf->can_id |= CAN_ERR_BUSOFF; 908 cf->can_id |= CAN_ERR_BUSOFF;
965 /*
966 * disable all interrupts in bus-off mode to ensure that
967 * the CPU is not hogged down
968 */
969 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
970 can_bus_off(dev); 909 can_bus_off(dev);
971 break; 910 break;
972 default: 911 default:
973 break; 912 break;
974 } 913 }
975 914
976 netif_receive_skb(skb);
977 stats->rx_packets++; 915 stats->rx_packets++;
978 stats->rx_bytes += cf->can_dlc; 916 stats->rx_bytes += cf->can_dlc;
917 netif_receive_skb(skb);
979 918
980 return 1; 919 return 1;
981} 920}
@@ -996,6 +935,13 @@ static int c_can_handle_bus_err(struct net_device *dev,
996 if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR) 935 if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
997 return 0; 936 return 0;
998 937
938 if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
939 return 0;
940
941 /* common for all type of bus errors */
942 priv->can.can_stats.bus_error++;
943 stats->rx_errors++;
944
999 /* propagate the error condition to the CAN stack */ 945 /* propagate the error condition to the CAN stack */
1000 skb = alloc_can_err_skb(dev, &cf); 946 skb = alloc_can_err_skb(dev, &cf);
1001 if (unlikely(!skb)) 947 if (unlikely(!skb))
@@ -1005,10 +951,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
1005 * check for 'last error code' which tells us the 951 * check for 'last error code' which tells us the
1006 * type of the last error to occur on the CAN bus 952 * type of the last error to occur on the CAN bus
1007 */ 953 */
1008
1009 /* common for all type of bus errors */
1010 priv->can.can_stats.bus_error++;
1011 stats->rx_errors++;
1012 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; 954 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
1013 cf->data[2] |= CAN_ERR_PROT_UNSPEC; 955 cf->data[2] |= CAN_ERR_PROT_UNSPEC;
1014 956
@@ -1043,95 +985,64 @@ static int c_can_handle_bus_err(struct net_device *dev,
1043 break; 985 break;
1044 } 986 }
1045 987
1046 /* set a `lec` value so that we can check for updates later */
1047 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
1048
1049 netif_receive_skb(skb);
1050 stats->rx_packets++; 988 stats->rx_packets++;
1051 stats->rx_bytes += cf->can_dlc; 989 stats->rx_bytes += cf->can_dlc;
1052 990 netif_receive_skb(skb);
1053 return 1; 991 return 1;
1054} 992}
1055 993
1056static int c_can_poll(struct napi_struct *napi, int quota) 994static int c_can_poll(struct napi_struct *napi, int quota)
1057{ 995{
1058 u16 irqstatus;
1059 int lec_type = 0;
1060 int work_done = 0;
1061 struct net_device *dev = napi->dev; 996 struct net_device *dev = napi->dev;
1062 struct c_can_priv *priv = netdev_priv(dev); 997 struct c_can_priv *priv = netdev_priv(dev);
998 u16 curr, last = priv->last_status;
999 int work_done = 0;
1063 1000
1064 irqstatus = priv->irqstatus; 1001 priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
1065 if (!irqstatus) 1002 /* Ack status on C_CAN. D_CAN is self clearing */
1066 goto end; 1003 if (priv->type != BOSCH_D_CAN)
1004 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
1067 1005
1068 /* status events have the highest priority */ 1006 /* handle state changes */
1069 if (irqstatus == STATUS_INTERRUPT) { 1007 if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
1070 priv->current_status = priv->read_reg(priv, 1008 netdev_dbg(dev, "entered error warning state\n");
1071 C_CAN_STS_REG); 1009 work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
1072 1010 }
1073 /* handle Tx/Rx events */
1074 if (priv->current_status & STATUS_TXOK)
1075 priv->write_reg(priv, C_CAN_STS_REG,
1076 priv->current_status & ~STATUS_TXOK);
1077
1078 if (priv->current_status & STATUS_RXOK)
1079 priv->write_reg(priv, C_CAN_STS_REG,
1080 priv->current_status & ~STATUS_RXOK);
1081
1082 /* handle state changes */
1083 if ((priv->current_status & STATUS_EWARN) &&
1084 (!(priv->last_status & STATUS_EWARN))) {
1085 netdev_dbg(dev, "entered error warning state\n");
1086 work_done += c_can_handle_state_change(dev,
1087 C_CAN_ERROR_WARNING);
1088 }
1089 if ((priv->current_status & STATUS_EPASS) &&
1090 (!(priv->last_status & STATUS_EPASS))) {
1091 netdev_dbg(dev, "entered error passive state\n");
1092 work_done += c_can_handle_state_change(dev,
1093 C_CAN_ERROR_PASSIVE);
1094 }
1095 if ((priv->current_status & STATUS_BOFF) &&
1096 (!(priv->last_status & STATUS_BOFF))) {
1097 netdev_dbg(dev, "entered bus off state\n");
1098 work_done += c_can_handle_state_change(dev,
1099 C_CAN_BUS_OFF);
1100 }
1101 1011
1102 /* handle bus recovery events */ 1012 if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
1103 if ((!(priv->current_status & STATUS_BOFF)) && 1013 netdev_dbg(dev, "entered error passive state\n");
1104 (priv->last_status & STATUS_BOFF)) { 1014 work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
1105 netdev_dbg(dev, "left bus off state\n"); 1015 }
1106 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1107 }
1108 if ((!(priv->current_status & STATUS_EPASS)) &&
1109 (priv->last_status & STATUS_EPASS)) {
1110 netdev_dbg(dev, "left error passive state\n");
1111 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1112 }
1113 1016
1114 priv->last_status = priv->current_status; 1017 if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
1115 1018 netdev_dbg(dev, "entered bus off state\n");
1116 /* handle lec errors on the bus */ 1019 work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
1117 lec_type = c_can_has_and_handle_berr(priv); 1020 goto end;
1118 if (lec_type)
1119 work_done += c_can_handle_bus_err(dev, lec_type);
1120 } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
1121 (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
1122 /* handle events corresponding to receive message objects */
1123 work_done += c_can_do_rx_poll(dev, (quota - work_done));
1124 } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
1125 (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
1126 /* handle events corresponding to transmit message objects */
1127 c_can_do_tx(dev);
1128 } 1021 }
1129 1022
1023 /* handle bus recovery events */
1024 if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
1025 netdev_dbg(dev, "left bus off state\n");
1026 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1027 }
1028 if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
1029 netdev_dbg(dev, "left error passive state\n");
1030 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1031 }
1032
1033 /* handle lec errors on the bus */
1034 work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);
1035
1036 /* Handle Tx/Rx events. We do this unconditionally */
1037 work_done += c_can_do_rx_poll(dev, (quota - work_done));
1038 c_can_do_tx(dev);
1039
1130end: 1040end:
1131 if (work_done < quota) { 1041 if (work_done < quota) {
1132 napi_complete(napi); 1042 napi_complete(napi);
1133 /* enable all IRQs */ 1043 /* enable all IRQs if we are not in bus off state */
1134 c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS); 1044 if (priv->can.state != CAN_STATE_BUS_OFF)
1045 c_can_irq_control(priv, true);
1135 } 1046 }
1136 1047
1137 return work_done; 1048 return work_done;
@@ -1142,12 +1053,11 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
1142 struct net_device *dev = (struct net_device *)dev_id; 1053 struct net_device *dev = (struct net_device *)dev_id;
1143 struct c_can_priv *priv = netdev_priv(dev); 1054 struct c_can_priv *priv = netdev_priv(dev);
1144 1055
1145 priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG); 1056 if (!priv->read_reg(priv, C_CAN_INT_REG))
1146 if (!priv->irqstatus)
1147 return IRQ_NONE; 1057 return IRQ_NONE;
1148 1058
1149 /* disable all interrupts and schedule the NAPI */ 1059 /* disable all interrupts and schedule the NAPI */
1150 c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); 1060 c_can_irq_control(priv, false);
1151 napi_schedule(&priv->napi); 1061 napi_schedule(&priv->napi);
1152 1062
1153 return IRQ_HANDLED; 1063 return IRQ_HANDLED;
@@ -1184,6 +1094,8 @@ static int c_can_open(struct net_device *dev)
1184 can_led_event(dev, CAN_LED_EVENT_OPEN); 1094 can_led_event(dev, CAN_LED_EVENT_OPEN);
1185 1095
1186 napi_enable(&priv->napi); 1096 napi_enable(&priv->napi);
1097 /* enable status change, error and module interrupts */
1098 c_can_irq_control(priv, true);
1187 netif_start_queue(dev); 1099 netif_start_queue(dev);
1188 1100
1189 return 0; 1101 return 0;
@@ -1226,7 +1138,6 @@ struct net_device *alloc_c_can_dev(void)
1226 return NULL; 1138 return NULL;
1227 1139
1228 priv = netdev_priv(dev); 1140 priv = netdev_priv(dev);
1229 spin_lock_init(&priv->xmit_lock);
1230 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT); 1141 netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
1231 1142
1232 priv->dev = dev; 1143 priv->dev = dev;
@@ -1281,6 +1192,7 @@ int c_can_power_up(struct net_device *dev)
1281 u32 val; 1192 u32 val;
1282 unsigned long time_out; 1193 unsigned long time_out;
1283 struct c_can_priv *priv = netdev_priv(dev); 1194 struct c_can_priv *priv = netdev_priv(dev);
1195 int ret;
1284 1196
1285 if (!(dev->flags & IFF_UP)) 1197 if (!(dev->flags & IFF_UP))
1286 return 0; 1198 return 0;
@@ -1307,7 +1219,11 @@ int c_can_power_up(struct net_device *dev)
1307 if (time_after(jiffies, time_out)) 1219 if (time_after(jiffies, time_out))
1308 return -ETIMEDOUT; 1220 return -ETIMEDOUT;
1309 1221
1310 return c_can_start(dev); 1222 ret = c_can_start(dev);
1223 if (!ret)
1224 c_can_irq_control(priv, true);
1225
1226 return ret;
1311} 1227}
1312EXPORT_SYMBOL_GPL(c_can_power_up); 1228EXPORT_SYMBOL_GPL(c_can_power_up);
1313#endif 1229#endif
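The rewritten TX path above replaces the tx_next/tx_echo ring bookkeeping with a single atomic bitmask: c_can_start_xmit() picks the next free message object with fls() over tx_active, and c_can_do_tx() reaps completed objects reported in INTPND2 with ffs() and subtracts them from the mask. Below is a minimal user-space model of that scheme, under the assumption of single-threaded submission (the real driver relies on the networking core serializing the xmit path); MAX_TX_SLOTS, claim_slot() and complete_slots() are invented names for the sketch, and the "stop when full / wake when last slot completes" behavior is simplified:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_TX_SLOTS 16                  /* stands in for C_CAN_MSG_OBJ_TX_NUM */

static atomic_uint tx_active;            /* bit i set => slot i holds a queued frame */

/* 1-based index of the highest set bit, 0 if none (models the kernel's fls()) */
static int fls_u32(unsigned int v)
{
        return v ? 32 - __builtin_clz(v) : 0;
}

/* Claim the next free slot; returns -1 when all slots are busy.
 * Not thread-safe on its own: the driver's xmit path is serialized. */
static int claim_slot(void)
{
        int idx = fls_u32(atomic_load(&tx_active));

        if (idx >= MAX_TX_SLOTS)
                return -1;                         /* queue would be stopped here */
        atomic_fetch_add(&tx_active, 1u << idx);   /* mark slot active */
        return idx;
}

/* Reap completed slots; 'done' is the bitmask reported as transmitted. */
static void complete_slots(unsigned int done)
{
        atomic_fetch_sub(&tx_active, done);
        if (done & (1u << (MAX_TX_SLOTS - 1)))
                printf("last slot done, queue may be restarted\n");
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                printf("claimed slot %d\n", claim_slot());
        complete_slots(0x7);                       /* slots 0..2 reported done, in order */
        printf("active mask now 0x%x\n", atomic_load(&tx_active));
        return 0;
}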
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index faa8404162b3..c56f1b1c11ca 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,14 +22,6 @@
22#ifndef C_CAN_H 22#ifndef C_CAN_H
23#define C_CAN_H 23#define C_CAN_H
24 24
25/*
26 * IFx register masks:
27 * allow easy operation on 16-bit registers when the
28 * argument is 32-bit instead
29 */
30#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
31#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
32
33/* message object split */ 25/* message object split */
34#define C_CAN_NO_OF_OBJECTS 32 26#define C_CAN_NO_OF_OBJECTS 32
35#define C_CAN_MSG_OBJ_RX_NUM 16 27#define C_CAN_MSG_OBJ_RX_NUM 16
@@ -45,8 +37,6 @@
45 37
46#define C_CAN_MSG_OBJ_RX_SPLIT 9 38#define C_CAN_MSG_OBJ_RX_SPLIT 9
47#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1) 39#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
48
49#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
50#define RECEIVE_OBJECT_BITS 0x0000ffff 40#define RECEIVE_OBJECT_BITS 0x0000ffff
51 41
52enum reg { 42enum reg {
@@ -183,23 +173,20 @@ struct c_can_priv {
183 struct napi_struct napi; 173 struct napi_struct napi;
184 struct net_device *dev; 174 struct net_device *dev;
185 struct device *device; 175 struct device *device;
186 spinlock_t xmit_lock; 176 atomic_t tx_active;
187 int tx_object; 177 unsigned long tx_dir;
188 int current_status;
189 int last_status; 178 int last_status;
190 u16 (*read_reg) (struct c_can_priv *priv, enum reg index); 179 u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
191 void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val); 180 void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
192 void __iomem *base; 181 void __iomem *base;
193 const u16 *regs; 182 const u16 *regs;
194 unsigned long irq_flags; /* for request_irq() */
195 unsigned int tx_next;
196 unsigned int tx_echo;
197 void *priv; /* for board-specific data */ 183 void *priv; /* for board-specific data */
198 u16 irqstatus;
199 enum c_can_dev_id type; 184 enum c_can_dev_id type;
200 u32 __iomem *raminit_ctrlreg; 185 u32 __iomem *raminit_ctrlreg;
201 unsigned int instance; 186 int instance;
202 void (*raminit) (const struct c_can_priv *priv, bool enable); 187 void (*raminit) (const struct c_can_priv *priv, bool enable);
188 u32 comm_rcv_high;
189 u32 rxmasked;
203 u32 dlc[C_CAN_MSG_OBJ_TX_NUM]; 190 u32 dlc[C_CAN_MSG_OBJ_TX_NUM];
204}; 191};
205 192
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index bce0be54c2f5..fe5f6303b584 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -84,8 +84,11 @@ static int c_can_pci_probe(struct pci_dev *pdev,
84 goto out_disable_device; 84 goto out_disable_device;
85 } 85 }
86 86
87 pci_set_master(pdev); 87 ret = pci_enable_msi(pdev);
88 pci_enable_msi(pdev); 88 if (!ret) {
89 dev_info(&pdev->dev, "MSI enabled\n");
90 pci_set_master(pdev);
91 }
89 92
90 addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); 93 addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
91 if (!addr) { 94 if (!addr) {
@@ -132,6 +135,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
132 goto out_free_c_can; 135 goto out_free_c_can;
133 } 136 }
134 137
138 priv->type = c_can_pci_data->type;
139
135 /* Configure access to registers */ 140 /* Configure access to registers */
136 switch (c_can_pci_data->reg_align) { 141 switch (c_can_pci_data->reg_align) {
137 case C_CAN_REG_ALIGN_32: 142 case C_CAN_REG_ALIGN_32:
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 806d92753427..1df0b322d1e4 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -222,7 +222,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
222 222
223 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 223 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
224 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); 224 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
225 if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0) 225 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
226 dev_info(&pdev->dev, "control memory is not used for raminit\n"); 226 dev_info(&pdev->dev, "control memory is not used for raminit\n");
227 else 227 else
228 priv->raminit = c_can_hw_raminit; 228 priv->raminit = c_can_hw_raminit;
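The raminit check above only works together with the c_can.h change earlier, which turns 'instance' from unsigned int into int: an unsigned field can never compare below zero, which is what the old "(int)priv->instance < 0" cast was papering over. A small stand-alone illustration (variable names invented for the sketch):

#include <stdio.h>

int main(void)
{
        unsigned int u_instance = (unsigned int)-1;  /* what "-1" becomes in an unsigned field */
        int s_instance = -1;

        /* comparison of an unsigned expression with < 0 is always false
         * (most compilers warn about exactly this) */
        printf("%d\n", u_instance < 0);
        printf("%d\n", s_instance < 0);              /* 1: the signed field makes the check meaningful */
        return 0;
}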
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c7a260478749..e318e87e2bfc 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -256,7 +256,7 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
256 256
257 /* Check if the CAN device has bit-timing parameters */ 257 /* Check if the CAN device has bit-timing parameters */
258 if (!btc) 258 if (!btc)
259 return -ENOTSUPP; 259 return -EOPNOTSUPP;
260 260
261 /* 261 /*
262 * Depending on the given can_bittiming parameter structure the CAN 262 * Depending on the given can_bittiming parameter structure the CAN
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index c540e3d12e3d..564933ae218c 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
551{ 551{
552 struct sja1000_priv *priv; 552 struct sja1000_priv *priv;
553 struct peak_pci_chan *chan; 553 struct peak_pci_chan *chan;
554 struct net_device *dev; 554 struct net_device *dev, *prev_dev;
555 void __iomem *cfg_base, *reg_base; 555 void __iomem *cfg_base, *reg_base;
556 u16 sub_sys_id, icr; 556 u16 sub_sys_id, icr;
557 int i, err, channels; 557 int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
688 writew(0x0, cfg_base + PITA_ICR + 2); 688 writew(0x0, cfg_base + PITA_ICR + 2);
689 689
690 chan = NULL; 690 chan = NULL;
691 for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) { 691 for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
692 unregister_sja1000dev(dev);
693 free_sja1000dev(dev);
694 priv = netdev_priv(dev); 692 priv = netdev_priv(dev);
695 chan = priv->priv; 693 chan = priv->priv;
694 prev_dev = chan->prev_dev;
695
696 unregister_sja1000dev(dev);
697 free_sja1000dev(dev);
696 } 698 }
697 699
698 /* free any PCIeC resources too */ 700 /* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
726 728
727 /* Loop over all registered devices */ 729 /* Loop over all registered devices */
728 while (1) { 730 while (1) {
731 struct net_device *prev_dev = chan->prev_dev;
732
729 dev_info(&pdev->dev, "removing device %s\n", dev->name); 733 dev_info(&pdev->dev, "removing device %s\n", dev->name);
730 unregister_sja1000dev(dev); 734 unregister_sja1000dev(dev);
731 free_sja1000dev(dev); 735 free_sja1000dev(dev);
732 dev = chan->prev_dev; 736 dev = prev_dev;
733 737
734 if (!dev) { 738 if (!dev) {
735 /* do that only for first channel */ 739 /* do that only for first channel */
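Both peak_pci hunks above fix the same use-after-free pattern: the "previous device" link must be read before the current device is unregistered and freed, not dereferenced afterwards. A minimal stand-alone model of that teardown order (struct node and its fields are invented for the sketch; prev models chan->prev_dev):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *prev;           /* link to the previously registered node */
};

static void teardown(struct node *last)
{
        struct node *n = last;

        while (n) {
                struct node *prev = n->prev;   /* save the link first ... */

                printf("removing node %d\n", n->id);
                free(n);                       /* ... so freeing n is now safe */
                n = prev;
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->id = i;
                n->prev = head;
                head = n;
        }
        teardown(head);
        return 0;
}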
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index df136a2516c4..014695d7e6a3 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -46,6 +46,7 @@ static int clk[MAXDEV];
46static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 46static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
47static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 47static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
48static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; 48static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
49static spinlock_t indirect_lock[MAXDEV]; /* lock for indirect access mode */
49 50
50module_param_array(port, ulong, NULL, S_IRUGO); 51module_param_array(port, ulong, NULL, S_IRUGO);
51MODULE_PARM_DESC(port, "I/O port number"); 52MODULE_PARM_DESC(port, "I/O port number");
@@ -101,19 +102,26 @@ static void sja1000_isa_port_write_reg(const struct sja1000_priv *priv,
101static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv, 102static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv,
102 int reg) 103 int reg)
103{ 104{
104 unsigned long base = (unsigned long)priv->reg_base; 105 unsigned long flags, base = (unsigned long)priv->reg_base;
106 u8 readval;
105 107
108 spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
106 outb(reg, base); 109 outb(reg, base);
107 return inb(base + 1); 110 readval = inb(base + 1);
111 spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
112
113 return readval;
108} 114}
109 115
110static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv, 116static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
111 int reg, u8 val) 117 int reg, u8 val)
112{ 118{
113 unsigned long base = (unsigned long)priv->reg_base; 119 unsigned long flags, base = (unsigned long)priv->reg_base;
114 120
121 spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
115 outb(reg, base); 122 outb(reg, base);
116 outb(val, base + 1); 123 outb(val, base + 1);
124 spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
117} 125}
118 126
119static int sja1000_isa_probe(struct platform_device *pdev) 127static int sja1000_isa_probe(struct platform_device *pdev)
@@ -169,6 +177,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
169 if (iosize == SJA1000_IOSIZE_INDIRECT) { 177 if (iosize == SJA1000_IOSIZE_INDIRECT) {
170 priv->read_reg = sja1000_isa_port_read_reg_indirect; 178 priv->read_reg = sja1000_isa_port_read_reg_indirect;
171 priv->write_reg = sja1000_isa_port_write_reg_indirect; 179 priv->write_reg = sja1000_isa_port_write_reg_indirect;
180 spin_lock_init(&indirect_lock[idx]);
172 } else { 181 } else {
173 priv->read_reg = sja1000_isa_port_read_reg; 182 priv->read_reg = sja1000_isa_port_read_reg;
174 priv->write_reg = sja1000_isa_port_write_reg; 183 priv->write_reg = sja1000_isa_port_write_reg;
@@ -198,6 +207,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
198 207
199 platform_set_drvdata(pdev, dev); 208 platform_set_drvdata(pdev, dev);
200 SET_NETDEV_DEV(dev, &pdev->dev); 209 SET_NETDEV_DEV(dev, &pdev->dev);
210 dev->dev_id = idx;
201 211
202 err = register_sja1000dev(dev); 212 err = register_sja1000dev(dev);
203 if (err) { 213 if (err) {
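The sja1000_isa change above serializes indirect (index/data-port) register access: the index write and the following data access form one unit and must not be interleaved with another context's access, so both happen under a per-device lock. A user-space model of the same idea, with a pthread mutex standing in for spin_lock_irqsave() (register file, latch and names are invented for the sketch):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t regs[32];          /* fake register file behind the data port */
static uint8_t addr_latch;        /* models the ISA index port */
static pthread_mutex_t indirect_lock = PTHREAD_MUTEX_INITIALIZER;

static uint8_t indirect_read(uint8_t reg)
{
        uint8_t val;

        pthread_mutex_lock(&indirect_lock);
        addr_latch = reg;             /* models outb(reg, base)     */
        val = regs[addr_latch];       /* models inb(base + 1)       */
        pthread_mutex_unlock(&indirect_lock);
        return val;
}

static void indirect_write(uint8_t reg, uint8_t val)
{
        pthread_mutex_lock(&indirect_lock);
        addr_latch = reg;             /* models outb(reg, base)     */
        regs[addr_latch] = val;       /* models outb(val, base + 1) */
        pthread_mutex_unlock(&indirect_lock);
}

int main(void)
{
        indirect_write(5, 0xAA);
        printf("reg 5 = 0x%02X\n", indirect_read(5));
        return 0;
}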
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f5b16e0e3a12..dcf9196f6316 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -322,13 +322,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
322 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) 322 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
323 return; 323 return;
324 324
325 spin_lock(&sl->lock); 325 spin_lock_bh(&sl->lock);
326 if (sl->xleft <= 0) { 326 if (sl->xleft <= 0) {
327 /* Now serial buffer is almost free & we can start 327 /* Now serial buffer is almost free & we can start
328 * transmission of another packet */ 328 * transmission of another packet */
329 sl->dev->stats.tx_packets++; 329 sl->dev->stats.tx_packets++;
330 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 330 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
331 spin_unlock(&sl->lock); 331 spin_unlock_bh(&sl->lock);
332 netif_wake_queue(sl->dev); 332 netif_wake_queue(sl->dev);
333 return; 333 return;
334 } 334 }
@@ -336,7 +336,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
336 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 336 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
337 sl->xleft -= actual; 337 sl->xleft -= actual;
338 sl->xhead += actual; 338 sl->xhead += actual;
339 spin_unlock(&sl->lock); 339 spin_unlock_bh(&sl->lock);
340} 340}
341 341
342/* Send a can_frame to a TTY queue. */ 342/* Send a can_frame to a TTY queue. */
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 39b26fe28d10..d7401017a3f1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
35source "drivers/net/ethernet/chelsio/Kconfig" 35source "drivers/net/ethernet/chelsio/Kconfig"
36source "drivers/net/ethernet/cirrus/Kconfig" 36source "drivers/net/ethernet/cirrus/Kconfig"
37source "drivers/net/ethernet/cisco/Kconfig" 37source "drivers/net/ethernet/cisco/Kconfig"
38
39config CX_ECAT
40 tristate "Beckhoff CX5020 EtherCAT master support"
41 depends on PCI
42 ---help---
43 Driver for EtherCAT master module located on CCAT FPGA
44 that can be found on Beckhoff CX5020, and possibly other of CX
45 Beckhoff CX series industrial PCs.
46
47 To compile this driver as a module, choose M here. The module
48 will be called ec_bhf.
49
38source "drivers/net/ethernet/davicom/Kconfig" 50source "drivers/net/ethernet/davicom/Kconfig"
39 51
40config DNET 52config DNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 545d0b3b9cb4..35190e36c456 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
21obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ 21obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
22obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ 22obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
23obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ 23obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
24obj-$(CONFIG_CX_ECAT) += ec_bhf.o
24obj-$(CONFIG_DM9000) += davicom/ 25obj-$(CONFIG_DM9000) += davicom/
25obj-$(CONFIG_DNET) += dnet.o 26obj-$(CONFIG_DNET) += dnet.o
26obj-$(CONFIG_NET_VENDOR_DEC) += dec/ 27obj-$(CONFIG_NET_VENDOR_DEC) += dec/
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
index 80c1ab74a4b8..fdddba51473e 100644
--- a/drivers/net/ethernet/altera/Kconfig
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -1,5 +1,6 @@
1config ALTERA_TSE 1config ALTERA_TSE
2 tristate "Altera Triple-Speed Ethernet MAC support" 2 tristate "Altera Triple-Speed Ethernet MAC support"
3 depends on HAS_DMA
3 select PHYLIB 4 select PHYLIB
4 ---help--- 5 ---help---
5 This driver supports the Altera Triple-Speed (TSE) Ethernet MAC. 6 This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index d4a187e45369..3eff2fd3997e 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,3 +5,4 @@
5obj-$(CONFIG_ALTERA_TSE) += altera_tse.o 5obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
6altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ 6altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
7altera_msgdma.o altera_sgdma.o altera_utils.o 7altera_msgdma.o altera_sgdma.o altera_utils.o
8ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 3df18669ea30..0fb986ba3290 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -18,6 +18,7 @@
18#include "altera_utils.h" 18#include "altera_utils.h"
19#include "altera_tse.h" 19#include "altera_tse.h"
20#include "altera_msgdmahw.h" 20#include "altera_msgdmahw.h"
21#include "altera_msgdma.h"
21 22
22/* No initialization work to do for MSGDMA */ 23/* No initialization work to do for MSGDMA */
23int msgdma_initialize(struct altera_tse_private *priv) 24int msgdma_initialize(struct altera_tse_private *priv)
@@ -29,21 +30,23 @@ void msgdma_uninitialize(struct altera_tse_private *priv)
29{ 30{
30} 31}
31 32
33void msgdma_start_rxdma(struct altera_tse_private *priv)
34{
35}
36
32void msgdma_reset(struct altera_tse_private *priv) 37void msgdma_reset(struct altera_tse_private *priv)
33{ 38{
34 int counter; 39 int counter;
35 struct msgdma_csr *txcsr =
36 (struct msgdma_csr *)priv->tx_dma_csr;
37 struct msgdma_csr *rxcsr =
38 (struct msgdma_csr *)priv->rx_dma_csr;
39 40
40 /* Reset Rx mSGDMA */ 41 /* Reset Rx mSGDMA */
41 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); 42 csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
42 iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control); 43 msgdma_csroffs(status));
44 csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
45 msgdma_csroffs(control));
43 46
44 counter = 0; 47 counter = 0;
45 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 48 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
46 if (tse_bit_is_clear(&rxcsr->status, 49 if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
47 MSGDMA_CSR_STAT_RESETTING)) 50 MSGDMA_CSR_STAT_RESETTING))
48 break; 51 break;
49 udelay(1); 52 udelay(1);
@@ -54,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
54 "TSE Rx mSGDMA resetting bit never cleared!\n"); 57 "TSE Rx mSGDMA resetting bit never cleared!\n");
55 58
56 /* clear all status bits */ 59 /* clear all status bits */
57 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); 60 csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
58 61
59 /* Reset Tx mSGDMA */ 62 /* Reset Tx mSGDMA */
60 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); 63 csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
61 iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control); 64 msgdma_csroffs(status));
65
66 csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
67 msgdma_csroffs(control));
62 68
63 counter = 0; 69 counter = 0;
64 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 70 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
65 if (tse_bit_is_clear(&txcsr->status, 71 if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
66 MSGDMA_CSR_STAT_RESETTING)) 72 MSGDMA_CSR_STAT_RESETTING))
67 break; 73 break;
68 udelay(1); 74 udelay(1);
@@ -73,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
73 "TSE Tx mSGDMA resetting bit never cleared!\n"); 79 "TSE Tx mSGDMA resetting bit never cleared!\n");
74 80
75 /* clear all status bits */ 81 /* clear all status bits */
76 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); 82 csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
77} 83}
78 84
79void msgdma_disable_rxirq(struct altera_tse_private *priv) 85void msgdma_disable_rxirq(struct altera_tse_private *priv)
80{ 86{
81 struct msgdma_csr *csr = priv->rx_dma_csr; 87 tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
82 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 88 MSGDMA_CSR_CTL_GLOBAL_INTR);
83} 89}
84 90
85void msgdma_enable_rxirq(struct altera_tse_private *priv) 91void msgdma_enable_rxirq(struct altera_tse_private *priv)
86{ 92{
87 struct msgdma_csr *csr = priv->rx_dma_csr; 93 tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
88 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 94 MSGDMA_CSR_CTL_GLOBAL_INTR);
89} 95}
90 96
91void msgdma_disable_txirq(struct altera_tse_private *priv) 97void msgdma_disable_txirq(struct altera_tse_private *priv)
92{ 98{
93 struct msgdma_csr *csr = priv->tx_dma_csr; 99 tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
94 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 100 MSGDMA_CSR_CTL_GLOBAL_INTR);
95} 101}
96 102
97void msgdma_enable_txirq(struct altera_tse_private *priv) 103void msgdma_enable_txirq(struct altera_tse_private *priv)
98{ 104{
99 struct msgdma_csr *csr = priv->tx_dma_csr; 105 tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
100 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); 106 MSGDMA_CSR_CTL_GLOBAL_INTR);
101} 107}
102 108
103void msgdma_clear_rxirq(struct altera_tse_private *priv) 109void msgdma_clear_rxirq(struct altera_tse_private *priv)
104{ 110{
105 struct msgdma_csr *csr = priv->rx_dma_csr; 111 csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
106 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
107} 112}
108 113
109void msgdma_clear_txirq(struct altera_tse_private *priv) 114void msgdma_clear_txirq(struct altera_tse_private *priv)
110{ 115{
111 struct msgdma_csr *csr = priv->tx_dma_csr; 116 csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
112 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
113} 117}
114 118
115/* return 0 to indicate transmit is pending */ 119/* return 0 to indicate transmit is pending */
116int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) 120int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
117{ 121{
118 struct msgdma_extended_desc *desc = priv->tx_dma_desc; 122 csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
119 123 msgdma_descroffs(read_addr_lo));
120 iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo); 124 csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
121 iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi); 125 msgdma_descroffs(read_addr_hi));
122 iowrite32(0, &desc->write_addr_lo); 126 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
123 iowrite32(0, &desc->write_addr_hi); 127 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
124 iowrite32(buffer->len, &desc->len); 128 csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
125 iowrite32(0, &desc->burst_seq_num); 129 csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
126 iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride); 130 csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
127 iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control); 131 msgdma_descroffs(stride));
132 csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
133 msgdma_descroffs(control));
128 return 0; 134 return 0;
129} 135}
130 136
@@ -133,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
133 u32 ready = 0; 139 u32 ready = 0;
134 u32 inuse; 140 u32 inuse;
135 u32 status; 141 u32 status;
136 struct msgdma_csr *txcsr =
137 (struct msgdma_csr *)priv->tx_dma_csr;
138 142
139 /* Get number of sent descriptors */ 143 /* Get number of sent descriptors */
140 inuse = ioread32(&txcsr->rw_fill_level) & 0xffff; 144 inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
145 & 0xffff;
141 146
142 if (inuse) { /* Tx FIFO is not empty */ 147 if (inuse) { /* Tx FIFO is not empty */
143 ready = priv->tx_prod - priv->tx_cons - inuse - 1; 148 ready = priv->tx_prod - priv->tx_cons - inuse - 1;
144 } else { 149 } else {
145 /* Check for buffered last packet */ 150 /* Check for buffered last packet */
146 status = ioread32(&txcsr->status); 151 status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
147 if (status & MSGDMA_CSR_STAT_BUSY) 152 if (status & MSGDMA_CSR_STAT_BUSY)
148 ready = priv->tx_prod - priv->tx_cons - 1; 153 ready = priv->tx_prod - priv->tx_cons - 1;
149 else 154 else
@@ -154,10 +159,9 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
154 159
155/* Put buffer to the mSGDMA RX FIFO 160/* Put buffer to the mSGDMA RX FIFO
156 */ 161 */
157int msgdma_add_rx_desc(struct altera_tse_private *priv, 162void msgdma_add_rx_desc(struct altera_tse_private *priv,
158 struct tse_buffer *rxbuffer) 163 struct tse_buffer *rxbuffer)
159{ 164{
160 struct msgdma_extended_desc *desc = priv->rx_dma_desc;
161 u32 len = priv->rx_dma_buf_sz; 165 u32 len = priv->rx_dma_buf_sz;
162 dma_addr_t dma_addr = rxbuffer->dma_addr; 166 dma_addr_t dma_addr = rxbuffer->dma_addr;
163 u32 control = (MSGDMA_DESC_CTL_END_ON_EOP 167 u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -167,15 +171,16 @@ int msgdma_add_rx_desc(struct altera_tse_private *priv,
167 | MSGDMA_DESC_CTL_TR_ERR_IRQ 171 | MSGDMA_DESC_CTL_TR_ERR_IRQ
168 | MSGDMA_DESC_CTL_GO); 172 | MSGDMA_DESC_CTL_GO);
169 173
170 iowrite32(0, &desc->read_addr_lo); 174 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
171 iowrite32(0, &desc->read_addr_hi); 175 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
172 iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo); 176 csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
173 iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi); 177 msgdma_descroffs(write_addr_lo));
174 iowrite32(len, &desc->len); 178 csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
175 iowrite32(0, &desc->burst_seq_num); 179 msgdma_descroffs(write_addr_hi));
176 iowrite32(0x00010001, &desc->stride); 180 csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
177 iowrite32(control, &desc->control); 181 csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
178 return 1; 182 csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
183 csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
179} 184}
180 185
181/* status is returned on upper 16 bits, 186/* status is returned on upper 16 bits,
@@ -186,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
186 u32 rxstatus = 0; 191 u32 rxstatus = 0;
187 u32 pktlength; 192 u32 pktlength;
188 u32 pktstatus; 193 u32 pktstatus;
189 struct msgdma_csr *rxcsr = 194
190 (struct msgdma_csr *)priv->rx_dma_csr; 195 if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
191 struct msgdma_response *rxresp = 196 & 0xffff) {
192 (struct msgdma_response *)priv->rx_dma_resp; 197 pktlength = csrrd32(priv->rx_dma_resp,
193 198 msgdma_respoffs(bytes_transferred));
194 if (ioread32(&rxcsr->resp_fill_level) & 0xffff) { 199 pktstatus = csrrd32(priv->rx_dma_resp,
195 pktlength = ioread32(&rxresp->bytes_transferred); 200 msgdma_respoffs(status));
196 pktstatus = ioread32(&rxresp->status);
197 rxstatus = pktstatus; 201 rxstatus = pktstatus;
198 rxstatus = rxstatus << 16; 202 rxstatus = rxstatus << 16;
199 rxstatus |= (pktlength & 0xffff); 203 rxstatus |= (pktlength & 0xffff);
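
The hunks above replace field dereferences on a casted struct msgdma_csr pointer with a base pointer plus a byte offset computed by msgdma_csroffs(), written and read through csrwr32()/csrrd32(). Below is a stand-alone sketch of that pattern, compilable as ordinary user-space C under the assumption that a plain buffer stands in for the memory-mapped CSR block; model_csr, model_csroffs, model_wr32 and model_rd32 are illustrative names.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct model_csr {		/* cut-down stand-in for a CSR register block */
    	uint32_t status;
    	uint32_t control;
    	uint32_t rw_fill_level;
    };

    #define model_csroffs(f) (offsetof(struct model_csr, f))

    static void model_wr32(uint32_t val, void *base, size_t offs)
    {
    	memcpy((char *)base + offs, &val, sizeof(val));	/* stands in for csrwr32()/writel() */
    }

    static uint32_t model_rd32(void *base, size_t offs)
    {
    	uint32_t val;

    	memcpy(&val, (char *)base + offs, sizeof(val));	/* stands in for csrrd32()/readl() */
    	return val;
    }

    int main(void)
    {
    	struct model_csr regs = { 0 };

    	model_wr32(0x1f, &regs, model_csroffs(status));		/* clear status    */
    	model_wr32(0x2, &regs, model_csroffs(control));		/* set a control bit */
    	printf("status=%#x control=%#x\n",
    	       (unsigned)model_rd32(&regs, model_csroffs(status)),
    	       (unsigned)model_rd32(&regs, model_csroffs(control)));
    	return 0;
    }
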
diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h
index 7f0f5bf2bba2..42cf61c81057 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.h
+++ b/drivers/net/ethernet/altera/altera_msgdma.h
@@ -25,10 +25,11 @@ void msgdma_disable_txirq(struct altera_tse_private *);
25void msgdma_clear_rxirq(struct altera_tse_private *); 25void msgdma_clear_rxirq(struct altera_tse_private *);
26void msgdma_clear_txirq(struct altera_tse_private *); 26void msgdma_clear_txirq(struct altera_tse_private *);
27u32 msgdma_tx_completions(struct altera_tse_private *); 27u32 msgdma_tx_completions(struct altera_tse_private *);
28int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *); 28void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
29int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *); 29int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
30u32 msgdma_rx_status(struct altera_tse_private *); 30u32 msgdma_rx_status(struct altera_tse_private *);
31int msgdma_initialize(struct altera_tse_private *); 31int msgdma_initialize(struct altera_tse_private *);
32void msgdma_uninitialize(struct altera_tse_private *); 32void msgdma_uninitialize(struct altera_tse_private *);
33void msgdma_start_rxdma(struct altera_tse_private *);
33 34
34#endif /* __ALTERA_MSGDMA_H__ */ 35#endif /* __ALTERA_MSGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index d7b59ba4019c..e335626e1b6b 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -17,15 +17,6 @@
17#ifndef __ALTERA_MSGDMAHW_H__ 17#ifndef __ALTERA_MSGDMAHW_H__
18#define __ALTERA_MSGDMAHW_H__ 18#define __ALTERA_MSGDMAHW_H__
19 19
20/* mSGDMA standard descriptor format
21 */
22struct msgdma_desc {
23 u32 read_addr; /* data buffer source address */
24 u32 write_addr; /* data buffer destination address */
25 u32 len; /* the number of bytes to transfer per descriptor */
26 u32 control; /* characteristics of the transfer */
27};
28
29/* mSGDMA extended descriptor format 20/* mSGDMA extended descriptor format
30 */ 21 */
31struct msgdma_extended_desc { 22struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
159 u32 status; 150 u32 status;
160}; 151};
161 152
153#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
154#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
155#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
156
162/* mSGDMA response register bit definitions 157/* mSGDMA response register bit definitions
163 */ 158 */
164#define MSGDMA_RESP_EARLY_TERM BIT(8) 159#define MSGDMA_RESP_EARLY_TERM BIT(8)
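
With the unused standard descriptor gone, every register, descriptor and response access goes through the three offsetof() macros added above. One way such offsets can be pinned down is a compile-time check against the expected register map; the sketch below only illustrates that idea (the model struct and the asserted offsets are assumptions, not taken from the patch), and in-kernel code would use BUILD_BUG_ON() rather than _Static_assert.

    #include <stddef.h>
    #include <stdint.h>

    struct model_extended_desc {	/* illustrative, modelled on the extended descriptor */
    	uint32_t read_addr_lo;
    	uint32_t write_addr_lo;
    	uint32_t len;
    	uint32_t burst_seq_num;
    	uint32_t stride;
    	uint32_t read_addr_hi;
    	uint32_t write_addr_hi;
    	uint32_t control;
    };

    #define model_descroffs(f) (offsetof(struct model_extended_desc, f))

    /* If a field ever moves, the build breaks instead of the DMA engine. */
    _Static_assert(model_descroffs(read_addr_lo) == 0x00, "read_addr_lo offset");
    _Static_assert(model_descroffs(len)          == 0x08, "len offset");
    _Static_assert(model_descroffs(control)      == 0x1c, "control offset");

    int main(void) { return 0; }
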
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 0ee96639ae44..99cc56f451cf 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -20,28 +20,28 @@
20#include "altera_sgdmahw.h" 20#include "altera_sgdmahw.h"
21#include "altera_sgdma.h" 21#include "altera_sgdma.h"
22 22
23static void sgdma_descrip(struct sgdma_descrip *desc, 23static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
24 struct sgdma_descrip *ndesc, 24 struct sgdma_descrip __iomem *ndesc,
25 dma_addr_t ndesc_phys, 25 dma_addr_t ndesc_phys,
26 dma_addr_t raddr, 26 dma_addr_t raddr,
27 dma_addr_t waddr, 27 dma_addr_t waddr,
28 u16 length, 28 u16 length,
29 int generate_eop, 29 int generate_eop,
30 int rfixed, 30 int rfixed,
31 int wfixed); 31 int wfixed);
32 32
33static int sgdma_async_write(struct altera_tse_private *priv, 33static int sgdma_async_write(struct altera_tse_private *priv,
34 struct sgdma_descrip *desc); 34 struct sgdma_descrip __iomem *desc);
35 35
36static int sgdma_async_read(struct altera_tse_private *priv); 36static int sgdma_async_read(struct altera_tse_private *priv);
37 37
38static dma_addr_t 38static dma_addr_t
39sgdma_txphysaddr(struct altera_tse_private *priv, 39sgdma_txphysaddr(struct altera_tse_private *priv,
40 struct sgdma_descrip *desc); 40 struct sgdma_descrip __iomem *desc);
41 41
42static dma_addr_t 42static dma_addr_t
43sgdma_rxphysaddr(struct altera_tse_private *priv, 43sgdma_rxphysaddr(struct altera_tse_private *priv,
44 struct sgdma_descrip *desc); 44 struct sgdma_descrip __iomem *desc);
45 45
46static int sgdma_txbusy(struct altera_tse_private *priv); 46static int sgdma_txbusy(struct altera_tse_private *priv);
47 47
@@ -64,18 +64,23 @@ queue_rx_peekhead(struct altera_tse_private *priv);
64 64
65int sgdma_initialize(struct altera_tse_private *priv) 65int sgdma_initialize(struct altera_tse_private *priv)
66{ 66{
67 priv->txctrlreg = SGDMA_CTRLREG_ILASTD; 67 priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
68 SGDMA_CTRLREG_INTEN;
68 69
69 priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP | 70 priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
71 SGDMA_CTRLREG_INTEN |
70 SGDMA_CTRLREG_ILASTD; 72 SGDMA_CTRLREG_ILASTD;
71 73
74 priv->sgdmadesclen = sizeof(struct sgdma_descrip);
75
72 INIT_LIST_HEAD(&priv->txlisthd); 76 INIT_LIST_HEAD(&priv->txlisthd);
73 INIT_LIST_HEAD(&priv->rxlisthd); 77 INIT_LIST_HEAD(&priv->rxlisthd);
74 78
75 priv->rxdescphys = (dma_addr_t) 0; 79 priv->rxdescphys = (dma_addr_t) 0;
76 priv->txdescphys = (dma_addr_t) 0; 80 priv->txdescphys = (dma_addr_t) 0;
77 81
78 priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc, 82 priv->rxdescphys = dma_map_single(priv->device,
83 (void __force *)priv->rx_dma_desc,
79 priv->rxdescmem, DMA_BIDIRECTIONAL); 84 priv->rxdescmem, DMA_BIDIRECTIONAL);
80 85
81 if (dma_mapping_error(priv->device, priv->rxdescphys)) { 86 if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -84,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
84 return -EINVAL; 89 return -EINVAL;
85 } 90 }
86 91
87 priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc, 92 priv->txdescphys = dma_map_single(priv->device,
93 (void __force *)priv->tx_dma_desc,
88 priv->txdescmem, DMA_TO_DEVICE); 94 priv->txdescmem, DMA_TO_DEVICE);
89 95
90 if (dma_mapping_error(priv->device, priv->txdescphys)) { 96 if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -93,6 +99,16 @@ int sgdma_initialize(struct altera_tse_private *priv)
93 return -EINVAL; 99 return -EINVAL;
94 } 100 }
95 101
102 /* Initialize descriptor memory to all 0's, sync memory to cache */
103 memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
104 memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
105
106 dma_sync_single_for_device(priv->device, priv->txdescphys,
107 priv->txdescmem, DMA_TO_DEVICE);
108
109 dma_sync_single_for_device(priv->device, priv->rxdescphys,
110 priv->rxdescmem, DMA_TO_DEVICE);
111
96 return 0; 112 return 0;
97} 113}
98 114
@@ -112,58 +128,48 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
112 */ 128 */
113void sgdma_reset(struct altera_tse_private *priv) 129void sgdma_reset(struct altera_tse_private *priv)
114{ 130{
115 u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
116 u32 txdescriplen = priv->txdescmem;
117 u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
118 u32 rxdescriplen = priv->rxdescmem;
119 struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
120 struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
121
122 /* Initialize descriptor memory to 0 */ 131 /* Initialize descriptor memory to 0 */
123 memset(ptxdescripmem, 0, txdescriplen); 132 memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
124 memset(prxdescripmem, 0, rxdescriplen); 133 memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
125 134
126 iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control); 135 csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
127 iowrite32(0, &ptxsgdma->control); 136 csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
128 137
129 iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control); 138 csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
130 iowrite32(0, &prxsgdma->control); 139 csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
131} 140}
132 141
142/* For SGDMA, interrupts remain enabled after initially enabling,
143 * so no need to provide implementations for abstract enable
144 * and disable
145 */
146
133void sgdma_enable_rxirq(struct altera_tse_private *priv) 147void sgdma_enable_rxirq(struct altera_tse_private *priv)
134{ 148{
135 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
136 priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
137 tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
138} 149}
139 150
140void sgdma_enable_txirq(struct altera_tse_private *priv) 151void sgdma_enable_txirq(struct altera_tse_private *priv)
141{ 152{
142 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
143 priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
144 tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
145} 153}
146 154
147/* for SGDMA, RX interrupts remain enabled after enabling */
148void sgdma_disable_rxirq(struct altera_tse_private *priv) 155void sgdma_disable_rxirq(struct altera_tse_private *priv)
149{ 156{
150} 157}
151 158
152/* for SGDMA, TX interrupts remain enabled after enabling */
153void sgdma_disable_txirq(struct altera_tse_private *priv) 159void sgdma_disable_txirq(struct altera_tse_private *priv)
154{ 160{
155} 161}
156 162
157void sgdma_clear_rxirq(struct altera_tse_private *priv) 163void sgdma_clear_rxirq(struct altera_tse_private *priv)
158{ 164{
159 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 165 tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
160 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); 166 SGDMA_CTRLREG_CLRINT);
161} 167}
162 168
163void sgdma_clear_txirq(struct altera_tse_private *priv) 169void sgdma_clear_txirq(struct altera_tse_private *priv)
164{ 170{
165 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; 171 tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
166 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); 172 SGDMA_CTRLREG_CLRINT);
167} 173}
168 174
169/* transmits buffer through SGDMA. Returns number of buffers 175/* transmits buffer through SGDMA. Returns number of buffers
@@ -173,28 +179,27 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
173 */ 179 */
174int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) 180int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
175{ 181{
176 int pktstx = 0; 182 struct sgdma_descrip __iomem *descbase =
177 struct sgdma_descrip *descbase = 183 (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
178 (struct sgdma_descrip *)priv->tx_dma_desc;
179 184
180 struct sgdma_descrip *cdesc = &descbase[0]; 185 struct sgdma_descrip __iomem *cdesc = &descbase[0];
181 struct sgdma_descrip *ndesc = &descbase[1]; 186 struct sgdma_descrip __iomem *ndesc = &descbase[1];
182 187
183 /* wait 'til the tx sgdma is ready for the next transmit request */ 188 /* wait 'til the tx sgdma is ready for the next transmit request */
184 if (sgdma_txbusy(priv)) 189 if (sgdma_txbusy(priv))
185 return 0; 190 return 0;
186 191
187 sgdma_descrip(cdesc, /* current descriptor */ 192 sgdma_setup_descrip(cdesc, /* current descriptor */
188 ndesc, /* next descriptor */ 193 ndesc, /* next descriptor */
189 sgdma_txphysaddr(priv, ndesc), 194 sgdma_txphysaddr(priv, ndesc),
190 buffer->dma_addr, /* address of packet to xmit */ 195 buffer->dma_addr, /* address of packet to xmit */
191 0, /* write addr 0 for tx dma */ 196 0, /* write addr 0 for tx dma */
192 buffer->len, /* length of packet */ 197 buffer->len, /* length of packet */
193 SGDMA_CONTROL_EOP, /* Generate EOP */ 198 SGDMA_CONTROL_EOP, /* Generate EOP */
194 0, /* read fixed */ 199 0, /* read fixed */
195 SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ 200 SGDMA_CONTROL_WR_FIXED); /* Generate SOP */
196 201
197 pktstx = sgdma_async_write(priv, cdesc); 202 sgdma_async_write(priv, cdesc);
198 203
199 /* enqueue the request to the pending transmit queue */ 204 /* enqueue the request to the pending transmit queue */
200 queue_tx(priv, buffer); 205 queue_tx(priv, buffer);
@@ -208,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
208u32 sgdma_tx_completions(struct altera_tse_private *priv) 213u32 sgdma_tx_completions(struct altera_tse_private *priv)
209{ 214{
210 u32 ready = 0; 215 u32 ready = 0;
211 struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
212 216
213 if (!sgdma_txbusy(priv) && 217 if (!sgdma_txbusy(priv) &&
214 ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) && 218 ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
219 & SGDMA_CONTROL_HW_OWNED) == 0) &&
215 (dequeue_tx(priv))) { 220 (dequeue_tx(priv))) {
216 ready = 1; 221 ready = 1;
217 } 222 }
@@ -219,11 +224,15 @@ u32 sgdma_tx_completions(struct altera_tse_private *priv)
219 return ready; 224 return ready;
220} 225}
221 226
222int sgdma_add_rx_desc(struct altera_tse_private *priv, 227void sgdma_start_rxdma(struct altera_tse_private *priv)
223 struct tse_buffer *rxbuffer) 228{
229 sgdma_async_read(priv);
230}
231
232void sgdma_add_rx_desc(struct altera_tse_private *priv,
233 struct tse_buffer *rxbuffer)
224{ 234{
225 queue_rx(priv, rxbuffer); 235 queue_rx(priv, rxbuffer);
226 return sgdma_async_read(priv);
227} 236}
228 237
229/* status is returned on upper 16 bits, 238/* status is returned on upper 16 bits,
@@ -231,38 +240,62 @@ int sgdma_add_rx_desc(struct altera_tse_private *priv,
231 */ 240 */
232u32 sgdma_rx_status(struct altera_tse_private *priv) 241u32 sgdma_rx_status(struct altera_tse_private *priv)
233{ 242{
234 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 243 struct sgdma_descrip __iomem *base =
235 struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc; 244 (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
236 struct sgdma_descrip *desc = NULL; 245 struct sgdma_descrip __iomem *desc = NULL;
237 int pktsrx;
238 unsigned int rxstatus = 0;
239 unsigned int pktlength = 0;
240 unsigned int pktstatus = 0;
241 struct tse_buffer *rxbuffer = NULL; 246 struct tse_buffer *rxbuffer = NULL;
247 unsigned int rxstatus = 0;
242 248
243 dma_sync_single_for_cpu(priv->device, 249 u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
244 priv->rxdescphys,
245 priv->rxdescmem,
246 DMA_BIDIRECTIONAL);
247 250
248 desc = &base[0]; 251 desc = &base[0];
249 if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) || 252 if (sts & SGDMA_STSREG_EOP) {
250 (desc->status & SGDMA_STATUS_EOP)) { 253 unsigned int pktlength = 0;
251 pktlength = desc->bytes_xferred; 254 unsigned int pktstatus = 0;
252 pktstatus = desc->status & 0x3f; 255 dma_sync_single_for_cpu(priv->device,
253 rxstatus = pktstatus; 256 priv->rxdescphys,
257 priv->sgdmadesclen,
258 DMA_FROM_DEVICE);
259
260 pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
261 pktstatus = csrrd8(desc, sgdma_descroffs(status));
262 rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
254 rxstatus = rxstatus << 16; 263 rxstatus = rxstatus << 16;
255 rxstatus |= (pktlength & 0xffff); 264 rxstatus |= (pktlength & 0xffff);
256 265
257 desc->status = 0; 266 if (rxstatus) {
258 267 csrwr8(0, desc, sgdma_descroffs(status));
259 rxbuffer = dequeue_rx(priv); 268
260 if (rxbuffer == NULL) 269 rxbuffer = dequeue_rx(priv);
270 if (rxbuffer == NULL)
271 netdev_info(priv->dev,
272 "sgdma rx and rx queue empty!\n");
273
274 /* Clear control */
275 csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
276 /* clear status */
277 csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
278
279 /* kick the rx sgdma after reaping this descriptor */
280 sgdma_async_read(priv);
281
282 } else {
283 /* If the SGDMA indicated an end of packet on recv,
284 * then it's expected that the rxstatus from the
285 * descriptor is non-zero - meaning a valid packet
286 * with a nonzero length, or an error has been
287 * indicated. if not, then all we can do is signal
288 * an error and return no packet received. Most likely
289 * there is a system design error, or an error in the
290 * underlying kernel (cache or cache management problem)
291 */
261 netdev_err(priv->dev, 292 netdev_err(priv->dev,
262 "sgdma rx and rx queue empty!\n"); 293 "SGDMA RX Error Info: %x, %x, %x\n",
263 294 sts, csrrd8(desc, sgdma_descroffs(status)),
264 /* kick the rx sgdma after reaping this descriptor */ 295 rxstatus);
265 pktsrx = sgdma_async_read(priv); 296 }
297 } else if (sts == 0) {
298 sgdma_async_read(priv);
266 } 299 }
267 300
268 return rxstatus; 301 return rxstatus;
@@ -270,38 +303,41 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
270 303
271 304
272/* Private functions */ 305/* Private functions */
273static void sgdma_descrip(struct sgdma_descrip *desc, 306static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
274 struct sgdma_descrip *ndesc, 307 struct sgdma_descrip __iomem *ndesc,
275 dma_addr_t ndesc_phys, 308 dma_addr_t ndesc_phys,
276 dma_addr_t raddr, 309 dma_addr_t raddr,
277 dma_addr_t waddr, 310 dma_addr_t waddr,
278 u16 length, 311 u16 length,
279 int generate_eop, 312 int generate_eop,
280 int rfixed, 313 int rfixed,
281 int wfixed) 314 int wfixed)
282{ 315{
283 /* Clear the next descriptor as not owned by hardware */ 316 /* Clear the next descriptor as not owned by hardware */
284 u32 ctrl = ndesc->control; 317
318 u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
285 ctrl &= ~SGDMA_CONTROL_HW_OWNED; 319 ctrl &= ~SGDMA_CONTROL_HW_OWNED;
286 ndesc->control = ctrl; 320 csrwr8(ctrl, ndesc, sgdma_descroffs(control));
287 321
288 ctrl = 0;
289 ctrl = SGDMA_CONTROL_HW_OWNED; 322 ctrl = SGDMA_CONTROL_HW_OWNED;
290 ctrl |= generate_eop; 323 ctrl |= generate_eop;
291 ctrl |= rfixed; 324 ctrl |= rfixed;
292 ctrl |= wfixed; 325 ctrl |= wfixed;
293 326
294 /* Channel is implicitly zero, initialized to 0 by default */ 327 /* Channel is implicitly zero, initialized to 0 by default */
295 328 csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
296 desc->raddr = raddr; 329 csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
297 desc->waddr = waddr; 330
298 desc->next = lower_32_bits(ndesc_phys); 331 csrwr32(0, desc, sgdma_descroffs(pad1));
299 desc->control = ctrl; 332 csrwr32(0, desc, sgdma_descroffs(pad2));
300 desc->status = 0; 333 csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
301 desc->rburst = 0; 334
302 desc->wburst = 0; 335 csrwr8(ctrl, desc, sgdma_descroffs(control));
303 desc->bytes = length; 336 csrwr8(0, desc, sgdma_descroffs(status));
304 desc->bytes_xferred = 0; 337 csrwr8(0, desc, sgdma_descroffs(wburst));
338 csrwr8(0, desc, sgdma_descroffs(rburst));
339 csrwr16(length, desc, sgdma_descroffs(bytes));
340 csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
305} 341}
306 342
307/* If hardware is busy, don't restart async read. 343/* If hardware is busy, don't restart async read.
@@ -312,48 +348,43 @@ static void sgdma_descrip(struct sgdma_descrip *desc,
312 */ 348 */
313static int sgdma_async_read(struct altera_tse_private *priv) 349static int sgdma_async_read(struct altera_tse_private *priv)
314{ 350{
315 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 351 struct sgdma_descrip __iomem *descbase =
316 struct sgdma_descrip *descbase = 352 (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
317 (struct sgdma_descrip *)priv->rx_dma_desc;
318 353
319 struct sgdma_descrip *cdesc = &descbase[0]; 354 struct sgdma_descrip __iomem *cdesc = &descbase[0];
320 struct sgdma_descrip *ndesc = &descbase[1]; 355 struct sgdma_descrip __iomem *ndesc = &descbase[1];
321 356
322 unsigned int sts = ioread32(&csr->status);
323 struct tse_buffer *rxbuffer = NULL; 357 struct tse_buffer *rxbuffer = NULL;
324 358
325 if (!sgdma_rxbusy(priv)) { 359 if (!sgdma_rxbusy(priv)) {
326 rxbuffer = queue_rx_peekhead(priv); 360 rxbuffer = queue_rx_peekhead(priv);
327 if (rxbuffer == NULL) 361 if (rxbuffer == NULL) {
362 netdev_err(priv->dev, "no rx buffers available\n");
328 return 0; 363 return 0;
329 364 }
330 sgdma_descrip(cdesc, /* current descriptor */ 365
331 ndesc, /* next descriptor */ 366 sgdma_setup_descrip(cdesc, /* current descriptor */
332 sgdma_rxphysaddr(priv, ndesc), 367 ndesc, /* next descriptor */
333 0, /* read addr 0 for rx dma */ 368 sgdma_rxphysaddr(priv, ndesc),
334 rxbuffer->dma_addr, /* write addr for rx dma */ 369 0, /* read addr 0 for rx dma */
335 0, /* read 'til EOP */ 370 rxbuffer->dma_addr, /* write addr for rx dma */
336 0, /* EOP: NA for rx dma */ 371 0, /* read 'til EOP */
337 0, /* read fixed: NA for rx dma */ 372 0, /* EOP: NA for rx dma */
338 0); /* SOP: NA for rx DMA */ 373 0, /* read fixed: NA for rx dma */
339 374 0); /* SOP: NA for rx DMA */
340 /* clear control and status */
341 iowrite32(0, &csr->control);
342
343 /* If status available, clear those bits */
344 if (sts & 0xf)
345 iowrite32(0xf, &csr->status);
346 375
347 dma_sync_single_for_device(priv->device, 376 dma_sync_single_for_device(priv->device,
348 priv->rxdescphys, 377 priv->rxdescphys,
349 priv->rxdescmem, 378 priv->sgdmadesclen,
350 DMA_BIDIRECTIONAL); 379 DMA_TO_DEVICE);
351 380
352 iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), 381 csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
353 &csr->next_descrip); 382 priv->rx_dma_csr,
383 sgdma_csroffs(next_descrip));
354 384
355 iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START), 385 csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
356 &csr->control); 386 priv->rx_dma_csr,
387 sgdma_csroffs(control));
357 388
358 return 1; 389 return 1;
359 } 390 }
@@ -362,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
362} 393}
363 394
364static int sgdma_async_write(struct altera_tse_private *priv, 395static int sgdma_async_write(struct altera_tse_private *priv,
365 struct sgdma_descrip *desc) 396 struct sgdma_descrip __iomem *desc)
366{ 397{
367 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
368
369 if (sgdma_txbusy(priv)) 398 if (sgdma_txbusy(priv))
370 return 0; 399 return 0;
371 400
372 /* clear control and status */ 401 /* clear control and status */
373 iowrite32(0, &csr->control); 402 csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
374 iowrite32(0x1f, &csr->status); 403 csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
375 404
376 dma_sync_single_for_device(priv->device, priv->txdescphys, 405 dma_sync_single_for_device(priv->device, priv->txdescphys,
377 priv->txdescmem, DMA_TO_DEVICE); 406 priv->sgdmadesclen, DMA_TO_DEVICE);
378 407
379 iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)), 408 csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
380 &csr->next_descrip); 409 priv->tx_dma_csr,
410 sgdma_csroffs(next_descrip));
381 411
382 iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START), 412 csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
383 &csr->control); 413 priv->tx_dma_csr,
414 sgdma_csroffs(control));
384 415
385 return 1; 416 return 1;
386} 417}
387 418
388static dma_addr_t 419static dma_addr_t
389sgdma_txphysaddr(struct altera_tse_private *priv, 420sgdma_txphysaddr(struct altera_tse_private *priv,
390 struct sgdma_descrip *desc) 421 struct sgdma_descrip __iomem *desc)
391{ 422{
392 dma_addr_t paddr = priv->txdescmem_busaddr; 423 dma_addr_t paddr = priv->txdescmem_busaddr;
393 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc; 424 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -396,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
396 427
397static dma_addr_t 428static dma_addr_t
398sgdma_rxphysaddr(struct altera_tse_private *priv, 429sgdma_rxphysaddr(struct altera_tse_private *priv,
399 struct sgdma_descrip *desc) 430 struct sgdma_descrip __iomem *desc)
400{ 431{
401 dma_addr_t paddr = priv->rxdescmem_busaddr; 432 dma_addr_t paddr = priv->rxdescmem_busaddr;
402 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc; 433 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -485,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
485 */ 516 */
486static int sgdma_rxbusy(struct altera_tse_private *priv) 517static int sgdma_rxbusy(struct altera_tse_private *priv)
487{ 518{
488 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; 519 return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
489 return ioread32(&csr->status) & SGDMA_STSREG_BUSY; 520 & SGDMA_STSREG_BUSY;
490} 521}
491 522
492/* waits for the tx sgdma to finish its current operation, returns 0 523
@@ -495,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
495static int sgdma_txbusy(struct altera_tse_private *priv) 526static int sgdma_txbusy(struct altera_tse_private *priv)
496{ 527{
497 int delay = 0; 528 int delay = 0;
498 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
499 529
500 /* if DMA is busy, wait for current transaction to finish */ 530 /* if DMA is busy, wait for current transaction to finish */
501 while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100)) 531 while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
532 & SGDMA_STSREG_BUSY) && (delay++ < 100))
502 udelay(1); 533 udelay(1);
503 534
504 if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) { 535 if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
536 & SGDMA_STSREG_BUSY) {
505 netdev_err(priv->dev, "timeout waiting for tx dma\n"); 537 netdev_err(priv->dev, "timeout waiting for tx dma\n");
506 return 1; 538 return 1;
507 } 539 }
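
Beyond the accessor conversion, the sgdma changes above sync one descriptor's worth of memory (priv->sgdmadesclen) rather than the whole descriptor region, and pick the sync direction to match whoever touches the memory next. A minimal sketch of that call pattern follows, assuming a hypothetical struct model_priv; only the dma_sync_single_for_cpu()/dma_sync_single_for_device() calls are the real kernel API.

    #include <linux/dma-mapping.h>

    struct model_priv {
    	struct device *dev;
    	dma_addr_t rxdescphys;	/* bus address of the RX descriptor area */
    	size_t desclen;		/* size of one descriptor */
    };

    /* CPU is about to read status/bytes_xferred written by the DMA engine */
    static void model_desc_sync_for_cpu(struct model_priv *p)
    {
    	dma_sync_single_for_cpu(p->dev, p->rxdescphys, p->desclen,
    				DMA_FROM_DEVICE);
    }

    /* Descriptor has been rewritten by the CPU; hand it back to hardware */
    static void model_desc_sync_for_device(struct model_priv *p)
    {
    	dma_sync_single_for_device(p->dev, p->rxdescphys, p->desclen,
    				   DMA_TO_DEVICE);
    }
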
diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h
index 07d471729dc4..584977e29ef9 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.h
+++ b/drivers/net/ethernet/altera/altera_sgdma.h
@@ -26,10 +26,11 @@ void sgdma_clear_rxirq(struct altera_tse_private *);
26void sgdma_clear_txirq(struct altera_tse_private *); 26void sgdma_clear_txirq(struct altera_tse_private *);
27int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *); 27int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
28u32 sgdma_tx_completions(struct altera_tse_private *); 28u32 sgdma_tx_completions(struct altera_tse_private *);
29int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *); 29void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
30void sgdma_status(struct altera_tse_private *); 30void sgdma_status(struct altera_tse_private *);
31u32 sgdma_rx_status(struct altera_tse_private *); 31u32 sgdma_rx_status(struct altera_tse_private *);
32int sgdma_initialize(struct altera_tse_private *); 32int sgdma_initialize(struct altera_tse_private *);
33void sgdma_uninitialize(struct altera_tse_private *); 33void sgdma_uninitialize(struct altera_tse_private *);
34void sgdma_start_rxdma(struct altera_tse_private *);
34 35
35#endif /* __ALTERA_SGDMA_H__ */ 36#endif /* __ALTERA_SGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index ba3334f35383..85bc33b218d9 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -19,16 +19,16 @@
19 19
20/* SGDMA descriptor structure */ 20/* SGDMA descriptor structure */
21struct sgdma_descrip { 21struct sgdma_descrip {
22 unsigned int raddr; /* address of data to be read */ 22 u32 raddr; /* address of data to be read */
23 unsigned int pad1; 23 u32 pad1;
24 unsigned int waddr; 24 u32 waddr;
25 unsigned int pad2; 25 u32 pad2;
26 unsigned int next; 26 u32 next;
27 unsigned int pad3; 27 u32 pad3;
28 unsigned short bytes; 28 u16 bytes;
29 unsigned char rburst; 29 u8 rburst;
30 unsigned char wburst; 30 u8 wburst;
31 unsigned short bytes_xferred; /* 16 bits, bytes xferred */ 31 u16 bytes_xferred; /* 16 bits, bytes xferred */
32 32
33 /* bit 0: error 33 /* bit 0: error
34 * bit 1: length error 34 * bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
39 * bit 6: reserved 39 * bit 6: reserved
40 * bit 7: status eop for recv case 40 * bit 7: status eop for recv case
41 */ 41 */
42 unsigned char status; 42 u8 status;
43 43
44 /* bit 0: eop 44 /* bit 0: eop
45 * bit 1: read_fixed 45 * bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
47 * bits 3,4,5,6: Channel (always 0) 47 * bits 3,4,5,6: Channel (always 0)
48 * bit 7: hardware owned 48 * bit 7: hardware owned
49 */ 49 */
50 unsigned char control; 50 u8 control;
51} __packed; 51} __packed;
52 52
53 53
@@ -101,6 +101,8 @@ struct sgdma_csr {
101 u32 pad3[3]; 101 u32 pad3[3];
102}; 102};
103 103
104#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
105#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
104 106
105#define SGDMA_STSREG_ERR BIT(0) /* Error */ 107#define SGDMA_STSREG_ERR BIT(0) /* Error */
106#define SGDMA_STSREG_EOP BIT(1) /* EOP */ 108#define SGDMA_STSREG_EOP BIT(1) /* EOP */
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 8feeed05de0e..2adb24d4523c 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -58,6 +58,8 @@
58/* MAC function configuration default settings */ 58/* MAC function configuration default settings */
59#define ALTERA_TSE_TX_IPG_LENGTH 12 59#define ALTERA_TSE_TX_IPG_LENGTH 12
60 60
61#define ALTERA_TSE_PAUSE_QUANTA 0xffff
62
61#define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1) 63#define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1)
62 64
63/* MAC Command_Config Register Bit Definitions 65/* MAC Command_Config Register Bit Definitions
@@ -355,6 +357,8 @@ struct altera_tse_mac {
355 u32 reserved5[42]; 357 u32 reserved5[42];
356}; 358};
357 359
360#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
361
358/* Transmit and Receive Command Registers Bit Definitions 362/* Transmit and Receive Command Registers Bit Definitions
359 */ 363 */
360#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17) 364#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17)
@@ -390,10 +394,11 @@ struct altera_dmaops {
390 void (*clear_rxirq)(struct altera_tse_private *); 394 void (*clear_rxirq)(struct altera_tse_private *);
391 int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *); 395 int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
392 u32 (*tx_completions)(struct altera_tse_private *); 396 u32 (*tx_completions)(struct altera_tse_private *);
393 int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *); 397 void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
394 u32 (*get_rx_status)(struct altera_tse_private *); 398 u32 (*get_rx_status)(struct altera_tse_private *);
395 int (*init_dma)(struct altera_tse_private *); 399 int (*init_dma)(struct altera_tse_private *);
396 void (*uninit_dma)(struct altera_tse_private *); 400 void (*uninit_dma)(struct altera_tse_private *);
401 void (*start_rxdma)(struct altera_tse_private *);
397}; 402};
398 403
399/* This structure is private to each device. 404/* This structure is private to each device.
@@ -453,6 +458,7 @@ struct altera_tse_private {
453 u32 rxctrlreg; 458 u32 rxctrlreg;
454 dma_addr_t rxdescphys; 459 dma_addr_t rxdescphys;
455 dma_addr_t txdescphys; 460 dma_addr_t txdescphys;
461 size_t sgdmadesclen;
456 462
457 struct list_head txlisthd; 463 struct list_head txlisthd;
458 struct list_head rxlisthd; 464 struct list_head rxlisthd;
@@ -483,4 +489,49 @@ struct altera_tse_private {
483 */ 489 */
484void altera_tse_set_ethtool_ops(struct net_device *); 490void altera_tse_set_ethtool_ops(struct net_device *);
485 491
492static inline
493u32 csrrd32(void __iomem *mac, size_t offs)
494{
495 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
496 return readl(paddr);
497}
498
499static inline
500u16 csrrd16(void __iomem *mac, size_t offs)
501{
502 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
503 return readw(paddr);
504}
505
506static inline
507u8 csrrd8(void __iomem *mac, size_t offs)
508{
509 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
510 return readb(paddr);
511}
512
513static inline
514void csrwr32(u32 val, void __iomem *mac, size_t offs)
515{
516 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
517
518 writel(val, paddr);
519}
520
521static inline
522void csrwr16(u16 val, void __iomem *mac, size_t offs)
523{
524 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
525
526 writew(val, paddr);
527}
528
529static inline
530void csrwr8(u8 val, void __iomem *mac, size_t offs)
531{
532 void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
533
534 writeb(val, paddr);
535}
536
486#endif /* __ALTERA_TSE_H__ */ 537#endif /* __ALTERA_TSE_H__ */
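
The csrrd32()/csrwr32() helpers defined above, together with tse_csroffs(), are what the rest of the patch converts the driver to. A short usage sketch of the resulting read-modify-write idiom, mirroring what reset_mac() does; example_disable_mac is an illustrative name, the other identifiers exist in altera_tse.h.

    #include "altera_tse.h"

    static void example_disable_mac(struct altera_tse_private *priv)
    {
    	u32 cfg = csrrd32(priv->mac_dev, tse_csroffs(command_config));

    	cfg &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
    	csrwr32(cfg, priv->mac_dev, tse_csroffs(command_config));
    }
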
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 319ca74f5e74..54c25eff7952 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -77,7 +77,7 @@ static void tse_get_drvinfo(struct net_device *dev,
77 struct altera_tse_private *priv = netdev_priv(dev); 77 struct altera_tse_private *priv = netdev_priv(dev);
78 u32 rev = ioread32(&priv->mac_dev->megacore_revision); 78 u32 rev = ioread32(&priv->mac_dev->megacore_revision);
79 79
80 strcpy(info->driver, "Altera TSE MAC IP Driver"); 80 strcpy(info->driver, "altera_tse");
81 strcpy(info->version, "v8.0"); 81 strcpy(info->version, "v8.0");
82 snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d", 82 snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
83 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16); 83 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
96 u64 *buf) 96 u64 *buf)
97{ 97{
98 struct altera_tse_private *priv = netdev_priv(dev); 98 struct altera_tse_private *priv = netdev_priv(dev);
99 struct altera_tse_mac *mac = priv->mac_dev;
100 u64 ext; 99 u64 ext;
101 100
102 buf[0] = ioread32(&mac->frames_transmitted_ok); 101 buf[0] = csrrd32(priv->mac_dev,
103 buf[1] = ioread32(&mac->frames_received_ok); 102 tse_csroffs(frames_transmitted_ok));
104 buf[2] = ioread32(&mac->frames_check_sequence_errors); 103 buf[1] = csrrd32(priv->mac_dev,
105 buf[3] = ioread32(&mac->alignment_errors); 104 tse_csroffs(frames_received_ok));
105 buf[2] = csrrd32(priv->mac_dev,
106 tse_csroffs(frames_check_sequence_errors));
107 buf[3] = csrrd32(priv->mac_dev,
108 tse_csroffs(alignment_errors));
106 109
107 /* Extended aOctetsTransmittedOK counter */ 110 /* Extended aOctetsTransmittedOK counter */
108 ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32; 111 ext = (u64) csrrd32(priv->mac_dev,
109 ext |= ioread32(&mac->octets_transmitted_ok); 112 tse_csroffs(msb_octets_transmitted_ok)) << 32;
113
114 ext |= csrrd32(priv->mac_dev,
115 tse_csroffs(octets_transmitted_ok));
110 buf[4] = ext; 116 buf[4] = ext;
111 117
112 /* Extended aOctetsReceivedOK counter */ 118 /* Extended aOctetsReceivedOK counter */
113 ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32; 119 ext = (u64) csrrd32(priv->mac_dev,
114 ext |= ioread32(&mac->octets_received_ok); 120 tse_csroffs(msb_octets_received_ok)) << 32;
121
122 ext |= csrrd32(priv->mac_dev,
123 tse_csroffs(octets_received_ok));
115 buf[5] = ext; 124 buf[5] = ext;
116 125
117 buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames); 126 buf[6] = csrrd32(priv->mac_dev,
118 buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames); 127 tse_csroffs(tx_pause_mac_ctrl_frames));
119 buf[8] = ioread32(&mac->if_in_errors); 128 buf[7] = csrrd32(priv->mac_dev,
120 buf[9] = ioread32(&mac->if_out_errors); 129 tse_csroffs(rx_pause_mac_ctrl_frames));
121 buf[10] = ioread32(&mac->if_in_ucast_pkts); 130 buf[8] = csrrd32(priv->mac_dev,
122 buf[11] = ioread32(&mac->if_in_multicast_pkts); 131 tse_csroffs(if_in_errors));
123 buf[12] = ioread32(&mac->if_in_broadcast_pkts); 132 buf[9] = csrrd32(priv->mac_dev,
124 buf[13] = ioread32(&mac->if_out_discards); 133 tse_csroffs(if_out_errors));
125 buf[14] = ioread32(&mac->if_out_ucast_pkts); 134 buf[10] = csrrd32(priv->mac_dev,
126 buf[15] = ioread32(&mac->if_out_multicast_pkts); 135 tse_csroffs(if_in_ucast_pkts));
127 buf[16] = ioread32(&mac->if_out_broadcast_pkts); 136 buf[11] = csrrd32(priv->mac_dev,
128 buf[17] = ioread32(&mac->ether_stats_drop_events); 137 tse_csroffs(if_in_multicast_pkts));
138 buf[12] = csrrd32(priv->mac_dev,
139 tse_csroffs(if_in_broadcast_pkts));
140 buf[13] = csrrd32(priv->mac_dev,
141 tse_csroffs(if_out_discards));
142 buf[14] = csrrd32(priv->mac_dev,
143 tse_csroffs(if_out_ucast_pkts));
144 buf[15] = csrrd32(priv->mac_dev,
145 tse_csroffs(if_out_multicast_pkts));
146 buf[16] = csrrd32(priv->mac_dev,
147 tse_csroffs(if_out_broadcast_pkts));
148 buf[17] = csrrd32(priv->mac_dev,
149 tse_csroffs(ether_stats_drop_events));
129 150
130 /* Extended etherStatsOctets counter */ 151 /* Extended etherStatsOctets counter */
131 ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32; 152 ext = (u64) csrrd32(priv->mac_dev,
132 ext |= ioread32(&mac->ether_stats_octets); 153 tse_csroffs(msb_ether_stats_octets)) << 32;
154 ext |= csrrd32(priv->mac_dev,
155 tse_csroffs(ether_stats_octets));
133 buf[18] = ext; 156 buf[18] = ext;
134 157
135 buf[19] = ioread32(&mac->ether_stats_pkts); 158 buf[19] = csrrd32(priv->mac_dev,
136 buf[20] = ioread32(&mac->ether_stats_undersize_pkts); 159 tse_csroffs(ether_stats_pkts));
137 buf[21] = ioread32(&mac->ether_stats_oversize_pkts); 160 buf[20] = csrrd32(priv->mac_dev,
138 buf[22] = ioread32(&mac->ether_stats_pkts_64_octets); 161 tse_csroffs(ether_stats_undersize_pkts));
139 buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets); 162 buf[21] = csrrd32(priv->mac_dev,
140 buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets); 163 tse_csroffs(ether_stats_oversize_pkts));
141 buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets); 164 buf[22] = csrrd32(priv->mac_dev,
142 buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets); 165 tse_csroffs(ether_stats_pkts_64_octets));
143 buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets); 166 buf[23] = csrrd32(priv->mac_dev,
144 buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets); 167 tse_csroffs(ether_stats_pkts_65to127_octets));
145 buf[29] = ioread32(&mac->ether_stats_jabbers); 168 buf[24] = csrrd32(priv->mac_dev,
146 buf[30] = ioread32(&mac->ether_stats_fragments); 169 tse_csroffs(ether_stats_pkts_128to255_octets));
170 buf[25] = csrrd32(priv->mac_dev,
171 tse_csroffs(ether_stats_pkts_256to511_octets));
172 buf[26] = csrrd32(priv->mac_dev,
173 tse_csroffs(ether_stats_pkts_512to1023_octets));
174 buf[27] = csrrd32(priv->mac_dev,
175 tse_csroffs(ether_stats_pkts_1024to1518_octets));
176 buf[28] = csrrd32(priv->mac_dev,
177 tse_csroffs(ether_stats_pkts_1519tox_octets));
178 buf[29] = csrrd32(priv->mac_dev,
179 tse_csroffs(ether_stats_jabbers));
180 buf[30] = csrrd32(priv->mac_dev,
181 tse_csroffs(ether_stats_fragments));
147} 182}
148 183
149static int tse_sset_count(struct net_device *dev, int sset) 184static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,19 +213,24 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
178{ 213{
179 int i; 214 int i;
180 struct altera_tse_private *priv = netdev_priv(dev); 215 struct altera_tse_private *priv = netdev_priv(dev);
181 u32 *tse_mac_regs = (u32 *)priv->mac_dev;
182 u32 *buf = regbuf; 216 u32 *buf = regbuf;
183 217
184 /* Set version to a known value, so ethtool knows 218 /* Set version to a known value, so ethtool knows
185 * how to do any special formatting of this data. 219 * how to do any special formatting of this data.
186 * This version number will need to change if and 220 * This version number will need to change if and
187 * when this register table is changed. 221 * when this register table is changed.
222 *
223 * version[31:0] = 1: Dump the first 128 TSE Registers
224 * Upper bits are all 0 by default
225 *
226 * Upper 16-bits will indicate feature presence for
227 * Ethtool register decoding in future version.
188 */ 228 */
189 229
190 regs->version = 1; 230 regs->version = 1;
191 231
192 for (i = 0; i < TSE_NUM_REGS; i++) 232 for (i = 0; i < TSE_NUM_REGS; i++)
193 buf[i] = ioread32(&tse_mac_regs[i]); 233 buf[i] = csrrd32(priv->mac_dev, i * 4);
194} 234}
195 235
196static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 236static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
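
tse_fill_stats() now reads each counter with an explicit csrrd32() call. As an aside rather than something the patch does, the plain 32-bit counters could also be filled from a table of offsets; the sketch below is deliberately incomplete (it skips the 64-bit extended counters), and example_stat_offs/example_fill_simple_stats are illustrative names.

    #include <linux/kernel.h>
    #include "altera_tse.h"

    static const size_t example_stat_offs[] = {
    	tse_csroffs(frames_transmitted_ok),
    	tse_csroffs(frames_received_ok),
    	tse_csroffs(frames_check_sequence_errors),
    	tse_csroffs(alignment_errors),
    };

    static void example_fill_simple_stats(struct altera_tse_private *priv,
    				      u64 *buf)
    {
    	size_t i;

    	for (i = 0; i < ARRAY_SIZE(example_stat_offs); i++)
    		buf[i] = csrrd32(priv->mac_dev, example_stat_offs[i]);
    }
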
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index c70a29e0b9f7..7330681574d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
100 */ 100 */
101static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 101static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
102{ 102{
103 struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; 103 struct net_device *ndev = bus->priv;
104 unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; 104 struct altera_tse_private *priv = netdev_priv(ndev);
105 u32 data;
106 105
107 /* set MDIO address */ 106 /* set MDIO address */
108 iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); 107 csrwr32((mii_id & 0x1f), priv->mac_dev,
108 tse_csroffs(mdio_phy0_addr));
109 109
110 /* get the data */ 110 /* get the data */
111 data = ioread32(&mdio_regs[regnum]) & 0xffff; 111 return csrrd32(priv->mac_dev,
112 return data; 112 tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
113} 113}
114 114
115static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 115static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
116 u16 value) 116 u16 value)
117{ 117{
118 struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; 118 struct net_device *ndev = bus->priv;
119 unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; 119 struct altera_tse_private *priv = netdev_priv(ndev);
120 120
121 /* set MDIO address */ 121 /* set MDIO address */
122 iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); 122 csrwr32((mii_id & 0x1f), priv->mac_dev,
123 tse_csroffs(mdio_phy0_addr));
123 124
124 /* write the data */ 125 /* write the data */
125 iowrite32((u32) value, &mdio_regs[regnum]); 126 csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
126 return 0; 127 return 0;
127} 128}
128 129
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
168 for (i = 0; i < PHY_MAX_ADDR; i++) 169 for (i = 0; i < PHY_MAX_ADDR; i++)
169 mdio->irq[i] = PHY_POLL; 170 mdio->irq[i] = PHY_POLL;
170 171
171 mdio->priv = priv->mac_dev; 172 mdio->priv = dev;
172 mdio->parent = priv->device; 173 mdio->parent = priv->device;
173 174
174 ret = of_mdiobus_register(mdio, mdio_node); 175 ret = of_mdiobus_register(mdio, mdio_node);
@@ -224,6 +225,7 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
224 dev_kfree_skb_any(rxbuffer->skb); 225 dev_kfree_skb_any(rxbuffer->skb);
225 return -EINVAL; 226 return -EINVAL;
226 } 227 }
228 rxbuffer->dma_addr &= (dma_addr_t)~3;
227 rxbuffer->len = len; 229 rxbuffer->len = len;
228 return 0; 230 return 0;
229} 231}
@@ -425,9 +427,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
425 priv->dev->stats.rx_bytes += pktlength; 427 priv->dev->stats.rx_bytes += pktlength;
426 428
427 entry = next_entry; 429 entry = next_entry;
430
431 tse_rx_refill(priv);
428 } 432 }
429 433
430 tse_rx_refill(priv);
431 return count; 434 return count;
432} 435}
433 436
@@ -520,7 +523,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
520 struct altera_tse_private *priv; 523 struct altera_tse_private *priv;
521 unsigned long int flags; 524 unsigned long int flags;
522 525
523
524 if (unlikely(!dev)) { 526 if (unlikely(!dev)) {
525 pr_err("%s: invalid dev pointer\n", __func__); 527 pr_err("%s: invalid dev pointer\n", __func__);
526 return IRQ_NONE; 528 return IRQ_NONE;
@@ -562,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
562 unsigned int nopaged_len = skb_headlen(skb); 564 unsigned int nopaged_len = skb_headlen(skb);
563 enum netdev_tx ret = NETDEV_TX_OK; 565 enum netdev_tx ret = NETDEV_TX_OK;
564 dma_addr_t dma_addr; 566 dma_addr_t dma_addr;
565 int txcomplete = 0;
566 567
567 spin_lock_bh(&priv->tx_lock); 568 spin_lock_bh(&priv->tx_lock);
568 569
@@ -598,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
598 dma_sync_single_for_device(priv->device, buffer->dma_addr, 599 dma_sync_single_for_device(priv->device, buffer->dma_addr,
599 buffer->len, DMA_TO_DEVICE); 600 buffer->len, DMA_TO_DEVICE);
600 601
601 txcomplete = priv->dmaops->tx_buffer(priv, buffer); 602 priv->dmaops->tx_buffer(priv, buffer);
602 603
603 skb_tx_timestamp(skb); 604 skb_tx_timestamp(skb);
604 605
@@ -697,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
697 struct altera_tse_private *priv = netdev_priv(dev); 698 struct altera_tse_private *priv = netdev_priv(dev);
698 struct phy_device *phydev = NULL; 699 struct phy_device *phydev = NULL;
699 char phy_id_fmt[MII_BUS_ID_SIZE + 3]; 700 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
700 int ret;
701 701
702 if (priv->phy_addr != POLL_PHY) { 702 if (priv->phy_addr != POLL_PHY) {
703 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, 703 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -711,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
711 netdev_err(dev, "Could not attach to PHY\n"); 711 netdev_err(dev, "Could not attach to PHY\n");
712 712
713 } else { 713 } else {
714 int ret;
714 phydev = phy_find_first(priv->mdio); 715 phydev = phy_find_first(priv->mdio);
715 if (phydev == NULL) { 716 if (phydev == NULL) {
716 netdev_err(dev, "No PHY found\n"); 717 netdev_err(dev, "No PHY found\n");
@@ -790,7 +791,6 @@ static int init_phy(struct net_device *dev)
790 791
791static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) 792static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
792{ 793{
793 struct altera_tse_mac *mac = priv->mac_dev;
794 u32 msb; 794 u32 msb;
795 u32 lsb; 795 u32 lsb;
796 796
@@ -798,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
798 lsb = ((addr[5] << 8) | addr[4]) & 0xffff; 798 lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
799 799
800 /* Set primary MAC address */ 800 /* Set primary MAC address */
801 iowrite32(msb, &mac->mac_addr_0); 801 csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
802 iowrite32(lsb, &mac->mac_addr_1); 802 csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
803} 803}
804 804
805/* MAC software reset. 805/* MAC software reset.
@@ -810,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
810 */ 810 */
811static int reset_mac(struct altera_tse_private *priv) 811static int reset_mac(struct altera_tse_private *priv)
812{ 812{
813 void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
814 int counter; 813 int counter;
815 u32 dat; 814 u32 dat;
816 815
817 dat = ioread32(cmd_cfg_reg); 816 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
818 dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); 817 dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
819 dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET; 818 dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
820 iowrite32(dat, cmd_cfg_reg); 819 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
821 820
822 counter = 0; 821 counter = 0;
823 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 822 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
824 if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET)) 823 if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
824 MAC_CMDCFG_SW_RESET))
825 break; 825 break;
826 udelay(1); 826 udelay(1);
827 } 827 }
828 828
829 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { 829 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
830 dat = ioread32(cmd_cfg_reg); 830 dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
831 dat &= ~MAC_CMDCFG_SW_RESET; 831 dat &= ~MAC_CMDCFG_SW_RESET;
832 iowrite32(dat, cmd_cfg_reg); 832 csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
833 return -1; 833 return -1;
834 } 834 }
835 return 0; 835 return 0;
@@ -839,42 +839,58 @@ static int reset_mac(struct altera_tse_private *priv)
839*/ 839*/
840static int init_mac(struct altera_tse_private *priv) 840static int init_mac(struct altera_tse_private *priv)
841{ 841{
842 struct altera_tse_mac *mac = priv->mac_dev;
843 unsigned int cmd = 0; 842 unsigned int cmd = 0;
844 u32 frm_length; 843 u32 frm_length;
845 844
846 /* Setup Rx FIFO */ 845 /* Setup Rx FIFO */
847 iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, 846 csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
848 &mac->rx_section_empty); 847 priv->mac_dev, tse_csroffs(rx_section_empty));
849 iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full); 848
850 iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty); 849 csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
851 iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full); 850 tse_csroffs(rx_section_full));
851
852 csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
853 tse_csroffs(rx_almost_empty));
854
855 csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
856 tse_csroffs(rx_almost_full));
852 857
853 /* Setup Tx FIFO */ 858 /* Setup Tx FIFO */
854 iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, 859 csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
855 &mac->tx_section_empty); 860 priv->mac_dev, tse_csroffs(tx_section_empty));
856 iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full); 861
857 iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty); 862 csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
858 iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full); 863 tse_csroffs(tx_section_full));
864
865 csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
866 tse_csroffs(tx_almost_empty));
867
868 csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
869 tse_csroffs(tx_almost_full));
859 870
860 /* MAC Address Configuration */ 871 /* MAC Address Configuration */
861 tse_update_mac_addr(priv, priv->dev->dev_addr); 872 tse_update_mac_addr(priv, priv->dev->dev_addr);
862 873
863 /* MAC Function Configuration */ 874 /* MAC Function Configuration */
864 frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; 875 frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
865 iowrite32(frm_length, &mac->frm_length); 876 csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
866 iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length); 877
878 csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
879 tse_csroffs(tx_ipg_length));
867 880
868 /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit 881 /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
869 * start address 882 * start address
870 */ 883 */
871 tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); 884 tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
872 tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | 885 ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
873 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); 886
887 tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
888 ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
889 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
874 890
875 /* Set the MAC options */ 891 /* Set the MAC options */
876 cmd = ioread32(&mac->command_config); 892 cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
877 cmd |= MAC_CMDCFG_PAD_EN; /* Padding Removal on Receive */ 893 cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */
878 cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */ 894 cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */
879 cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames 895 cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames
880 * with CRC errors 896 * with CRC errors
@@ -882,7 +898,16 @@ static int init_mac(struct altera_tse_private *priv)
882 cmd |= MAC_CMDCFG_CNTL_FRM_ENA; 898 cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
883 cmd &= ~MAC_CMDCFG_TX_ENA; 899 cmd &= ~MAC_CMDCFG_TX_ENA;
884 cmd &= ~MAC_CMDCFG_RX_ENA; 900 cmd &= ~MAC_CMDCFG_RX_ENA;
885 iowrite32(cmd, &mac->command_config); 901
902 /* Default speed and duplex setting, full/100 */
903 cmd &= ~MAC_CMDCFG_HD_ENA;
904 cmd &= ~MAC_CMDCFG_ETH_SPEED;
905 cmd &= ~MAC_CMDCFG_ENA_10;
906
907 csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
908
909 csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
910 tse_csroffs(pause_quanta));
886 911
887 if (netif_msg_hw(priv)) 912 if (netif_msg_hw(priv))
888 dev_dbg(priv->device, 913 dev_dbg(priv->device,
@@ -895,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
895 */ 920 */
896static void tse_set_mac(struct altera_tse_private *priv, bool enable) 921static void tse_set_mac(struct altera_tse_private *priv, bool enable)
897{ 922{
898 struct altera_tse_mac *mac = priv->mac_dev; 923 u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
899 u32 value = ioread32(&mac->command_config);
900 924
901 if (enable) 925 if (enable)
902 value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA; 926 value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
903 else 927 else
904 value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); 928 value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
905 929
906 iowrite32(value, &mac->command_config); 930 csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
907} 931}
908 932
909/* Change the MTU 933/* Change the MTU
@@ -933,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
933static void altera_tse_set_mcfilter(struct net_device *dev) 957static void altera_tse_set_mcfilter(struct net_device *dev)
934{ 958{
935 struct altera_tse_private *priv = netdev_priv(dev); 959 struct altera_tse_private *priv = netdev_priv(dev);
936 struct altera_tse_mac *mac = priv->mac_dev;
937 int i; 960 int i;
938 struct netdev_hw_addr *ha; 961 struct netdev_hw_addr *ha;
939 962
940 /* clear the hash filter */ 963 /* clear the hash filter */
941 for (i = 0; i < 64; i++) 964 for (i = 0; i < 64; i++)
942 iowrite32(0, &(mac->hash_table[i])); 965 csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
943 966
944 netdev_for_each_mc_addr(ha, dev) { 967 netdev_for_each_mc_addr(ha, dev) {
945 unsigned int hash = 0; 968 unsigned int hash = 0;
@@ -955,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
955 978
956 hash = (hash << 1) | xor_bit; 979 hash = (hash << 1) | xor_bit;
957 } 980 }
958 iowrite32(1, &(mac->hash_table[hash])); 981 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
959 } 982 }
960} 983}
961 984
@@ -963,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
963static void altera_tse_set_mcfilterall(struct net_device *dev) 986static void altera_tse_set_mcfilterall(struct net_device *dev)
964{ 987{
965 struct altera_tse_private *priv = netdev_priv(dev); 988 struct altera_tse_private *priv = netdev_priv(dev);
966 struct altera_tse_mac *mac = priv->mac_dev;
967 int i; 989 int i;
968 990
969 /* set the hash filter */ 991 /* set the hash filter */
970 for (i = 0; i < 64; i++) 992 for (i = 0; i < 64; i++)
971 iowrite32(1, &(mac->hash_table[i])); 993 csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
972} 994}
973 995
974/* Set or clear the multicast filter for this adaptor 996/* Set or clear the multicast filter for this adaptor
@@ -976,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
976static void tse_set_rx_mode_hashfilter(struct net_device *dev) 998static void tse_set_rx_mode_hashfilter(struct net_device *dev)
977{ 999{
978 struct altera_tse_private *priv = netdev_priv(dev); 1000 struct altera_tse_private *priv = netdev_priv(dev);
979 struct altera_tse_mac *mac = priv->mac_dev;
980 1001
981 spin_lock(&priv->mac_cfg_lock); 1002 spin_lock(&priv->mac_cfg_lock);
982 1003
983 if (dev->flags & IFF_PROMISC) 1004 if (dev->flags & IFF_PROMISC)
984 tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); 1005 tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1006 MAC_CMDCFG_PROMIS_EN);
985 1007
986 if (dev->flags & IFF_ALLMULTI) 1008 if (dev->flags & IFF_ALLMULTI)
987 altera_tse_set_mcfilterall(dev); 1009 altera_tse_set_mcfilterall(dev);
@@ -996,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
996static void tse_set_rx_mode(struct net_device *dev) 1018static void tse_set_rx_mode(struct net_device *dev)
997{ 1019{
998 struct altera_tse_private *priv = netdev_priv(dev); 1020 struct altera_tse_private *priv = netdev_priv(dev);
999 struct altera_tse_mac *mac = priv->mac_dev;
1000 1021
1001 spin_lock(&priv->mac_cfg_lock); 1022 spin_lock(&priv->mac_cfg_lock);
1002 1023
1003 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || 1024 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
1004 !netdev_mc_empty(dev) || !netdev_uc_empty(dev)) 1025 !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
1005 tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); 1026 tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
1027 MAC_CMDCFG_PROMIS_EN);
1006 else 1028 else
1007 tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); 1029 tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
1030 MAC_CMDCFG_PROMIS_EN);
1008 1031
1009 spin_unlock(&priv->mac_cfg_lock); 1032 spin_unlock(&priv->mac_cfg_lock);
1010} 1033}
@@ -1085,17 +1108,19 @@ static int tse_open(struct net_device *dev)
1085 1108
1086 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); 1109 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1087 1110
1088 /* Start MAC Rx/Tx */
1089 spin_lock(&priv->mac_cfg_lock);
1090 tse_set_mac(priv, true);
1091 spin_unlock(&priv->mac_cfg_lock);
1092
1093 if (priv->phydev) 1111 if (priv->phydev)
1094 phy_start(priv->phydev); 1112 phy_start(priv->phydev);
1095 1113
1096 napi_enable(&priv->napi); 1114 napi_enable(&priv->napi);
1097 netif_start_queue(dev); 1115 netif_start_queue(dev);
1098 1116
1117 priv->dmaops->start_rxdma(priv);
1118
1119 /* Start MAC Rx/Tx */
1120 spin_lock(&priv->mac_cfg_lock);
1121 tse_set_mac(priv, true);
1122 spin_unlock(&priv->mac_cfg_lock);
1123
1099 return 0; 1124 return 0;
1100 1125
1101tx_request_irq_error: 1126tx_request_irq_error:
@@ -1167,7 +1192,6 @@ static struct net_device_ops altera_tse_netdev_ops = {
1167 .ndo_validate_addr = eth_validate_addr, 1192 .ndo_validate_addr = eth_validate_addr,
1168}; 1193};
1169 1194
1170
1171static int request_and_map(struct platform_device *pdev, const char *name, 1195static int request_and_map(struct platform_device *pdev, const char *name,
1172 struct resource **res, void __iomem **ptr) 1196 struct resource **res, void __iomem **ptr)
1173{ 1197{
@@ -1235,7 +1259,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1235 /* Get the mapped address to the SGDMA descriptor memory */ 1259 /* Get the mapped address to the SGDMA descriptor memory */
1236 ret = request_and_map(pdev, "s1", &dma_res, &descmap); 1260 ret = request_and_map(pdev, "s1", &dma_res, &descmap);
1237 if (ret) 1261 if (ret)
1238 goto out_free; 1262 goto err_free_netdev;
1239 1263
1240 /* Start of that memory is for transmit descriptors */ 1264 /* Start of that memory is for transmit descriptors */
1241 priv->tx_dma_desc = descmap; 1265 priv->tx_dma_desc = descmap;
@@ -1254,24 +1278,24 @@ static int altera_tse_probe(struct platform_device *pdev)
1254 if (upper_32_bits(priv->rxdescmem_busaddr)) { 1278 if (upper_32_bits(priv->rxdescmem_busaddr)) {
1255 dev_dbg(priv->device, 1279 dev_dbg(priv->device,
1256 "SGDMA bus addresses greater than 32-bits\n"); 1280 "SGDMA bus addresses greater than 32-bits\n");
1257 goto out_free; 1281 goto err_free_netdev;
1258 } 1282 }
1259 if (upper_32_bits(priv->txdescmem_busaddr)) { 1283 if (upper_32_bits(priv->txdescmem_busaddr)) {
1260 dev_dbg(priv->device, 1284 dev_dbg(priv->device,
1261 "SGDMA bus addresses greater than 32-bits\n"); 1285 "SGDMA bus addresses greater than 32-bits\n");
1262 goto out_free; 1286 goto err_free_netdev;
1263 } 1287 }
1264 } else if (priv->dmaops && 1288 } else if (priv->dmaops &&
1265 priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) { 1289 priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
1266 ret = request_and_map(pdev, "rx_resp", &dma_res, 1290 ret = request_and_map(pdev, "rx_resp", &dma_res,
1267 &priv->rx_dma_resp); 1291 &priv->rx_dma_resp);
1268 if (ret) 1292 if (ret)
1269 goto out_free; 1293 goto err_free_netdev;
1270 1294
1271 ret = request_and_map(pdev, "tx_desc", &dma_res, 1295 ret = request_and_map(pdev, "tx_desc", &dma_res,
1272 &priv->tx_dma_desc); 1296 &priv->tx_dma_desc);
1273 if (ret) 1297 if (ret)
1274 goto out_free; 1298 goto err_free_netdev;
1275 1299
1276 priv->txdescmem = resource_size(dma_res); 1300 priv->txdescmem = resource_size(dma_res);
1277 priv->txdescmem_busaddr = dma_res->start; 1301 priv->txdescmem_busaddr = dma_res->start;
@@ -1279,13 +1303,13 @@ static int altera_tse_probe(struct platform_device *pdev)
1279 ret = request_and_map(pdev, "rx_desc", &dma_res, 1303 ret = request_and_map(pdev, "rx_desc", &dma_res,
1280 &priv->rx_dma_desc); 1304 &priv->rx_dma_desc);
1281 if (ret) 1305 if (ret)
1282 goto out_free; 1306 goto err_free_netdev;
1283 1307
1284 priv->rxdescmem = resource_size(dma_res); 1308 priv->rxdescmem = resource_size(dma_res);
1285 priv->rxdescmem_busaddr = dma_res->start; 1309 priv->rxdescmem_busaddr = dma_res->start;
1286 1310
1287 } else { 1311 } else {
1288 goto out_free; 1312 goto err_free_netdev;
1289 } 1313 }
1290 1314
1291 if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) 1315 if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
@@ -1294,26 +1318,26 @@ static int altera_tse_probe(struct platform_device *pdev)
1294 else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) 1318 else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
1295 dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); 1319 dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
1296 else 1320 else
1297 goto out_free; 1321 goto err_free_netdev;
1298 1322
1299 /* MAC address space */ 1323 /* MAC address space */
1300 ret = request_and_map(pdev, "control_port", &control_port, 1324 ret = request_and_map(pdev, "control_port", &control_port,
1301 (void __iomem **)&priv->mac_dev); 1325 (void __iomem **)&priv->mac_dev);
1302 if (ret) 1326 if (ret)
1303 goto out_free; 1327 goto err_free_netdev;
1304 1328
1305 /* xSGDMA Rx Dispatcher address space */ 1329 /* xSGDMA Rx Dispatcher address space */
1306 ret = request_and_map(pdev, "rx_csr", &dma_res, 1330 ret = request_and_map(pdev, "rx_csr", &dma_res,
1307 &priv->rx_dma_csr); 1331 &priv->rx_dma_csr);
1308 if (ret) 1332 if (ret)
1309 goto out_free; 1333 goto err_free_netdev;
1310 1334
1311 1335
1312 /* xSGDMA Tx Dispatcher address space */ 1336 /* xSGDMA Tx Dispatcher address space */
1313 ret = request_and_map(pdev, "tx_csr", &dma_res, 1337 ret = request_and_map(pdev, "tx_csr", &dma_res,
1314 &priv->tx_dma_csr); 1338 &priv->tx_dma_csr);
1315 if (ret) 1339 if (ret)
1316 goto out_free; 1340 goto err_free_netdev;
1317 1341
1318 1342
1319 /* Rx IRQ */ 1343 /* Rx IRQ */
@@ -1321,7 +1345,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1321 if (priv->rx_irq == -ENXIO) { 1345 if (priv->rx_irq == -ENXIO) {
1322 dev_err(&pdev->dev, "cannot obtain Rx IRQ\n"); 1346 dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
1323 ret = -ENXIO; 1347 ret = -ENXIO;
1324 goto out_free; 1348 goto err_free_netdev;
1325 } 1349 }
1326 1350
1327 /* Tx IRQ */ 1351 /* Tx IRQ */
@@ -1329,7 +1353,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1329 if (priv->tx_irq == -ENXIO) { 1353 if (priv->tx_irq == -ENXIO) {
1330 dev_err(&pdev->dev, "cannot obtain Tx IRQ\n"); 1354 dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
1331 ret = -ENXIO; 1355 ret = -ENXIO;
1332 goto out_free; 1356 goto err_free_netdev;
1333 } 1357 }
1334 1358
1335 /* get FIFO depths from device tree */ 1359 /* get FIFO depths from device tree */
@@ -1337,14 +1361,14 @@ static int altera_tse_probe(struct platform_device *pdev)
1337 &priv->rx_fifo_depth)) { 1361 &priv->rx_fifo_depth)) {
1338 dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n"); 1362 dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
1339 ret = -ENXIO; 1363 ret = -ENXIO;
1340 goto out_free; 1364 goto err_free_netdev;
1341 } 1365 }
1342 1366
1343 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", 1367 if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1344 &priv->rx_fifo_depth)) { 1368 &priv->rx_fifo_depth)) {
1345 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); 1369 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1346 ret = -ENXIO; 1370 ret = -ENXIO;
1347 goto out_free; 1371 goto err_free_netdev;
1348 } 1372 }
1349 1373
1350 /* get hash filter settings for this instance */ 1374 /* get hash filter settings for this instance */
@@ -1352,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
1352 of_property_read_bool(pdev->dev.of_node, 1376 of_property_read_bool(pdev->dev.of_node,
1353 "altr,has-hash-multicast-filter"); 1377 "altr,has-hash-multicast-filter");
1354 1378
1379 /* Set hash filter to not set for now until the
1380 * multicast filter receive issue is debugged
1381 */
1382 priv->hash_filter = 0;
1383
1355 /* get supplemental address settings for this instance */ 1384 /* get supplemental address settings for this instance */
1356 priv->added_unicast = 1385 priv->added_unicast =
1357 of_property_read_bool(pdev->dev.of_node, 1386 of_property_read_bool(pdev->dev.of_node,
@@ -1393,7 +1422,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1393 ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) { 1422 ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
1394 dev_err(&pdev->dev, "invalid phy-addr specified %d\n", 1423 dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
1395 priv->phy_addr); 1424 priv->phy_addr);
1396 goto out_free; 1425 goto err_free_netdev;
1397 } 1426 }
1398 1427
1399 /* Create/attach to MDIO bus */ 1428 /* Create/attach to MDIO bus */
@@ -1401,7 +1430,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1401 atomic_add_return(1, &instance_count)); 1430 atomic_add_return(1, &instance_count));
1402 1431
1403 if (ret) 1432 if (ret)
1404 goto out_free; 1433 goto err_free_netdev;
1405 1434
1406 /* initialize netdev */ 1435 /* initialize netdev */
1407 ether_setup(ndev); 1436 ether_setup(ndev);
@@ -1438,7 +1467,7 @@ static int altera_tse_probe(struct platform_device *pdev)
1438 ret = register_netdev(ndev); 1467 ret = register_netdev(ndev);
1439 if (ret) { 1468 if (ret) {
1440 dev_err(&pdev->dev, "failed to register TSE net device\n"); 1469 dev_err(&pdev->dev, "failed to register TSE net device\n");
1441 goto out_free_mdio; 1470 goto err_register_netdev;
1442 } 1471 }
1443 1472
1444 platform_set_drvdata(pdev, ndev); 1473 platform_set_drvdata(pdev, ndev);
@@ -1455,13 +1484,16 @@ static int altera_tse_probe(struct platform_device *pdev)
1455 ret = init_phy(ndev); 1484 ret = init_phy(ndev);
1456 if (ret != 0) { 1485 if (ret != 0) {
1457 netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret); 1486 netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
1458 goto out_free_mdio; 1487 goto err_init_phy;
1459 } 1488 }
1460 return 0; 1489 return 0;
1461 1490
1462out_free_mdio: 1491err_init_phy:
1492 unregister_netdev(ndev);
1493err_register_netdev:
1494 netif_napi_del(&priv->napi);
1463 altera_tse_mdio_destroy(ndev); 1495 altera_tse_mdio_destroy(ndev);
1464out_free: 1496err_free_netdev:
1465 free_netdev(ndev); 1497 free_netdev(ndev);
1466 return ret; 1498 return ret;
1467} 1499}
@@ -1480,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
1480 return 0; 1512 return 0;
1481} 1513}
1482 1514
1483struct altera_dmaops altera_dtype_sgdma = { 1515static const struct altera_dmaops altera_dtype_sgdma = {
1484 .altera_dtype = ALTERA_DTYPE_SGDMA, 1516 .altera_dtype = ALTERA_DTYPE_SGDMA,
1485 .dmamask = 32, 1517 .dmamask = 32,
1486 .reset_dma = sgdma_reset, 1518 .reset_dma = sgdma_reset,
@@ -1496,9 +1528,10 @@ struct altera_dmaops altera_dtype_sgdma = {
1496 .get_rx_status = sgdma_rx_status, 1528 .get_rx_status = sgdma_rx_status,
1497 .init_dma = sgdma_initialize, 1529 .init_dma = sgdma_initialize,
1498 .uninit_dma = sgdma_uninitialize, 1530 .uninit_dma = sgdma_uninitialize,
1531 .start_rxdma = sgdma_start_rxdma,
1499}; 1532};
1500 1533
1501struct altera_dmaops altera_dtype_msgdma = { 1534static const struct altera_dmaops altera_dtype_msgdma = {
1502 .altera_dtype = ALTERA_DTYPE_MSGDMA, 1535 .altera_dtype = ALTERA_DTYPE_MSGDMA,
1503 .dmamask = 64, 1536 .dmamask = 64,
1504 .reset_dma = msgdma_reset, 1537 .reset_dma = msgdma_reset,
@@ -1514,6 +1547,7 @@ struct altera_dmaops altera_dtype_msgdma = {
1514 .get_rx_status = msgdma_rx_status, 1547 .get_rx_status = msgdma_rx_status,
1515 .init_dma = msgdma_initialize, 1548 .init_dma = msgdma_initialize,
1516 .uninit_dma = msgdma_uninitialize, 1549 .uninit_dma = msgdma_uninitialize,
1550 .start_rxdma = msgdma_start_rxdma,
1517}; 1551};
1518 1552
1519static struct of_device_id altera_tse_ids[] = { 1553static struct of_device_id altera_tse_ids[] = {
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
index 70fa13f486b2..d7eeb1713ad2 100644
--- a/drivers/net/ethernet/altera/altera_utils.c
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -17,28 +17,28 @@
17#include "altera_tse.h" 17#include "altera_tse.h"
18#include "altera_utils.h" 18#include "altera_utils.h"
19 19
20void tse_set_bit(void __iomem *ioaddr, u32 bit_mask) 20void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
21{ 21{
22 u32 value = ioread32(ioaddr); 22 u32 value = csrrd32(ioaddr, offs);
23 value |= bit_mask; 23 value |= bit_mask;
24 iowrite32(value, ioaddr); 24 csrwr32(value, ioaddr, offs);
25} 25}
26 26
27void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask) 27void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
28{ 28{
29 u32 value = ioread32(ioaddr); 29 u32 value = csrrd32(ioaddr, offs);
30 value &= ~bit_mask; 30 value &= ~bit_mask;
31 iowrite32(value, ioaddr); 31 csrwr32(value, ioaddr, offs);
32} 32}
33 33
34int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask) 34int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
35{ 35{
36 u32 value = ioread32(ioaddr); 36 u32 value = csrrd32(ioaddr, offs);
37 return (value & bit_mask) ? 1 : 0; 37 return (value & bit_mask) ? 1 : 0;
38} 38}
39 39
40int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask) 40int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
41{ 41{
42 u32 value = ioread32(ioaddr); 42 u32 value = csrrd32(ioaddr, offs);
43 return (value & bit_mask) ? 0 : 1; 43 return (value & bit_mask) ? 0 : 1;
44} 44}
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index ce1db36d3583..baf100ccf587 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -19,9 +19,9 @@
19#ifndef __ALTERA_UTILS_H__ 19#ifndef __ALTERA_UTILS_H__
20#define __ALTERA_UTILS_H__ 20#define __ALTERA_UTILS_H__
21 21
22void tse_set_bit(void __iomem *ioaddr, u32 bit_mask); 22void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
23void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask); 23void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
24int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask); 24int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
25int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask); 25int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
26 26
27#endif /* __ALTERA_UTILS_H__*/ 27#endif /* __ALTERA_UTILS_H__*/
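Note on the altera_tse hunks above: the conversion replaces direct struct-member MMIO, iowrite32(val, &mac->reg), with offset-based accesses through csrrd32()/csrwr32() and a tse_csroffs() offset helper, which is also why tse_set_bit()/tse_clear_bit() grow a size_t offset argument. The accessors themselves are not part of the hunks shown here; a minimal sketch of what they are assumed to look like (bodies are illustrative, not copied from the patch, and the register layout struct altera_tse_mac is assumed to remain available for offsetof()):

#include <linux/io.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Byte offset of a register within the TSE CSR block. */
#define tse_csroffs(a)	(offsetof(struct altera_tse_mac, a))

/* Read/write a 32-bit CSR at a byte offset from the mapped MAC base. */
static inline u32 csrrd32(void __iomem *mac, size_t offs)
{
	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);

	return readl(paddr);
}

static inline void csrwr32(u32 val, void __iomem *mac, size_t offs)
{
	void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);

	writel(val, paddr);
}

With these in place, a call such as tse_set_bit(priv->mac_dev, tse_csroffs(command_config), MAC_CMDCFG_PROMIS_EN) no longer depends on the C layout of the register struct at each call site.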
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index 928fac6dd10a..53f85bf71526 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -11,6 +11,7 @@
11#include <linux/dma-mapping.h> 11#include <linux/dma-mapping.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/phy.h> 13#include <linux/phy.h>
14#include <linux/clk.h>
14 15
15/* STATUS and ENABLE Register bit masks */ 16/* STATUS and ENABLE Register bit masks */
16#define TXINT_MASK (1<<0) /* Transmit interrupt */ 17#define TXINT_MASK (1<<0) /* Transmit interrupt */
@@ -131,6 +132,7 @@ struct arc_emac_priv {
131 struct mii_bus *bus; 132 struct mii_bus *bus;
132 133
133 void __iomem *regs; 134 void __iomem *regs;
135 struct clk *clk;
134 136
135 struct napi_struct napi; 137 struct napi_struct napi;
136 struct net_device_stats stats; 138 struct net_device_stats stats;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index eeecc29cf5b7..d647a7d115ac 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -574,6 +574,18 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
574 return NETDEV_TX_OK; 574 return NETDEV_TX_OK;
575} 575}
576 576
577static void arc_emac_set_address_internal(struct net_device *ndev)
578{
579 struct arc_emac_priv *priv = netdev_priv(ndev);
580 unsigned int addr_low, addr_hi;
581
582 addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
583 addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
584
585 arc_reg_set(priv, R_ADDRL, addr_low);
586 arc_reg_set(priv, R_ADDRH, addr_hi);
587}
588
577/** 589/**
578 * arc_emac_set_address - Set the MAC address for this device. 590 * arc_emac_set_address - Set the MAC address for this device.
579 * @ndev: Pointer to net_device structure. 591 * @ndev: Pointer to net_device structure.
@@ -587,9 +599,7 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
587 */ 599 */
588static int arc_emac_set_address(struct net_device *ndev, void *p) 600static int arc_emac_set_address(struct net_device *ndev, void *p)
589{ 601{
590 struct arc_emac_priv *priv = netdev_priv(ndev);
591 struct sockaddr *addr = p; 602 struct sockaddr *addr = p;
592 unsigned int addr_low, addr_hi;
593 603
594 if (netif_running(ndev)) 604 if (netif_running(ndev))
595 return -EBUSY; 605 return -EBUSY;
@@ -599,11 +609,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
599 609
600 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 610 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
601 611
602 addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]); 612 arc_emac_set_address_internal(ndev);
603 addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
604
605 arc_reg_set(priv, R_ADDRL, addr_low);
606 arc_reg_set(priv, R_ADDRH, addr_hi);
607 613
608 return 0; 614 return 0;
609} 615}
@@ -643,13 +649,6 @@ static int arc_emac_probe(struct platform_device *pdev)
643 return -ENODEV; 649 return -ENODEV;
644 } 650 }
645 651
646 /* Get CPU clock frequency from device tree */
647 if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
648 &clock_frequency)) {
649 dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
650 return -EINVAL;
651 }
652
653 /* Get IRQ from device tree */ 652 /* Get IRQ from device tree */
654 irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 653 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
655 if (!irq) { 654 if (!irq) {
@@ -677,17 +676,36 @@ static int arc_emac_probe(struct platform_device *pdev)
677 priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs); 676 priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
678 if (IS_ERR(priv->regs)) { 677 if (IS_ERR(priv->regs)) {
679 err = PTR_ERR(priv->regs); 678 err = PTR_ERR(priv->regs);
680 goto out; 679 goto out_netdev;
681 } 680 }
682 dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs); 681 dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);
683 682
683 priv->clk = of_clk_get(pdev->dev.of_node, 0);
684 if (IS_ERR(priv->clk)) {
685 /* Get CPU clock frequency from device tree */
686 if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
687 &clock_frequency)) {
688 dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
689 err = -EINVAL;
690 goto out_netdev;
691 }
692 } else {
693 err = clk_prepare_enable(priv->clk);
694 if (err) {
695 dev_err(&pdev->dev, "failed to enable clock\n");
696 goto out_clkget;
697 }
698
699 clock_frequency = clk_get_rate(priv->clk);
700 }
701
684 id = arc_reg_get(priv, R_ID); 702 id = arc_reg_get(priv, R_ID);
685 703
686 /* Check for EMAC revision 5 or 7, magic number */ 704 /* Check for EMAC revision 5 or 7, magic number */
687 if (!(id == 0x0005fd02 || id == 0x0007fd02)) { 705 if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
688 dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id); 706 dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
689 err = -ENODEV; 707 err = -ENODEV;
690 goto out; 708 goto out_clken;
691 } 709 }
692 dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id); 710 dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);
693 711
@@ -702,7 +720,7 @@ static int arc_emac_probe(struct platform_device *pdev)
702 ndev->name, ndev); 720 ndev->name, ndev);
703 if (err) { 721 if (err) {
704 dev_err(&pdev->dev, "could not allocate IRQ\n"); 722 dev_err(&pdev->dev, "could not allocate IRQ\n");
705 goto out; 723 goto out_clken;
706 } 724 }
707 725
708 /* Get MAC address from device tree */ 726 /* Get MAC address from device tree */
@@ -713,6 +731,7 @@ static int arc_emac_probe(struct platform_device *pdev)
713 else 731 else
714 eth_hw_addr_random(ndev); 732 eth_hw_addr_random(ndev);
715 733
734 arc_emac_set_address_internal(ndev);
716 dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr); 735 dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);
717 736
718 /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */ 737 /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
@@ -722,7 +741,7 @@ static int arc_emac_probe(struct platform_device *pdev)
722 if (!priv->rxbd) { 741 if (!priv->rxbd) {
723 dev_err(&pdev->dev, "failed to allocate data buffers\n"); 742 dev_err(&pdev->dev, "failed to allocate data buffers\n");
724 err = -ENOMEM; 743 err = -ENOMEM;
725 goto out; 744 goto out_clken;
726 } 745 }
727 746
728 priv->txbd = priv->rxbd + RX_BD_NUM; 747 priv->txbd = priv->rxbd + RX_BD_NUM;
@@ -734,7 +753,7 @@ static int arc_emac_probe(struct platform_device *pdev)
734 err = arc_mdio_probe(pdev, priv); 753 err = arc_mdio_probe(pdev, priv);
735 if (err) { 754 if (err) {
736 dev_err(&pdev->dev, "failed to probe MII bus\n"); 755 dev_err(&pdev->dev, "failed to probe MII bus\n");
737 goto out; 756 goto out_clken;
738 } 757 }
739 758
740 priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, 759 priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
@@ -742,7 +761,7 @@ static int arc_emac_probe(struct platform_device *pdev)
742 if (!priv->phy_dev) { 761 if (!priv->phy_dev) {
743 dev_err(&pdev->dev, "of_phy_connect() failed\n"); 762 dev_err(&pdev->dev, "of_phy_connect() failed\n");
744 err = -ENODEV; 763 err = -ENODEV;
745 goto out; 764 goto out_mdio;
746 } 765 }
747 766
748 dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n", 767 dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",
@@ -752,14 +771,25 @@ static int arc_emac_probe(struct platform_device *pdev)
752 771
753 err = register_netdev(ndev); 772 err = register_netdev(ndev);
754 if (err) { 773 if (err) {
755 netif_napi_del(&priv->napi);
756 dev_err(&pdev->dev, "failed to register network device\n"); 774 dev_err(&pdev->dev, "failed to register network device\n");
757 goto out; 775 goto out_netif_api;
758 } 776 }
759 777
760 return 0; 778 return 0;
761 779
762out: 780out_netif_api:
781 netif_napi_del(&priv->napi);
782 phy_disconnect(priv->phy_dev);
783 priv->phy_dev = NULL;
784out_mdio:
785 arc_mdio_remove(priv);
786out_clken:
787 if (!IS_ERR(priv->clk))
788 clk_disable_unprepare(priv->clk);
789out_clkget:
790 if (!IS_ERR(priv->clk))
791 clk_put(priv->clk);
792out_netdev:
763 free_netdev(ndev); 793 free_netdev(ndev);
764 return err; 794 return err;
765} 795}
@@ -774,6 +804,12 @@ static int arc_emac_remove(struct platform_device *pdev)
774 arc_mdio_remove(priv); 804 arc_mdio_remove(priv);
775 unregister_netdev(ndev); 805 unregister_netdev(ndev);
776 netif_napi_del(&priv->napi); 806 netif_napi_del(&priv->napi);
807
808 if (!IS_ERR(priv->clk)) {
809 clk_disable_unprepare(priv->clk);
810 clk_put(priv->clk);
811 }
812
777 free_netdev(ndev); 813 free_netdev(ndev);
778 814
779 return 0; 815 return 0;
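Note on the arc_emac hunks above: probe now prefers a clock provider (of_clk_get()) and falls back to the legacy "clock-frequency" property only when no clock is described, and the unwind and remove paths touch the clock only when IS_ERR(priv->clk) is false. A self-contained sketch of that acquire-or-fallback pattern; example_get_ref_clock() is a hypothetical helper, not part of the driver:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Fill *freq either from an enabled clock or from the "clock-frequency"
 * property; *clk stays an ERR_PTR in the fallback case so callers can keep
 * using IS_ERR(*clk) to decide whether teardown must undo the clock.
 */
static int example_get_ref_clock(struct platform_device *pdev,
				 struct clk **clk, u32 *freq)
{
	*clk = of_clk_get(pdev->dev.of_node, 0);
	if (IS_ERR(*clk))
		return of_property_read_u32(pdev->dev.of_node,
					    "clock-frequency", freq);

	if (clk_prepare_enable(*clk)) {
		clk_put(*clk);
		*clk = ERR_PTR(-EIO);
		return -EIO;
	}

	*freq = clk_get_rate(*clk);
	return 0;
}

Teardown then mirrors the acquisition, exactly as the remove() hunk above does: if (!IS_ERR(priv->clk)) { clk_disable_unprepare(priv->clk); clk_put(priv->clk); }.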
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a8efb18e42fa..0ab83708b6a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8627,6 +8627,7 @@ bnx2_remove_one(struct pci_dev *pdev)
8627 pci_disable_device(pdev); 8627 pci_disable_device(pdev);
8628} 8628}
8629 8629
8630#ifdef CONFIG_PM_SLEEP
8630static int 8631static int
8631bnx2_suspend(struct device *device) 8632bnx2_suspend(struct device *device)
8632{ 8633{
@@ -8665,7 +8666,6 @@ bnx2_resume(struct device *device)
8665 return 0; 8666 return 0;
8666} 8667}
8667 8668
8668#ifdef CONFIG_PM_SLEEP
8669static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume); 8669static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8670#define BNX2_PM_OPS (&bnx2_pm_ops) 8670#define BNX2_PM_OPS (&bnx2_pm_ops)
8671 8671
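Note on the bnx2.c hunk above: moving the #ifdef CONFIG_PM_SLEEP guard up so it also covers bnx2_suspend()/bnx2_resume() keeps those functions from being compiled (and warned about as defined-but-unused) when sleep support is disabled, since SIMPLE_DEV_PM_OPS() is the only thing that references them. The general shape of the pattern, with a hypothetical driver prefix "foo":

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *device)
{
	/* quiesce the device and save any volatile state */
	return 0;
}

static int foo_resume(struct device *device)
{
	/* restore state and bring the device back up */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */

The driver then points .driver.pm at FOO_PM_OPS unconditionally.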
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a78edaccceee..3b0d43154e67 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10051#define BCM_5710_UNDI_FW_MF_MAJOR (0x07) 10051#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10052#define BCM_5710_UNDI_FW_MF_MINOR (0x08) 10052#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10053#define BCM_5710_UNDI_FW_MF_VERS (0x05) 10053#define BCM_5710_UNDI_FW_MF_VERS (0x05)
10054#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4)) 10054#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4)) 10055#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
10056static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) 10056static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
10057{ 10057{
10058 u8 major, minor, version; 10058 u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10352 /* Reset should be performed after BRB is emptied */ 10352 /* Reset should be performed after BRB is emptied */
10353 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 10353 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10354 u32 timer_count = 1000; 10354 u32 timer_count = 1000;
10355 bool need_write = true;
10355 10356
10356 /* Close the MAC Rx to prevent BRB from filling up */ 10357 /* Close the MAC Rx to prevent BRB from filling up */
10357 bnx2x_prev_unload_close_mac(bp, &mac_vals); 10358 bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
10398 * cleaning methods - might be redundant but harmless. 10399 * cleaning methods - might be redundant but harmless.
10399 */ 10400 */
10400 if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { 10401 if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
10401 bnx2x_prev_unload_undi_mf(bp); 10402 if (need_write) {
10403 bnx2x_prev_unload_undi_mf(bp);
10404 need_write = false;
10405 }
10402 } else if (prev_undi) { 10406 } else if (prev_undi) {
10403 /* If UNDI resides in memory, 10407 /* If UNDI resides in memory,
10404 * manually increment it 10408 * manually increment it
@@ -13233,6 +13237,8 @@ static void __bnx2x_remove(struct pci_dev *pdev,
13233 iounmap(bp->doorbells); 13237 iounmap(bp->doorbells);
13234 13238
13235 bnx2x_release_firmware(bp); 13239 bnx2x_release_firmware(bp);
13240 } else {
13241 bnx2x_vf_pci_dealloc(bp);
13236 } 13242 }
13237 bnx2x_free_mem_bp(bp); 13243 bnx2x_free_mem_bp(bp);
13238 13244
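Note on the bnx2x_main.c hunks above: the UNDI MF macros are rebased on BAR_TSTRORM_INTMEM instead of hard-coded absolute addresses, and the new need_write flag ensures bnx2x_prev_unload_undi_mf() is issued at most once while the loop keeps polling for the BRB to drain. A generic, illustrative sketch of that one-shot-inside-a-polling-loop shape; the example_* helpers are hypothetical, not driver functions:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

bool example_block_is_empty(void);	/* hypothetical poll */
void example_apply_fixup(void);		/* must run at most once */

static int example_drain(void)
{
	bool need_write = true;
	int timer_count = 1000;

	while (timer_count--) {
		if (example_block_is_empty())
			return 0;

		if (need_write) {
			example_apply_fixup();
			need_write = false;
		}
		udelay(10);
	}
	return -EBUSY;
}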
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5c523b32db70..b8078d50261b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -427,7 +427,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
427 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && 427 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
428 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= 428 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
429 vf_vlan_rules_cnt(vf))) { 429 vf_vlan_rules_cnt(vf))) {
430 BNX2X_ERR("No credits for vlan\n"); 430 BNX2X_ERR("No credits for vlan [%d >= %d]\n",
431 atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
432 vf_vlan_rules_cnt(vf));
431 return -ENOMEM; 433 return -ENOMEM;
432 } 434 }
433 435
@@ -610,6 +612,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
610 } 612 }
611 613
612 /* add new mcasts */ 614 /* add new mcasts */
615 mcast.mcast_list_len = mc_num;
613 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); 616 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
614 if (rc) 617 if (rc)
615 BNX2X_ERR("Faled to add multicasts\n"); 618 BNX2X_ERR("Faled to add multicasts\n");
@@ -837,6 +840,29 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
837 return 0; 840 return 0;
838} 841}
839 842
843static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
844 struct bnx2x_virtf *vf,
845 int new)
846{
847 int num = vf_vlan_rules_cnt(vf);
848 int diff = new - num;
849 bool rc = true;
850
851 DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
852 vf->abs_vfid, new, num);
853
854 if (diff > 0)
855 rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
856 else if (diff < 0)
857 rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
858
859 if (rc)
860 vf_vlan_rules_cnt(vf) = new;
861 else
862 DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
863 vf->abs_vfid);
864}
865
840/* must be called after the number of PF queues and the number of VFs are 866/* must be called after the number of PF queues and the number of VFs are
841 * both known 867 * both known
842 */ 868 */
@@ -854,9 +880,11 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
854 resc->num_mac_filters = 1; 880 resc->num_mac_filters = 1;
855 881
856 /* divvy up vlan rules */ 882 /* divvy up vlan rules */
883 bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
857 vlan_count = bp->vlans_pool.check(&bp->vlans_pool); 884 vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
858 vlan_count = 1 << ilog2(vlan_count); 885 vlan_count = 1 << ilog2(vlan_count);
859 resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp); 886 bnx2x_iov_re_set_vlan_filters(bp, vf,
887 vlan_count / BNX2X_NR_VIRTFN(bp));
860 888
861 /* no real limitation */ 889 /* no real limitation */
862 resc->num_mc_filters = 0; 890 resc->num_mc_filters = 0;
@@ -1478,10 +1506,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
1478 bnx2x_iov_static_resc(bp, vf); 1506 bnx2x_iov_static_resc(bp, vf);
1479 1507
1480 /* queues are initialized during VF-ACQUIRE */ 1508 /* queues are initialized during VF-ACQUIRE */
1481
1482 /* reserve the vf vlan credit */
1483 bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
1484
1485 vf->filter_state = 0; 1509 vf->filter_state = 0;
1486 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 1510 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1487 1511
@@ -1912,11 +1936,12 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
1912 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1936 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1913 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1937 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
1914 1938
1939 /* Save a vlan filter for the Hypervisor */
1915 return ((req_resc->num_rxqs <= rxq_cnt) && 1940 return ((req_resc->num_rxqs <= rxq_cnt) &&
1916 (req_resc->num_txqs <= txq_cnt) && 1941 (req_resc->num_txqs <= txq_cnt) &&
1917 (req_resc->num_sbs <= vf_sb_count(vf)) && 1942 (req_resc->num_sbs <= vf_sb_count(vf)) &&
1918 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && 1943 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
1919 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); 1944 (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
1920} 1945}
1921 1946
1922/* CORE VF API */ 1947/* CORE VF API */
@@ -1972,14 +1997,14 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
1972 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 1997 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
1973 if (resc->num_mac_filters) 1998 if (resc->num_mac_filters)
1974 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 1999 vf_mac_rules_cnt(vf) = resc->num_mac_filters;
1975 if (resc->num_vlan_filters) 2000 /* Add an additional vlan filter credit for the hypervisor */
1976 vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; 2001 bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
1977 2002
1978 DP(BNX2X_MSG_IOV, 2003 DP(BNX2X_MSG_IOV,
1979 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 2004 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
1980 vf_sb_count(vf), vf_rxq_count(vf), 2005 vf_sb_count(vf), vf_rxq_count(vf),
1981 vf_txq_count(vf), vf_mac_rules_cnt(vf), 2006 vf_txq_count(vf), vf_mac_rules_cnt(vf),
1982 vf_vlan_rules_cnt(vf)); 2007 vf_vlan_rules_visible_cnt(vf));
1983 2008
1984 /* Initialize the queues */ 2009 /* Initialize the queues */
1985 if (!vf->vfqs) { 2010 if (!vf->vfqs) {
@@ -2670,7 +2695,7 @@ out:
2670 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2695 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2671 } 2696 }
2672 2697
2673 return 0; 2698 return rc;
2674} 2699}
2675 2700
2676int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 2701int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
@@ -2896,6 +2921,14 @@ void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
2896 return bp->regview + PXP_VF_ADDR_DB_START; 2921 return bp->regview + PXP_VF_ADDR_DB_START;
2897} 2922}
2898 2923
2924void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
2925{
2926 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
2927 sizeof(struct bnx2x_vf_mbx_msg));
2928 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
2929 sizeof(union pf_vf_bulletin));
2930}
2931
2899int bnx2x_vf_pci_alloc(struct bnx2x *bp) 2932int bnx2x_vf_pci_alloc(struct bnx2x *bp)
2900{ 2933{
2901 mutex_init(&bp->vf2pf_mutex); 2934 mutex_init(&bp->vf2pf_mutex);
@@ -2915,10 +2948,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
2915 return 0; 2948 return 0;
2916 2949
2917alloc_mem_err: 2950alloc_mem_err:
2918 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 2951 bnx2x_vf_pci_dealloc(bp);
2919 sizeof(struct bnx2x_vf_mbx_msg));
2920 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
2921 sizeof(union pf_vf_bulletin));
2922 return -ENOMEM; 2952 return -ENOMEM;
2923} 2953}
2924 2954
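Note on the bnx2x_sriov.c hunks above: the VF mailbox and bulletin DMA buffers now have a single teardown helper, bnx2x_vf_pci_dealloc(), shared by the allocation error path and by __bnx2x_remove() for VFs, instead of open-coding the frees in one place only. A self-contained sketch of that alloc/dealloc pairing using dma_alloc_coherent() directly; the struct, field and size names below are made up for the example:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define EXAMPLE_MBOX_SIZE	4096
#define EXAMPLE_BULLETIN_SIZE	1024

struct example_vf_channel {
	struct device *dev;
	void *mbox;
	dma_addr_t mbox_mapping;
	void *bulletin;
	dma_addr_t bulletin_mapping;
};

/* Safe to call on a partially-allocated channel and from remove(). */
static void example_vf_channel_dealloc(struct example_vf_channel *c)
{
	if (c->mbox)
		dma_free_coherent(c->dev, EXAMPLE_MBOX_SIZE,
				  c->mbox, c->mbox_mapping);
	if (c->bulletin)
		dma_free_coherent(c->dev, EXAMPLE_BULLETIN_SIZE,
				  c->bulletin, c->bulletin_mapping);
	c->mbox = NULL;
	c->bulletin = NULL;
}

static int example_vf_channel_alloc(struct example_vf_channel *c)
{
	c->mbox = dma_alloc_coherent(c->dev, EXAMPLE_MBOX_SIZE,
				     &c->mbox_mapping, GFP_KERNEL);
	c->bulletin = dma_alloc_coherent(c->dev, EXAMPLE_BULLETIN_SIZE,
					 &c->bulletin_mapping, GFP_KERNEL);
	if (!c->mbox || !c->bulletin) {
		example_vf_channel_dealloc(c);	/* unwinds whatever succeeded */
		return -ENOMEM;
	}
	return 0;
}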
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8bf764570eef..6929adba52f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -159,6 +159,8 @@ struct bnx2x_virtf {
159#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) 159#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
160#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) 160#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
161#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) 161#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
162 /* Hide a single vlan filter credit for the hypervisor */
163#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1)
162 164
163 u8 sb_count; /* actual number of SBs */ 165 u8 sb_count; /* actual number of SBs */
164 u8 igu_base_id; /* base igu status block id */ 166 u8 igu_base_id; /* base igu status block id */
@@ -502,6 +504,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
502enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 504enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
503void bnx2x_timer_sriov(struct bnx2x *bp); 505void bnx2x_timer_sriov(struct bnx2x *bp);
504void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); 506void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
507void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
505int bnx2x_vf_pci_alloc(struct bnx2x *bp); 508int bnx2x_vf_pci_alloc(struct bnx2x *bp);
506int bnx2x_enable_sriov(struct bnx2x *bp); 509int bnx2x_enable_sriov(struct bnx2x *bp);
507void bnx2x_disable_sriov(struct bnx2x *bp); 510void bnx2x_disable_sriov(struct bnx2x *bp);
@@ -568,6 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
568 return NULL; 571 return NULL;
569} 572}
570 573
 574static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
571static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } 575static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
572static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} 576static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
573static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } 577static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
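Note on the bnx2x_sriov.h hunk above: vf_vlan_rules_visible_cnt() deliberately under-reports the allocated VLAN filter credits by one, so a slot is always left for a hypervisor-configured VLAN (matching the "+ 1" added in bnx2x_vf_acquire() earlier in this diff). A tiny illustration of the bookkeeping; the function and the numbers are made up:

/* One credit is reserved for the VLAN the hypervisor may force via
 * ndo_set_vf_vlan, so the VF only ever sees allocated - 1.
 */
static inline int example_visible_vlan_credits(int allocated)
{
	return allocated - 1;
}

/* If the PF books 4 + 1 = 5 filters internally, vf_vlan_rules_cnt() is 5
 * for pool accounting while vf_vlan_rules_visible_cnt() reports 4 back to
 * the VF in the acquire response.
 */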
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0622884596b2..784c7155b98a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
747out: 747out:
748 bnx2x_vfpf_finalize(bp, &req->first_tlv); 748 bnx2x_vfpf_finalize(bp, &req->first_tlv);
749 749
750 return 0; 750 return rc;
751} 751}
752 752
753/* request pf to config rss table for vf queues*/ 753/* request pf to config rss table for vf queues*/
@@ -1163,7 +1163,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
1163 bnx2x_vf_max_queue_cnt(bp, vf); 1163 bnx2x_vf_max_queue_cnt(bp, vf);
1164 resc->num_sbs = vf_sb_count(vf); 1164 resc->num_sbs = vf_sb_count(vf);
1165 resc->num_mac_filters = vf_mac_rules_cnt(vf); 1165 resc->num_mac_filters = vf_mac_rules_cnt(vf);
1166 resc->num_vlan_filters = vf_vlan_rules_cnt(vf); 1166 resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
1167 resc->num_mc_filters = 0; 1167 resc->num_mc_filters = 0;
1168 1168
1169 if (status == PFVF_STATUS_SUCCESS) { 1169 if (status == PFVF_STATUS_SUCCESS) {
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b9f7022f4e81..e5d95c5ce1ad 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12286,7 +12286,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
12286 if (tg3_flag(tp, MAX_RXPEND_64) && 12286 if (tg3_flag(tp, MAX_RXPEND_64) &&
12287 tp->rx_pending > 63) 12287 tp->rx_pending > 63)
12288 tp->rx_pending = 63; 12288 tp->rx_pending = 63;
12289 tp->rx_jumbo_pending = ering->rx_jumbo_pending; 12289
12290 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12291 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12290 12292
12291 for (i = 0; i < tp->irq_max; i++) 12293 for (i = 0; i < tp->irq_max; i++)
12292 tp->napi[i].tx_pending = ering->tx_pending; 12294 tp->napi[i].tx_pending = ering->tx_pending;
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 751d5c7b312d..9e089d24466e 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -4,7 +4,7 @@
4 4
5config NET_CADENCE 5config NET_CADENCE
6 bool "Cadence devices" 6 bool "Cadence devices"
7 depends on HAS_IOMEM 7 depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST)
8 default y 8 default y
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y. 10 If you have a network (Ethernet) card belonging to this class, say Y.
@@ -22,7 +22,7 @@ if NET_CADENCE
22 22
23config ARM_AT91_ETHER 23config ARM_AT91_ETHER
24 tristate "AT91RM9200 Ethernet support" 24 tristate "AT91RM9200 Ethernet support"
25 depends on HAS_DMA 25 depends on HAS_DMA && (ARCH_AT91RM9200 || COMPILE_TEST)
26 select MACB 26 select MACB
27 ---help--- 27 ---help---
28 If you wish to compile a kernel for the AT91RM9200 and enable 28 If you wish to compile a kernel for the AT91RM9200 and enable
@@ -30,7 +30,7 @@ config ARM_AT91_ETHER
30 30
31config MACB 31config MACB
32 tristate "Cadence MACB/GEM support" 32 tristate "Cadence MACB/GEM support"
33 depends on HAS_DMA 33 depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST)
34 select PHYLIB 34 select PHYLIB
35 ---help--- 35 ---help---
36 The Cadence MACB ethernet interface is found on many Atmel AT32 and 36 The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ca97005e24b4..e9daa072ebb4 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -599,25 +599,16 @@ static void gem_rx_refill(struct macb *bp)
599{ 599{
600 unsigned int entry; 600 unsigned int entry;
601 struct sk_buff *skb; 601 struct sk_buff *skb;
602 struct macb_dma_desc *desc;
603 dma_addr_t paddr; 602 dma_addr_t paddr;
604 603
605 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { 604 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
606 u32 addr, ctrl;
607
608 entry = macb_rx_ring_wrap(bp->rx_prepared_head); 605 entry = macb_rx_ring_wrap(bp->rx_prepared_head);
609 desc = &bp->rx_ring[entry];
610 606
611 /* Make hw descriptor updates visible to CPU */ 607 /* Make hw descriptor updates visible to CPU */
612 rmb(); 608 rmb();
613 609
614 addr = desc->addr;
615 ctrl = desc->ctrl;
616 bp->rx_prepared_head++; 610 bp->rx_prepared_head++;
617 611
618 if ((addr & MACB_BIT(RX_USED)))
619 continue;
620
621 if (bp->rx_skbuff[entry] == NULL) { 612 if (bp->rx_skbuff[entry] == NULL) {
622 /* allocate sk_buff for this free entry in ring */ 613 /* allocate sk_buff for this free entry in ring */
623 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); 614 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
@@ -698,7 +689,6 @@ static int gem_rx(struct macb *bp, int budget)
698 if (!(addr & MACB_BIT(RX_USED))) 689 if (!(addr & MACB_BIT(RX_USED)))
699 break; 690 break;
700 691
701 desc->addr &= ~MACB_BIT(RX_USED);
702 bp->rx_tail++; 692 bp->rx_tail++;
703 count++; 693 count++;
704 694
@@ -891,16 +881,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
891 if (work_done < budget) { 881 if (work_done < budget) {
892 napi_complete(napi); 882 napi_complete(napi);
893 883
894 /*
895 * We've done what we can to clean the buffers. Make sure we
896 * get notified when new packets arrive.
897 */
898 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
899
900 /* Packets received while interrupts were disabled */ 884 /* Packets received while interrupts were disabled */
901 status = macb_readl(bp, RSR); 885 status = macb_readl(bp, RSR);
902 if (unlikely(status)) 886 if (status) {
887 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
888 macb_writel(bp, ISR, MACB_BIT(RCOMP));
903 napi_reschedule(napi); 889 napi_reschedule(napi);
890 } else {
891 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
892 }
904 } 893 }
905 894
906 /* TODO: Handle errors */ 895 /* TODO: Handle errors */
@@ -951,6 +940,10 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
951 if (unlikely(status & (MACB_TX_ERR_FLAGS))) { 940 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
952 macb_writel(bp, IDR, MACB_TX_INT_FLAGS); 941 macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
953 schedule_work(&bp->tx_error_task); 942 schedule_work(&bp->tx_error_task);
943
944 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
945 macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
946
954 break; 947 break;
955 } 948 }
956 949
@@ -968,6 +961,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
968 bp->hw_stats.gem.rx_overruns++; 961 bp->hw_stats.gem.rx_overruns++;
969 else 962 else
970 bp->hw_stats.macb.rx_overruns++; 963 bp->hw_stats.macb.rx_overruns++;
964
965 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
966 macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
971 } 967 }
972 968
973 if (status & MACB_BIT(HRESP)) { 969 if (status & MACB_BIT(HRESP)) {
@@ -977,6 +973,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
977 * (work queue?) 973 * (work queue?)
978 */ 974 */
979 netdev_err(dev, "DMA bus error: HRESP not OK\n"); 975 netdev_err(dev, "DMA bus error: HRESP not OK\n");
976
977 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
978 macb_writel(bp, ISR, MACB_BIT(HRESP));
980 } 979 }
981 980
982 status = macb_readl(bp, ISR); 981 status = macb_readl(bp, ISR);
@@ -1113,7 +1112,7 @@ static void gem_free_rx_buffers(struct macb *bp)
1113 1112
1114 desc = &bp->rx_ring[i]; 1113 desc = &bp->rx_ring[i];
1115 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 1114 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1116 dma_unmap_single(&bp->pdev->dev, addr, skb->len, 1115 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1117 DMA_FROM_DEVICE); 1116 DMA_FROM_DEVICE);
1118 dev_kfree_skb_any(skb); 1117 dev_kfree_skb_any(skb);
1119 skb = NULL; 1118 skb = NULL;
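Note on the macb.c hunks above: on GEM variants flagged MACB_CAPS_ISR_CLEAR_ON_WRITE the interrupt status register is write-one-to-clear rather than clear-on-read, so every handled condition (RCOMP, TX errors, ISR_ROVR, HRESP) must be written back to ISR, and the NAPI poll path now re-reads RSR before re-enabling RX interrupts so frames that arrived while interrupts were masked reschedule polling instead of being lost. A short sketch of the acknowledgement this implies; the helper name is illustrative, while struct macb, macb_writel() and the capability flag are the driver's own:

#include "macb.h"

/* Acknowledge the bits just handled, but only on controllers where ISR is
 * write-one-to-clear; on clear-on-read controllers the read at the top of
 * the interrupt handler already cleared them.
 */
static void example_macb_ack_irq(struct macb *bp, u32 handled)
{
	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		macb_writel(bp, ISR, handled);
}

Called, for instance, as example_macb_ack_irq(bp, MACB_BIT(RCOMP)) right after scheduling NAPI, or with MACB_TX_ERR_FLAGS after queueing the TX error work, mirroring the hunks above.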
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index d40c994a4f6a..570222c33410 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -67,13 +67,13 @@ config CHELSIO_T3
67 will be called cxgb3. 67 will be called cxgb3.
68 68
69config CHELSIO_T4 69config CHELSIO_T4
70 tristate "Chelsio Communications T4 Ethernet support" 70 tristate "Chelsio Communications T4/T5 Ethernet support"
71 depends on PCI 71 depends on PCI
72 select FW_LOADER 72 select FW_LOADER
73 select MDIO 73 select MDIO
74 ---help--- 74 ---help---
75 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet 75 This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
76 adapters. 76 adapter and T5 based 40Gb Ethernet adapter.
77 77
78 For general information about Chelsio and our products, visit 78 For general information about Chelsio and our products, visit
79 our website at <http://www.chelsio.com>. 79 our website at <http://www.chelsio.com>.
@@ -87,11 +87,12 @@ config CHELSIO_T4
87 will be called cxgb4. 87 will be called cxgb4.
88 88
89config CHELSIO_T4VF 89config CHELSIO_T4VF
90 tristate "Chelsio Communications T4 Virtual Function Ethernet support" 90 tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
91 depends on PCI 91 depends on PCI
92 ---help--- 92 ---help---
93 This driver supports Chelsio T4-based gigabit and 10Gb Ethernet 93 This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
94 adapters with PCI-E SR-IOV Virtual Functions. 94 adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual
95 Functions.
95 96
96 For general information about Chelsio and our products, visit 97 For general information about Chelsio and our products, visit
97 our website at <http://www.chelsio.com>. 98 our website at <http://www.chelsio.com>.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6fe58913403a..24e16e3301e0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5870,6 +5870,8 @@ static void print_port_info(const struct net_device *dev)
5870 spd = " 2.5 GT/s"; 5870 spd = " 2.5 GT/s";
5871 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) 5871 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5872 spd = " 5 GT/s"; 5872 spd = " 5 GT/s";
5873 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
5874 spd = " 8 GT/s";
5873 5875
5874 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) 5876 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5875 bufp += sprintf(bufp, "100/"); 5877 bufp += sprintf(bufp, "100/");
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 81e8402a74b4..8a96572fdde0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -154,7 +154,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
154 req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync)); 154 req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
155 req->l2t_idx = htons(e->idx); 155 req->l2t_idx = htons(e->idx);
156 req->vlan = htons(e->vlan); 156 req->vlan = htons(e->vlan);
157 if (e->neigh) 157 if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
158 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac)); 158 memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
159 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); 159 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
160 160
@@ -394,6 +394,8 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
394 if (e) { 394 if (e) {
395 spin_lock(&e->lock); /* avoid race with t4_l2t_free */ 395 spin_lock(&e->lock); /* avoid race with t4_l2t_free */
396 e->state = L2T_STATE_RESOLVING; 396 e->state = L2T_STATE_RESOLVING;
397 if (neigh->dev->flags & IFF_LOOPBACK)
398 memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
397 memcpy(e->addr, addr, addr_len); 399 memcpy(e->addr, addr, addr_len);
398 e->ifindex = ifidx; 400 e->ifindex = ifidx;
399 e->hash = hash; 401 e->hash = hash;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fb2fe65903c2..bba67681aeaa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -682,7 +682,7 @@ enum {
682 SF_RD_ID = 0x9f, /* read ID */ 682 SF_RD_ID = 0x9f, /* read ID */
683 SF_ERASE_SECTOR = 0xd8, /* erase sector */ 683 SF_ERASE_SECTOR = 0xd8, /* erase sector */
684 684
685 FW_MAX_SIZE = 512 * 1024, 685 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
686}; 686};
687 687
688/** 688/**
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644
index 000000000000..4884205e56ee
--- /dev/null
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -0,0 +1,706 @@
1 /*
2 * drivers/net/ethernet/beckhoff/ec_bhf.c
3 *
4 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
 17/* This is a driver for the EtherCAT master module present on the CCAT FPGA.
 18 * These can be found on Beckhoff CX50xx industrial PCs.
19 */
20
21#if 0
22#define DEBUG
23#endif
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/ip.h>
33#include <linux/skbuff.h>
34#include <linux/hrtimer.h>
35#include <linux/interrupt.h>
36#include <linux/stat.h>
37
38#define TIMER_INTERVAL_NSEC 20000
39
40#define INFO_BLOCK_SIZE 0x10
41#define INFO_BLOCK_TYPE 0x0
42#define INFO_BLOCK_REV 0x2
43#define INFO_BLOCK_BLK_CNT 0x4
44#define INFO_BLOCK_TX_CHAN 0x4
45#define INFO_BLOCK_RX_CHAN 0x5
46#define INFO_BLOCK_OFFSET 0x8
47
48#define EC_MII_OFFSET 0x4
49#define EC_FIFO_OFFSET 0x8
50#define EC_MAC_OFFSET 0xc
51
52#define MAC_FRAME_ERR_CNT 0x0
53#define MAC_RX_ERR_CNT 0x1
54#define MAC_CRC_ERR_CNT 0x2
55#define MAC_LNK_LST_ERR_CNT 0x3
56#define MAC_TX_FRAME_CNT 0x10
57#define MAC_RX_FRAME_CNT 0x14
58#define MAC_TX_FIFO_LVL 0x20
59#define MAC_DROPPED_FRMS 0x28
60#define MAC_CONNECTED_CCAT_FLAG 0x78
61
62#define MII_MAC_ADDR 0x8
63#define MII_MAC_FILT_FLAG 0xe
64#define MII_LINK_STATUS 0xf
65
66#define FIFO_TX_REG 0x0
67#define FIFO_TX_RESET 0x8
68#define FIFO_RX_REG 0x10
69#define FIFO_RX_ADDR_VALID (1u << 31)
70#define FIFO_RX_RESET 0x18
71
72#define DMA_CHAN_OFFSET 0x1000
73#define DMA_CHAN_SIZE 0x8
74
75#define DMA_WINDOW_SIZE_MASK 0xfffffffc
76
77static struct pci_device_id ids[] = {
78 { PCI_DEVICE(0x15ec, 0x5000), },
79 { 0, }
80};
81MODULE_DEVICE_TABLE(pci, ids);
82
83struct rx_header {
84#define RXHDR_NEXT_ADDR_MASK 0xffffffu
85#define RXHDR_NEXT_VALID (1u << 31)
86 __le32 next;
87#define RXHDR_NEXT_RECV_FLAG 0x1
88 __le32 recv;
89#define RXHDR_LEN_MASK 0xfffu
90 __le16 len;
91 __le16 port;
92 __le32 reserved;
93 u8 timestamp[8];
94} __packed;
95
96#define PKT_PAYLOAD_SIZE 0x7e8
97struct rx_desc {
98 struct rx_header header;
99 u8 data[PKT_PAYLOAD_SIZE];
100} __packed;
101
102struct tx_header {
103 __le16 len;
104#define TX_HDR_PORT_0 0x1
105#define TX_HDR_PORT_1 0x2
106 u8 port;
107 u8 ts_enable;
108#define TX_HDR_SENT 0x1
109 __le32 sent;
110 u8 timestamp[8];
111} __packed;
112
113struct tx_desc {
114 struct tx_header header;
115 u8 data[PKT_PAYLOAD_SIZE];
116} __packed;
117
118#define FIFO_SIZE 64
119
120static long polling_frequency = TIMER_INTERVAL_NSEC;
121
122struct bhf_dma {
123 u8 *buf;
124 size_t len;
125 dma_addr_t buf_phys;
126
127 u8 *alloc;
128 size_t alloc_len;
129 dma_addr_t alloc_phys;
130};
131
132struct ec_bhf_priv {
133 struct net_device *net_dev;
134
135 struct pci_dev *dev;
136
137 void * __iomem io;
138 void * __iomem dma_io;
139
140 struct hrtimer hrtimer;
141
142 int tx_dma_chan;
143 int rx_dma_chan;
144 void * __iomem ec_io;
145 void * __iomem fifo_io;
146 void * __iomem mii_io;
147 void * __iomem mac_io;
148
149 struct bhf_dma rx_buf;
150 struct rx_desc *rx_descs;
151 int rx_dnext;
152 int rx_dcount;
153
154 struct bhf_dma tx_buf;
155 struct tx_desc *tx_descs;
156 int tx_dcount;
157 int tx_dnext;
158
159 u64 stat_rx_bytes;
160 u64 stat_tx_bytes;
161};
162
163#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
164
165#define ETHERCAT_MASTER_ID 0x14
166
167static void ec_bhf_print_status(struct ec_bhf_priv *priv)
168{
169 struct device *dev = PRIV_TO_DEV(priv);
170
171 dev_dbg(dev, "Frame error counter: %d\n",
172 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
173 dev_dbg(dev, "RX error counter: %d\n",
174 ioread8(priv->mac_io + MAC_RX_ERR_CNT));
175 dev_dbg(dev, "CRC error counter: %d\n",
176 ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
177 dev_dbg(dev, "TX frame counter: %d\n",
178 ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
179 dev_dbg(dev, "RX frame counter: %d\n",
180 ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
181 dev_dbg(dev, "TX fifo level: %d\n",
182 ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
183 dev_dbg(dev, "Dropped frames: %d\n",
184 ioread8(priv->mac_io + MAC_DROPPED_FRMS));
185 dev_dbg(dev, "Connected with CCAT slot: %d\n",
186 ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
187 dev_dbg(dev, "Link status: %d\n",
188 ioread8(priv->mii_io + MII_LINK_STATUS));
189}
190
191static void ec_bhf_reset(struct ec_bhf_priv *priv)
192{
193 iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
194 iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
195 iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
196 iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
197 iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
198 iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
199 iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
200
201 iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
202 iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
203
204 iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
205}
206
207static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
208{
209 u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
210 u32 addr = (u8 *)desc - priv->tx_buf.buf;
211
212 iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
213
214 dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
215}
216
217static int ec_bhf_desc_sent(struct tx_desc *desc)
218{
219 return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
220}
221
222static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
223{
224 if (unlikely(netif_queue_stopped(priv->net_dev))) {
225 /* Make sure that we perceive changes to tx_dnext. */
226 smp_rmb();
227
228 if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
229 netif_wake_queue(priv->net_dev);
230 }
231}
232
233static int ec_bhf_pkt_received(struct rx_desc *desc)
234{
235 return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
236}
237
238static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
239{
240 iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
241 priv->fifo_io + FIFO_RX_REG);
242}
243
244static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
245{
246 struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
247 struct device *dev = PRIV_TO_DEV(priv);
248
249 while (ec_bhf_pkt_received(desc)) {
250 int pkt_size = (le16_to_cpu(desc->header.len) &
251 RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
252 u8 *data = desc->data;
253 struct sk_buff *skb;
254
255 skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
256 dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
257
258 if (skb) {
259 memcpy(skb_put(skb, pkt_size), data, pkt_size);
260 skb->protocol = eth_type_trans(skb, priv->net_dev);
261 dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
262
263 priv->stat_rx_bytes += pkt_size;
264
265 netif_rx(skb);
266 } else {
267 dev_err_ratelimited(dev,
268 "Couldn't allocate a skb_buff for a packet of size %u\n",
269 pkt_size);
270 }
271
272 desc->header.recv = 0;
273
274 ec_bhf_add_rx_desc(priv, desc);
275
276 priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
277 desc = &priv->rx_descs[priv->rx_dnext];
278 }
279
280}
281
282static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
283{
284 struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
285 hrtimer);
286 ec_bhf_process_rx(priv);
287 ec_bhf_process_tx(priv);
288
289 if (!netif_running(priv->net_dev))
290 return HRTIMER_NORESTART;
291
292 hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
293 return HRTIMER_RESTART;
294}
295
296static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
297{
298 struct device *dev = PRIV_TO_DEV(priv);
299 unsigned block_count, i;
300 void * __iomem ec_info;
301
302 dev_dbg(dev, "Info block:\n");
303 dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
304 dev_dbg(dev, "Revision of function: %x\n",
305 (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
306
307 block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
308 dev_dbg(dev, "Number of function blocks: %x\n", block_count);
309
310 for (i = 0; i < block_count; i++) {
311 u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
312 INFO_BLOCK_TYPE);
313 if (type == ETHERCAT_MASTER_ID)
314 break;
315 }
316 if (i == block_count) {
317 dev_err(dev, "EtherCAT master with DMA block not found\n");
318 return -ENODEV;
319 }
320 dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
321
322 ec_info = priv->io + i * INFO_BLOCK_SIZE;
323 dev_dbg(dev, "EtherCAT master revision: %d\n",
324 ioread16(ec_info + INFO_BLOCK_REV));
325
326 priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
327 dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
328 priv->tx_dma_chan);
329
330 priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
331 dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
332 priv->rx_dma_chan);
333
334 priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
335 priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
336 priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
337 priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
338
339 dev_dbg(dev,
340 "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
341 priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
342
343 return 0;
344}
345
346static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
347 struct net_device *net_dev)
348{
349 struct ec_bhf_priv *priv = netdev_priv(net_dev);
350 struct tx_desc *desc;
351 unsigned len;
352
353 dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
354
355 desc = &priv->tx_descs[priv->tx_dnext];
356
357 skb_copy_and_csum_dev(skb, desc->data);
358 len = skb->len;
359
360 memset(&desc->header, 0, sizeof(desc->header));
361 desc->header.len = cpu_to_le16(len);
362 desc->header.port = TX_HDR_PORT_0;
363
364 ec_bhf_send_packet(priv, desc);
365
366 priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
367
368 if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
 369		/* Make sure that updates to tx_dnext are perceived
 370		 * by the timer routine.
 371		 */
372 smp_wmb();
373
374 netif_stop_queue(net_dev);
375
376 dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
377 ec_bhf_print_status(priv);
378 }
379
380 priv->stat_tx_bytes += len;
381
382 dev_kfree_skb(skb);
383
384 return NETDEV_TX_OK;
385}
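
A side note on the barrier pairing above: the smp_wmb() in ec_bhf_start_xmit() pairs with the smp_rmb() in ec_bhf_process_tx(), so the advanced tx_dnext is visible to the timer routine before it observes the stopped queue and decides whether to wake it. Below is a minimal, standalone userspace C11 sketch of the same ordering; it is not kernel code and not part of the patch, and all names in it are illustrative stand-ins for the driver's ring index and queue state.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int tx_dnext;        /* ring index published by the producer */
static atomic_bool queue_stopped;  /* stands in for the netif queue state  */

static void producer_xmit(void)
{
	atomic_store_explicit(&tx_dnext, 1, memory_order_relaxed);
	/* like smp_wmb(): publish the index before stopping the queue */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&queue_stopped, true, memory_order_relaxed);
}

static void consumer_timer(void)
{
	if (atomic_load_explicit(&queue_stopped, memory_order_relaxed)) {
		/* like smp_rmb(): the published index is now guaranteed visible */
		atomic_thread_fence(memory_order_acquire);
		printf("waking queue, tx_dnext=%d\n",
		       atomic_load_explicit(&tx_dnext, memory_order_relaxed));
	}
}

int main(void)
{
	producer_xmit();
	consumer_timer();
	return 0;
}

In the driver the stop/wake side goes through netif_stop_queue()/netif_wake_queue() rather than a flag, but the ordering argument is the same.
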
386
387static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
388 struct bhf_dma *buf,
389 int channel,
390 int size)
391{
392 int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
393 struct device *dev = PRIV_TO_DEV(priv);
394 u32 mask;
395
396 iowrite32(0xffffffff, priv->dma_io + offset);
397
398 mask = ioread32(priv->dma_io + offset);
399 mask &= DMA_WINDOW_SIZE_MASK;
400 dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
401
402 /* We want to allocate a chunk of memory that is:
403 * - aligned to the mask we just read
 404	 * - at most 2^mask bytes in size
 405	 * To ensure that, we allocate a buffer of
 406	 * 2 * 2^mask bytes.
407 */
408 buf->len = min_t(int, ~mask + 1, size);
409 buf->alloc_len = 2 * buf->len;
410
411 dev_dbg(dev, "Allocating %d bytes for channel %d",
412 (int)buf->alloc_len, channel);
413 buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
414 GFP_KERNEL);
415 if (buf->alloc == NULL) {
416 dev_info(dev, "Failed to allocate buffer\n");
417 return -ENOMEM;
418 }
419
420 buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
421 buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
422
423 iowrite32(0, priv->dma_io + offset + 4);
424 iowrite32(buf->buf_phys, priv->dma_io + offset);
425 dev_dbg(dev, "Buffer: %x and read from dev: %x",
426 (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
427
428 return 0;
429}
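
A quick numeric check of the alignment trick used in ec_bhf_alloc_dma_mem() above: rounding (alloc_phys + len) down to the hardware mask always yields a window that is aligned and lies wholly inside the 2 * len allocation. The standalone sketch below is not part of the patch and uses hypothetical values for the mask and the coherent allocation address; only the arithmetic mirrors the driver.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical example values, for illustration only */
	uint32_t mask       = 0xffffe000;   /* 8 KiB DMA window mask         */
	uint32_t alloc_phys = 0x12345678;   /* start of the 2*len allocation */
	uint32_t len        = ~mask + 1;    /* window size: 0x2000           */
	uint32_t alloc_len  = 2 * len;

	uint32_t buf_phys = (alloc_phys + len) & mask;

	/* aligned, and fully contained in [alloc_phys, alloc_phys + alloc_len) */
	assert((buf_phys & ~mask) == 0);
	assert(buf_phys >= alloc_phys);
	assert(buf_phys + len <= alloc_phys + alloc_len);

	printf("buf_phys = 0x%08x (offset 0x%x into the allocation)\n",
	       (unsigned)buf_phys, (unsigned)(buf_phys - alloc_phys));
	return 0;
}

This is why the function requests alloc_len = 2 * buf->len and then derives buf->buf from the offset of buf_phys within the allocation.
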
430
431static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
432{
433 int i = 0;
434
435 priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
436 priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
437 priv->tx_dnext = 0;
438
439 for (i = 0; i < priv->tx_dcount; i++)
440 priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
441}
442
443static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
444{
445 int i;
446
447 priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
448 priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
449 priv->rx_dnext = 0;
450
451 for (i = 0; i < priv->rx_dcount; i++) {
452 struct rx_desc *desc = &priv->rx_descs[i];
453 u32 next;
454
455 if (i != priv->rx_dcount - 1)
456 next = (u8 *)(desc + 1) - priv->rx_buf.buf;
457 else
458 next = 0;
459 next |= RXHDR_NEXT_VALID;
460 desc->header.next = cpu_to_le32(next);
461 desc->header.recv = 0;
462 ec_bhf_add_rx_desc(priv, desc);
463 }
464}
465
466static int ec_bhf_open(struct net_device *net_dev)
467{
468 struct ec_bhf_priv *priv = netdev_priv(net_dev);
469 struct device *dev = PRIV_TO_DEV(priv);
470 int err = 0;
471
472 dev_info(dev, "Opening device\n");
473
474 ec_bhf_reset(priv);
475
476 err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
477 FIFO_SIZE * sizeof(struct rx_desc));
478 if (err) {
479 dev_err(dev, "Failed to allocate rx buffer\n");
480 goto out;
481 }
482 ec_bhf_setup_rx_descs(priv);
483
484 dev_info(dev, "RX buffer allocated, address: %x\n",
485 (unsigned)priv->rx_buf.buf_phys);
486
487 err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
488 FIFO_SIZE * sizeof(struct tx_desc));
489 if (err) {
490 dev_err(dev, "Failed to allocate tx buffer\n");
491 goto error_rx_free;
492 }
493 dev_dbg(dev, "TX buffer allocated, addres: %x\n",
494 (unsigned)priv->tx_buf.buf_phys);
495
496 iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
497
498 ec_bhf_setup_tx_descs(priv);
499
500 netif_start_queue(net_dev);
501
502 hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
503 priv->hrtimer.function = ec_bhf_timer_fun;
504 hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
505 HRTIMER_MODE_REL);
506
507 dev_info(PRIV_TO_DEV(priv), "Device open\n");
508
509 ec_bhf_print_status(priv);
510
511 return 0;
512
513error_rx_free:
514 dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
 515			  priv->rx_buf.alloc_phys);
516out:
517 return err;
518}
519
520static int ec_bhf_stop(struct net_device *net_dev)
521{
522 struct ec_bhf_priv *priv = netdev_priv(net_dev);
523 struct device *dev = PRIV_TO_DEV(priv);
524
525 hrtimer_cancel(&priv->hrtimer);
526
527 ec_bhf_reset(priv);
528
529 netif_tx_disable(net_dev);
530
531 dma_free_coherent(dev, priv->tx_buf.alloc_len,
532 priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
533 dma_free_coherent(dev, priv->rx_buf.alloc_len,
534 priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
535
536 return 0;
537}
538
539static struct rtnl_link_stats64 *
540ec_bhf_get_stats(struct net_device *net_dev,
541 struct rtnl_link_stats64 *stats)
542{
543 struct ec_bhf_priv *priv = netdev_priv(net_dev);
544
545 stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
546 ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
547 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
548 stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
549 stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
550 stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
551
552 stats->tx_bytes = priv->stat_tx_bytes;
553 stats->rx_bytes = priv->stat_rx_bytes;
554
555 return stats;
556}
557
558static const struct net_device_ops ec_bhf_netdev_ops = {
559 .ndo_start_xmit = ec_bhf_start_xmit,
560 .ndo_open = ec_bhf_open,
561 .ndo_stop = ec_bhf_stop,
562 .ndo_get_stats64 = ec_bhf_get_stats,
563 .ndo_change_mtu = eth_change_mtu,
564 .ndo_validate_addr = eth_validate_addr,
565 .ndo_set_mac_address = eth_mac_addr
566};
567
568static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
569{
570 struct net_device *net_dev;
571 struct ec_bhf_priv *priv;
572 void * __iomem dma_io;
573 void * __iomem io;
574 int err = 0;
575
576 err = pci_enable_device(dev);
577 if (err)
578 return err;
579
580 pci_set_master(dev);
581
582 err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
583 if (err) {
584 dev_err(&dev->dev,
585 "Required dma mask not supported, failed to initialize device\n");
586 err = -EIO;
587 goto err_disable_dev;
588 }
589
590 err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
591 if (err) {
592 dev_err(&dev->dev,
593 "Required dma mask not supported, failed to initialize device\n");
594 goto err_disable_dev;
595 }
596
597 err = pci_request_regions(dev, "ec_bhf");
598 if (err) {
599 dev_err(&dev->dev, "Failed to request pci memory regions\n");
600 goto err_disable_dev;
601 }
602
603 io = pci_iomap(dev, 0, 0);
604 if (!io) {
605 dev_err(&dev->dev, "Failed to map pci card memory bar 0");
606 err = -EIO;
607 goto err_release_regions;
608 }
609
610 dma_io = pci_iomap(dev, 2, 0);
611 if (!dma_io) {
612 dev_err(&dev->dev, "Failed to map pci card memory bar 2");
613 err = -EIO;
614 goto err_unmap;
615 }
616
617 net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
 618	if (!net_dev) {
619 err = -ENOMEM;
620 goto err_unmap_dma_io;
621 }
622
623 pci_set_drvdata(dev, net_dev);
624 SET_NETDEV_DEV(net_dev, &dev->dev);
625
626 net_dev->features = 0;
627 net_dev->flags |= IFF_NOARP;
628
629 net_dev->netdev_ops = &ec_bhf_netdev_ops;
630
631 priv = netdev_priv(net_dev);
632 priv->net_dev = net_dev;
633 priv->io = io;
634 priv->dma_io = dma_io;
635 priv->dev = dev;
636
637 err = ec_bhf_setup_offsets(priv);
638 if (err < 0)
639 goto err_free_net_dev;
640
641 memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
642
643 dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
644 net_dev->dev_addr);
645
646 err = register_netdev(net_dev);
647 if (err < 0)
648 goto err_free_net_dev;
649
650 return 0;
651
652err_free_net_dev:
653 free_netdev(net_dev);
654err_unmap_dma_io:
655 pci_iounmap(dev, dma_io);
656err_unmap:
657 pci_iounmap(dev, io);
658err_release_regions:
659 pci_release_regions(dev);
660err_disable_dev:
661 pci_clear_master(dev);
662 pci_disable_device(dev);
663
664 return err;
665}
666
667static void ec_bhf_remove(struct pci_dev *dev)
668{
669 struct net_device *net_dev = pci_get_drvdata(dev);
670 struct ec_bhf_priv *priv = netdev_priv(net_dev);
671
672 unregister_netdev(net_dev);
673 free_netdev(net_dev);
674
675 pci_iounmap(dev, priv->dma_io);
676 pci_iounmap(dev, priv->io);
677 pci_release_regions(dev);
678 pci_clear_master(dev);
679 pci_disable_device(dev);
680}
681
682static struct pci_driver pci_driver = {
683 .name = "ec_bhf",
684 .id_table = ids,
685 .probe = ec_bhf_probe,
686 .remove = ec_bhf_remove,
687};
688
689static int __init ec_bhf_init(void)
690{
691 return pci_register_driver(&pci_driver);
692}
693
694static void __exit ec_bhf_exit(void)
695{
696 pci_unregister_driver(&pci_driver);
697}
698
699module_init(ec_bhf_init);
700module_exit(ec_bhf_exit);
701
702module_param(polling_frequency, long, S_IRUGO);
 703MODULE_PARM_DESC(polling_frequency, "Polling timer interval in ns");
704
705MODULE_LICENSE("GPL");
706MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8ccaa2520dc3..97db5a7179df 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -374,6 +374,7 @@ enum vf_state {
374#define BE_FLAGS_NAPI_ENABLED (1 << 9) 374#define BE_FLAGS_NAPI_ENABLED (1 << 9)
375#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11) 375#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
376#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12) 376#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
377#define BE_FLAGS_SETUP_DONE (1 << 13)
377 378
378#define BE_UC_PMAC_COUNT 30 379#define BE_UC_PMAC_COUNT 30
379#define BE_VF_UC_PMAC_COUNT 2 380#define BE_VF_UC_PMAC_COUNT 2
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3e6df47b6973..dc19bc5dec77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2033,11 +2033,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
2033 bool dummy_wrb; 2033 bool dummy_wrb;
2034 int i, pending_txqs; 2034 int i, pending_txqs;
2035 2035
2036 /* Wait for a max of 200ms for all the tx-completions to arrive. */ 2036 /* Stop polling for compls when HW has been silent for 10ms */
2037 do { 2037 do {
2038 pending_txqs = adapter->num_tx_qs; 2038 pending_txqs = adapter->num_tx_qs;
2039 2039
2040 for_all_tx_queues(adapter, txo, i) { 2040 for_all_tx_queues(adapter, txo, i) {
2041 cmpl = 0;
2042 num_wrbs = 0;
2041 txq = &txo->q; 2043 txq = &txo->q;
2042 while ((txcp = be_tx_compl_get(&txo->cq))) { 2044 while ((txcp = be_tx_compl_get(&txo->cq))) {
2043 end_idx = 2045 end_idx =
@@ -2050,14 +2052,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
2050 if (cmpl) { 2052 if (cmpl) {
2051 be_cq_notify(adapter, txo->cq.id, false, cmpl); 2053 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2052 atomic_sub(num_wrbs, &txq->used); 2054 atomic_sub(num_wrbs, &txq->used);
2053 cmpl = 0; 2055 timeo = 0;
2054 num_wrbs = 0;
2055 } 2056 }
2056 if (atomic_read(&txq->used) == 0) 2057 if (atomic_read(&txq->used) == 0)
2057 pending_txqs--; 2058 pending_txqs--;
2058 } 2059 }
2059 2060
2060 if (pending_txqs == 0 || ++timeo > 200) 2061 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
2061 break; 2062 break;
2062 2063
2063 mdelay(1); 2064 mdelay(1);
@@ -2725,6 +2726,12 @@ static int be_close(struct net_device *netdev)
2725 struct be_eq_obj *eqo; 2726 struct be_eq_obj *eqo;
2726 int i; 2727 int i;
2727 2728
2729 /* This protection is needed as be_close() may be called even when the
 2730	 * adapter is in a cleared state (after an EEH permanent failure)
2731 */
2732 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2733 return 0;
2734
2728 be_roce_dev_close(adapter); 2735 be_roce_dev_close(adapter);
2729 2736
2730 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { 2737 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3055,6 +3062,7 @@ static int be_clear(struct be_adapter *adapter)
3055 be_clear_queues(adapter); 3062 be_clear_queues(adapter);
3056 3063
3057 be_msix_disable(adapter); 3064 be_msix_disable(adapter);
3065 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3058 return 0; 3066 return 0;
3059} 3067}
3060 3068
@@ -3559,6 +3567,7 @@ static int be_setup(struct be_adapter *adapter)
3559 adapter->phy.fc_autoneg = 1; 3567 adapter->phy.fc_autoneg = 1;
3560 3568
3561 be_schedule_worker(adapter); 3569 be_schedule_worker(adapter);
3570 adapter->flags |= BE_FLAGS_SETUP_DONE;
3562 return 0; 3571 return 0;
3563err: 3572err:
3564 be_clear(adapter); 3573 be_clear(adapter);
@@ -4940,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
4940 if (status) 4949 if (status)
4941 goto err; 4950 goto err;
4942 4951
4952 /* On some BE3 FW versions, after a HW reset,
4953 * interrupts will remain disabled for each function.
4954 * So, explicitly enable interrupts
4955 */
4956 be_intr_set(adapter, true);
4957
4943 /* tell fw we're ready to fire cmds */ 4958 /* tell fw we're ready to fire cmds */
4944 status = be_cmd_fw_init(adapter); 4959 status = be_cmd_fw_init(adapter);
4945 if (status) 4960 if (status)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9125d9abf099..e2d42475b006 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -121,6 +121,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
121static irqreturn_t gfar_transmit(int irq, void *dev_id); 121static irqreturn_t gfar_transmit(int irq, void *dev_id);
122static irqreturn_t gfar_interrupt(int irq, void *dev_id); 122static irqreturn_t gfar_interrupt(int irq, void *dev_id);
123static void adjust_link(struct net_device *dev); 123static void adjust_link(struct net_device *dev);
124static noinline void gfar_update_link_state(struct gfar_private *priv);
124static int init_phy(struct net_device *dev); 125static int init_phy(struct net_device *dev);
125static int gfar_probe(struct platform_device *ofdev); 126static int gfar_probe(struct platform_device *ofdev);
126static int gfar_remove(struct platform_device *ofdev); 127static int gfar_remove(struct platform_device *ofdev);
@@ -3076,41 +3077,6 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
3076 return IRQ_HANDLED; 3077 return IRQ_HANDLED;
3077} 3078}
3078 3079
3079static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3080{
3081 struct phy_device *phydev = priv->phydev;
3082 u32 val = 0;
3083
3084 if (!phydev->duplex)
3085 return val;
3086
3087 if (!priv->pause_aneg_en) {
3088 if (priv->tx_pause_en)
3089 val |= MACCFG1_TX_FLOW;
3090 if (priv->rx_pause_en)
3091 val |= MACCFG1_RX_FLOW;
3092 } else {
3093 u16 lcl_adv, rmt_adv;
3094 u8 flowctrl;
3095 /* get link partner capabilities */
3096 rmt_adv = 0;
3097 if (phydev->pause)
3098 rmt_adv = LPA_PAUSE_CAP;
3099 if (phydev->asym_pause)
3100 rmt_adv |= LPA_PAUSE_ASYM;
3101
3102 lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3103
3104 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3105 if (flowctrl & FLOW_CTRL_TX)
3106 val |= MACCFG1_TX_FLOW;
3107 if (flowctrl & FLOW_CTRL_RX)
3108 val |= MACCFG1_RX_FLOW;
3109 }
3110
3111 return val;
3112}
3113
3114/* Called every time the controller might need to be made 3080/* Called every time the controller might need to be made
3115 * aware of new link state. The PHY code conveys this 3081 * aware of new link state. The PHY code conveys this
3116 * information through variables in the phydev structure, and this 3082 * information through variables in the phydev structure, and this
@@ -3120,83 +3086,12 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3120static void adjust_link(struct net_device *dev) 3086static void adjust_link(struct net_device *dev)
3121{ 3087{
3122 struct gfar_private *priv = netdev_priv(dev); 3088 struct gfar_private *priv = netdev_priv(dev);
3123 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3124 struct phy_device *phydev = priv->phydev; 3089 struct phy_device *phydev = priv->phydev;
3125 int new_state = 0;
3126 3090
3127 if (test_bit(GFAR_RESETTING, &priv->state)) 3091 if (unlikely(phydev->link != priv->oldlink ||
3128 return; 3092 phydev->duplex != priv->oldduplex ||
3129 3093 phydev->speed != priv->oldspeed))
3130 if (phydev->link) { 3094 gfar_update_link_state(priv);
3131 u32 tempval1 = gfar_read(&regs->maccfg1);
3132 u32 tempval = gfar_read(&regs->maccfg2);
3133 u32 ecntrl = gfar_read(&regs->ecntrl);
3134
3135 /* Now we make sure that we can be in full duplex mode.
3136 * If not, we operate in half-duplex mode.
3137 */
3138 if (phydev->duplex != priv->oldduplex) {
3139 new_state = 1;
3140 if (!(phydev->duplex))
3141 tempval &= ~(MACCFG2_FULL_DUPLEX);
3142 else
3143 tempval |= MACCFG2_FULL_DUPLEX;
3144
3145 priv->oldduplex = phydev->duplex;
3146 }
3147
3148 if (phydev->speed != priv->oldspeed) {
3149 new_state = 1;
3150 switch (phydev->speed) {
3151 case 1000:
3152 tempval =
3153 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3154
3155 ecntrl &= ~(ECNTRL_R100);
3156 break;
3157 case 100:
3158 case 10:
3159 tempval =
3160 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3161
3162 /* Reduced mode distinguishes
3163 * between 10 and 100
3164 */
3165 if (phydev->speed == SPEED_100)
3166 ecntrl |= ECNTRL_R100;
3167 else
3168 ecntrl &= ~(ECNTRL_R100);
3169 break;
3170 default:
3171 netif_warn(priv, link, dev,
3172 "Ack! Speed (%d) is not 10/100/1000!\n",
3173 phydev->speed);
3174 break;
3175 }
3176
3177 priv->oldspeed = phydev->speed;
3178 }
3179
3180 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3181 tempval1 |= gfar_get_flowctrl_cfg(priv);
3182
3183 gfar_write(&regs->maccfg1, tempval1);
3184 gfar_write(&regs->maccfg2, tempval);
3185 gfar_write(&regs->ecntrl, ecntrl);
3186
3187 if (!priv->oldlink) {
3188 new_state = 1;
3189 priv->oldlink = 1;
3190 }
3191 } else if (priv->oldlink) {
3192 new_state = 1;
3193 priv->oldlink = 0;
3194 priv->oldspeed = 0;
3195 priv->oldduplex = -1;
3196 }
3197
3198 if (new_state && netif_msg_link(priv))
3199 phy_print_status(phydev);
3200} 3095}
3201 3096
3202/* Update the hash table based on the current list of multicast 3097/* Update the hash table based on the current list of multicast
@@ -3442,6 +3337,114 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
3442 return IRQ_HANDLED; 3337 return IRQ_HANDLED;
3443} 3338}
3444 3339
3340static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
3341{
3342 struct phy_device *phydev = priv->phydev;
3343 u32 val = 0;
3344
3345 if (!phydev->duplex)
3346 return val;
3347
3348 if (!priv->pause_aneg_en) {
3349 if (priv->tx_pause_en)
3350 val |= MACCFG1_TX_FLOW;
3351 if (priv->rx_pause_en)
3352 val |= MACCFG1_RX_FLOW;
3353 } else {
3354 u16 lcl_adv, rmt_adv;
3355 u8 flowctrl;
3356 /* get link partner capabilities */
3357 rmt_adv = 0;
3358 if (phydev->pause)
3359 rmt_adv = LPA_PAUSE_CAP;
3360 if (phydev->asym_pause)
3361 rmt_adv |= LPA_PAUSE_ASYM;
3362
3363 lcl_adv = mii_advertise_flowctrl(phydev->advertising);
3364
3365 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
3366 if (flowctrl & FLOW_CTRL_TX)
3367 val |= MACCFG1_TX_FLOW;
3368 if (flowctrl & FLOW_CTRL_RX)
3369 val |= MACCFG1_RX_FLOW;
3370 }
3371
3372 return val;
3373}
3374
3375static noinline void gfar_update_link_state(struct gfar_private *priv)
3376{
3377 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3378 struct phy_device *phydev = priv->phydev;
3379
3380 if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
3381 return;
3382
3383 if (phydev->link) {
3384 u32 tempval1 = gfar_read(&regs->maccfg1);
3385 u32 tempval = gfar_read(&regs->maccfg2);
3386 u32 ecntrl = gfar_read(&regs->ecntrl);
3387
3388 if (phydev->duplex != priv->oldduplex) {
3389 if (!(phydev->duplex))
3390 tempval &= ~(MACCFG2_FULL_DUPLEX);
3391 else
3392 tempval |= MACCFG2_FULL_DUPLEX;
3393
3394 priv->oldduplex = phydev->duplex;
3395 }
3396
3397 if (phydev->speed != priv->oldspeed) {
3398 switch (phydev->speed) {
3399 case 1000:
3400 tempval =
3401 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
3402
3403 ecntrl &= ~(ECNTRL_R100);
3404 break;
3405 case 100:
3406 case 10:
3407 tempval =
3408 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
3409
3410 /* Reduced mode distinguishes
3411 * between 10 and 100
3412 */
3413 if (phydev->speed == SPEED_100)
3414 ecntrl |= ECNTRL_R100;
3415 else
3416 ecntrl &= ~(ECNTRL_R100);
3417 break;
3418 default:
3419 netif_warn(priv, link, priv->ndev,
3420 "Ack! Speed (%d) is not 10/100/1000!\n",
3421 phydev->speed);
3422 break;
3423 }
3424
3425 priv->oldspeed = phydev->speed;
3426 }
3427
3428 tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
3429 tempval1 |= gfar_get_flowctrl_cfg(priv);
3430
3431 gfar_write(&regs->maccfg1, tempval1);
3432 gfar_write(&regs->maccfg2, tempval);
3433 gfar_write(&regs->ecntrl, ecntrl);
3434
3435 if (!priv->oldlink)
3436 priv->oldlink = 1;
3437
3438 } else if (priv->oldlink) {
3439 priv->oldlink = 0;
3440 priv->oldspeed = 0;
3441 priv->oldduplex = -1;
3442 }
3443
3444 if (netif_msg_link(priv))
3445 phy_print_status(phydev);
3446}
3447
 3445static struct of_device_id gfar_match[] =	 3448static struct of_device_id gfar_match[] =
3446{ 3449{
3447 { 3450 {
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 891dbee6e6c1..76d70708f864 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -533,6 +533,9 @@ static int gfar_spauseparam(struct net_device *dev,
533 struct gfar __iomem *regs = priv->gfargrp[0].regs; 533 struct gfar __iomem *regs = priv->gfargrp[0].regs;
534 u32 oldadv, newadv; 534 u32 oldadv, newadv;
535 535
536 if (!phydev)
537 return -ENODEV;
538
536 if (!(phydev->supported & SUPPORTED_Pause) || 539 if (!(phydev->supported & SUPPORTED_Pause) ||
537 (!(phydev->supported & SUPPORTED_Asym_Pause) && 540 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
538 (epause->rx_pause != epause->tx_pause))) 541 (epause->rx_pause != epause->tx_pause)))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9866f264f55e..f0bbd4246d71 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -186,7 +186,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
186{ 186{
187 u16 phy_reg = 0; 187 u16 phy_reg = 0;
188 u32 phy_id = 0; 188 u32 phy_id = 0;
189 s32 ret_val; 189 s32 ret_val = 0;
190 u16 retry_count; 190 u16 retry_count;
191 u32 mac_reg = 0; 191 u32 mac_reg = 0;
192 192
@@ -217,11 +217,13 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
217 /* In case the PHY needs to be in mdio slow mode, 217 /* In case the PHY needs to be in mdio slow mode,
218 * set slow mode and try to get the PHY id again. 218 * set slow mode and try to get the PHY id again.
219 */ 219 */
220 hw->phy.ops.release(hw); 220 if (hw->mac.type < e1000_pch_lpt) {
221 ret_val = e1000_set_mdio_slow_mode_hv(hw); 221 hw->phy.ops.release(hw);
222 if (!ret_val) 222 ret_val = e1000_set_mdio_slow_mode_hv(hw);
223 ret_val = e1000e_get_phy_id(hw); 223 if (!ret_val)
224 hw->phy.ops.acquire(hw); 224 ret_val = e1000e_get_phy_id(hw);
225 hw->phy.ops.acquire(hw);
226 }
225 227
226 if (ret_val) 228 if (ret_val)
227 return false; 229 return false;
@@ -842,6 +844,17 @@ s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
842 } 844 }
843 } 845 }
844 846
847 if (hw->phy.type == e1000_phy_82579) {
848 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
849 &data);
850 if (ret_val)
851 goto release;
852
853 data &= ~I82579_LPI_100_PLL_SHUT;
854 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
855 data);
856 }
857
845 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ 858 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
846 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); 859 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
847 if (ret_val) 860 if (ret_val)
@@ -1314,14 +1327,17 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1314 return ret_val; 1327 return ret_val;
1315 } 1328 }
1316 1329
1317 /* When connected at 10Mbps half-duplex, 82579 parts are excessively 1330 /* When connected at 10Mbps half-duplex, some parts are excessively
1318 * aggressive resulting in many collisions. To avoid this, increase 1331 * aggressive resulting in many collisions. To avoid this, increase
1319 * the IPG and reduce Rx latency in the PHY. 1332 * the IPG and reduce Rx latency in the PHY.
1320 */ 1333 */
1321 if ((hw->mac.type == e1000_pch2lan) && link) { 1334 if (((hw->mac.type == e1000_pch2lan) ||
1335 (hw->mac.type == e1000_pch_lpt)) && link) {
1322 u32 reg; 1336 u32 reg;
1323 reg = er32(STATUS); 1337 reg = er32(STATUS);
1324 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { 1338 if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1339 u16 emi_addr;
1340
1325 reg = er32(TIPG); 1341 reg = er32(TIPG);
1326 reg &= ~E1000_TIPG_IPGT_MASK; 1342 reg &= ~E1000_TIPG_IPGT_MASK;
1327 reg |= 0xFF; 1343 reg |= 0xFF;
@@ -1332,8 +1348,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1332 if (ret_val) 1348 if (ret_val)
1333 return ret_val; 1349 return ret_val;
1334 1350
1335 ret_val = 1351 if (hw->mac.type == e1000_pch2lan)
1336 e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0); 1352 emi_addr = I82579_RX_CONFIG;
1353 else
1354 emi_addr = I217_RX_CONFIG;
1355
1356 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
1337 1357
1338 hw->phy.ops.release(hw); 1358 hw->phy.ops.release(hw);
1339 1359
@@ -2493,51 +2513,44 @@ release:
2493 * e1000_k1_gig_workaround_lv - K1 Si workaround 2513 * e1000_k1_gig_workaround_lv - K1 Si workaround
2494 * @hw: pointer to the HW structure 2514 * @hw: pointer to the HW structure
2495 * 2515 *
2496 * Workaround to set the K1 beacon duration for 82579 parts 2516 * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2517 * Disable K1 in 1000Mbps and 100Mbps
2497 **/ 2518 **/
2498static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) 2519static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2499{ 2520{
2500 s32 ret_val = 0; 2521 s32 ret_val = 0;
2501 u16 status_reg = 0; 2522 u16 status_reg = 0;
2502 u32 mac_reg;
2503 u16 phy_reg;
2504 2523
2505 if (hw->mac.type != e1000_pch2lan) 2524 if (hw->mac.type != e1000_pch2lan)
2506 return 0; 2525 return 0;
2507 2526
 2508	/* Set K1 beacon duration based on 1Gbps speed or otherwise */	 2527	/* Set K1 beacon duration based on 10Mbps speed */
2509 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); 2528 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
2510 if (ret_val) 2529 if (ret_val)
2511 return ret_val; 2530 return ret_val;
2512 2531
2513 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) 2532 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2514 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { 2533 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2515 mac_reg = er32(FEXTNVM4); 2534 if (status_reg &
2516 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 2535 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2517
2518 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
2519 if (ret_val)
2520 return ret_val;
2521
2522 if (status_reg & HV_M_STATUS_SPEED_1000) {
2523 u16 pm_phy_reg; 2536 u16 pm_phy_reg;
2524 2537
2525 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; 2538 /* LV 1G/100 Packet drop issue wa */
2526 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2527 /* LV 1G Packet drop issue wa */
2528 ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); 2539 ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
2529 if (ret_val) 2540 if (ret_val)
2530 return ret_val; 2541 return ret_val;
2531 pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA; 2542 pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2532 ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); 2543 ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
2533 if (ret_val) 2544 if (ret_val)
2534 return ret_val; 2545 return ret_val;
2535 } else { 2546 } else {
2547 u32 mac_reg;
2548
2549 mac_reg = er32(FEXTNVM4);
2550 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2536 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; 2551 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2537 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; 2552 ew32(FEXTNVM4, mac_reg);
2538 } 2553 }
2539 ew32(FEXTNVM4, mac_reg);
2540 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
2541 } 2554 }
2542 2555
2543 return ret_val; 2556 return ret_val;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index bead50f9187b..5515126c81c1 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -232,16 +232,19 @@
232#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ 232#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
233#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ 233#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
234#define I82579_RX_CONFIG 0x3412 /* Receive configuration */ 234#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
235#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */
235#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ 236#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
236#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ 237#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
237#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ 238#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
238#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ 239#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
239#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ 240#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
240#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ 241#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
242#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */
241#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ 243#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
242#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ 244#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
243#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ 245#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
244#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ 246#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
247#define I217_RX_CONFIG 0xB20C /* Receive configuration */
245 248
246#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ 249#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */
247#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ 250#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d50c91e50528..3e69386add04 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1165,7 +1165,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
1165 dev_kfree_skb_any(adapter->tx_hwtstamp_skb); 1165 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
1166 adapter->tx_hwtstamp_skb = NULL; 1166 adapter->tx_hwtstamp_skb = NULL;
1167 adapter->tx_hwtstamp_timeouts++; 1167 adapter->tx_hwtstamp_timeouts++;
1168 e_warn("clearing Tx timestamp hang"); 1168 e_warn("clearing Tx timestamp hang\n");
1169 } else { 1169 } else {
1170 /* reschedule to check later */ 1170 /* reschedule to check later */
1171 schedule_work(&adapter->tx_hwtstamp_work); 1171 schedule_work(&adapter->tx_hwtstamp_work);
@@ -5687,7 +5687,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5687static int e1000_change_mtu(struct net_device *netdev, int new_mtu) 5687static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5688{ 5688{
5689 struct e1000_adapter *adapter = netdev_priv(netdev); 5689 struct e1000_adapter *adapter = netdev_priv(netdev);
5690 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5690 int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
5691 5691
5692 /* Jumbo frame support */ 5692 /* Jumbo frame support */
5693 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && 5693 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
@@ -6235,6 +6235,7 @@ static int __e1000_resume(struct pci_dev *pdev)
6235 return 0; 6235 return 0;
6236} 6236}
6237 6237
6238#ifdef CONFIG_PM_SLEEP
6238static int e1000e_pm_thaw(struct device *dev) 6239static int e1000e_pm_thaw(struct device *dev)
6239{ 6240{
6240 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); 6241 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
@@ -6255,7 +6256,6 @@ static int e1000e_pm_thaw(struct device *dev)
6255 return 0; 6256 return 0;
6256} 6257}
6257 6258
6258#ifdef CONFIG_PM_SLEEP
6259static int e1000e_pm_suspend(struct device *dev) 6259static int e1000e_pm_suspend(struct device *dev)
6260{ 6260{
6261 struct pci_dev *pdev = to_pci_dev(dev); 6261 struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 3841bccf058c..537d2780b408 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -164,6 +164,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
164#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 164#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
165#define HV_M_STATUS_SPEED_MASK 0x0300 165#define HV_M_STATUS_SPEED_MASK 0x0300
166#define HV_M_STATUS_SPEED_1000 0x0200 166#define HV_M_STATUS_SPEED_1000 0x0200
167#define HV_M_STATUS_SPEED_100 0x0100
167#define HV_M_STATUS_LINK_UP 0x0040 168#define HV_M_STATUS_LINK_UP 0x0040
168 169
169#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 170#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 861b722c2672..cf0761f08911 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2897,12 +2897,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
2897 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); 2897 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
2898 2898
2899 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { 2899 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
2900 ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; 2900 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
2901 i40e_ptp_tx_hwtstamp(pf); 2901 i40e_ptp_tx_hwtstamp(pf);
2902 prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
2903 } 2902 }
2904
2905 wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
2906 } 2903 }
2907 2904
2908 /* If a critical error is pending we have no choice but to reset the 2905 /* If a critical error is pending we have no choice but to reset the
@@ -4271,6 +4268,14 @@ static int i40e_open(struct net_device *netdev)
4271 if (err) 4268 if (err)
4272 return err; 4269 return err;
4273 4270
4271 /* configure global TSO hardware offload settings */
4272 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
4273 TCP_FLAG_FIN) >> 16);
4274 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
4275 TCP_FLAG_FIN |
4276 TCP_FLAG_CWR) >> 16);
4277 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
4278
4274#ifdef CONFIG_I40E_VXLAN 4279#ifdef CONFIG_I40E_VXLAN
4275 vxlan_get_rx_port(netdev); 4280 vxlan_get_rx_port(netdev);
4276#endif 4281#endif
@@ -6712,6 +6717,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
6712 NETIF_F_HW_VLAN_CTAG_FILTER | 6717 NETIF_F_HW_VLAN_CTAG_FILTER |
6713 NETIF_F_IPV6_CSUM | 6718 NETIF_F_IPV6_CSUM |
6714 NETIF_F_TSO | 6719 NETIF_F_TSO |
6720 NETIF_F_TSO_ECN |
6715 NETIF_F_TSO6 | 6721 NETIF_F_TSO6 |
6716 NETIF_F_RXCSUM | 6722 NETIF_F_RXCSUM |
6717 NETIF_F_NTUPLE | 6723 NETIF_F_NTUPLE |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 262bdf11d221..81299189a47d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -160,7 +160,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
160 udelay(5); 160 udelay(5);
161 } 161 }
162 if (ret_code == I40E_ERR_TIMEOUT) 162 if (ret_code == I40E_ERR_TIMEOUT)
163 hw_dbg(hw, "Done bit in GLNVM_SRCTL not set"); 163 hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
164 return ret_code; 164 return ret_code;
165} 165}
166 166
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e33ec6c842b7..e61e63720800 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -239,7 +239,7 @@ static void i40e_ptp_tx_work(struct work_struct *work)
239 dev_kfree_skb_any(pf->ptp_tx_skb); 239 dev_kfree_skb_any(pf->ptp_tx_skb);
240 pf->ptp_tx_skb = NULL; 240 pf->ptp_tx_skb = NULL;
241 pf->tx_hwtstamp_timeouts++; 241 pf->tx_hwtstamp_timeouts++;
242 dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang"); 242 dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
243 return; 243 return;
244 } 244 }
245 245
@@ -321,7 +321,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
321 pf->last_rx_ptp_check = jiffies; 321 pf->last_rx_ptp_check = jiffies;
322 pf->rx_hwtstamp_cleared++; 322 pf->rx_hwtstamp_cleared++;
323 dev_warn(&vsi->back->pdev->dev, 323 dev_warn(&vsi->back->pdev->dev,
324 "%s: clearing Rx timestamp hang", 324 "%s: clearing Rx timestamp hang\n",
325 __func__); 325 __func__);
326 } 326 }
327} 327}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0f5d96ad281d..9478ddc66caf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -418,7 +418,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
418 } 418 }
419 break; 419 break;
420 default: 420 default:
421 dev_info(&pf->pdev->dev, "Could not specify spec type %d", 421 dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
422 input->flow_type); 422 input->flow_type);
423 ret = -EINVAL; 423 ret = -EINVAL;
424 } 424 }
@@ -478,7 +478,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
478 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; 478 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
479 } 479 }
480 } else { 480 } else {
481 dev_info(&pdev->dev, "FD filter programming error"); 481 dev_info(&pdev->dev, "FD filter programming error\n");
482 } 482 }
483 } else if (error == 483 } else if (error ==
484 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { 484 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
@@ -1713,9 +1713,11 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1713 I40E_TX_FLAGS_VLAN_PRIO_SHIFT; 1713 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
1714 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) { 1714 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
1715 struct vlan_ethhdr *vhdr; 1715 struct vlan_ethhdr *vhdr;
1716 if (skb_header_cloned(skb) && 1716 int rc;
1717 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 1717
1718 return -ENOMEM; 1718 rc = skb_cow_head(skb, 0);
1719 if (rc < 0)
1720 return rc;
1719 vhdr = (struct vlan_ethhdr *)skb->data; 1721 vhdr = (struct vlan_ethhdr *)skb->data;
1720 vhdr->h_vlan_TCI = htons(tx_flags >> 1722 vhdr->h_vlan_TCI = htons(tx_flags >>
1721 I40E_TX_FLAGS_VLAN_SHIFT); 1723 I40E_TX_FLAGS_VLAN_SHIFT);
@@ -1743,20 +1745,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1743 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) 1745 u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
1744{ 1746{
1745 u32 cd_cmd, cd_tso_len, cd_mss; 1747 u32 cd_cmd, cd_tso_len, cd_mss;
1748 struct ipv6hdr *ipv6h;
1746 struct tcphdr *tcph; 1749 struct tcphdr *tcph;
1747 struct iphdr *iph; 1750 struct iphdr *iph;
1748 u32 l4len; 1751 u32 l4len;
1749 int err; 1752 int err;
1750 struct ipv6hdr *ipv6h;
1751 1753
1752 if (!skb_is_gso(skb)) 1754 if (!skb_is_gso(skb))
1753 return 0; 1755 return 0;
1754 1756
1755 if (skb_header_cloned(skb)) { 1757 err = skb_cow_head(skb, 0);
1756 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1758 if (err < 0)
1757 if (err) 1759 return err;
1758 return err;
1759 }
1760 1760
1761 if (protocol == htons(ETH_P_IP)) { 1761 if (protocol == htons(ETH_P_IP)) {
1762 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1762 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index db963397cc27..f67f8a170b90 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -365,7 +365,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
365 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); 365 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
366 if (word_address == address) { 366 if (word_address == address) {
367 *data = INVM_DWORD_TO_WORD_DATA(invm_dword); 367 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
368 hw_dbg("Read INVM Word 0x%02x = %x", 368 hw_dbg("Read INVM Word 0x%02x = %x\n",
369 address, *data); 369 address, *data);
370 status = E1000_SUCCESS; 370 status = E1000_SUCCESS;
371 break; 371 break;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 5910a932ea7c..1e0c404db81a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -929,11 +929,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
929 */ 929 */
930 if (hw->fc.requested_mode == e1000_fc_full) { 930 if (hw->fc.requested_mode == e1000_fc_full) {
931 hw->fc.current_mode = e1000_fc_full; 931 hw->fc.current_mode = e1000_fc_full;
932 hw_dbg("Flow Control = FULL.\r\n"); 932 hw_dbg("Flow Control = FULL.\n");
933 } else { 933 } else {
934 hw->fc.current_mode = e1000_fc_rx_pause; 934 hw->fc.current_mode = e1000_fc_rx_pause;
935 hw_dbg("Flow Control = " 935 hw_dbg("Flow Control = RX PAUSE frames only.\n");
936 "RX PAUSE frames only.\r\n");
937 } 936 }
938 } 937 }
939 /* For receiving PAUSE frames ONLY. 938 /* For receiving PAUSE frames ONLY.
@@ -948,7 +947,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
948 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 947 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
949 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 948 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
950 hw->fc.current_mode = e1000_fc_tx_pause; 949 hw->fc.current_mode = e1000_fc_tx_pause;
951 hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); 950 hw_dbg("Flow Control = TX PAUSE frames only.\n");
952 } 951 }
953 /* For transmitting PAUSE frames ONLY. 952 /* For transmitting PAUSE frames ONLY.
954 * 953 *
@@ -962,7 +961,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
962 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 961 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
963 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 962 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
964 hw->fc.current_mode = e1000_fc_rx_pause; 963 hw->fc.current_mode = e1000_fc_rx_pause;
965 hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); 964 hw_dbg("Flow Control = RX PAUSE frames only.\n");
966 } 965 }
967 /* Per the IEEE spec, at this point flow control should be 966 /* Per the IEEE spec, at this point flow control should be
968 * disabled. However, we want to consider that we could 967 * disabled. However, we want to consider that we could
@@ -988,10 +987,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
988 (hw->fc.requested_mode == e1000_fc_tx_pause) || 987 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
989 (hw->fc.strict_ieee)) { 988 (hw->fc.strict_ieee)) {
990 hw->fc.current_mode = e1000_fc_none; 989 hw->fc.current_mode = e1000_fc_none;
991 hw_dbg("Flow Control = NONE.\r\n"); 990 hw_dbg("Flow Control = NONE.\n");
992 } else { 991 } else {
993 hw->fc.current_mode = e1000_fc_rx_pause; 992 hw->fc.current_mode = e1000_fc_rx_pause;
994 hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); 993 hw_dbg("Flow Control = RX PAUSE frames only.\n");
995 } 994 }
996 995
997 /* Now we need to do one last check... If we auto- 996 /* Now we need to do one last check... If we auto-
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fb98d4602f9d..16430a8440fa 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5193,8 +5193,10 @@ void igb_update_stats(struct igb_adapter *adapter,
5193 5193
5194 rcu_read_lock(); 5194 rcu_read_lock();
5195 for (i = 0; i < adapter->num_rx_queues; i++) { 5195 for (i = 0; i < adapter->num_rx_queues; i++) {
5196 u32 rqdpc = rd32(E1000_RQDPC(i));
5197 struct igb_ring *ring = adapter->rx_ring[i]; 5196 struct igb_ring *ring = adapter->rx_ring[i];
5197 u32 rqdpc = rd32(E1000_RQDPC(i));
5198 if (hw->mac.type >= e1000_i210)
5199 wr32(E1000_RQDPC(i), 0);
5198 5200
5199 if (rqdpc) { 5201 if (rqdpc) {
5200 ring->rx_stats.drops += rqdpc; 5202 ring->rx_stats.drops += rqdpc;
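
[Annotation] The igb_update_stats() hunk reads the per-queue drop counter and, on i210-class hardware, writes the register back to zero, treating it as a latch that software must clear so drops are not accumulated twice across reads. A generic sketch of that read-then-clear accumulation, using the standard MMIO accessors rather than the driver's rd32/wr32 macros; the register handle and flag are illustrative:

#include <linux/io.h>
#include <linux/types.h>

/* Accumulate a drop counter from an ioremapped statistics register
 * (such as E1000_RQDPC(i) above). Some parts clear the register on
 * read, others need an explicit write-back of zero.
 */
static void example_accumulate_drops(void __iomem *reg, u64 *drops,
				     bool needs_explicit_clear)
{
	u32 val = readl(reg);

	if (needs_explicit_clear)
		writel(0, reg);

	if (val)
		*drops += val;
}
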
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 9209d652e1c9..ab25e49365f7 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -389,7 +389,7 @@ static void igb_ptp_tx_work(struct work_struct *work)
389 adapter->ptp_tx_skb = NULL; 389 adapter->ptp_tx_skb = NULL;
390 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); 390 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
391 adapter->tx_hwtstamp_timeouts++; 391 adapter->tx_hwtstamp_timeouts++;
392 dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang"); 392 dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
393 return; 393 return;
394 } 394 }
395 395
@@ -451,7 +451,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
451 rd32(E1000_RXSTMPH); 451 rd32(E1000_RXSTMPH);
452 adapter->last_rx_ptp_check = jiffies; 452 adapter->last_rx_ptp_check = jiffies;
453 adapter->rx_hwtstamp_cleared++; 453 adapter->rx_hwtstamp_cleared++;
454 dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang"); 454 dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n");
455 } 455 }
456} 456}
457 457
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 1a12c1dd7a27..c6c4ca7d68e6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -256,7 +256,6 @@ struct ixgbe_ring {
256 struct ixgbe_tx_buffer *tx_buffer_info; 256 struct ixgbe_tx_buffer *tx_buffer_info;
257 struct ixgbe_rx_buffer *rx_buffer_info; 257 struct ixgbe_rx_buffer *rx_buffer_info;
258 }; 258 };
259 unsigned long last_rx_timestamp;
260 unsigned long state; 259 unsigned long state;
261 u8 __iomem *tail; 260 u8 __iomem *tail;
262 dma_addr_t dma; /* phys. address of descriptor ring */ 261 dma_addr_t dma; /* phys. address of descriptor ring */
@@ -770,6 +769,7 @@ struct ixgbe_adapter {
770 unsigned long ptp_tx_start; 769 unsigned long ptp_tx_start;
771 unsigned long last_overflow_check; 770 unsigned long last_overflow_check;
772 unsigned long last_rx_ptp_check; 771 unsigned long last_rx_ptp_check;
772 unsigned long last_rx_timestamp;
773 spinlock_t tmreg_lock; 773 spinlock_t tmreg_lock;
774 struct cyclecounter cc; 774 struct cyclecounter cc;
775 struct timecounter tc; 775 struct timecounter tc;
@@ -944,24 +944,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
944void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); 944void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
945void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); 945void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
946void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); 946void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
947void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, 947void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb);
948 struct sk_buff *skb);
949static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
950 union ixgbe_adv_rx_desc *rx_desc,
951 struct sk_buff *skb)
952{
953 if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
954 return;
955
956 __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
957
958 /*
959 * Update the last_rx_timestamp timer in order to enable watchdog check
960 * for error case of latched timestamp on a dropped packet.
961 */
962 rx_ring->last_rx_timestamp = jiffies;
963}
964
965int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); 948int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
966int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); 949int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
967void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); 950void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 24fba39e194e..981b8a7b100d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1195,7 +1195,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1195 */ 1195 */
1196 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; 1196 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1197 1197
1198 hw_dbg(hw, "Detected EEPROM page size = %d words.", 1198 hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
1199 hw->eeprom.word_page_size); 1199 hw->eeprom.word_page_size);
1200out: 1200out:
1201 return status; 1201 return status;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c4c526b7f99f..d62e7a25cf97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1664,7 +1664,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1664 1664
1665 ixgbe_rx_checksum(rx_ring, rx_desc, skb); 1665 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1666 1666
1667 ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); 1667 if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
1668 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);
1668 1669
1669 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1670 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1670 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 1671 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 23f765263f12..a76af8e28a04 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -536,7 +536,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
536 536
537 if (time_out == max_time_out) { 537 if (time_out == max_time_out) {
538 status = IXGBE_ERR_LINK_SETUP; 538 status = IXGBE_ERR_LINK_SETUP;
539 hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out"); 539 hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
540 } 540 }
541 541
542 return status; 542 return status;
@@ -745,7 +745,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
745 745
746 if (time_out == max_time_out) { 746 if (time_out == max_time_out) {
747 status = IXGBE_ERR_LINK_SETUP; 747 status = IXGBE_ERR_LINK_SETUP;
748 hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out"); 748 hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
749 } 749 }
750 750
751 return status; 751 return status;
@@ -1175,7 +1175,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
1175 status = 0; 1175 status = 0;
1176 } else { 1176 } else {
1177 if (hw->allow_unsupported_sfp) { 1177 if (hw->allow_unsupported_sfp) {
1178 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules."); 1178 e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
1179 status = 0; 1179 status = 0;
1180 } else { 1180 } else {
1181 hw_dbg(hw, 1181 hw_dbg(hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 63515a6f67fa..8902ae683457 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -435,10 +435,8 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
435void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) 435void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
436{ 436{
437 struct ixgbe_hw *hw = &adapter->hw; 437 struct ixgbe_hw *hw = &adapter->hw;
438 struct ixgbe_ring *rx_ring;
439 u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 438 u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
440 unsigned long rx_event; 439 unsigned long rx_event;
441 int n;
442 440
443 /* if we don't have a valid timestamp in the registers, just update the 441 /* if we don't have a valid timestamp in the registers, just update the
444 * timeout counter and exit 442 * timeout counter and exit
@@ -450,18 +448,15 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
450 448
451 /* determine the most recent watchdog or rx_timestamp event */ 449 /* determine the most recent watchdog or rx_timestamp event */
452 rx_event = adapter->last_rx_ptp_check; 450 rx_event = adapter->last_rx_ptp_check;
453 for (n = 0; n < adapter->num_rx_queues; n++) { 451 if (time_after(adapter->last_rx_timestamp, rx_event))
454 rx_ring = adapter->rx_ring[n]; 452 rx_event = adapter->last_rx_timestamp;
455 if (time_after(rx_ring->last_rx_timestamp, rx_event))
456 rx_event = rx_ring->last_rx_timestamp;
457 }
458 453
459 /* only need to read the high RXSTMP register to clear the lock */ 454 /* only need to read the high RXSTMP register to clear the lock */
460 if (time_is_before_jiffies(rx_event + 5*HZ)) { 455 if (time_is_before_jiffies(rx_event + 5*HZ)) {
461 IXGBE_READ_REG(hw, IXGBE_RXSTMPH); 456 IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
462 adapter->last_rx_ptp_check = jiffies; 457 adapter->last_rx_ptp_check = jiffies;
463 458
464 e_warn(drv, "clearing RX Timestamp hang"); 459 e_warn(drv, "clearing RX Timestamp hang\n");
465 } 460 }
466} 461}
467 462
@@ -517,7 +512,7 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
517 dev_kfree_skb_any(adapter->ptp_tx_skb); 512 dev_kfree_skb_any(adapter->ptp_tx_skb);
518 adapter->ptp_tx_skb = NULL; 513 adapter->ptp_tx_skb = NULL;
519 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); 514 clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
520 e_warn(drv, "clearing Tx Timestamp hang"); 515 e_warn(drv, "clearing Tx Timestamp hang\n");
521 return; 516 return;
522 } 517 }
523 518
@@ -530,35 +525,22 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
530} 525}
531 526
532/** 527/**
533 * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp 528 * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
534 * @q_vector: structure containing interrupt and ring information 529 * @adapter: pointer to adapter struct
535 * @skb: particular skb to send timestamp with 530 * @skb: particular skb to send timestamp with
536 * 531 *
537 * if the timestamp is valid, we convert it into the timecounter ns 532 * if the timestamp is valid, we convert it into the timecounter ns
538 * value, then store that result into the shhwtstamps structure which 533 * value, then store that result into the shhwtstamps structure which
539 * is passed up the network stack 534 * is passed up the network stack
540 */ 535 */
541void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, 536void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb)
542 struct sk_buff *skb)
543{ 537{
544 struct ixgbe_adapter *adapter; 538 struct ixgbe_hw *hw = &adapter->hw;
545 struct ixgbe_hw *hw;
546 struct skb_shared_hwtstamps *shhwtstamps; 539 struct skb_shared_hwtstamps *shhwtstamps;
547 u64 regval = 0, ns; 540 u64 regval = 0, ns;
548 u32 tsyncrxctl; 541 u32 tsyncrxctl;
549 unsigned long flags; 542 unsigned long flags;
550 543
551 /* we cannot process timestamps on a ring without a q_vector */
552 if (!q_vector || !q_vector->adapter)
553 return;
554
555 adapter = q_vector->adapter;
556 hw = &adapter->hw;
557
558 /*
559 * Read the tsyncrxctl register afterwards in order to prevent taking an
560 * I/O hit on every packet.
561 */
562 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 544 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
563 if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) 545 if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
564 return; 546 return;
@@ -566,13 +548,17 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
566 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); 548 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
567 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; 549 regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
568 550
569
570 spin_lock_irqsave(&adapter->tmreg_lock, flags); 551 spin_lock_irqsave(&adapter->tmreg_lock, flags);
571 ns = timecounter_cyc2time(&adapter->tc, regval); 552 ns = timecounter_cyc2time(&adapter->tc, regval);
572 spin_unlock_irqrestore(&adapter->tmreg_lock, flags); 553 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
573 554
574 shhwtstamps = skb_hwtstamps(skb); 555 shhwtstamps = skb_hwtstamps(skb);
575 shhwtstamps->hwtstamp = ns_to_ktime(ns); 556 shhwtstamps->hwtstamp = ns_to_ktime(ns);
557
558 /* Update the last_rx_timestamp timer in order to enable watchdog check
559 * for error case of latched timestamp on a dropped packet.
560 */
561 adapter->last_rx_timestamp = jiffies;
576} 562}
577 563
578int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) 564int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
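
[Annotation] With last_rx_timestamp moved from the per-ring structure to the adapter, the Rx timestamp hang watchdog above collapses from a loop over all rings to a single comparison against the most recent latched timestamp. A small sketch of that jiffies-based staleness check; the state structure is illustrative, only the time helpers are the real kernel API:

#include <linux/jiffies.h>
#include <linux/types.h>

struct example_ptp_state {
	unsigned long last_rx_ptp_check;	/* last time the watchdog cleared the latch */
	unsigned long last_rx_timestamp;	/* last time an Rx timestamp was consumed */
};

/* True when the latched Rx timestamp looks stuck: neither a consumed
 * timestamp nor a watchdog clear for more than five seconds.
 */
static bool example_rx_timestamp_stale(const struct example_ptp_state *st)
{
	unsigned long rx_event = st->last_rx_ptp_check;

	if (time_after(st->last_rx_timestamp, rx_event))
		rx_event = st->last_rx_timestamp;

	return time_is_before_jiffies(rx_event + 5 * HZ);
}
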
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b0c6050479eb..b78378cea5e3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
1988 return idx; 1988 return idx;
1989} 1989}
1990 1990
1991static void 1991static int
1992jme_fill_tx_map(struct pci_dev *pdev, 1992jme_fill_tx_map(struct pci_dev *pdev,
1993 struct txdesc *txdesc, 1993 struct txdesc *txdesc,
1994 struct jme_buffer_info *txbi, 1994 struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
2005 len, 2005 len,
2006 PCI_DMA_TODEVICE); 2006 PCI_DMA_TODEVICE);
2007 2007
2008 if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
2009 return -EINVAL;
2010
2008 pci_dma_sync_single_for_device(pdev, 2011 pci_dma_sync_single_for_device(pdev,
2009 dmaaddr, 2012 dmaaddr,
2010 len, 2013 len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
2021 2024
2022 txbi->mapping = dmaaddr; 2025 txbi->mapping = dmaaddr;
2023 txbi->len = len; 2026 txbi->len = len;
2027 return 0;
2024} 2028}
2025 2029
2026static void 2030static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
2031{
2032 struct jme_ring *txring = &(jme->txring[0]);
2033 struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
2034 int mask = jme->tx_ring_mask;
2035 int j;
2036
2037 for (j = 0 ; j < count ; j++) {
2038 ctxbi = txbi + ((startidx + j + 2) & (mask));
2039 pci_unmap_page(jme->pdev,
2040 ctxbi->mapping,
2041 ctxbi->len,
2042 PCI_DMA_TODEVICE);
2043
2044 ctxbi->mapping = 0;
2045 ctxbi->len = 0;
2046 }
2047
2048}
2049
2050static int
2027jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) 2051jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2028{ 2052{
2029 struct jme_ring *txring = &(jme->txring[0]); 2053 struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2034 int mask = jme->tx_ring_mask; 2058 int mask = jme->tx_ring_mask;
2035 const struct skb_frag_struct *frag; 2059 const struct skb_frag_struct *frag;
2036 u32 len; 2060 u32 len;
2061 int ret = 0;
2037 2062
2038 for (i = 0 ; i < nr_frags ; ++i) { 2063 for (i = 0 ; i < nr_frags ; ++i) {
2039 frag = &skb_shinfo(skb)->frags[i]; 2064 frag = &skb_shinfo(skb)->frags[i];
2040 ctxdesc = txdesc + ((idx + i + 2) & (mask)); 2065 ctxdesc = txdesc + ((idx + i + 2) & (mask));
2041 ctxbi = txbi + ((idx + i + 2) & (mask)); 2066 ctxbi = txbi + ((idx + i + 2) & (mask));
2042 2067
2043 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, 2068 ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
2044 skb_frag_page(frag), 2069 skb_frag_page(frag),
2045 frag->page_offset, skb_frag_size(frag), hidma); 2070 frag->page_offset, skb_frag_size(frag), hidma);
2071 if (ret) {
2072 jme_drop_tx_map(jme, idx, i);
2073 goto out;
2074 }
2075
2046 } 2076 }
2047 2077
2048 len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len; 2078 len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
2049 ctxdesc = txdesc + ((idx + 1) & (mask)); 2079 ctxdesc = txdesc + ((idx + 1) & (mask));
2050 ctxbi = txbi + ((idx + 1) & (mask)); 2080 ctxbi = txbi + ((idx + 1) & (mask));
2051 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), 2081 ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
2052 offset_in_page(skb->data), len, hidma); 2082 offset_in_page(skb->data), len, hidma);
2083 if (ret)
2084 jme_drop_tx_map(jme, idx, i);
2085
2086out:
2087 return ret;
2053 2088
2054} 2089}
2055 2090
2091
2056static int 2092static int
2057jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) 2093jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
2058{ 2094{
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2131 struct txdesc *txdesc; 2167 struct txdesc *txdesc;
2132 struct jme_buffer_info *txbi; 2168 struct jme_buffer_info *txbi;
2133 u8 flags; 2169 u8 flags;
2170 int ret = 0;
2134 2171
2135 txdesc = (struct txdesc *)txring->desc + idx; 2172 txdesc = (struct txdesc *)txring->desc + idx;
2136 txbi = txring->bufinf + idx; 2173 txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
2155 if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) 2192 if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
2156 jme_tx_csum(jme, skb, &flags); 2193 jme_tx_csum(jme, skb, &flags);
2157 jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); 2194 jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
2158 jme_map_tx_skb(jme, skb, idx); 2195 ret = jme_map_tx_skb(jme, skb, idx);
2196 if (ret)
2197 return ret;
2198
2159 txdesc->desc1.flags = flags; 2199 txdesc->desc1.flags = flags;
2160 /* 2200 /*
2161 * Set tx buffer info after telling NIC to send 2201 * Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2228 return NETDEV_TX_BUSY; 2268 return NETDEV_TX_BUSY;
2229 } 2269 }
2230 2270
2231 jme_fill_tx_desc(jme, skb, idx); 2271 if (jme_fill_tx_desc(jme, skb, idx))
2272 return NETDEV_TX_OK;
2232 2273
2233 jwrite32(jme, JME_TXCS, jme->reg_txcs | 2274 jwrite32(jme, JME_TXCS, jme->reg_txcs |
2234 TXCS_SELECT_QUEUE0 | 2275 TXCS_SELECT_QUEUE0 |
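
[Annotation] The jme changes make the Tx mapping path check pci_dma_mapping_error() and unwind any fragments already mapped before the failure, instead of handing a bad DMA address to the hardware. The same check-and-unwind shape, sketched generically under the legacy pci_* DMA API this driver uses; the fragment structure and function names are illustrative:

#include <linux/pci.h>
#include <linux/errno.h>

struct example_frag {
	struct page *page;
	unsigned int offset;
	unsigned int len;
	dma_addr_t mapping;
};

/* Map an array of fragments for device access; on failure unmap what
 * was already mapped so no DMA mappings leak back to the caller.
 */
static int example_map_frags(struct pci_dev *pdev,
			     struct example_frag *frags, int nr)
{
	int i, j;

	for (i = 0; i < nr; i++) {
		frags[i].mapping = pci_map_page(pdev, frags[i].page,
						frags[i].offset, frags[i].len,
						PCI_DMA_TODEVICE);
		if (unlikely(pci_dma_mapping_error(pdev, frags[i].mapping)))
			goto unwind;
	}
	return 0;

unwind:
	for (j = 0; j < i; j++)
		pci_unmap_page(pdev, frags[j].mapping, frags[j].len,
			       PCI_DMA_TODEVICE);
	return -EINVAL;
}
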
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index b161a525fc5b..9d5ced263a5e 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -232,7 +232,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
232 clk_prepare_enable(dev->clk); 232 clk_prepare_enable(dev->clk);
233 233
234 dev->err_interrupt = platform_get_irq(pdev, 0); 234 dev->err_interrupt = platform_get_irq(pdev, 0);
235 if (dev->err_interrupt != -ENXIO) { 235 if (dev->err_interrupt > 0) {
236 ret = devm_request_irq(&pdev->dev, dev->err_interrupt, 236 ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
237 orion_mdio_err_irq, 237 orion_mdio_err_irq,
238 IRQF_SHARED, pdev->name, dev); 238 IRQF_SHARED, pdev->name, dev);
@@ -241,6 +241,9 @@ static int orion_mdio_probe(struct platform_device *pdev)
241 241
242 writel(MVMDIO_ERR_INT_SMI_DONE, 242 writel(MVMDIO_ERR_INT_SMI_DONE,
243 dev->regs + MVMDIO_ERR_INT_MASK); 243 dev->regs + MVMDIO_ERR_INT_MASK);
244
245 } else if (dev->err_interrupt == -EPROBE_DEFER) {
246 return -EPROBE_DEFER;
244 } 247 }
245 248
246 mutex_init(&dev->lock); 249 mutex_init(&dev->lock);
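
[Annotation] The mvmdio fix stops comparing platform_get_irq() against -ENXIO alone: a positive return is a usable IRQ, -EPROBE_DEFER must be propagated so the probe is retried once the interrupt controller is ready, and any other negative value means the device simply runs without the error interrupt. A condensed sketch of that decision; the handler and private pointer are illustrative:

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

static irqreturn_t example_err_irq(int irq, void *dev_id)
{
	/* acknowledge/handle the error interrupt here */
	return IRQ_HANDLED;
}

/* Returns 0 on success (with or without an IRQ), or -EPROBE_DEFER when
 * the interrupt provider has not probed yet.
 */
static int example_probe_irq(struct platform_device *pdev, void *priv)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq > 0)
		return devm_request_irq(&pdev->dev, irq, example_err_irq,
					IRQF_SHARED, pdev->name, priv);

	if (irq == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* no interrupt wired up: fall back to polling */
	return 0;
}
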
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d04b1c3c9b85..14786c8bf99e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -91,7 +91,7 @@
91#define MVNETA_RX_MIN_FRAME_SIZE 0x247c 91#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
92#define MVNETA_SERDES_CFG 0x24A0 92#define MVNETA_SERDES_CFG 0x24A0
93#define MVNETA_SGMII_SERDES_PROTO 0x0cc7 93#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
94#define MVNETA_RGMII_SERDES_PROTO 0x0667 94#define MVNETA_QSGMII_SERDES_PROTO 0x0667
95#define MVNETA_TYPE_PRIO 0x24bc 95#define MVNETA_TYPE_PRIO 0x24bc
96#define MVNETA_FORCE_UNI BIT(21) 96#define MVNETA_FORCE_UNI BIT(21)
97#define MVNETA_TXQ_CMD_1 0x24e4 97#define MVNETA_TXQ_CMD_1 0x24e4
@@ -2721,29 +2721,44 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
2721} 2721}
2722 2722
2723/* Power up the port */ 2723/* Power up the port */
2724static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) 2724static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2725{ 2725{
2726 u32 val; 2726 u32 ctrl;
2727 2727
2728 /* MAC Cause register should be cleared */ 2728 /* MAC Cause register should be cleared */
2729 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); 2729 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2730 2730
2731 if (phy_mode == PHY_INTERFACE_MODE_SGMII) 2731 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2732 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
2733 else
2734 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
2735 2732
2736 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); 2733 /* Even though it might look weird, when we're configured in
2737 2734 * SGMII or QSGMII mode, the RGMII bit needs to be set.
2738 val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; 2735 */
2736 switch(phy_mode) {
2737 case PHY_INTERFACE_MODE_QSGMII:
2738 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
2739 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
2740 break;
2741 case PHY_INTERFACE_MODE_SGMII:
2742 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
2743 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
2744 break;
2745 case PHY_INTERFACE_MODE_RGMII:
2746 case PHY_INTERFACE_MODE_RGMII_ID:
2747 ctrl |= MVNETA_GMAC2_PORT_RGMII;
2748 break;
2749 default:
2750 return -EINVAL;
2751 }
2739 2752
2740 /* Cancel Port Reset */ 2753 /* Cancel Port Reset */
2741 val &= ~MVNETA_GMAC2_PORT_RESET; 2754 ctrl &= ~MVNETA_GMAC2_PORT_RESET;
2742 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); 2755 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
2743 2756
2744 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & 2757 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
2745 MVNETA_GMAC2_PORT_RESET) != 0) 2758 MVNETA_GMAC2_PORT_RESET) != 0)
2746 continue; 2759 continue;
2760
2761 return 0;
2747} 2762}
2748 2763
2749/* Device initialization routine */ 2764/* Device initialization routine */
@@ -2854,7 +2869,12 @@ static int mvneta_probe(struct platform_device *pdev)
2854 dev_err(&pdev->dev, "can't init eth hal\n"); 2869 dev_err(&pdev->dev, "can't init eth hal\n");
2855 goto err_free_stats; 2870 goto err_free_stats;
2856 } 2871 }
2857 mvneta_port_power_up(pp, phy_mode); 2872
2873 err = mvneta_port_power_up(pp, phy_mode);
2874 if (err < 0) {
2875 dev_err(&pdev->dev, "can't power up port\n");
2876 goto err_deinit;
2877 }
2858 2878
2859 dram_target_info = mv_mbus_dram_info(); 2879 dram_target_info = mv_mbus_dram_info();
2860 if (dram_target_info) 2880 if (dram_target_info)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78099eab7673..92d3249f63f1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = {
1253 }, 1253 },
1254 { 1254 {
1255 .opcode = MLX4_CMD_UPDATE_QP, 1255 .opcode = MLX4_CMD_UPDATE_QP,
1256 .has_inbox = false, 1256 .has_inbox = true,
1257 .has_outbox = false, 1257 .has_outbox = false,
1258 .out_is_imm = false, 1258 .out_is_imm = false,
1259 .encode_slave_id = false, 1259 .encode_slave_id = false,
1260 .verify = NULL, 1260 .verify = NULL,
1261 .wrapper = mlx4_CMD_EPERM_wrapper 1261 .wrapper = mlx4_UPDATE_QP_wrapper
1262 }, 1262 },
1263 { 1263 {
1264 .opcode = MLX4_CMD_GET_OP_REQ, 1264 .opcode = MLX4_CMD_GET_OP_REQ,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 70e95324a97d..c2cd8d31bcad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,7 +66,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
66 66
67 cq->ring = ring; 67 cq->ring = ring;
68 cq->is_tx = mode; 68 cq->is_tx = mode;
69 spin_lock_init(&cq->lock);
70 69
71 /* Allocate HW buffers on provided NUMA node. 70 /* Allocate HW buffers on provided NUMA node.
72 * dev->numa_node is used in mtt range allocation flow. 71 * dev->numa_node is used in mtt range allocation flow.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f085c2df5e69..7e4b1720c3d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1304,15 +1304,11 @@ static void mlx4_en_netpoll(struct net_device *dev)
1304{ 1304{
1305 struct mlx4_en_priv *priv = netdev_priv(dev); 1305 struct mlx4_en_priv *priv = netdev_priv(dev);
1306 struct mlx4_en_cq *cq; 1306 struct mlx4_en_cq *cq;
1307 unsigned long flags;
1308 int i; 1307 int i;
1309 1308
1310 for (i = 0; i < priv->rx_ring_num; i++) { 1309 for (i = 0; i < priv->rx_ring_num; i++) {
1311 cq = priv->rx_cq[i]; 1310 cq = priv->rx_cq[i];
1312 spin_lock_irqsave(&cq->lock, flags); 1311 napi_schedule(&cq->napi);
1313 napi_synchronize(&cq->napi);
1314 mlx4_en_process_rx_cq(dev, cq, 0);
1315 spin_unlock_irqrestore(&cq->lock, flags);
1316 } 1312 }
1317} 1313}
1318#endif 1314#endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f0ae95f66ceb..7cf9dadcb471 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -754,10 +754,10 @@ static void mlx4_request_modules(struct mlx4_dev *dev)
754 has_eth_port = true; 754 has_eth_port = true;
755 } 755 }
756 756
757 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
758 request_module_nowait(IB_DRV_NAME);
759 if (has_eth_port) 757 if (has_eth_port)
760 request_module_nowait(EN_DRV_NAME); 758 request_module_nowait(EN_DRV_NAME);
759 if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
760 request_module_nowait(IB_DRV_NAME);
761} 761}
762 762
763/* 763/*
@@ -2301,13 +2301,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2301 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 2301 /* Allow large DMA segments, up to the firmware limit of 1 GB */
2302 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 2302 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
2303 2303
2304 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 2304 dev = pci_get_drvdata(pdev);
2305 if (!priv) { 2305 priv = mlx4_priv(dev);
2306 err = -ENOMEM;
2307 goto err_release_regions;
2308 }
2309
2310 dev = &priv->dev;
2311 dev->pdev = pdev; 2306 dev->pdev = pdev;
2312 INIT_LIST_HEAD(&priv->ctx_list); 2307 INIT_LIST_HEAD(&priv->ctx_list);
2313 spin_lock_init(&priv->ctx_lock); 2308 spin_lock_init(&priv->ctx_lock);
@@ -2374,10 +2369,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2374 } else { 2369 } else {
2375 atomic_inc(&pf_loading); 2370 atomic_inc(&pf_loading);
2376 err = pci_enable_sriov(pdev, total_vfs); 2371 err = pci_enable_sriov(pdev, total_vfs);
2377 atomic_dec(&pf_loading);
2378 if (err) { 2372 if (err) {
2379 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", 2373 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2380 err); 2374 err);
2375 atomic_dec(&pf_loading);
2381 err = 0; 2376 err = 0;
2382 } else { 2377 } else {
2383 mlx4_warn(dev, "Running in master mode\n"); 2378 mlx4_warn(dev, "Running in master mode\n");
@@ -2445,7 +2440,8 @@ slave_start:
2445 * No return code for this call, just warn the user in case of PCI 2440 * No return code for this call, just warn the user in case of PCI
2446 * express device capabilities are under-satisfied by the bus. 2441 * express device capabilities are under-satisfied by the bus.
2447 */ 2442 */
2448 mlx4_check_pcie_caps(dev); 2443 if (!mlx4_is_slave(dev))
2444 mlx4_check_pcie_caps(dev);
2449 2445
2450 /* In master functions, the communication channel must be initialized 2446 /* In master functions, the communication channel must be initialized
2451 * after obtaining its address from fw */ 2447 * after obtaining its address from fw */
@@ -2535,8 +2531,10 @@ slave_start:
2535 mlx4_sense_init(dev); 2531 mlx4_sense_init(dev);
2536 mlx4_start_sense(dev); 2532 mlx4_start_sense(dev);
2537 2533
2538 priv->pci_dev_data = pci_dev_data; 2534 priv->removed = 0;
2539 pci_set_drvdata(pdev, dev); 2535
2536 if (mlx4_is_master(dev) && dev->num_vfs)
2537 atomic_dec(&pf_loading);
2540 2538
2541 return 0; 2539 return 0;
2542 2540
@@ -2588,6 +2586,9 @@ err_rel_own:
2588 if (!mlx4_is_slave(dev)) 2586 if (!mlx4_is_slave(dev))
2589 mlx4_free_ownership(dev); 2587 mlx4_free_ownership(dev);
2590 2588
2589 if (mlx4_is_master(dev) && dev->num_vfs)
2590 atomic_dec(&pf_loading);
2591
2591 kfree(priv->dev.dev_vfs); 2592 kfree(priv->dev.dev_vfs);
2592 2593
2593err_free_dev: 2594err_free_dev:
@@ -2604,85 +2605,110 @@ err_disable_pdev:
2604 2605
2605static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 2606static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
2606{ 2607{
2608 struct mlx4_priv *priv;
2609 struct mlx4_dev *dev;
2610
2607 printk_once(KERN_INFO "%s", mlx4_version); 2611 printk_once(KERN_INFO "%s", mlx4_version);
2608 2612
2613 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
2614 if (!priv)
2615 return -ENOMEM;
2616
2617 dev = &priv->dev;
2618 pci_set_drvdata(pdev, dev);
2619 priv->pci_dev_data = id->driver_data;
2620
2609 return __mlx4_init_one(pdev, id->driver_data); 2621 return __mlx4_init_one(pdev, id->driver_data);
2610} 2622}
2611 2623
2612static void mlx4_remove_one(struct pci_dev *pdev) 2624static void __mlx4_remove_one(struct pci_dev *pdev)
2613{ 2625{
2614 struct mlx4_dev *dev = pci_get_drvdata(pdev); 2626 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2615 struct mlx4_priv *priv = mlx4_priv(dev); 2627 struct mlx4_priv *priv = mlx4_priv(dev);
2628 int pci_dev_data;
2616 int p; 2629 int p;
2617 2630
2618 if (dev) { 2631 if (priv->removed)
2619 /* in SRIOV it is not allowed to unload the pf's 2632 return;
2620 * driver while there are alive vf's */
2621 if (mlx4_is_master(dev)) {
2622 if (mlx4_how_many_lives_vf(dev))
2623 printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
2624 }
2625 mlx4_stop_sense(dev);
2626 mlx4_unregister_device(dev);
2627 2633
2628 for (p = 1; p <= dev->caps.num_ports; p++) { 2634 pci_dev_data = priv->pci_dev_data;
2629 mlx4_cleanup_port_info(&priv->port[p]);
2630 mlx4_CLOSE_PORT(dev, p);
2631 }
2632 2635
2633 if (mlx4_is_master(dev)) 2636 /* in SRIOV it is not allowed to unload the pf's
2634 mlx4_free_resource_tracker(dev, 2637 * driver while there are alive vf's */
2635 RES_TR_FREE_SLAVES_ONLY); 2638 if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
2636 2639 printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
2637 mlx4_cleanup_counters_table(dev); 2640 mlx4_stop_sense(dev);
2638 mlx4_cleanup_qp_table(dev); 2641 mlx4_unregister_device(dev);
2639 mlx4_cleanup_srq_table(dev);
2640 mlx4_cleanup_cq_table(dev);
2641 mlx4_cmd_use_polling(dev);
2642 mlx4_cleanup_eq_table(dev);
2643 mlx4_cleanup_mcg_table(dev);
2644 mlx4_cleanup_mr_table(dev);
2645 mlx4_cleanup_xrcd_table(dev);
2646 mlx4_cleanup_pd_table(dev);
2647 2642
2648 if (mlx4_is_master(dev)) 2643 for (p = 1; p <= dev->caps.num_ports; p++) {
2649 mlx4_free_resource_tracker(dev, 2644 mlx4_cleanup_port_info(&priv->port[p]);
2650 RES_TR_FREE_STRUCTS_ONLY); 2645 mlx4_CLOSE_PORT(dev, p);
2651 2646 }
2652 iounmap(priv->kar); 2647
2653 mlx4_uar_free(dev, &priv->driver_uar); 2648 if (mlx4_is_master(dev))
2654 mlx4_cleanup_uar_table(dev); 2649 mlx4_free_resource_tracker(dev,
2655 if (!mlx4_is_slave(dev)) 2650 RES_TR_FREE_SLAVES_ONLY);
2656 mlx4_clear_steering(dev); 2651
2657 mlx4_free_eq_table(dev); 2652 mlx4_cleanup_counters_table(dev);
2658 if (mlx4_is_master(dev)) 2653 mlx4_cleanup_qp_table(dev);
2659 mlx4_multi_func_cleanup(dev); 2654 mlx4_cleanup_srq_table(dev);
2660 mlx4_close_hca(dev); 2655 mlx4_cleanup_cq_table(dev);
2661 if (mlx4_is_slave(dev)) 2656 mlx4_cmd_use_polling(dev);
2662 mlx4_multi_func_cleanup(dev); 2657 mlx4_cleanup_eq_table(dev);
2663 mlx4_cmd_cleanup(dev); 2658 mlx4_cleanup_mcg_table(dev);
2664 2659 mlx4_cleanup_mr_table(dev);
2665 if (dev->flags & MLX4_FLAG_MSI_X) 2660 mlx4_cleanup_xrcd_table(dev);
2666 pci_disable_msix(pdev); 2661 mlx4_cleanup_pd_table(dev);
2667 if (dev->flags & MLX4_FLAG_SRIOV) {
2668 mlx4_warn(dev, "Disabling SR-IOV\n");
2669 pci_disable_sriov(pdev);
2670 }
2671 2662
2672 if (!mlx4_is_slave(dev)) 2663 if (mlx4_is_master(dev))
2673 mlx4_free_ownership(dev); 2664 mlx4_free_resource_tracker(dev,
2665 RES_TR_FREE_STRUCTS_ONLY);
2674 2666
2675 kfree(dev->caps.qp0_tunnel); 2667 iounmap(priv->kar);
2676 kfree(dev->caps.qp0_proxy); 2668 mlx4_uar_free(dev, &priv->driver_uar);
2677 kfree(dev->caps.qp1_tunnel); 2669 mlx4_cleanup_uar_table(dev);
2678 kfree(dev->caps.qp1_proxy); 2670 if (!mlx4_is_slave(dev))
2679 kfree(dev->dev_vfs); 2671 mlx4_clear_steering(dev);
2672 mlx4_free_eq_table(dev);
2673 if (mlx4_is_master(dev))
2674 mlx4_multi_func_cleanup(dev);
2675 mlx4_close_hca(dev);
2676 if (mlx4_is_slave(dev))
2677 mlx4_multi_func_cleanup(dev);
2678 mlx4_cmd_cleanup(dev);
2680 2679
2681 kfree(priv); 2680 if (dev->flags & MLX4_FLAG_MSI_X)
2682 pci_release_regions(pdev); 2681 pci_disable_msix(pdev);
2683 pci_disable_device(pdev); 2682 if (dev->flags & MLX4_FLAG_SRIOV) {
2684 pci_set_drvdata(pdev, NULL); 2683 mlx4_warn(dev, "Disabling SR-IOV\n");
2684 pci_disable_sriov(pdev);
2685 dev->num_vfs = 0;
2685 } 2686 }
2687
2688 if (!mlx4_is_slave(dev))
2689 mlx4_free_ownership(dev);
2690
2691 kfree(dev->caps.qp0_tunnel);
2692 kfree(dev->caps.qp0_proxy);
2693 kfree(dev->caps.qp1_tunnel);
2694 kfree(dev->caps.qp1_proxy);
2695 kfree(dev->dev_vfs);
2696
2697 pci_release_regions(pdev);
2698 pci_disable_device(pdev);
2699 memset(priv, 0, sizeof(*priv));
2700 priv->pci_dev_data = pci_dev_data;
2701 priv->removed = 1;
2702}
2703
2704static void mlx4_remove_one(struct pci_dev *pdev)
2705{
2706 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2707 struct mlx4_priv *priv = mlx4_priv(dev);
2708
2709 __mlx4_remove_one(pdev);
2710 kfree(priv);
2711 pci_set_drvdata(pdev, NULL);
2686} 2712}
2687 2713
2688int mlx4_restart_one(struct pci_dev *pdev) 2714int mlx4_restart_one(struct pci_dev *pdev)
@@ -2692,7 +2718,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
2692 int pci_dev_data; 2718 int pci_dev_data;
2693 2719
2694 pci_dev_data = priv->pci_dev_data; 2720 pci_dev_data = priv->pci_dev_data;
2695 mlx4_remove_one(pdev); 2721 __mlx4_remove_one(pdev);
2696 return __mlx4_init_one(pdev, pci_dev_data); 2722 return __mlx4_init_one(pdev, pci_dev_data);
2697} 2723}
2698 2724
@@ -2747,7 +2773,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
2747static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, 2773static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
2748 pci_channel_state_t state) 2774 pci_channel_state_t state)
2749{ 2775{
2750 mlx4_remove_one(pdev); 2776 __mlx4_remove_one(pdev);
2751 2777
2752 return state == pci_channel_io_perm_failure ? 2778 return state == pci_channel_io_perm_failure ?
2753 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; 2779 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -2755,11 +2781,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
2755 2781
2756static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 2782static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
2757{ 2783{
2758 const struct pci_device_id *id; 2784 struct mlx4_dev *dev = pci_get_drvdata(pdev);
2759 int ret; 2785 struct mlx4_priv *priv = mlx4_priv(dev);
2786 int ret;
2760 2787
2761 id = pci_match_id(mlx4_pci_table, pdev); 2788 ret = __mlx4_init_one(pdev, priv->pci_dev_data);
2762 ret = __mlx4_init_one(pdev, id->driver_data);
2763 2789
2764 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 2790 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
2765} 2791}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cf8be41abb36..212cea440f90 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -800,6 +800,7 @@ struct mlx4_priv {
800 spinlock_t ctx_lock; 800 spinlock_t ctx_lock;
801 801
802 int pci_dev_data; 802 int pci_dev_data;
803 int removed;
803 804
804 struct list_head pgdir_list; 805 struct list_head pgdir_list;
805 struct mutex pgdir_mutex; 806 struct mutex pgdir_mutex;
@@ -1194,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
1194 struct mlx4_cmd_mailbox *outbox, 1195 struct mlx4_cmd_mailbox *outbox,
1195 struct mlx4_cmd_info *cmd); 1196 struct mlx4_cmd_info *cmd);
1196 1197
1198int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
1199 struct mlx4_vhcr *vhcr,
1200 struct mlx4_cmd_mailbox *inbox,
1201 struct mlx4_cmd_mailbox *outbox,
1202 struct mlx4_cmd_info *cmd);
1203
1197int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, 1204int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
1198 struct mlx4_vhcr *vhcr, 1205 struct mlx4_vhcr *vhcr,
1199 struct mlx4_cmd_mailbox *inbox, 1206 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 7a733c287744..04d9b6fe3e80 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -319,7 +319,6 @@ struct mlx4_en_cq {
319 struct mlx4_cq mcq; 319 struct mlx4_cq mcq;
320 struct mlx4_hwq_resources wqres; 320 struct mlx4_hwq_resources wqres;
321 int ring; 321 int ring;
322 spinlock_t lock;
323 struct net_device *dev; 322 struct net_device *dev;
324 struct napi_struct napi; 323 struct napi_struct napi;
325 int size; 324 int size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index cfcad26ed40f..b5b3549b0c8d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1106,6 +1106,9 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1106 } 1106 }
1107 1107
1108 if (found_ix >= 0) { 1108 if (found_ix >= 0) {
1109 /* Calculate a slave_gid which is the slave number in the gid
1110 * table and not a globally unique slave number.
1111 */
1109 if (found_ix < MLX4_ROCE_PF_GIDS) 1112 if (found_ix < MLX4_ROCE_PF_GIDS)
1110 slave_gid = 0; 1113 slave_gid = 0;
1111 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) * 1114 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
@@ -1118,41 +1121,43 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1118 ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) / 1121 ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
1119 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1; 1122 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
1120 1123
1124 /* Calculate the globally unique slave id */
1121 if (slave_gid) { 1125 if (slave_gid) {
1122 struct mlx4_active_ports exclusive_ports; 1126 struct mlx4_active_ports exclusive_ports;
1123 struct mlx4_active_ports actv_ports; 1127 struct mlx4_active_ports actv_ports;
1124 struct mlx4_slaves_pport slaves_pport_actv; 1128 struct mlx4_slaves_pport slaves_pport_actv;
1125 unsigned max_port_p_one; 1129 unsigned max_port_p_one;
1126 int num_slaves_before = 1; 1130 int num_vfs_before = 0;
1131 int candidate_slave_gid;
1127 1132
1133 /* Calculate how many VFs are on the previous port, if exists */
1128 for (i = 1; i < port; i++) { 1134 for (i = 1; i < port; i++) {
1129 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); 1135 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1130 set_bit(i, exclusive_ports.ports); 1136 set_bit(i - 1, exclusive_ports.ports);
1131 slaves_pport_actv = 1137 slaves_pport_actv =
1132 mlx4_phys_to_slaves_pport_actv( 1138 mlx4_phys_to_slaves_pport_actv(
1133 dev, &exclusive_ports); 1139 dev, &exclusive_ports);
1134 num_slaves_before += bitmap_weight( 1140 num_vfs_before += bitmap_weight(
1135 slaves_pport_actv.slaves, 1141 slaves_pport_actv.slaves,
1136 dev->num_vfs + 1); 1142 dev->num_vfs + 1);
1137 } 1143 }
1138 1144
1139 if (slave_gid < num_slaves_before) { 1145 /* candidate_slave_gid isn't necessarily the correct slave, but
1140 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); 1146 * it has the same number of ports and is assigned to the same
1141 set_bit(port - 1, exclusive_ports.ports); 1147 * ports as the real slave we're looking for. On dual port VF,
1142 slaves_pport_actv = 1148 * slave_gid = [single port VFs on port <port>] +
1143 mlx4_phys_to_slaves_pport_actv( 1149 * [offset of the current slave from the first dual port VF] +
1144 dev, &exclusive_ports); 1150 * 1 (for the PF).
1145 slave_gid += bitmap_weight( 1151 */
1146 slaves_pport_actv.slaves, 1152 candidate_slave_gid = slave_gid + num_vfs_before;
1147 dev->num_vfs + 1) - 1153
1148 num_slaves_before; 1154 actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
1149 }
1150 actv_ports = mlx4_get_active_ports(dev, slave_gid);
1151 max_port_p_one = find_first_bit( 1155 max_port_p_one = find_first_bit(
1152 actv_ports.ports, dev->caps.num_ports) + 1156 actv_ports.ports, dev->caps.num_ports) +
1153 bitmap_weight(actv_ports.ports, 1157 bitmap_weight(actv_ports.ports,
1154 dev->caps.num_ports) + 1; 1158 dev->caps.num_ports) + 1;
1155 1159
1160 /* Calculate the real slave number */
1156 for (i = 1; i < max_port_p_one; i++) { 1161 for (i = 1; i < max_port_p_one; i++) {
1157 if (i == port) 1162 if (i == port)
1158 continue; 1163 continue;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 61d64ebffd56..fbd32af89c7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -389,6 +389,41 @@ err_icm:
389 389
390EXPORT_SYMBOL_GPL(mlx4_qp_alloc); 390EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
391 391
392#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
393int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
394 enum mlx4_update_qp_attr attr,
395 struct mlx4_update_qp_params *params)
396{
397 struct mlx4_cmd_mailbox *mailbox;
398 struct mlx4_update_qp_context *cmd;
399 u64 pri_addr_path_mask = 0;
400 int err = 0;
401
402 mailbox = mlx4_alloc_cmd_mailbox(dev);
403 if (IS_ERR(mailbox))
404 return PTR_ERR(mailbox);
405
406 cmd = (struct mlx4_update_qp_context *)mailbox->buf;
407
408 if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
409 return -EINVAL;
410
411 if (attr & MLX4_UPDATE_QP_SMAC) {
412 pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
413 cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
414 }
415
416 cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
417
418 err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
419 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
420 MLX4_CMD_NATIVE);
421
422 mlx4_free_cmd_mailbox(dev, mailbox);
423 return err;
424}
425EXPORT_SYMBOL_GPL(mlx4_update_qp);
426
392void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) 427void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
393{ 428{
394 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 429 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
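
[Annotation] The mlx4_update_qp() helper added above wraps the UPDATE_QP firmware command behind an attribute mask plus a parameter block. A hypothetical caller sketch, using only the MLX4_UPDATE_QP_SMAC attribute and the smac_index field visible in this hunk; the header locations and the surrounding context (dev, qp, the new index) are assumptions of this example:

#include <linux/mlx4/device.h>	/* assumed location of struct mlx4_dev */
#include <linux/mlx4/qp.h>	/* assumed location of mlx4_update_qp() */

/* Re-point an existing QP at a different source-MAC table index. */
static int example_set_qp_smac(struct mlx4_dev *dev, struct mlx4_qp *qp,
			       u8 new_smac_index)
{
	struct mlx4_update_qp_params params = {
		.smac_index = new_smac_index,
	};

	return mlx4_update_qp(dev, qp, MLX4_UPDATE_QP_SMAC, &params);
}
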
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 3b5f53ef29b2..8f1254a79832 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3733,6 +3733,25 @@ static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3733 } 3733 }
3734} 3734}
3735 3735
3736static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3737 u8 *gid, enum mlx4_protocol prot)
3738{
3739 int real_port;
3740
3741 if (prot != MLX4_PROT_ETH)
3742 return 0;
3743
3744 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3745 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3746 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3747 if (real_port < 0)
3748 return -EINVAL;
3749 gid[5] = real_port;
3750 }
3751
3752 return 0;
3753}
3754
3736int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 3755int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3737 struct mlx4_vhcr *vhcr, 3756 struct mlx4_vhcr *vhcr,
3738 struct mlx4_cmd_mailbox *inbox, 3757 struct mlx4_cmd_mailbox *inbox,
@@ -3768,6 +3787,10 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3768 if (err) 3787 if (err)
3769 goto ex_detach; 3788 goto ex_detach;
3770 } else { 3789 } else {
3790 err = mlx4_adjust_port(dev, slave, gid, prot);
3791 if (err)
3792 goto ex_put;
3793
3771 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id); 3794 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3772 if (err) 3795 if (err)
3773 goto ex_put; 3796 goto ex_put;
@@ -3872,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
3872 3895
3873} 3896}
3874 3897
3898#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
3899int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
3900 struct mlx4_vhcr *vhcr,
3901 struct mlx4_cmd_mailbox *inbox,
3902 struct mlx4_cmd_mailbox *outbox,
3903 struct mlx4_cmd_info *cmd_info)
3904{
3905 int err;
3906 u32 qpn = vhcr->in_modifier & 0xffffff;
3907 struct res_qp *rqp;
3908 u64 mac;
3909 unsigned port;
3910 u64 pri_addr_path_mask;
3911 struct mlx4_update_qp_context *cmd;
3912 int smac_index;
3913
3914 cmd = (struct mlx4_update_qp_context *)inbox->buf;
3915
3916 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
3917 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
3918 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
3919 return -EPERM;
3920
3921 /* Just change the smac for the QP */
3922 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3923 if (err) {
3924 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
3925 return err;
3926 }
3927
3928 port = (rqp->sched_queue >> 6 & 1) + 1;
3929 smac_index = cmd->qp_context.pri_path.grh_mylmc;
3930 err = mac_find_smac_ix_in_slave(dev, slave, port,
3931 smac_index, &mac);
3932 if (err) {
3933 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
3934 qpn, smac_index);
3935 goto err_mac;
3936 }
3937
3938 err = mlx4_cmd(dev, inbox->dma,
3939 vhcr->in_modifier, 0,
3940 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
3941 MLX4_CMD_NATIVE);
3942 if (err) {
3943 mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
3944 goto err_mac;
3945 }
3946
3947err_mac:
3948 put_res(dev, slave, qpn, RES_QP);
3949 return err;
3950}
3951
3875int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, 3952int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3876 struct mlx4_vhcr *vhcr, 3953 struct mlx4_vhcr *vhcr,
3877 struct mlx4_cmd_mailbox *inbox, 3954 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7b52a88923ef..f785d01c7d12 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1719 tx_ring->producer; 1719 tx_ring->producer;
1720} 1720}
1721 1721
1722static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
1723 struct net_device *netdev)
1724{
1725 int err;
1726
1727 netdev->num_tx_queues = adapter->drv_tx_rings;
1728 netdev->real_num_tx_queues = adapter->drv_tx_rings;
1729
1730 err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
1731 if (err)
1732 netdev_err(netdev, "failed to set %d Tx queues\n",
1733 adapter->drv_tx_rings);
1734
1735 return err;
1736}
1737
1738struct qlcnic_nic_template { 1722struct qlcnic_nic_template {
1739 int (*config_bridged_mode) (struct qlcnic_adapter *, u32); 1723 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
1740 int (*config_led) (struct qlcnic_adapter *, u32, u32); 1724 int (*config_led) (struct qlcnic_adapter *, u32, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b48737dcd3c5..ba20c721ee97 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2139,8 +2139,6 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
2139 ahw->max_mac_filters = nic_info.max_mac_filters; 2139 ahw->max_mac_filters = nic_info.max_mac_filters;
2140 ahw->max_mtu = nic_info.max_mtu; 2140 ahw->max_mtu = nic_info.max_mtu;
2141 2141
2142 adapter->max_tx_rings = ahw->max_tx_ques;
2143 adapter->max_sds_rings = ahw->max_rx_ques;
2144 /* eSwitch capability indicates vNIC mode. 2142 /* eSwitch capability indicates vNIC mode.
2145 * vNIC and SRIOV are mutually exclusive operational modes. 2143 * vNIC and SRIOV are mutually exclusive operational modes.
2146 * If SR-IOV capability is detected, SR-IOV physical function 2144 * If SR-IOV capability is detected, SR-IOV physical function
@@ -2161,6 +2159,7 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
2161int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) 2159int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2162{ 2160{
2163 struct qlcnic_hardware_context *ahw = adapter->ahw; 2161 struct qlcnic_hardware_context *ahw = adapter->ahw;
2162 u16 max_sds_rings, max_tx_rings;
2164 int ret; 2163 int ret;
2165 2164
2166 ret = qlcnic_83xx_get_nic_configuration(adapter); 2165 ret = qlcnic_83xx_get_nic_configuration(adapter);
@@ -2173,18 +2172,21 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
2173 if (qlcnic_83xx_config_vnic_opmode(adapter)) 2172 if (qlcnic_83xx_config_vnic_opmode(adapter))
2174 return -EIO; 2173 return -EIO;
2175 2174
2176 adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS; 2175 max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
2177 adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS; 2176 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
2178 } else if (ret == QLC_83XX_DEFAULT_OPMODE) { 2177 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
2179 ahw->nic_mode = QLCNIC_DEFAULT_MODE; 2178 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
2180 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; 2179 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
2181 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; 2180 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
2182 adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS; 2181 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
2183 adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS; 2182 max_tx_rings = QLCNIC_MAX_TX_RINGS;
2184 } else { 2183 } else {
2185 return -EIO; 2184 return -EIO;
2186 } 2185 }
2187 2186
2187 adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings);
2188 adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings);
2189
2188 return 0; 2190 return 0;
2189} 2191}
2190 2192
@@ -2348,15 +2350,16 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2348 goto disable_intr; 2350 goto disable_intr;
2349 } 2351 }
2350 2352
2353 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
2354
2351 err = qlcnic_83xx_setup_mbx_intr(adapter); 2355 err = qlcnic_83xx_setup_mbx_intr(adapter);
2352 if (err) 2356 if (err)
2353 goto disable_mbx_intr; 2357 goto disable_mbx_intr;
2354 2358
2355 qlcnic_83xx_clear_function_resources(adapter); 2359 qlcnic_83xx_clear_function_resources(adapter);
2356 2360 qlcnic_dcb_enable(adapter->dcb);
2357 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
2358
2359 qlcnic_83xx_initialize_nic(adapter, 1); 2361 qlcnic_83xx_initialize_nic(adapter, 1);
2362 qlcnic_dcb_get_info(adapter->dcb);
2360 2363
2361 /* Configure default, SR-IOV or Virtual NIC mode of operation */ 2364 /* Configure default, SR-IOV or Virtual NIC mode of operation */
2362 err = qlcnic_83xx_configure_opmode(adapter); 2365 err = qlcnic_83xx_configure_opmode(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 64dcbf33d8f0..c1e11f5715b0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -883,8 +883,6 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
883 npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques); 883 npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
884 npar_info->capabilities = le32_to_cpu(nic_info->capabilities); 884 npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
885 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu); 885 npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
886 adapter->max_tx_rings = npar_info->max_tx_ques;
887 adapter->max_sds_rings = npar_info->max_rx_ques;
888 } 886 }
889 887
890 qlcnic_free_mbx_args(&cmd); 888 qlcnic_free_mbx_args(&cmd);
@@ -1356,6 +1354,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1356 arg2 &= ~BIT_3; 1354 arg2 &= ~BIT_3;
1357 break; 1355 break;
1358 case QLCNIC_ADD_VLAN: 1356 case QLCNIC_ADD_VLAN:
1357 arg1 &= ~(0x0ffff << 16);
1359 arg1 |= (BIT_2 | BIT_5); 1358 arg1 |= (BIT_2 | BIT_5);
1360 arg1 |= (esw_cfg->vlan_id << 16); 1359 arg1 |= (esw_cfg->vlan_id << 16);
1361 break; 1360 break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 7d4f54912bad..a51fe18f09a8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -330,8 +330,6 @@ static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
330 goto out_free_cfg; 330 goto out_free_cfg;
331 } 331 }
332 332
333 qlcnic_dcb_get_info(dcb);
334
335 return 0; 333 return 0;
336out_free_cfg: 334out_free_cfg:
337 kfree(dcb->cfg); 335 kfree(dcb->cfg);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 309d05640883..7e55e88a81bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -670,7 +670,7 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
670 else 670 else
671 num_msix += adapter->drv_tx_rings; 671 num_msix += adapter->drv_tx_rings;
672 672
673 if (adapter->drv_rss_rings > 0) 673 if (adapter->drv_rss_rings > 0)
674 num_msix += adapter->drv_rss_rings; 674 num_msix += adapter->drv_rss_rings;
675 else 675 else
676 num_msix += adapter->drv_sds_rings; 676 num_msix += adapter->drv_sds_rings;
@@ -686,19 +686,15 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
686 return -ENOMEM; 686 return -ENOMEM;
687 } 687 }
688 688
689restore:
690 for (vector = 0; vector < num_msix; vector++) 689 for (vector = 0; vector < num_msix; vector++)
691 adapter->msix_entries[vector].entry = vector; 690 adapter->msix_entries[vector].entry = vector;
692 691
692restore:
693 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); 693 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
694 if (err == 0) { 694 if (err > 0) {
695 adapter->ahw->num_msix = num_msix; 695 if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
696 if (adapter->drv_tss_rings > 0) 696 return -ENOSPC;
697 adapter->drv_tx_rings = adapter->drv_tss_rings;
698 697
699 if (adapter->drv_rss_rings > 0)
700 adapter->drv_sds_rings = adapter->drv_rss_rings;
701 } else {
702 netdev_info(adapter->netdev, 698 netdev_info(adapter->netdev,
703 "Unable to allocate %d MSI-X vectors, Available vectors %d\n", 699 "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
704 num_msix, err); 700 num_msix, err);
@@ -716,12 +712,20 @@ restore:
716 "Restoring %d Tx, %d SDS rings for total %d vectors.\n", 712 "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
717 adapter->drv_tx_rings, adapter->drv_sds_rings, 713 adapter->drv_tx_rings, adapter->drv_sds_rings,
718 num_msix); 714 num_msix);
719 goto restore;
720 715
721 err = -EIO; 716 goto restore;
717 } else if (err < 0) {
718 return err;
722 } 719 }
723 720
724 return err; 721 adapter->ahw->num_msix = num_msix;
722 if (adapter->drv_tss_rings > 0)
723 adapter->drv_tx_rings = adapter->drv_tss_rings;
724
725 if (adapter->drv_rss_rings > 0)
726 adapter->drv_sds_rings = adapter->drv_rss_rings;
727
728 return 0;
725} 729}
726 730
727int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) 731int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
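Note on the qlcnic_setup_tss_rss_intr() rework above: it depends on the old-style pci_enable_msix() return convention — 0 when all requested vectors were allocated, a negative errno on hard failure, and a positive count when only that many vectors are available, in which case the driver trims its ring counts and jumps back to the restore: label. A minimal kernel-style sketch of that convention follows; example_enable_msix() and its parameters are invented for illustration, only pci_enable_msix() and struct msix_entry are the real interfaces.

    #include <linux/errno.h>
    #include <linux/pci.h>

    /* Retry-with-fewer-vectors loop for the legacy pci_enable_msix():
     * a positive return value is the number of vectors the platform can
     * actually provide, so shrink the request and try again.
     * entries[i].entry is assumed to be pre-initialised by the caller. */
    static int example_enable_msix(struct pci_dev *pdev,
                                   struct msix_entry *entries, int want)
    {
            int err;

            while (want > 0) {
                    err = pci_enable_msix(pdev, entries, want);
                    if (err == 0)
                            return want;    /* all 'want' vectors granted */
                    if (err < 0)
                            return err;     /* hard failure (-EINVAL, -ENOMEM, ...) */
                    want = err;             /* only 'err' available: retry smaller */
            }

            return -ENOSPC;
    }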
@@ -2202,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
2202 ahw->max_uc_count = count; 2206 ahw->max_uc_count = count;
2203} 2207}
2204 2208
2209static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
2210 u8 tx_queues, u8 rx_queues)
2211{
2212 struct net_device *netdev = adapter->netdev;
2213 int err = 0;
2214
2215 if (tx_queues) {
2216 err = netif_set_real_num_tx_queues(netdev, tx_queues);
2217 if (err) {
2218 netdev_err(netdev, "failed to set %d Tx queues\n",
2219 tx_queues);
2220 return err;
2221 }
2222 }
2223
2224 if (rx_queues) {
2225 err = netif_set_real_num_rx_queues(netdev, rx_queues);
2226 if (err)
2227 netdev_err(netdev, "failed to set %d Rx queues\n",
2228 rx_queues);
2229 }
2230
2231 return err;
2232}
2233
2205int 2234int
2206qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, 2235qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
2207 int pci_using_dac) 2236 int pci_using_dac)
@@ -2265,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
2265 netdev->priv_flags |= IFF_UNICAST_FLT; 2294 netdev->priv_flags |= IFF_UNICAST_FLT;
2266 netdev->irq = adapter->msix_entries[0].vector; 2295 netdev->irq = adapter->msix_entries[0].vector;
2267 2296
2268 err = qlcnic_set_real_num_queues(adapter, netdev); 2297 err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
2298 adapter->drv_sds_rings);
2269 if (err) 2299 if (err)
2270 return err; 2300 return err;
2271 2301
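The new qlcnic_set_real_num_queues() helper above is a thin wrapper around netif_set_real_num_tx_queues()/netif_set_real_num_rx_queues(), taking explicit counts so the same code serves both probe and the ring-reconfiguration path. For context, a hedged kernel-style sketch of the usual pattern — allocate the upper bound, then trim to what is actually used; example_alloc() and its arguments are invented, the netdev calls are the real core API.

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    /* Allocate a multiqueue netdev for the hardware maximum, then tell the
     * stack how many queues are really in use (must not exceed the maximum). */
    static struct net_device *example_alloc(unsigned int max_tx, unsigned int max_rx,
                                            unsigned int used_tx, unsigned int used_rx)
    {
            struct net_device *netdev;
            int err;

            netdev = alloc_etherdev_mqs(0, max_tx, max_rx);  /* 0 bytes of priv for brevity */
            if (!netdev)
                    return NULL;

            err = netif_set_real_num_tx_queues(netdev, used_tx);
            if (!err)
                    err = netif_set_real_num_rx_queues(netdev, used_rx);
            if (err) {
                    free_netdev(netdev);
                    return NULL;
            }

            return netdev;
    }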
@@ -2370,6 +2400,14 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
2370 qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); 2400 qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
2371} 2401}
2372 2402
2403/* Reset firmware API lock */
2404static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter)
2405{
2406 qlcnic_api_lock(adapter);
2407 qlcnic_api_unlock(adapter);
2408}
2409
2410
2373static int 2411static int
2374qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2412qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2375{ 2413{
@@ -2472,6 +2510,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2472 if (qlcnic_82xx_check(adapter)) { 2510 if (qlcnic_82xx_check(adapter)) {
2473 qlcnic_check_vf(adapter, ent); 2511 qlcnic_check_vf(adapter, ent);
2474 adapter->portnum = adapter->ahw->pci_func; 2512 adapter->portnum = adapter->ahw->pci_func;
2513 qlcnic_reset_api_lock(adapter);
2475 err = qlcnic_start_firmware(adapter); 2514 err = qlcnic_start_firmware(adapter);
2476 if (err) { 2515 if (err) {
2477 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n" 2516 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
@@ -2528,8 +2567,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2528 goto err_out_free_hw; 2567 goto err_out_free_hw;
2529 } 2568 }
2530 2569
2531 qlcnic_dcb_enable(adapter->dcb);
2532
2533 if (qlcnic_read_mac_addr(adapter)) 2570 if (qlcnic_read_mac_addr(adapter))
2534 dev_warn(&pdev->dev, "failed to read mac addr\n"); 2571 dev_warn(&pdev->dev, "failed to read mac addr\n");
2535 2572
@@ -2549,7 +2586,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2549 "Device does not support MSI interrupts\n"); 2586 "Device does not support MSI interrupts\n");
2550 2587
2551 if (qlcnic_82xx_check(adapter)) { 2588 if (qlcnic_82xx_check(adapter)) {
2589 qlcnic_dcb_enable(adapter->dcb);
2590 qlcnic_dcb_get_info(adapter->dcb);
2552 err = qlcnic_setup_intr(adapter); 2591 err = qlcnic_setup_intr(adapter);
2592
2553 if (err) { 2593 if (err) {
2554 dev_err(&pdev->dev, "Failed to setup interrupt\n"); 2594 dev_err(&pdev->dev, "Failed to setup interrupt\n");
2555 goto err_out_disable_msi; 2595 goto err_out_disable_msi;
@@ -2929,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
2929 tx_ring->tx_stats.xmit_called, 2969 tx_ring->tx_stats.xmit_called,
2930 tx_ring->tx_stats.xmit_on, 2970 tx_ring->tx_stats.xmit_on,
2931 tx_ring->tx_stats.xmit_off); 2971 tx_ring->tx_stats.xmit_off);
2972
2973 if (tx_ring->crb_intr_mask)
2974 netdev_info(netdev, "crb_intr_mask=%d\n",
2975 readl(tx_ring->crb_intr_mask));
2976
2932 netdev_info(netdev, 2977 netdev_info(netdev,
2933 "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n", 2978 "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
2934 readl(tx_ring->crb_intr_mask),
2935 readl(tx_ring->crb_cmd_producer), 2979 readl(tx_ring->crb_cmd_producer),
2936 tx_ring->producer, tx_ring->sw_consumer, 2980 tx_ring->producer, tx_ring->sw_consumer,
2937 le32_to_cpu(*(tx_ring->hw_consumer))); 2981 le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3964,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
3964int qlcnic_setup_rings(struct qlcnic_adapter *adapter) 4008int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
3965{ 4009{
3966 struct net_device *netdev = adapter->netdev; 4010 struct net_device *netdev = adapter->netdev;
4011 u8 tx_rings, rx_rings;
3967 int err; 4012 int err;
3968 4013
3969 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) 4014 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3970 return -EBUSY; 4015 return -EBUSY;
3971 4016
4017 tx_rings = adapter->drv_tss_rings;
4018 rx_rings = adapter->drv_rss_rings;
4019
3972 netif_device_detach(netdev); 4020 netif_device_detach(netdev);
4021
4022 err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
4023 if (err)
4024 goto done;
4025
3973 if (netif_running(netdev)) 4026 if (netif_running(netdev))
3974 __qlcnic_down(adapter, netdev); 4027 __qlcnic_down(adapter, netdev);
3975 4028
@@ -3989,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
3989 return err; 4042 return err;
3990 } 4043 }
3991 4044
3992 netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings); 4045 /* Check if we need to update real_num_{tx|rx}_queues because
4046 * qlcnic_setup_intr() may change Tx/Rx rings size
4047 */
4048 if ((tx_rings != adapter->drv_tx_rings) ||
4049 (rx_rings != adapter->drv_sds_rings)) {
4050 err = qlcnic_set_real_num_queues(adapter,
4051 adapter->drv_tx_rings,
4052 adapter->drv_sds_rings);
4053 if (err)
4054 goto done;
4055 }
3993 4056
3994 if (qlcnic_83xx_check(adapter)) { 4057 if (qlcnic_83xx_check(adapter)) {
3995 qlcnic_83xx_initialize_nic(adapter, 1); 4058 qlcnic_83xx_initialize_nic(adapter, 1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 0638c1810d54..6afe9c1f5ab9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1370,7 +1370,7 @@ static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1370 1370
1371 rsp = qlcnic_sriov_alloc_bc_trans(&trans); 1371 rsp = qlcnic_sriov_alloc_bc_trans(&trans);
1372 if (rsp) 1372 if (rsp)
1373 return rsp; 1373 goto free_cmd;
1374 1374
1375 rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND); 1375 rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
1376 if (rsp) 1376 if (rsp)
@@ -1425,6 +1425,13 @@ err_out:
1425 1425
1426cleanup_transaction: 1426cleanup_transaction:
1427 qlcnic_sriov_cleanup_transaction(trans); 1427 qlcnic_sriov_cleanup_transaction(trans);
1428
1429free_cmd:
1430 if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
1431 qlcnic_free_mbx_args(cmd);
1432 kfree(cmd);
1433 }
1434
1428 return rsp; 1435 return rsp;
1429} 1436}
1430 1437
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 14f748cbf0de..280137991544 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -461,6 +461,16 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
461{ 461{
462 struct net_device *netdev = adapter->netdev; 462 struct net_device *netdev = adapter->netdev;
463 463
464 if (pci_vfs_assigned(adapter->pdev)) {
465 netdev_err(adapter->netdev,
466 "SR-IOV VFs belonging to port %d are assigned to VMs. SR-IOV can not be disabled on this port\n",
467 adapter->portnum);
468 netdev_info(adapter->netdev,
469 "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n",
470 adapter->portnum);
471 return -EPERM;
472 }
473
464 rtnl_lock(); 474 rtnl_lock();
465 if (netif_running(netdev)) 475 if (netif_running(netdev))
466 __qlcnic_down(adapter, netdev); 476 __qlcnic_down(adapter, netdev);
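The guard added above uses pci_vfs_assigned() to refuse tearing SR-IOV down while any VF is still attached to a guest, since disabling it underneath an assigned VF would yank the device away from the VM. A hedged sketch of that check in isolation; example_sriov_disable() is an invented name, pci_vfs_assigned() and pci_disable_sriov() are the real PCI core calls.

    #include <linux/pci.h>

    /* Refuse to disable SR-IOV while VFs are assigned to guests. */
    static int example_sriov_disable(struct pci_dev *pdev)
    {
            if (pci_vfs_assigned(pdev)) {
                    dev_err(&pdev->dev,
                            "VFs still assigned to guests, not disabling SR-IOV\n");
                    return -EPERM;
            }

            pci_disable_sriov(pdev);
            return 0;
    }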
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 448d156c3d08..cd346e27f2e1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -354,7 +354,7 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
354{ 354{
355 int i; 355 int i;
356 356
357 for (i = 0; i < adapter->ahw->max_vnic_func; i++) { 357 for (i = 0; i < adapter->ahw->total_nic_func; i++) {
358 if (adapter->npars[i].pci_func == pci_func) 358 if (adapter->npars[i].pci_func == pci_func)
359 return i; 359 return i;
360 } 360 }
@@ -720,6 +720,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
720 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 720 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
721 struct qlcnic_npar_func_cfg *np_cfg; 721 struct qlcnic_npar_func_cfg *np_cfg;
722 struct qlcnic_info nic_info; 722 struct qlcnic_info nic_info;
723 u8 pci_func;
723 int i, ret; 724 int i, ret;
724 u32 count; 725 u32 count;
725 726
@@ -729,26 +730,28 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
729 730
730 count = size / sizeof(struct qlcnic_npar_func_cfg); 731 count = size / sizeof(struct qlcnic_npar_func_cfg);
731 for (i = 0; i < adapter->ahw->total_nic_func; i++) { 732 for (i = 0; i < adapter->ahw->total_nic_func; i++) {
732 if (qlcnic_is_valid_nic_func(adapter, i) < 0)
733 continue;
734 if (adapter->npars[i].pci_func >= count) { 733 if (adapter->npars[i].pci_func >= count) {
735 dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n", 734 dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
736 __func__, adapter->ahw->total_nic_func, count); 735 __func__, adapter->ahw->total_nic_func, count);
737 continue; 736 continue;
738 } 737 }
739 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
740 if (ret)
741 return ret;
742 if (!adapter->npars[i].eswitch_status) 738 if (!adapter->npars[i].eswitch_status)
743 continue; 739 continue;
744 np_cfg[i].pci_func = i; 740 pci_func = adapter->npars[i].pci_func;
745 np_cfg[i].op_mode = (u8)nic_info.op_mode; 741 if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
746 np_cfg[i].port_num = nic_info.phys_port; 742 continue;
747 np_cfg[i].fw_capab = nic_info.capabilities; 743 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
748 np_cfg[i].min_bw = nic_info.min_tx_bw; 744 if (ret)
749 np_cfg[i].max_bw = nic_info.max_tx_bw; 745 return ret;
750 np_cfg[i].max_tx_queues = nic_info.max_tx_ques; 746
751 np_cfg[i].max_rx_queues = nic_info.max_rx_ques; 747 np_cfg[pci_func].pci_func = pci_func;
748 np_cfg[pci_func].op_mode = (u8)nic_info.op_mode;
749 np_cfg[pci_func].port_num = nic_info.phys_port;
750 np_cfg[pci_func].fw_capab = nic_info.capabilities;
751 np_cfg[pci_func].min_bw = nic_info.min_tx_bw;
752 np_cfg[pci_func].max_bw = nic_info.max_tx_bw;
753 np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
754 np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
752 } 755 }
753 return size; 756 return size;
754} 757}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 6203c7d8550f..45019649bbbd 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -358,6 +358,8 @@ struct sxgbe_core_ops {
358 /* Enable disable checksum offload operations */ 358 /* Enable disable checksum offload operations */
359 void (*enable_rx_csum)(void __iomem *ioaddr); 359 void (*enable_rx_csum)(void __iomem *ioaddr);
360 void (*disable_rx_csum)(void __iomem *ioaddr); 360 void (*disable_rx_csum)(void __iomem *ioaddr);
361 void (*enable_rxqueue)(void __iomem *ioaddr, int queue_num);
362 void (*disable_rxqueue)(void __iomem *ioaddr, int queue_num);
361}; 363};
362 364
363const struct sxgbe_core_ops *sxgbe_get_core_ops(void); 365const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
index c4da7a2b002a..58c35692560e 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -165,6 +165,26 @@ static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
165 writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG); 165 writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
166} 166}
167 167
168static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num)
169{
170 u32 reg_val;
171
172 reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
173 reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
174 reg_val |= SXGBE_CORE_RXQ_ENABLE;
175 writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
176}
177
178static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num)
179{
180 u32 reg_val;
181
182 reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
183 reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
184 reg_val |= SXGBE_CORE_RXQ_DISABLE;
185 writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
186}
187
168static void sxgbe_set_eee_mode(void __iomem *ioaddr) 188static void sxgbe_set_eee_mode(void __iomem *ioaddr)
169{ 189{
170 u32 ctrl; 190 u32 ctrl;
@@ -254,6 +274,8 @@ static const struct sxgbe_core_ops core_ops = {
254 .set_eee_pls = sxgbe_set_eee_pls, 274 .set_eee_pls = sxgbe_set_eee_pls,
255 .enable_rx_csum = sxgbe_enable_rx_csum, 275 .enable_rx_csum = sxgbe_enable_rx_csum,
256 .disable_rx_csum = sxgbe_disable_rx_csum, 276 .disable_rx_csum = sxgbe_disable_rx_csum,
277 .enable_rxqueue = sxgbe_core_enable_rxqueue,
278 .disable_rxqueue = sxgbe_core_disable_rxqueue,
257}; 279};
258 280
259const struct sxgbe_core_ops *sxgbe_get_core_ops(void) 281const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
index e896dbbd2e15..2686bb5b6765 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
@@ -45,10 +45,10 @@ static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
45 p->tdes23.tx_rd_des23.first_desc = is_fd; 45 p->tdes23.tx_rd_des23.first_desc = is_fd;
46 p->tdes23.tx_rd_des23.buf1_size = buf1_len; 46 p->tdes23.tx_rd_des23.buf1_size = buf1_len;
47 47
48 p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len; 48 p->tdes23.tx_rd_des23.tx_pkt_len.pkt_len.total_pkt_len = pkt_len;
49 49
50 if (cksum) 50 if (cksum)
51 p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full; 51 p->tdes23.tx_rd_des23.cksum_ctl = cic_full;
52} 52}
53 53
54/* Set VLAN control information */ 54/* Set VLAN control information */
@@ -233,6 +233,12 @@ static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
233 p->rdes23.rx_rd_des23.own_bit = 1; 233 p->rdes23.rx_rd_des23.own_bit = 1;
234} 234}
235 235
236/* Set Interrupt on completion bit */
237static void sxgbe_set_rx_int_on_com(struct sxgbe_rx_norm_desc *p)
238{
239 p->rdes23.rx_rd_des23.int_on_com = 1;
240}
241
236/* Get the receive frame size */ 242/* Get the receive frame size */
237static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p) 243static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
238{ 244{
@@ -498,6 +504,7 @@ static const struct sxgbe_desc_ops desc_ops = {
498 .init_rx_desc = sxgbe_init_rx_desc, 504 .init_rx_desc = sxgbe_init_rx_desc,
499 .get_rx_owner = sxgbe_get_rx_owner, 505 .get_rx_owner = sxgbe_get_rx_owner,
500 .set_rx_owner = sxgbe_set_rx_owner, 506 .set_rx_owner = sxgbe_set_rx_owner,
507 .set_rx_int_on_com = sxgbe_set_rx_int_on_com,
501 .get_rx_frame_len = sxgbe_get_rx_frame_len, 508 .get_rx_frame_len = sxgbe_get_rx_frame_len,
502 .get_rx_fd_status = sxgbe_get_rx_fd_status, 509 .get_rx_fd_status = sxgbe_get_rx_fd_status,
503 .get_rx_ld_status = sxgbe_get_rx_ld_status, 510 .get_rx_ld_status = sxgbe_get_rx_ld_status,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
index 838cb9fb0ea9..18609324db72 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
@@ -39,22 +39,22 @@ struct sxgbe_tx_norm_desc {
39 u32 int_on_com:1; 39 u32 int_on_com:1;
40 /* TDES3 */ 40 /* TDES3 */
41 union { 41 union {
42 u32 tcp_payload_len:18; 42 u16 tcp_payload_len;
43 struct { 43 struct {
44 u32 total_pkt_len:15; 44 u32 total_pkt_len:15;
45 u32 reserved1:1; 45 u32 reserved1:1;
46 u32 cksum_ctl:2; 46 } pkt_len;
47 } cksum_pktlen;
48 } tx_pkt_len; 47 } tx_pkt_len;
49 48
50 u32 tse_bit:1; 49 u16 cksum_ctl:2;
51 u32 tcp_hdr_len:4; 50 u16 tse_bit:1;
52 u32 sa_insert_ctl:3; 51 u16 tcp_hdr_len:4;
53 u32 crc_pad_ctl:2; 52 u16 sa_insert_ctl:3;
54 u32 last_desc:1; 53 u16 crc_pad_ctl:2;
55 u32 first_desc:1; 54 u16 last_desc:1;
56 u32 ctxt_bit:1; 55 u16 first_desc:1;
57 u32 own_bit:1; 56 u16 ctxt_bit:1;
57 u16 own_bit:1;
58 } tx_rd_des23; 58 } tx_rd_des23;
59 59
60 /* tx write back Desc 2,3 */ 60 /* tx write back Desc 2,3 */
@@ -70,25 +70,20 @@ struct sxgbe_tx_norm_desc {
70 70
71struct sxgbe_rx_norm_desc { 71struct sxgbe_rx_norm_desc {
72 union { 72 union {
73 u32 rdes0; /* buf1 address */ 73 u64 rdes01; /* buf1 address */
74 struct { 74 union {
75 u32 out_vlan_tag:16; 75 u32 out_vlan_tag:16;
76 u32 in_vlan_tag:16; 76 u32 in_vlan_tag:16;
77 } wb_rx_des0; 77 u32 rss_hash;
78 } rd_wb_des0; 78 } rx_wb_des01;
79 79 } rdes01;
80 union {
81 u32 rdes1; /* buf2 address or buf1[63:32] */
82 u32 rss_hash; /* Write-back RX */
83 } rd_wb_des1;
84 80
85 union { 81 union {
86 /* RX Read format Desc 2,3 */ 82 /* RX Read format Desc 2,3 */
87 struct{ 83 struct{
88 /* RDES2 */ 84 /* RDES2 */
89 u32 buf2_addr; 85 u64 buf2_addr:62;
90 /* RDES3 */ 86 /* RDES3 */
91 u32 buf2_hi_addr:30;
92 u32 int_on_com:1; 87 u32 int_on_com:1;
93 u32 own_bit:1; 88 u32 own_bit:1;
94 } rx_rd_des23; 89 } rx_rd_des23;
@@ -263,6 +258,9 @@ struct sxgbe_desc_ops {
263 /* Set own bit */ 258 /* Set own bit */
264 void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p); 259 void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
265 260
261 /* Set Interrupt on completion bit */
262 void (*set_rx_int_on_com)(struct sxgbe_rx_norm_desc *p);
263
266 /* Get the receive frame size */ 264 /* Get the receive frame size */
267 int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p); 265 int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
268 266
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
index 4d989ff6c978..bb9b5b8afc5f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
@@ -23,21 +23,8 @@
23/* DMA core initialization */ 23/* DMA core initialization */
24static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map) 24static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
25{ 25{
26 int retry_count = 10;
27 u32 reg_val; 26 u32 reg_val;
28 27
29 /* reset the DMA */
30 writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
31 while (retry_count--) {
32 if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
33 SXGBE_DMA_SOFT_RESET))
34 break;
35 mdelay(10);
36 }
37
38 if (retry_count < 0)
39 return -EBUSY;
40
41 reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG); 28 reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
42 29
43 /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register. 30 /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 27e8c824b204..82a9a983869f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1076,6 +1076,9 @@ static int sxgbe_open(struct net_device *dev)
1076 1076
1077 /* Initialize the MAC Core */ 1077 /* Initialize the MAC Core */
1078 priv->hw->mac->core_init(priv->ioaddr); 1078 priv->hw->mac->core_init(priv->ioaddr);
1079 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
1080 priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
1081 }
1079 1082
1080 /* Request the IRQ lines */ 1083 /* Request the IRQ lines */
1081 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt, 1084 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
@@ -1453,6 +1456,7 @@ static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
1453 /* Added memory barrier for RX descriptor modification */ 1456 /* Added memory barrier for RX descriptor modification */
1454 wmb(); 1457 wmb();
1455 priv->hw->desc->set_rx_owner(p); 1458 priv->hw->desc->set_rx_owner(p);
1459 priv->hw->desc->set_rx_int_on_com(p);
1456 /* Added memory barrier for RX descriptor modification */ 1460 /* Added memory barrier for RX descriptor modification */
1457 wmb(); 1461 wmb();
1458 } 1462 }
@@ -2070,6 +2074,24 @@ static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
2070 return 0; 2074 return 0;
2071} 2075}
2072 2076
2077static int sxgbe_sw_reset(void __iomem *addr)
2078{
2079 int retry_count = 10;
2080
2081 writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
2082 while (retry_count--) {
2083 if (!(readl(addr + SXGBE_DMA_MODE_REG) &
2084 SXGBE_DMA_SOFT_RESET))
2085 break;
2086 mdelay(10);
2087 }
2088
2089 if (retry_count < 0)
2090 return -EBUSY;
2091
2092 return 0;
2093}
2094
2073/** 2095/**
2074 * sxgbe_drv_probe 2096 * sxgbe_drv_probe
2075 * @device: device pointer 2097 * @device: device pointer
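The new sxgbe_sw_reset() above keeps the bounded while (retry_count--) poll that used to live in sxgbe_dma_init(). The idiom is easy to misread: after the loop, retry_count is -1 only when every attempt was used up, while 0 still means the reset bit cleared on the final try, so the retry_count < 0 test is the right success/failure split. A small self-contained demo of that behaviour; poll_until() and its arguments are made up for illustration.

    #include <stdio.h>

    /* ready_after stands in for how long the reset bit stays set,
     * elapsed++ for the mdelay(10) between polls. */
    static int poll_until(int ready_after, int max_tries)
    {
            int retry_count = max_tries;
            int elapsed = 0;

            while (retry_count--) {
                    if (elapsed >= ready_after)     /* "reset bit cleared" */
                            break;
                    elapsed++;
            }

            return (retry_count < 0) ? -1 : 0;      /* -1 plays the role of -EBUSY */
    }

    int main(void)
    {
            printf("ready early : %d\n", poll_until(3, 10));        /* 0  */
            printf("ready last  : %d\n", poll_until(9, 10));        /* 0  */
            printf("never ready : %d\n", poll_until(99, 10));       /* -1 */
            return 0;
    }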
@@ -2102,6 +2124,10 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
2102 priv->plat = plat_dat; 2124 priv->plat = plat_dat;
2103 priv->ioaddr = addr; 2125 priv->ioaddr = addr;
2104 2126
2127 ret = sxgbe_sw_reset(priv->ioaddr);
2128 if (ret)
2129 goto error_free_netdev;
2130
2105 /* Verify driver arguments */ 2131 /* Verify driver arguments */
2106 sxgbe_verify_args(); 2132 sxgbe_verify_args();
2107 2133
@@ -2218,9 +2244,14 @@ error_free_netdev:
2218int sxgbe_drv_remove(struct net_device *ndev) 2244int sxgbe_drv_remove(struct net_device *ndev)
2219{ 2245{
2220 struct sxgbe_priv_data *priv = netdev_priv(ndev); 2246 struct sxgbe_priv_data *priv = netdev_priv(ndev);
2247 u8 queue_num;
2221 2248
2222 netdev_info(ndev, "%s: removing driver\n", __func__); 2249 netdev_info(ndev, "%s: removing driver\n", __func__);
2223 2250
2251 SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
2252 priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
2253 }
2254
2224 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); 2255 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
2225 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); 2256 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
2226 2257
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
index 01af2cbb479d..43ccb4a6de15 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -27,7 +27,7 @@
27#define SXGBE_SMA_PREAD_CMD 0x02 /* post read increament address */ 27#define SXGBE_SMA_PREAD_CMD 0x02 /* post read increament address */
28#define SXGBE_SMA_READ_CMD 0x03 /* read command */ 28#define SXGBE_SMA_READ_CMD 0x03 /* read command */
29#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */ 29#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */
30#define SXGBE_MII_BUSY 0x00800000 /* mii busy */ 30#define SXGBE_MII_BUSY 0x00400000 /* mii busy */
31 31
32static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data) 32static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
33{ 33{
@@ -147,6 +147,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
147 struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data; 147 struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
148 int err, phy_addr; 148 int err, phy_addr;
149 int *irqlist; 149 int *irqlist;
150 bool phy_found = false;
150 bool act; 151 bool act;
151 152
152 /* allocate the new mdio bus */ 153 /* allocate the new mdio bus */
@@ -162,7 +163,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
162 irqlist = priv->mii_irq; 163 irqlist = priv->mii_irq;
163 164
164 /* assign mii bus fields */ 165 /* assign mii bus fields */
165 mdio_bus->name = "samsxgbe"; 166 mdio_bus->name = "sxgbe";
166 mdio_bus->read = &sxgbe_mdio_read; 167 mdio_bus->read = &sxgbe_mdio_read;
167 mdio_bus->write = &sxgbe_mdio_write; 168 mdio_bus->write = &sxgbe_mdio_write;
168 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x", 169 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -216,13 +217,22 @@ int sxgbe_mdio_register(struct net_device *ndev)
216 netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", 217 netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
217 phy->phy_id, phy_addr, irq_str, 218 phy->phy_id, phy_addr, irq_str,
218 dev_name(&phy->dev), act ? " active" : ""); 219 dev_name(&phy->dev), act ? " active" : "");
220 phy_found = true;
219 } 221 }
220 } 222 }
221 223
224 if (!phy_found) {
225 netdev_err(ndev, "PHY not found\n");
226 goto phyfound_err;
227 }
228
222 priv->mii = mdio_bus; 229 priv->mii = mdio_bus;
223 230
224 return 0; 231 return 0;
225 232
233phyfound_err:
234 err = -ENODEV;
235 mdiobus_unregister(mdio_bus);
226mdiobus_err: 236mdiobus_err:
227 mdiobus_free(mdio_bus); 237 mdiobus_free(mdio_bus);
228 return err; 238 return err;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
index 5a89acb4c505..56f8bf5a3f1b 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -52,6 +52,10 @@
52#define SXGBE_CORE_RX_CTL2_REG 0x00A8 52#define SXGBE_CORE_RX_CTL2_REG 0x00A8
53#define SXGBE_CORE_RX_CTL3_REG 0x00AC 53#define SXGBE_CORE_RX_CTL3_REG 0x00AC
54 54
55#define SXGBE_CORE_RXQ_ENABLE_MASK 0x0003
56#define SXGBE_CORE_RXQ_ENABLE 0x0002
57#define SXGBE_CORE_RXQ_DISABLE 0x0000
58
55/* Interrupt Registers */ 59/* Interrupt Registers */
56#define SXGBE_CORE_INT_STATUS_REG 0x00B0 60#define SXGBE_CORE_INT_STATUS_REG 0x00B0
57#define SXGBE_CORE_INT_ENABLE_REG 0x00B4 61#define SXGBE_CORE_INT_ENABLE_REG 0x00B4
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 21c20ea0dad0..b5ed30a39144 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -738,8 +738,11 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
738 /* If it was a port reset, trigger reallocation of MC resources. 738 /* If it was a port reset, trigger reallocation of MC resources.
739 * Note that on an MC reset nothing needs to be done now because we'll 739 * Note that on an MC reset nothing needs to be done now because we'll
740 * detect the MC reset later and handle it then. 740 * detect the MC reset later and handle it then.
741 * For an FLR, we never get an MC reset event, but the MC has reset all
742 * resources assigned to us, so we have to trigger reallocation now.
741 */ 743 */
742 if (reset_type == RESET_TYPE_ALL && !rc) 744 if ((reset_type == RESET_TYPE_ALL ||
745 reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
743 efx_ef10_reset_mc_allocations(efx); 746 efx_ef10_reset_mc_allocations(efx);
744 return rc; 747 return rc;
745} 748}
@@ -2141,6 +2144,11 @@ static int efx_ef10_fini_dmaq(struct efx_nic *efx)
2141 return 0; 2144 return 0;
2142} 2145}
2143 2146
2147static void efx_ef10_prepare_flr(struct efx_nic *efx)
2148{
2149 atomic_set(&efx->active_queues, 0);
2150}
2151
2144static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, 2152static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
2145 const struct efx_filter_spec *right) 2153 const struct efx_filter_spec *right)
2146{ 2154{
@@ -3603,6 +3611,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
3603 .probe_port = efx_mcdi_port_probe, 3611 .probe_port = efx_mcdi_port_probe,
3604 .remove_port = efx_mcdi_port_remove, 3612 .remove_port = efx_mcdi_port_remove,
3605 .fini_dmaq = efx_ef10_fini_dmaq, 3613 .fini_dmaq = efx_ef10_fini_dmaq,
3614 .prepare_flr = efx_ef10_prepare_flr,
3615 .finish_flr = efx_port_dummy_op_void,
3606 .describe_stats = efx_ef10_describe_stats, 3616 .describe_stats = efx_ef10_describe_stats,
3607 .update_stats = efx_ef10_update_stats, 3617 .update_stats = efx_ef10_update_stats,
3608 .start_stats = efx_mcdi_mac_start_stats, 3618 .start_stats = efx_mcdi_mac_start_stats,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 57b971e5e6b2..63d595fd3cc5 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -76,6 +76,7 @@ const char *const efx_reset_type_names[] = {
76 [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL", 76 [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
77 [RESET_TYPE_WORLD] = "WORLD", 77 [RESET_TYPE_WORLD] = "WORLD",
78 [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE", 78 [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
79 [RESET_TYPE_MC_BIST] = "MC_BIST",
79 [RESET_TYPE_DISABLE] = "DISABLE", 80 [RESET_TYPE_DISABLE] = "DISABLE",
80 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", 81 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
81 [RESET_TYPE_INT_ERROR] = "INT_ERROR", 82 [RESET_TYPE_INT_ERROR] = "INT_ERROR",
@@ -83,7 +84,7 @@ const char *const efx_reset_type_names[] = {
83 [RESET_TYPE_DMA_ERROR] = "DMA_ERROR", 84 [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
84 [RESET_TYPE_TX_SKIP] = "TX_SKIP", 85 [RESET_TYPE_TX_SKIP] = "TX_SKIP",
85 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", 86 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
86 [RESET_TYPE_MC_BIST] = "MC_BIST", 87 [RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
87}; 88};
88 89
89/* Reset workqueue. If any NIC has a hardware failure then a reset will be 90/* Reset workqueue. If any NIC has a hardware failure then a reset will be
@@ -1739,7 +1740,8 @@ static void efx_start_all(struct efx_nic *efx)
1739 1740
1740 /* Check that it is appropriate to restart the interface. All 1741 /* Check that it is appropriate to restart the interface. All
1741 * of these flags are safe to read under just the rtnl lock */ 1742 * of these flags are safe to read under just the rtnl lock */
1742 if (efx->port_enabled || !netif_running(efx->net_dev)) 1743 if (efx->port_enabled || !netif_running(efx->net_dev) ||
1744 efx->reset_pending)
1743 return; 1745 return;
1744 1746
1745 efx_start_port(efx); 1747 efx_start_port(efx);
@@ -2334,6 +2336,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2334{ 2336{
2335 EFX_ASSERT_RESET_SERIALISED(efx); 2337 EFX_ASSERT_RESET_SERIALISED(efx);
2336 2338
2339 if (method == RESET_TYPE_MCDI_TIMEOUT)
2340 efx->type->prepare_flr(efx);
2341
2337 efx_stop_all(efx); 2342 efx_stop_all(efx);
2338 efx_disable_interrupts(efx); 2343 efx_disable_interrupts(efx);
2339 2344
@@ -2354,6 +2359,10 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2354 2359
2355 EFX_ASSERT_RESET_SERIALISED(efx); 2360 EFX_ASSERT_RESET_SERIALISED(efx);
2356 2361
2362 if (method == RESET_TYPE_MCDI_TIMEOUT)
2363 efx->type->finish_flr(efx);
2364
2365 /* Ensure that SRAM is initialised even if we're disabling the device */
2357 rc = efx->type->init(efx); 2366 rc = efx->type->init(efx);
2358 if (rc) { 2367 if (rc) {
2359 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); 2368 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
@@ -2417,7 +2426,10 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
2417 /* Clear flags for the scopes we covered. We assume the NIC and 2426 /* Clear flags for the scopes we covered. We assume the NIC and
2418 * driver are now quiescent so that there is no race here. 2427 * driver are now quiescent so that there is no race here.
2419 */ 2428 */
2420 efx->reset_pending &= -(1 << (method + 1)); 2429 if (method < RESET_TYPE_MAX_METHOD)
2430 efx->reset_pending &= -(1 << (method + 1));
2431 else /* it doesn't fit into the well-ordered scope hierarchy */
2432 __clear_bit(method, &efx->reset_pending);
2421 2433
2422 /* Reinitialise bus-mastering, which may have been turned off before 2434 /* Reinitialise bus-mastering, which may have been turned off before
2423 * the reset was scheduled. This is still appropriate, even in the 2435 * the reset was scheduled. This is still appropriate, even in the
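The clearing logic above leans on -(1 << (method + 1)) being the two's-complement spelling of ~((1 << (method + 1)) - 1), i.e. a mask with bits 0..method cleared and everything above kept — which only makes sense while reset methods are ordered by inclusion. RESET_TYPE_MCDI_TIMEOUT sits outside that ordering (its value is above RESET_TYPE_MAX_METHOD), so the hunk clears just its own bit instead. A self-contained check of the mask identity; the pending value and method number are arbitrary examples.

    #include <stdio.h>

    int main(void)
    {
            unsigned long pending = 0x2d;   /* arbitrary example of reset_pending */
            int method = 2;                 /* e.g. RESET_TYPE_ALL in the old numbering */

            /* -(1UL << (method + 1)) == ~((1UL << (method + 1)) - 1):
             * clears bits 0..method, keeps every higher-scope bit. */
            pending &= -(1UL << (method + 1));

            printf("%#lx\n", pending);      /* prints 0x28: bits 0..2 are gone */
            return 0;
    }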
@@ -2546,6 +2558,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2546 case RESET_TYPE_DISABLE: 2558 case RESET_TYPE_DISABLE:
2547 case RESET_TYPE_RECOVER_OR_DISABLE: 2559 case RESET_TYPE_RECOVER_OR_DISABLE:
2548 case RESET_TYPE_MC_BIST: 2560 case RESET_TYPE_MC_BIST:
2561 case RESET_TYPE_MCDI_TIMEOUT:
2549 method = type; 2562 method = type;
2550 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", 2563 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2551 RESET_TYPE(method)); 2564 RESET_TYPE(method));
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 75ef7ef6450b..d1dbb5fb31bb 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -143,6 +143,7 @@ enum efx_loopback_mode {
143 * @RESET_TYPE_WORLD: Reset as much as possible 143 * @RESET_TYPE_WORLD: Reset as much as possible
144 * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if 144 * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
145 * unsuccessful. 145 * unsuccessful.
146 * @RESET_TYPE_MC_BIST: MC entering BIST mode.
146 * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled 147 * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
147 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog 148 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
148 * @RESET_TYPE_INT_ERROR: reset due to internal error 149 * @RESET_TYPE_INT_ERROR: reset due to internal error
@@ -150,14 +151,16 @@ enum efx_loopback_mode {
150 * @RESET_TYPE_DMA_ERROR: DMA error 151 * @RESET_TYPE_DMA_ERROR: DMA error
151 * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors 152 * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
152 * @RESET_TYPE_MC_FAILURE: MC reboot/assertion 153 * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
154 * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
153 */ 155 */
154enum reset_type { 156enum reset_type {
155 RESET_TYPE_INVISIBLE = 0, 157 RESET_TYPE_INVISIBLE,
156 RESET_TYPE_RECOVER_OR_ALL = 1, 158 RESET_TYPE_RECOVER_OR_ALL,
157 RESET_TYPE_ALL = 2, 159 RESET_TYPE_ALL,
158 RESET_TYPE_WORLD = 3, 160 RESET_TYPE_WORLD,
159 RESET_TYPE_RECOVER_OR_DISABLE = 4, 161 RESET_TYPE_RECOVER_OR_DISABLE,
160 RESET_TYPE_DISABLE = 5, 162 RESET_TYPE_MC_BIST,
163 RESET_TYPE_DISABLE,
161 RESET_TYPE_MAX_METHOD, 164 RESET_TYPE_MAX_METHOD,
162 RESET_TYPE_TX_WATCHDOG, 165 RESET_TYPE_TX_WATCHDOG,
163 RESET_TYPE_INT_ERROR, 166 RESET_TYPE_INT_ERROR,
@@ -165,7 +168,13 @@ enum reset_type {
165 RESET_TYPE_DMA_ERROR, 168 RESET_TYPE_DMA_ERROR,
166 RESET_TYPE_TX_SKIP, 169 RESET_TYPE_TX_SKIP,
167 RESET_TYPE_MC_FAILURE, 170 RESET_TYPE_MC_FAILURE,
168 RESET_TYPE_MC_BIST, 171 /* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
172 * it doesn't fit the scope hierarchy (not well-ordered by inclusion).
173 * We encode this by having its enum value be greater than
174 * RESET_TYPE_MAX_METHOD. This also prevents issuing it with
175 * efx_ioctl_reset.
176 */
177 RESET_TYPE_MCDI_TIMEOUT,
169 RESET_TYPE_MAX, 178 RESET_TYPE_MAX,
170}; 179};
171 180
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 8ec20b713cc6..fae25a418647 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2696,6 +2696,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
2696 .fini_dmaq = efx_farch_fini_dmaq, 2696 .fini_dmaq = efx_farch_fini_dmaq,
2697 .prepare_flush = falcon_prepare_flush, 2697 .prepare_flush = falcon_prepare_flush,
2698 .finish_flush = efx_port_dummy_op_void, 2698 .finish_flush = efx_port_dummy_op_void,
2699 .prepare_flr = efx_port_dummy_op_void,
2700 .finish_flr = efx_farch_finish_flr,
2699 .describe_stats = falcon_describe_nic_stats, 2701 .describe_stats = falcon_describe_nic_stats,
2700 .update_stats = falcon_update_nic_stats, 2702 .update_stats = falcon_update_nic_stats,
2701 .start_stats = falcon_start_nic_stats, 2703 .start_stats = falcon_start_nic_stats,
@@ -2790,6 +2792,8 @@ const struct efx_nic_type falcon_b0_nic_type = {
2790 .fini_dmaq = efx_farch_fini_dmaq, 2792 .fini_dmaq = efx_farch_fini_dmaq,
2791 .prepare_flush = falcon_prepare_flush, 2793 .prepare_flush = falcon_prepare_flush,
2792 .finish_flush = efx_port_dummy_op_void, 2794 .finish_flush = efx_port_dummy_op_void,
2795 .prepare_flr = efx_port_dummy_op_void,
2796 .finish_flr = efx_farch_finish_flr,
2793 .describe_stats = falcon_describe_nic_stats, 2797 .describe_stats = falcon_describe_nic_stats,
2794 .update_stats = falcon_update_nic_stats, 2798 .update_stats = falcon_update_nic_stats,
2795 .start_stats = falcon_start_nic_stats, 2799 .start_stats = falcon_start_nic_stats,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index a08761360cdf..0537381cd2f6 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -741,6 +741,28 @@ int efx_farch_fini_dmaq(struct efx_nic *efx)
741 return rc; 741 return rc;
742} 742}
743 743
744/* Reset queue and flush accounting after FLR
745 *
746 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
747 * mastering was disabled), in which case we don't receive (RXQ) flush
748 * completion events. This means that efx->rxq_flush_outstanding remained at 4
749 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
750 * events were received, and we didn't go through efx_check_tx_flush_complete())
751 * If we don't fix this up, on the next call to efx_realloc_channels() we won't
752 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
753 * for batched flush requests; and the efx->active_queues gets messed up because
754 * we keep incrementing for the newly initialised queues, but it never went to
755 * zero previously. Then we get a timeout every time we try to restart the
756 * queues, as it doesn't go back to zero when we should be flushing the queues.
757 */
758void efx_farch_finish_flr(struct efx_nic *efx)
759{
760 atomic_set(&efx->rxq_flush_pending, 0);
761 atomic_set(&efx->rxq_flush_outstanding, 0);
762 atomic_set(&efx->active_queues, 0);
763}
764
765
744/************************************************************************** 766/**************************************************************************
745 * 767 *
746 * Event queue processing 768 * Event queue processing
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 7bd4b14bf3b3..5239cf9bdc56 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -52,12 +52,7 @@ static void efx_mcdi_timeout_async(unsigned long context);
52static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, 52static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
53 bool *was_attached_out); 53 bool *was_attached_out);
54static bool efx_mcdi_poll_once(struct efx_nic *efx); 54static bool efx_mcdi_poll_once(struct efx_nic *efx);
55 55static void efx_mcdi_abandon(struct efx_nic *efx);
56static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
57{
58 EFX_BUG_ON_PARANOID(!efx->mcdi);
59 return &efx->mcdi->iface;
60}
61 56
62int efx_mcdi_init(struct efx_nic *efx) 57int efx_mcdi_init(struct efx_nic *efx)
63{ 58{
@@ -558,6 +553,8 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
558 rc = 0; 553 rc = 0;
559 } 554 }
560 555
556 efx_mcdi_abandon(efx);
557
561 /* Close the race with efx_mcdi_ev_cpl() executing just too late 558 /* Close the race with efx_mcdi_ev_cpl() executing just too late
562 * and completing a request we've just cancelled, by ensuring 559 * and completing a request we've just cancelled, by ensuring
563 * that the seqno check therein fails. 560 * that the seqno check therein fails.
@@ -672,6 +669,9 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
672 if (efx->mc_bist_for_other_fn) 669 if (efx->mc_bist_for_other_fn)
673 return -ENETDOWN; 670 return -ENETDOWN;
674 671
672 if (mcdi->mode == MCDI_MODE_FAIL)
673 return -ENETDOWN;
674
675 efx_mcdi_acquire_sync(mcdi); 675 efx_mcdi_acquire_sync(mcdi);
676 efx_mcdi_send_request(efx, cmd, inbuf, inlen); 676 efx_mcdi_send_request(efx, cmd, inbuf, inlen);
677 return 0; 677 return 0;
@@ -812,7 +812,11 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
812 return; 812 return;
813 813
814 mcdi = efx_mcdi(efx); 814 mcdi = efx_mcdi(efx);
815 if (mcdi->mode == MCDI_MODE_POLL) 815 /* If already in polling mode, nothing to do.
816 * If in fail-fast state, don't switch to polled completion.
817 * FLR recovery will do that later.
818 */
819 if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
816 return; 820 return;
817 821
818 /* We can switch from event completion to polled completion, because 822 /* We can switch from event completion to polled completion, because
@@ -841,8 +845,8 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
841 845
842 mcdi = efx_mcdi(efx); 846 mcdi = efx_mcdi(efx);
843 847
844 /* We must be in polling mode so no more requests can be queued */ 848 /* We must be in poll or fail mode so no more requests can be queued */
845 BUG_ON(mcdi->mode != MCDI_MODE_POLL); 849 BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
846 850
847 del_timer_sync(&mcdi->async_timer); 851 del_timer_sync(&mcdi->async_timer);
848 852
@@ -875,8 +879,11 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
875 return; 879 return;
876 880
877 mcdi = efx_mcdi(efx); 881 mcdi = efx_mcdi(efx);
878 882 /* If already in event completion mode, nothing to do.
879 if (mcdi->mode == MCDI_MODE_EVENTS) 883 * If in fail-fast state, don't switch to event completion. FLR
884 * recovery will do that later.
885 */
886 if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
880 return; 887 return;
881 888
882 /* We can't switch from polled to event completion in the middle of a 889 /* We can't switch from polled to event completion in the middle of a
@@ -966,6 +973,19 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx)
966 spin_unlock(&mcdi->iface_lock); 973 spin_unlock(&mcdi->iface_lock);
967} 974}
968 975
976/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
977 * to recover.
978 */
979static void efx_mcdi_abandon(struct efx_nic *efx)
980{
981 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
982
983 if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
984 return; /* it had already been done */
985 netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
986 efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
987}
988
969/* Called from falcon_process_eventq for MCDI events */ 989/* Called from falcon_process_eventq for MCDI events */
970void efx_mcdi_process_event(struct efx_channel *channel, 990void efx_mcdi_process_event(struct efx_channel *channel,
971 efx_qword_t *event) 991 efx_qword_t *event)
@@ -1512,6 +1532,19 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
1512{ 1532{
1513 int rc; 1533 int rc;
1514 1534
1535 /* If MCDI is down, we can't handle_assertion */
1536 if (method == RESET_TYPE_MCDI_TIMEOUT) {
1537 rc = pci_reset_function(efx->pci_dev);
1538 if (rc)
1539 return rc;
1540 /* Re-enable polled MCDI completion */
1541 if (efx->mcdi) {
1542 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1543 mcdi->mode = MCDI_MODE_POLL;
1544 }
1545 return 0;
1546 }
1547
1515 /* Recover from a failed assertion pre-reset */ 1548 /* Recover from a failed assertion pre-reset */
1516 rc = efx_mcdi_handle_assertion(efx); 1549 rc = efx_mcdi_handle_assertion(efx);
1517 if (rc) 1550 if (rc)
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 52931aebf3c3..56465f7465a2 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -28,9 +28,16 @@ enum efx_mcdi_state {
28 MCDI_STATE_COMPLETED, 28 MCDI_STATE_COMPLETED,
29}; 29};
30 30
31/**
32 * enum efx_mcdi_mode - MCDI transaction mode
33 * @MCDI_MODE_POLL: poll for MCDI completion, until timeout
34 * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once
35 * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
36 */
31enum efx_mcdi_mode { 37enum efx_mcdi_mode {
32 MCDI_MODE_POLL, 38 MCDI_MODE_POLL,
33 MCDI_MODE_EVENTS, 39 MCDI_MODE_EVENTS,
40 MCDI_MODE_FAIL,
34}; 41};
35 42
36/** 43/**
@@ -104,6 +111,12 @@ struct efx_mcdi_data {
104 u32 fn_flags; 111 u32 fn_flags;
105}; 112};
106 113
114static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
115{
116 EFX_BUG_ON_PARANOID(!efx->mcdi);
117 return &efx->mcdi->iface;
118}
119
107#ifdef CONFIG_SFC_MCDI_MON 120#ifdef CONFIG_SFC_MCDI_MON
108static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) 121static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
109{ 122{
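efx_mcdi_abandon() in the mcdi.c hunk above relies on xchg() returning the previous value, so when several commands time out back to back only the first caller moves the interface into MCDI_MODE_FAIL and schedules the FLR; everyone else sees that the old value was already MCDI_MODE_FAIL and backs off. A user-space analogue of that do-once pattern with C11 atomics; the mode values and names here are arbitrary.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int mode;         /* 0 = normal, 1 = failed (made-up values) */

    static void abandon(void)
    {
            /* atomic_exchange() returns the old value, so only the first
             * caller to flip the flag performs the recovery action. */
            if (atomic_exchange(&mode, 1) == 1)
                    return;         /* someone else already did it */
            puts("scheduling recovery");
    }

    int main(void)
    {
            abandon();      /* prints once */
            abandon();      /* no-op */
            return 0;
    }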
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 8a400a0595eb..5bdae8ed7c57 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -972,6 +972,8 @@ struct efx_mtd_partition {
972 * (for Falcon architecture) 972 * (for Falcon architecture)
973 * @finish_flush: Clean up after flushing the DMA queues (for Falcon 973 * @finish_flush: Clean up after flushing the DMA queues (for Falcon
974 * architecture) 974 * architecture)
975 * @prepare_flr: Prepare for an FLR
976 * @finish_flr: Clean up after an FLR
975 * @describe_stats: Describe statistics for ethtool 977 * @describe_stats: Describe statistics for ethtool
976 * @update_stats: Update statistics not provided by event handling. 978 * @update_stats: Update statistics not provided by event handling.
977 * Either argument may be %NULL. 979 * Either argument may be %NULL.
@@ -1100,6 +1102,8 @@ struct efx_nic_type {
1100 int (*fini_dmaq)(struct efx_nic *efx); 1102 int (*fini_dmaq)(struct efx_nic *efx);
1101 void (*prepare_flush)(struct efx_nic *efx); 1103 void (*prepare_flush)(struct efx_nic *efx);
1102 void (*finish_flush)(struct efx_nic *efx); 1104 void (*finish_flush)(struct efx_nic *efx);
1105 void (*prepare_flr)(struct efx_nic *efx);
1106 void (*finish_flr)(struct efx_nic *efx);
1103 size_t (*describe_stats)(struct efx_nic *efx, u8 *names); 1107 size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
1104 size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, 1108 size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
1105 struct rtnl_link_stats64 *core_stats); 1109 struct rtnl_link_stats64 *core_stats);
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 32d969e857f7..89b83e59e1dc 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
156 efx->net_dev->rx_cpu_rmap = NULL; 156 efx->net_dev->rx_cpu_rmap = NULL;
157#endif 157#endif
158 158
159 /* Disable MSI/MSI-X interrupts */ 159 if (EFX_INT_MODE_USE_MSI(efx)) {
160 efx_for_each_channel(channel, efx) 160 /* Disable MSI/MSI-X interrupts */
161 free_irq(channel->irq, &efx->msi_context[channel->channel]); 161 efx_for_each_channel(channel, efx)
162 162 free_irq(channel->irq,
163 /* Disable legacy interrupt */ 163 &efx->msi_context[channel->channel]);
164 if (efx->legacy_irq) 164 } else {
165 /* Disable legacy interrupt */
165 free_irq(efx->legacy_irq, efx); 166 free_irq(efx->legacy_irq, efx);
167 }
166} 168}
167 169
168/* Register dump */ 170/* Register dump */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index a001fae1a8d7..d3ad8ed8d901 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -757,6 +757,7 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
757int efx_nic_flush_queues(struct efx_nic *efx); 757int efx_nic_flush_queues(struct efx_nic *efx);
758void siena_prepare_flush(struct efx_nic *efx); 758void siena_prepare_flush(struct efx_nic *efx);
759int efx_farch_fini_dmaq(struct efx_nic *efx); 759int efx_farch_fini_dmaq(struct efx_nic *efx);
760void efx_farch_finish_flr(struct efx_nic *efx);
760void siena_finish_flush(struct efx_nic *efx); 761void siena_finish_flush(struct efx_nic *efx);
761void falcon_start_nic_stats(struct efx_nic *efx); 762void falcon_start_nic_stats(struct efx_nic *efx);
762void falcon_stop_nic_stats(struct efx_nic *efx); 763void falcon_stop_nic_stats(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 23f3a6f7737a..50ffefed492c 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -921,6 +921,8 @@ const struct efx_nic_type siena_a0_nic_type = {
921 .fini_dmaq = efx_farch_fini_dmaq, 921 .fini_dmaq = efx_farch_fini_dmaq,
922 .prepare_flush = siena_prepare_flush, 922 .prepare_flush = siena_prepare_flush,
923 .finish_flush = siena_finish_flush, 923 .finish_flush = siena_finish_flush,
924 .prepare_flr = efx_port_dummy_op_void,
925 .finish_flr = efx_farch_finish_flr,
924 .describe_stats = siena_describe_nic_stats, 926 .describe_stats = siena_describe_nic_stats,
925 .update_stats = siena_update_nic_stats, 927 .update_stats = siena_update_nic_stats,
926 .start_stats = efx_mcdi_mac_start_stats, 928 .start_stats = efx_mcdi_mac_start_stats,
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index d1b4dca53a9d..bcaa41af1e62 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -147,18 +147,19 @@ MODULE_ALIAS("platform:smc91x");
147 */ 147 */
148#define MII_DELAY 1 148#define MII_DELAY 1
149 149
150#if SMC_DEBUG > 0 150#define DBG(n, dev, fmt, ...) \
151#define DBG(n, dev, args...) \ 151 do { \
152 do { \ 152 if (SMC_DEBUG >= (n)) \
153 if (SMC_DEBUG >= (n)) \ 153 netdev_dbg(dev, fmt, ##__VA_ARGS__); \
154 netdev_dbg(dev, args); \
155 } while (0) 154 } while (0)
156 155
157#define PRINTK(dev, args...) netdev_info(dev, args) 156#define PRINTK(dev, fmt, ...) \
158#else 157 do { \
159#define DBG(n, dev, args...) do { } while (0) 158 if (SMC_DEBUG > 0) \
160#define PRINTK(dev, args...) netdev_dbg(dev, args) 159 netdev_info(dev, fmt, ##__VA_ARGS__); \
161#endif 160 else \
161 netdev_dbg(dev, fmt, ##__VA_ARGS__); \
162 } while (0)
162 163
163#if SMC_DEBUG > 3 164#if SMC_DEBUG > 3
164static void PRINT_PKT(u_char *buf, int length) 165static void PRINT_PKT(u_char *buf, int length)
@@ -191,7 +192,7 @@ static void PRINT_PKT(u_char *buf, int length)
191 pr_cont("\n"); 192 pr_cont("\n");
192} 193}
193#else 194#else
194#define PRINT_PKT(x...) do { } while (0) 195static inline void PRINT_PKT(u_char *buf, int length) { }
195#endif 196#endif
196 197
197 198
@@ -1781,7 +1782,7 @@ static int smc_findirq(struct smc_local *lp)
1781 int timeout = 20; 1782 int timeout = 20;
1782 unsigned long cookie; 1783 unsigned long cookie;
1783 1784
1784 DBG(2, dev, "%s: %s\n", CARDNAME, __func__); 1785 DBG(2, lp->dev, "%s: %s\n", CARDNAME, __func__);
1785 1786
1786 cookie = probe_irq_on(); 1787 cookie = probe_irq_on();
1787 1788
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d940034acdd4..0f4841d2e8dc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
1704 if (ret) { 1704 if (ret) {
1705 pr_err("%s: Cannot attach to PHY (error: %d)\n", 1705 pr_err("%s: Cannot attach to PHY (error: %d)\n",
1706 __func__, ret); 1706 __func__, ret);
1707 goto phy_error; 1707 return ret;
1708 } 1708 }
1709 } 1709 }
1710 1710
@@ -1779,8 +1779,6 @@ init_error:
1779dma_desc_error: 1779dma_desc_error:
1780 if (priv->phydev) 1780 if (priv->phydev)
1781 phy_disconnect(priv->phydev); 1781 phy_disconnect(priv->phydev);
1782phy_error:
1783 clk_disable_unprepare(priv->stmmac_clk);
1784 1782
1785 return ret; 1783 return ret;
1786} 1784}
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index df8d383acf48..b9ac20f42651 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
246 int i; 246 int i;
247 247
248 for (i = 0; i < N_TX_RINGS; i++) 248 for (i = 0; i < N_TX_RINGS; i++)
249 spin_lock(&cp->tx_lock[i]); 249 spin_lock_nested(&cp->tx_lock[i], i);
250} 250}
251 251
252static inline void cas_lock_all(struct cas *cp) 252static inline void cas_lock_all(struct cas *cp)
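All of cassini's per-ring Tx locks share one lock class, so taking them in a loop makes lockdep report a false recursive-lock deadlock unless each acquisition is annotated with its nesting depth — which is what spin_lock_nested(&cp->tx_lock[i], i) does; the subclass must stay below MAX_LOCKDEP_SUBCLASSES (8). A hedged kernel-style sketch with invented names (struct rings, rings_lock_all, ...); only the spinlock calls are real API.

    #include <linux/spinlock.h>

    #define N_RINGS 4       /* keep below MAX_LOCKDEP_SUBCLASSES (8) */

    struct rings {
            spinlock_t lock[N_RINGS];
    };

    static void rings_init(struct rings *r)
    {
            int i;

            for (i = 0; i < N_RINGS; i++)
                    spin_lock_init(&r->lock[i]);
    }

    static void rings_lock_all(struct rings *r)
    {
            int i;

            /* Same lock class for every entry: give lockdep the nesting
             * level so the second acquisition isn't flagged as recursion. */
            for (i = 0; i < N_RINGS; i++)
                    spin_lock_nested(&r->lock[i], i);
    }

    static void rings_unlock_all(struct rings *r)
    {
            int i;

            for (i = N_RINGS - 1; i >= 0; i--)
                    spin_unlock(&r->lock[i]);
    }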
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 36aa109416c4..c331b7ebc812 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1871 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 1871 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
1872 phyid = be32_to_cpup(parp+1); 1872 phyid = be32_to_cpup(parp+1);
1873 mdio = of_find_device_by_node(mdio_node); 1873 mdio = of_find_device_by_node(mdio_node);
1874 1874 of_node_put(mdio_node);
1875 if (strncmp(mdio->name, "gpio", 4) == 0) { 1875 if (!mdio) {
1876 /* GPIO bitbang MDIO driver attached */ 1876 pr_err("Missing mdio platform device\n");
1877 struct mii_bus *bus = dev_get_drvdata(&mdio->dev); 1877 return -EINVAL;
1878
1879 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1880 PHY_ID_FMT, bus->id, phyid);
1881 } else {
1882 /* davinci MDIO driver attached */
1883 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1884 PHY_ID_FMT, mdio->name, phyid);
1885 } 1878 }
1879 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1880 PHY_ID_FMT, mdio->name, phyid);
1886 1881
1887 mac_addr = of_get_mac_address(slave_node); 1882 mac_addr = of_get_mac_address(slave_node);
1888 if (mac_addr) 1883 if (mac_addr)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 31e55fba7cad..7918d5132c1f 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -382,6 +382,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
382 if (skb_is_gso(skb)) 382 if (skb_is_gso(skb))
383 goto do_lso; 383 goto do_lso;
384 384
385 if ((skb->ip_summed == CHECKSUM_NONE) ||
386 (skb->ip_summed == CHECKSUM_UNNECESSARY))
387 goto do_send;
388
385 rndis_msg_size += NDIS_CSUM_PPI_SIZE; 389 rndis_msg_size += NDIS_CSUM_PPI_SIZE;
386 ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, 390 ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
387 TCPIP_CHKSUM_PKTINFO); 391 TCPIP_CHKSUM_PKTINFO);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 430bb0db9bc4..e36f194673a4 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -365,7 +365,7 @@ __at86rf230_read_subreg(struct at86rf230_local *lp,
365 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); 365 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
366 366
367 if (status == 0) 367 if (status == 0)
368 *data = buf[1]; 368 *data = (buf[1] & mask) >> shift;
369 369
370 return status; 370 return status;
371} 371}
@@ -1025,14 +1025,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
1025 return -EINVAL; 1025 return -EINVAL;
1026 } 1026 }
1027 1027
1028 rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
1029 if (rc)
1030 return rc;
1031 if (!status) {
1032 dev_err(&lp->spi->dev, "AVDD error\n");
1033 return -EINVAL;
1034 }
1035
1036 return 0; 1028 return 0;
1037} 1029}
1038 1030
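
The first at86rf230 hunk makes __at86rf230_read_subreg() return only the requested field: the raw register byte is masked and then shifted down. A reminder of the pattern as a standalone helper (the mask and shift values below are made up, not taken from the chip's register map):

#include <stdint.h>
#include <assert.h>

/* mask selects the field's bits inside the register byte,
 * shift right-aligns the result. */
static uint8_t subreg_extract(uint8_t reg, uint8_t mask, unsigned int shift)
{
    return (uint8_t)((reg & mask) >> shift);
}

int main(void)
{
    /* Example: a 2-bit field occupying bits 3:2 -> mask 0x0c, shift 2. */
    assert(subreg_extract(0xb6, 0x0c, 2) == 0x1);
    return 0;
}
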
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 753a8c23d15d..d53e299ae1d9 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -263,11 +263,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
263 const struct macvlan_dev *vlan = netdev_priv(dev); 263 const struct macvlan_dev *vlan = netdev_priv(dev);
264 const struct macvlan_port *port = vlan->port; 264 const struct macvlan_port *port = vlan->port;
265 const struct macvlan_dev *dest; 265 const struct macvlan_dev *dest;
266 __u8 ip_summed = skb->ip_summed;
267 266
268 if (vlan->mode == MACVLAN_MODE_BRIDGE) { 267 if (vlan->mode == MACVLAN_MODE_BRIDGE) {
269 const struct ethhdr *eth = (void *)skb->data; 268 const struct ethhdr *eth = (void *)skb->data;
270 skb->ip_summed = CHECKSUM_UNNECESSARY;
271 269
272 /* send to other bridge ports directly */ 270 /* send to other bridge ports directly */
273 if (is_multicast_ether_addr(eth->h_dest)) { 271 if (is_multicast_ether_addr(eth->h_dest)) {
@@ -285,7 +283,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
285 } 283 }
286 284
287xmit_world: 285xmit_world:
288 skb->ip_summed = ip_summed;
289 skb->dev = vlan->lowerdev; 286 skb->dev = vlan->lowerdev;
290 return dev_queue_xmit(skb); 287 return dev_queue_xmit(skb);
291} 288}
@@ -461,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
461 struct macvlan_dev *vlan = netdev_priv(dev); 458 struct macvlan_dev *vlan = netdev_priv(dev);
462 struct net_device *lowerdev = vlan->lowerdev; 459 struct net_device *lowerdev = vlan->lowerdev;
463 460
464 if (change & IFF_ALLMULTI) 461 if (dev->flags & IFF_UP) {
465 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); 462 if (change & IFF_ALLMULTI)
463 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
464 }
466} 465}
467 466
468static void macvlan_set_mac_lists(struct net_device *dev) 467static void macvlan_set_mac_lists(struct net_device *dev)
@@ -518,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
518#define MACVLAN_STATE_MASK \ 517#define MACVLAN_STATE_MASK \
519 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) 518 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
520 519
520static int macvlan_get_nest_level(struct net_device *dev)
521{
522 return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
523}
524
521static void macvlan_set_lockdep_class_one(struct net_device *dev, 525static void macvlan_set_lockdep_class_one(struct net_device *dev,
522 struct netdev_queue *txq, 526 struct netdev_queue *txq,
523 void *_unused) 527 void *_unused)
@@ -528,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
528 532
529static void macvlan_set_lockdep_class(struct net_device *dev) 533static void macvlan_set_lockdep_class(struct net_device *dev)
530{ 534{
531 lockdep_set_class(&dev->addr_list_lock, 535 lockdep_set_class_and_subclass(&dev->addr_list_lock,
532 &macvlan_netdev_addr_lock_key); 536 &macvlan_netdev_addr_lock_key,
537 macvlan_get_nest_level(dev));
533 netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); 538 netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
534} 539}
535 540
@@ -724,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
724 .ndo_fdb_add = macvlan_fdb_add, 729 .ndo_fdb_add = macvlan_fdb_add,
725 .ndo_fdb_del = macvlan_fdb_del, 730 .ndo_fdb_del = macvlan_fdb_del,
726 .ndo_fdb_dump = ndo_dflt_fdb_dump, 731 .ndo_fdb_dump = ndo_dflt_fdb_dump,
732 .ndo_get_lock_subclass = macvlan_get_nest_level,
727}; 733};
728 734
729void macvlan_common_setup(struct net_device *dev) 735void macvlan_common_setup(struct net_device *dev)
@@ -852,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
852 vlan->dev = dev; 858 vlan->dev = dev;
853 vlan->port = port; 859 vlan->port = port;
854 vlan->set_features = MACVLAN_FEATURES; 860 vlan->set_features = MACVLAN_FEATURES;
861 vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
855 862
856 vlan->mode = MACVLAN_MODE_VEPA; 863 vlan->mode = MACVLAN_MODE_VEPA;
857 if (data && data[IFLA_MACVLAN_MODE]) 864 if (data && data[IFLA_MACVLAN_MODE])
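
The macvlan changes record a nest_level for every device (one more than whatever it is stacked on) and hand that number to lockdep as the addr_list_lock subclass, so macvlan-on-macvlan setups no longer look like recursive locking on a single lock class. A toy model of the bookkeeping, with invented type and function names:

#include <stddef.h>

struct stacked_dev {
    struct stacked_dev *lowerdev;   /* NULL for the physical device */
    int nest_level;                 /* grows by one per stacking layer */
};

/* Each new virtual device sits one level above its lower device; that
 * level is what the real driver feeds to
 * lockdep_set_class_and_subclass() for the address-list lock. */
static void stacked_dev_init(struct stacked_dev *dev,
                             struct stacked_dev *lower)
{
    dev->lowerdev = lower;
    dev->nest_level = (lower ? lower->nest_level : 0) + 1;
}
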
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index ff111a89e17f..3381c4f91a8c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -322,6 +322,15 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
322 segs = nskb; 322 segs = nskb;
323 } 323 }
324 } else { 324 } else {
325 /* If we receive a partial checksum and the tap side
326 * doesn't support checksum offload, compute the checksum.
327 * Note: it doesn't matter which checksum feature to
328 * check, we either support them all or none.
329 */
330 if (skb->ip_summed == CHECKSUM_PARTIAL &&
331 !(features & NETIF_F_ALL_CSUM) &&
332 skb_checksum_help(skb))
333 goto drop;
325 skb_queue_tail(&q->sk.sk_receive_queue, skb); 334 skb_queue_tail(&q->sk.sk_receive_queue, skb);
326 } 335 }
327 336
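
The macvtap addition falls back to skb_checksum_help() when a packet still carries CHECKSUM_PARTIAL but the reader offers no checksum offload, i.e. the checksum is finished in software before the frame is queued. For reference, such a software fallback boils down to the RFC 1071 ones'-complement sum, shown here as a minimal userspace routine (an illustration of the algorithm, not the kernel helper):

#include <stddef.h>
#include <stdint.h>

static uint16_t inet_checksum(const void *data, size_t len)
{
    const uint8_t *p = data;
    uint32_t sum = 0;

    while (len > 1) {               /* sum 16-bit words */
        sum += (uint32_t)(p[0] << 8 | p[1]);
        p += 2;
        len -= 2;
    }
    if (len)                        /* odd trailing byte */
        sum += (uint32_t)(p[0] << 8);
    while (sum >> 16)               /* fold the carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}
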
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index e701433bf52f..5f1a2250018f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -32,29 +32,39 @@
32 32
33struct mdio_gpio_info { 33struct mdio_gpio_info {
34 struct mdiobb_ctrl ctrl; 34 struct mdiobb_ctrl ctrl;
35 int mdc, mdio; 35 int mdc, mdio, mdo;
36 int mdc_active_low, mdio_active_low, mdo_active_low;
36}; 37};
37 38
38static void *mdio_gpio_of_get_data(struct platform_device *pdev) 39static void *mdio_gpio_of_get_data(struct platform_device *pdev)
39{ 40{
40 struct device_node *np = pdev->dev.of_node; 41 struct device_node *np = pdev->dev.of_node;
41 struct mdio_gpio_platform_data *pdata; 42 struct mdio_gpio_platform_data *pdata;
43 enum of_gpio_flags flags;
42 int ret; 44 int ret;
43 45
44 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 46 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
45 if (!pdata) 47 if (!pdata)
46 return NULL; 48 return NULL;
47 49
48 ret = of_get_gpio(np, 0); 50 ret = of_get_gpio_flags(np, 0, &flags);
49 if (ret < 0) 51 if (ret < 0)
50 return NULL; 52 return NULL;
51 53
52 pdata->mdc = ret; 54 pdata->mdc = ret;
55 pdata->mdc_active_low = flags & OF_GPIO_ACTIVE_LOW;
53 56
54 ret = of_get_gpio(np, 1); 57 ret = of_get_gpio_flags(np, 1, &flags);
55 if (ret < 0) 58 if (ret < 0)
56 return NULL; 59 return NULL;
57 pdata->mdio = ret; 60 pdata->mdio = ret;
61 pdata->mdio_active_low = flags & OF_GPIO_ACTIVE_LOW;
62
63 ret = of_get_gpio_flags(np, 2, &flags);
64 if (ret > 0) {
65 pdata->mdo = ret;
66 pdata->mdo_active_low = flags & OF_GPIO_ACTIVE_LOW;
67 }
58 68
59 return pdata; 69 return pdata;
60} 70}
@@ -64,8 +74,19 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
64 struct mdio_gpio_info *bitbang = 74 struct mdio_gpio_info *bitbang =
65 container_of(ctrl, struct mdio_gpio_info, ctrl); 75 container_of(ctrl, struct mdio_gpio_info, ctrl);
66 76
77 if (bitbang->mdo) {
78 /* Separate output pin. Always set its value to high
79 * when changing direction. If direction is input,
80 * assume the pin serves as pull-up. If direction is
81 * output, the default value is high.
82 */
83 gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low);
84 return;
85 }
86
67 if (dir) 87 if (dir)
68 gpio_direction_output(bitbang->mdio, 1); 88 gpio_direction_output(bitbang->mdio,
89 1 ^ bitbang->mdio_active_low);
69 else 90 else
70 gpio_direction_input(bitbang->mdio); 91 gpio_direction_input(bitbang->mdio);
71} 92}
@@ -75,7 +96,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
75 struct mdio_gpio_info *bitbang = 96 struct mdio_gpio_info *bitbang =
76 container_of(ctrl, struct mdio_gpio_info, ctrl); 97 container_of(ctrl, struct mdio_gpio_info, ctrl);
77 98
78 return gpio_get_value(bitbang->mdio); 99 return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low;
79} 100}
80 101
81static void mdio_set(struct mdiobb_ctrl *ctrl, int what) 102static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -83,7 +104,10 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
83 struct mdio_gpio_info *bitbang = 104 struct mdio_gpio_info *bitbang =
84 container_of(ctrl, struct mdio_gpio_info, ctrl); 105 container_of(ctrl, struct mdio_gpio_info, ctrl);
85 106
86 gpio_set_value(bitbang->mdio, what); 107 if (bitbang->mdo)
108 gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low);
109 else
110 gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low);
87} 111}
88 112
89static void mdc_set(struct mdiobb_ctrl *ctrl, int what) 113static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -91,7 +115,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
91 struct mdio_gpio_info *bitbang = 115 struct mdio_gpio_info *bitbang =
92 container_of(ctrl, struct mdio_gpio_info, ctrl); 116 container_of(ctrl, struct mdio_gpio_info, ctrl);
93 117
94 gpio_set_value(bitbang->mdc, what); 118 gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low);
95} 119}
96 120
97static struct mdiobb_ops mdio_gpio_ops = { 121static struct mdiobb_ops mdio_gpio_ops = {
@@ -110,18 +134,22 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
110 struct mdio_gpio_info *bitbang; 134 struct mdio_gpio_info *bitbang;
111 int i; 135 int i;
112 136
113 bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL); 137 bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL);
114 if (!bitbang) 138 if (!bitbang)
115 goto out; 139 goto out;
116 140
117 bitbang->ctrl.ops = &mdio_gpio_ops; 141 bitbang->ctrl.ops = &mdio_gpio_ops;
118 bitbang->ctrl.reset = pdata->reset; 142 bitbang->ctrl.reset = pdata->reset;
119 bitbang->mdc = pdata->mdc; 143 bitbang->mdc = pdata->mdc;
144 bitbang->mdc_active_low = pdata->mdc_active_low;
120 bitbang->mdio = pdata->mdio; 145 bitbang->mdio = pdata->mdio;
146 bitbang->mdio_active_low = pdata->mdio_active_low;
147 bitbang->mdo = pdata->mdo;
148 bitbang->mdo_active_low = pdata->mdo_active_low;
121 149
122 new_bus = alloc_mdio_bitbang(&bitbang->ctrl); 150 new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
123 if (!new_bus) 151 if (!new_bus)
124 goto out_free_bitbang; 152 goto out;
125 153
126 new_bus->name = "GPIO Bitbanged MDIO", 154 new_bus->name = "GPIO Bitbanged MDIO",
127 155
@@ -138,11 +166,18 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
138 166
139 snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id); 167 snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
140 168
141 if (gpio_request(bitbang->mdc, "mdc")) 169 if (devm_gpio_request(dev, bitbang->mdc, "mdc"))
170 goto out_free_bus;
171
172 if (devm_gpio_request(dev, bitbang->mdio, "mdio"))
142 goto out_free_bus; 173 goto out_free_bus;
143 174
144 if (gpio_request(bitbang->mdio, "mdio")) 175 if (bitbang->mdo) {
145 goto out_free_mdc; 176 if (devm_gpio_request(dev, bitbang->mdo, "mdo"))
177 goto out_free_bus;
178 gpio_direction_output(bitbang->mdo, 1);
179 gpio_direction_input(bitbang->mdio);
180 }
146 181
147 gpio_direction_output(bitbang->mdc, 0); 182 gpio_direction_output(bitbang->mdc, 0);
148 183
@@ -150,12 +185,8 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
150 185
151 return new_bus; 186 return new_bus;
152 187
153out_free_mdc:
154 gpio_free(bitbang->mdc);
155out_free_bus: 188out_free_bus:
156 free_mdio_bitbang(new_bus); 189 free_mdio_bitbang(new_bus);
157out_free_bitbang:
158 kfree(bitbang);
159out: 190out:
160 return NULL; 191 return NULL;
161} 192}
@@ -163,13 +194,8 @@ out:
163static void mdio_gpio_bus_deinit(struct device *dev) 194static void mdio_gpio_bus_deinit(struct device *dev)
164{ 195{
165 struct mii_bus *bus = dev_get_drvdata(dev); 196 struct mii_bus *bus = dev_get_drvdata(dev);
166 struct mdio_gpio_info *bitbang = bus->priv;
167 197
168 dev_set_drvdata(dev, NULL);
169 gpio_free(bitbang->mdio);
170 gpio_free(bitbang->mdc);
171 free_mdio_bitbang(bus); 198 free_mdio_bitbang(bus);
172 kfree(bitbang);
173} 199}
174 200
175static void mdio_gpio_bus_destroy(struct device *dev) 201static void mdio_gpio_bus_destroy(struct device *dev)
@@ -189,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
189 if (pdev->dev.of_node) { 215 if (pdev->dev.of_node) {
190 pdata = mdio_gpio_of_get_data(pdev); 216 pdata = mdio_gpio_of_get_data(pdev);
191 bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); 217 bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
218 if (bus_id < 0) {
219 dev_warn(&pdev->dev, "failed to get alias id\n");
220 bus_id = 0;
221 }
192 } else { 222 } else {
193 pdata = dev_get_platdata(&pdev->dev); 223 pdata = dev_get_platdata(&pdev->dev);
194 bus_id = pdev->id; 224 bus_id = pdev->id;
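
Almost every functional change in the mdio-gpio diff is the same idiom: the logical bit the bit-bang code wants is XORed with an active-low flag on the way out to the pin and again on the way back in, with the flags taken from the OF_GPIO_ACTIVE_LOW bits returned by of_get_gpio_flags(). A standalone sketch of that translation (invented struct, not the driver's types):

#include <stdbool.h>

struct gpio_line {
    int gpio;          /* GPIO number */
    bool active_low;   /* true when the DT flags carry OF_GPIO_ACTIVE_LOW */
};

/* Logical 1 means "asserted"; the physical level is inverted for an
 * active-low line, which is what the repeated "value ^ *_active_low"
 * expressions in the patch implement. */
static int to_physical(const struct gpio_line *g, int logical)
{
    return logical ^ g->active_low;
}

static int to_logical(const struct gpio_line *g, int physical)
{
    return physical ^ g->active_low;
}
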
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 5ad971a55c5d..d849684231c1 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -246,13 +246,13 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
246 if (val1 != -1) 246 if (val1 != -1)
247 newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0); 247 newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0);
248 248
249 if (val2 != -1) 249 if (val2 != -2)
250 newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4); 250 newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4);
251 251
252 if (val3 != -1) 252 if (val3 != -3)
253 newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8); 253 newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8);
254 254
255 if (val4 != -1) 255 if (val4 != -4)
256 newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12); 256 newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12);
257 257
258 return kszphy_extended_write(phydev, reg, newval); 258 return kszphy_extended_write(phydev, reg, newval);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1b6d09aef427..3bc079a67a3d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
715 struct delayed_work *dwork = to_delayed_work(work); 715 struct delayed_work *dwork = to_delayed_work(work);
716 struct phy_device *phydev = 716 struct phy_device *phydev =
717 container_of(dwork, struct phy_device, state_queue); 717 container_of(dwork, struct phy_device, state_queue);
718 int needs_aneg = 0, do_suspend = 0; 718 bool needs_aneg = false, do_suspend = false, do_resume = false;
719 int err = 0; 719 int err = 0;
720 720
721 mutex_lock(&phydev->lock); 721 mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
727 case PHY_PENDING: 727 case PHY_PENDING:
728 break; 728 break;
729 case PHY_UP: 729 case PHY_UP:
730 needs_aneg = 1; 730 needs_aneg = true;
731 731
732 phydev->link_timeout = PHY_AN_TIMEOUT; 732 phydev->link_timeout = PHY_AN_TIMEOUT;
733 733
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
757 phydev->adjust_link(phydev->attached_dev); 757 phydev->adjust_link(phydev->attached_dev);
758 758
759 } else if (0 == phydev->link_timeout--) 759 } else if (0 == phydev->link_timeout--)
760 needs_aneg = 1; 760 needs_aneg = true;
761 break; 761 break;
762 case PHY_NOLINK: 762 case PHY_NOLINK:
763 err = phy_read_status(phydev); 763 err = phy_read_status(phydev);
@@ -765,6 +765,17 @@ void phy_state_machine(struct work_struct *work)
765 break; 765 break;
766 766
767 if (phydev->link) { 767 if (phydev->link) {
768 if (AUTONEG_ENABLE == phydev->autoneg) {
769 err = phy_aneg_done(phydev);
770 if (err < 0)
771 break;
772
773 if (!err) {
774 phydev->state = PHY_AN;
775 phydev->link_timeout = PHY_AN_TIMEOUT;
776 break;
777 }
778 }
768 phydev->state = PHY_RUNNING; 779 phydev->state = PHY_RUNNING;
769 netif_carrier_on(phydev->attached_dev); 780 netif_carrier_on(phydev->attached_dev);
770 phydev->adjust_link(phydev->attached_dev); 781 phydev->adjust_link(phydev->attached_dev);
@@ -780,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
780 netif_carrier_on(phydev->attached_dev); 791 netif_carrier_on(phydev->attached_dev);
781 } else { 792 } else {
782 if (0 == phydev->link_timeout--) 793 if (0 == phydev->link_timeout--)
783 needs_aneg = 1; 794 needs_aneg = true;
784 } 795 }
785 796
786 phydev->adjust_link(phydev->attached_dev); 797 phydev->adjust_link(phydev->attached_dev);
@@ -816,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
816 phydev->link = 0; 827 phydev->link = 0;
817 netif_carrier_off(phydev->attached_dev); 828 netif_carrier_off(phydev->attached_dev);
818 phydev->adjust_link(phydev->attached_dev); 829 phydev->adjust_link(phydev->attached_dev);
819 do_suspend = 1; 830 do_suspend = true;
820 } 831 }
821 break; 832 break;
822 case PHY_RESUMING: 833 case PHY_RESUMING:
@@ -865,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
865 } 876 }
866 phydev->adjust_link(phydev->attached_dev); 877 phydev->adjust_link(phydev->attached_dev);
867 } 878 }
879 do_resume = true;
868 break; 880 break;
869 } 881 }
870 882
@@ -872,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
872 884
873 if (needs_aneg) 885 if (needs_aneg)
874 err = phy_start_aneg(phydev); 886 err = phy_start_aneg(phydev);
875 887 else if (do_suspend)
876 if (do_suspend)
877 phy_suspend(phydev); 888 phy_suspend(phydev);
889 else if (do_resume)
890 phy_resume(phydev);
878 891
879 if (err < 0) 892 if (err < 0)
880 phy_error(phydev); 893 phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0ce606624296..4987a1c6dc52 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
614 err = phy_init_hw(phydev); 614 err = phy_init_hw(phydev);
615 if (err) 615 if (err)
616 phy_detach(phydev); 616 phy_detach(phydev);
617 617 else
618 phy_resume(phydev); 618 phy_resume(phydev);
619 619
620 return err; 620 return err;
621} 621}
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index cc70ecfc7062..ad4a94e9ff57 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -429,13 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) 429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
430 return; 430 return;
431 431
432 spin_lock(&sl->lock); 432 spin_lock_bh(&sl->lock);
433 if (sl->xleft <= 0) { 433 if (sl->xleft <= 0) {
434 /* Now serial buffer is almost free & we can start 434 /* Now serial buffer is almost free & we can start
435 * transmission of another packet */ 435 * transmission of another packet */
436 sl->dev->stats.tx_packets++; 436 sl->dev->stats.tx_packets++;
437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
438 spin_unlock(&sl->lock); 438 spin_unlock_bh(&sl->lock);
439 sl_unlock(sl); 439 sl_unlock(sl);
440 return; 440 return;
441 } 441 }
@@ -443,7 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
443 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 443 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
444 sl->xleft -= actual; 444 sl->xleft -= actual;
445 sl->xhead += actual; 445 sl->xhead += actual;
446 spin_unlock(&sl->lock); 446 spin_unlock_bh(&sl->lock);
447} 447}
448 448
449static void sl_tx_timeout(struct net_device *dev) 449static void sl_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 33008c1d1d67..767fe61b5ac9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2834,8 +2834,10 @@ static int team_device_event(struct notifier_block *unused,
2834 case NETDEV_UP: 2834 case NETDEV_UP:
2835 if (netif_carrier_ok(dev)) 2835 if (netif_carrier_ok(dev))
2836 team_port_change_check(port, true); 2836 team_port_change_check(port, true);
2837 break;
2837 case NETDEV_DOWN: 2838 case NETDEV_DOWN:
2838 team_port_change_check(port, false); 2839 team_port_change_check(port, false);
2840 break;
2839 case NETDEV_CHANGE: 2841 case NETDEV_CHANGE:
2840 if (netif_running(port->dev)) 2842 if (netif_running(port->dev))
2841 team_port_change_check(port, 2843 team_port_change_check(port,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c9f3281506af..2e025ddcef21 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
120 cdc_ncm_unbind(dev, intf); 120 cdc_ncm_unbind(dev, intf);
121} 121}
122 122
123/* verify that the ethernet protocol is IPv4 or IPv6 */
124static bool is_ip_proto(__be16 proto)
125{
126 switch (proto) {
127 case htons(ETH_P_IP):
128 case htons(ETH_P_IPV6):
129 return true;
130 }
131 return false;
132}
123 133
124static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) 134static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
125{ 135{
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
128 struct cdc_ncm_ctx *ctx = info->ctx; 138 struct cdc_ncm_ctx *ctx = info->ctx;
129 __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); 139 __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
130 u16 tci = 0; 140 u16 tci = 0;
141 bool is_ip;
131 u8 *c; 142 u8 *c;
132 143
133 if (!ctx) 144 if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
137 if (skb->len <= ETH_HLEN) 148 if (skb->len <= ETH_HLEN)
138 goto error; 149 goto error;
139 150
151 /* Some applications using e.g. packet sockets will
152 * bypass the VLAN acceleration and create tagged
153 * ethernet frames directly. We primarily look for
154 * the accelerated out-of-band tag, but fall back if
155 * required
156 */
157 skb_reset_mac_header(skb);
158 if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
159 __vlan_get_tag(skb, &tci) == 0) {
160 is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
161 skb_pull(skb, VLAN_ETH_HLEN);
162 } else {
163 is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
164 skb_pull(skb, ETH_HLEN);
165 }
166
140 /* mapping VLANs to MBIM sessions: 167 /* mapping VLANs to MBIM sessions:
141 * no tag => IPS session <0> 168 * no tag => IPS session <0>
142 * 1 - 255 => IPS session <vlanid> 169 * 1 - 255 => IPS session <vlanid>
143 * 256 - 511 => DSS session <vlanid - 256> 170 * 256 - 511 => DSS session <vlanid - 256>
144 * 512 - 4095 => unsupported, drop 171 * 512 - 4095 => unsupported, drop
145 */ 172 */
146 vlan_get_tag(skb, &tci);
147
148 switch (tci & 0x0f00) { 173 switch (tci & 0x0f00) {
149 case 0x0000: /* VLAN ID 0 - 255 */ 174 case 0x0000: /* VLAN ID 0 - 255 */
150 /* verify that datagram is IPv4 or IPv6 */ 175 if (!is_ip)
151 skb_reset_mac_header(skb);
152 switch (eth_hdr(skb)->h_proto) {
153 case htons(ETH_P_IP):
154 case htons(ETH_P_IPV6):
155 break;
156 default:
157 goto error; 176 goto error;
158 }
159 c = (u8 *)&sign; 177 c = (u8 *)&sign;
160 c[3] = tci; 178 c[3] = tci;
161 break; 179 break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
169 "unsupported tci=0x%04x\n", tci); 187 "unsupported tci=0x%04x\n", tci);
170 goto error; 188 goto error;
171 } 189 }
172 skb_pull(skb, ETH_HLEN);
173 } 190 }
174 191
175 spin_lock_bh(&ctx->mtx); 192 spin_lock_bh(&ctx->mtx);
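
The comment in the hunk above spells out how VLAN IDs are mapped onto MBIM sessions; the table is small enough to restate as a self-contained helper (illustrative names only — the driver itself works on the raw TCI and the NDP signature bytes):

enum mbim_session_kind { MBIM_IPS, MBIM_DSS, MBIM_UNSUPPORTED };

/* no tag / VID 0-255 -> IPS session <vid>
 * VID 256-511        -> DSS session <vid - 256>
 * VID 512-4095       -> unsupported (dropped) */
static enum mbim_session_kind
mbim_session_from_tci(unsigned int tci, unsigned int *session_id)
{
    unsigned int vid = tci & 0x0fff;   /* low 12 bits hold the VLAN ID */

    if (vid < 256) {
        *session_id = vid;
        return MBIM_IPS;
    }
    if (vid < 512) {
        *session_id = vid - 256;
        return MBIM_DSS;
    }
    return MBIM_UNSUPPORTED;
}
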
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
204 return; 221 return;
205 222
206 /* need to send the NA on the VLAN dev, if any */ 223 /* need to send the NA on the VLAN dev, if any */
207 if (tci) 224 rcu_read_lock();
225 if (tci) {
208 netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q), 226 netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
209 tci); 227 tci);
210 else 228 if (!netdev) {
229 rcu_read_unlock();
230 return;
231 }
232 } else {
211 netdev = dev->net; 233 netdev = dev->net;
212 if (!netdev) 234 }
213 return; 235 dev_hold(netdev);
236 rcu_read_unlock();
214 237
215 in6_dev = in6_dev_get(netdev); 238 in6_dev = in6_dev_get(netdev);
216 if (!in6_dev) 239 if (!in6_dev)
217 return; 240 goto out;
218 is_router = !!in6_dev->cnf.forwarding; 241 is_router = !!in6_dev->cnf.forwarding;
219 in6_dev_put(in6_dev); 242 in6_dev_put(in6_dev);
220 243
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
224 true /* solicited */, 247 true /* solicited */,
225 false /* override */, 248 false /* override */,
226 true /* inc_opt */); 249 true /* inc_opt */);
250out:
251 dev_put(netdev);
227} 252}
228 253
229static bool is_neigh_solicit(u8 *buf, size_t len) 254static bool is_neigh_solicit(u8 *buf, size_t len)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 549dbac710ed..9a2bd11943eb 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -785,7 +785,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
785 skb_out->len > CDC_NCM_MIN_TX_PKT) 785 skb_out->len > CDC_NCM_MIN_TX_PKT)
786 memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, 786 memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
787 ctx->tx_max - skb_out->len); 787 ctx->tx_max - skb_out->len);
788 else if ((skb_out->len % dev->maxpacket) == 0) 788 else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
789 *skb_put(skb_out, 1) = 0; /* force short packet */ 789 *skb_put(skb_out, 1) = 0; /* force short packet */
790 790
791 /* set final frame length */ 791 /* set final frame length */
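
The cdc_ncm one-liner tightens the short-packet workaround: a padding byte is appended only when the NTB has not already been padded to the fixed tx_max size and its length is an exact multiple of the endpoint's wMaxPacketSize, since such a transfer would otherwise never terminate with a short packet. The condition as a tiny standalone predicate (illustration, not the driver function):

#include <stdbool.h>
#include <stddef.h>

static bool needs_forced_short_packet(size_t len, size_t tx_max,
                                      size_t maxpacket)
{
    /* Append one byte so the USB transfer ends on a short packet,
     * but never when the buffer is already at its fixed tx_max size. */
    return len < tx_max && (len % maxpacket) == 0;
}
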
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e3458e3c44f1..83208d4fdc59 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -669,6 +669,22 @@ static const struct usb_device_id products[] = {
669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
670 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 670 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
671 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 671 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
672 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
673 {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
674 {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
675 {QMI_FIXED_INTF(0x16d8, 0x6280, 0)}, /* CMOTech CHU-628 */
676 {QMI_FIXED_INTF(0x16d8, 0x7001, 0)}, /* CMOTech CHU-720S */
677 {QMI_FIXED_INTF(0x16d8, 0x7002, 0)}, /* CMOTech 7002 */
678 {QMI_FIXED_INTF(0x16d8, 0x7003, 4)}, /* CMOTech CHU-629K */
679 {QMI_FIXED_INTF(0x16d8, 0x7004, 3)}, /* CMOTech 7004 */
680 {QMI_FIXED_INTF(0x16d8, 0x7006, 5)}, /* CMOTech CGU-629 */
681 {QMI_FIXED_INTF(0x16d8, 0x700a, 4)}, /* CMOTech CHU-629S */
682 {QMI_FIXED_INTF(0x16d8, 0x7211, 0)}, /* CMOTech CHU-720I */
683 {QMI_FIXED_INTF(0x16d8, 0x7212, 0)}, /* CMOTech 7212 */
684 {QMI_FIXED_INTF(0x16d8, 0x7213, 0)}, /* CMOTech 7213 */
685 {QMI_FIXED_INTF(0x16d8, 0x7251, 1)}, /* CMOTech 7251 */
686 {QMI_FIXED_INTF(0x16d8, 0x7252, 1)}, /* CMOTech 7252 */
687 {QMI_FIXED_INTF(0x16d8, 0x7253, 1)}, /* CMOTech 7253 */
672 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, 688 {QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
673 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, 689 {QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
674 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, 690 {QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
@@ -730,16 +746,28 @@ static const struct usb_device_id products[] = {
730 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 746 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
731 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 747 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
732 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ 748 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
749 {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */
750 {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */
751 {QMI_FIXED_INTF(0x1199, 0x68c0, 11)}, /* Sierra Wireless MC73xx */
733 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 752 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
753 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
754 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
734 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ 755 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
735 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 756 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
757 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
736 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 758 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
737 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 759 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
738 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 760 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
739 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ 761 {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
740 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ 762 {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
763 {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
741 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ 764 {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
742 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ 765 {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
766 {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
767 {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
768 {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
769 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
770 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
743 771
744 /* 4. Gobi 1000 devices */ 772 /* 4. Gobi 1000 devices */
745 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 773 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7b687469199b..8a852b5f215f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1285,7 +1285,7 @@ static int virtnet_set_channels(struct net_device *dev,
1285 if (channels->rx_count || channels->tx_count || channels->other_count) 1285 if (channels->rx_count || channels->tx_count || channels->other_count)
1286 return -EINVAL; 1286 return -EINVAL;
1287 1287
1288 if (queue_pairs > vi->max_queue_pairs) 1288 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
1289 return -EINVAL; 1289 return -EINVAL;
1290 1290
1291 get_online_cpus(); 1291 get_online_cpus();
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index c55e316373a1..4dbb2ed85b97 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -389,8 +389,8 @@ static inline size_t vxlan_nlmsg_size(void)
389 + nla_total_size(sizeof(struct nda_cacheinfo)); 389 + nla_total_size(sizeof(struct nda_cacheinfo));
390} 390}
391 391
392static void vxlan_fdb_notify(struct vxlan_dev *vxlan, 392static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
393 struct vxlan_fdb *fdb, int type) 393 struct vxlan_rdst *rd, int type)
394{ 394{
395 struct net *net = dev_net(vxlan->dev); 395 struct net *net = dev_net(vxlan->dev);
396 struct sk_buff *skb; 396 struct sk_buff *skb;
@@ -400,8 +400,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
400 if (skb == NULL) 400 if (skb == NULL)
401 goto errout; 401 goto errout;
402 402
403 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, 403 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
404 first_remote_rtnl(fdb));
405 if (err < 0) { 404 if (err < 0) {
406 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ 405 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
407 WARN_ON(err == -EMSGSIZE); 406 WARN_ON(err == -EMSGSIZE);
@@ -427,10 +426,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
427 .remote_vni = VXLAN_N_VID, 426 .remote_vni = VXLAN_N_VID,
428 }; 427 };
429 428
430 INIT_LIST_HEAD(&f.remotes); 429 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
431 list_add_rcu(&remote.list, &f.remotes);
432
433 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
434} 430}
435 431
436static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) 432static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
@@ -438,11 +434,11 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
438 struct vxlan_fdb f = { 434 struct vxlan_fdb f = {
439 .state = NUD_STALE, 435 .state = NUD_STALE,
440 }; 436 };
437 struct vxlan_rdst remote = { };
441 438
442 INIT_LIST_HEAD(&f.remotes);
443 memcpy(f.eth_addr, eth_addr, ETH_ALEN); 439 memcpy(f.eth_addr, eth_addr, ETH_ALEN);
444 440
445 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH); 441 vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
446} 442}
447 443
448/* Hash Ethernet address */ 444/* Hash Ethernet address */
@@ -533,7 +529,8 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
533 529
534/* Add/update destinations for multicast */ 530/* Add/update destinations for multicast */
535static int vxlan_fdb_append(struct vxlan_fdb *f, 531static int vxlan_fdb_append(struct vxlan_fdb *f,
536 union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex) 532 union vxlan_addr *ip, __be16 port, __u32 vni,
533 __u32 ifindex, struct vxlan_rdst **rdp)
537{ 534{
538 struct vxlan_rdst *rd; 535 struct vxlan_rdst *rd;
539 536
@@ -551,6 +548,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
551 548
552 list_add_tail_rcu(&rd->list, &f->remotes); 549 list_add_tail_rcu(&rd->list, &f->remotes);
553 550
551 *rdp = rd;
554 return 1; 552 return 1;
555} 553}
556 554
@@ -690,6 +688,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
690 __be16 port, __u32 vni, __u32 ifindex, 688 __be16 port, __u32 vni, __u32 ifindex,
691 __u8 ndm_flags) 689 __u8 ndm_flags)
692{ 690{
691 struct vxlan_rdst *rd = NULL;
693 struct vxlan_fdb *f; 692 struct vxlan_fdb *f;
694 int notify = 0; 693 int notify = 0;
695 694
@@ -726,7 +725,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
726 if ((flags & NLM_F_APPEND) && 725 if ((flags & NLM_F_APPEND) &&
727 (is_multicast_ether_addr(f->eth_addr) || 726 (is_multicast_ether_addr(f->eth_addr) ||
728 is_zero_ether_addr(f->eth_addr))) { 727 is_zero_ether_addr(f->eth_addr))) {
729 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex); 728 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
729 &rd);
730 730
731 if (rc < 0) 731 if (rc < 0)
732 return rc; 732 return rc;
@@ -756,15 +756,18 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
756 INIT_LIST_HEAD(&f->remotes); 756 INIT_LIST_HEAD(&f->remotes);
757 memcpy(f->eth_addr, mac, ETH_ALEN); 757 memcpy(f->eth_addr, mac, ETH_ALEN);
758 758
759 vxlan_fdb_append(f, ip, port, vni, ifindex); 759 vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
760 760
761 ++vxlan->addrcnt; 761 ++vxlan->addrcnt;
762 hlist_add_head_rcu(&f->hlist, 762 hlist_add_head_rcu(&f->hlist,
763 vxlan_fdb_head(vxlan, mac)); 763 vxlan_fdb_head(vxlan, mac));
764 } 764 }
765 765
766 if (notify) 766 if (notify) {
767 vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH); 767 if (rd == NULL)
768 rd = first_remote_rtnl(f);
769 vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
770 }
768 771
769 return 0; 772 return 0;
770} 773}
@@ -785,7 +788,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
785 "delete %pM\n", f->eth_addr); 788 "delete %pM\n", f->eth_addr);
786 789
787 --vxlan->addrcnt; 790 --vxlan->addrcnt;
788 vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH); 791 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
789 792
790 hlist_del_rcu(&f->hlist); 793 hlist_del_rcu(&f->hlist);
791 call_rcu(&f->rcu, vxlan_fdb_free); 794 call_rcu(&f->rcu, vxlan_fdb_free);
@@ -919,6 +922,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
919 */ 922 */
920 if (rd && !list_is_singular(&f->remotes)) { 923 if (rd && !list_is_singular(&f->remotes)) {
921 list_del_rcu(&rd->list); 924 list_del_rcu(&rd->list);
925 vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
922 kfree_rcu(rd, rcu); 926 kfree_rcu(rd, rcu);
923 goto out; 927 goto out;
924 } 928 }
@@ -993,7 +997,7 @@ static bool vxlan_snoop(struct net_device *dev,
993 997
994 rdst->remote_ip = *src_ip; 998 rdst->remote_ip = *src_ip;
995 f->updated = jiffies; 999 f->updated = jiffies;
996 vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH); 1000 vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
997 } else { 1001 } else {
998 /* learned new entry */ 1002 /* learned new entry */
999 spin_lock(&vxlan->hash_lock); 1003 spin_lock(&vxlan->hash_lock);
@@ -1755,8 +1759,8 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
1755 if (err) 1759 if (err)
1756 return err; 1760 return err;
1757 1761
1758 return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, 1762 return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
1759 false); 1763 tos, ttl, df, false);
1760} 1764}
1761EXPORT_SYMBOL_GPL(vxlan_xmit_skb); 1765EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
1762 1766
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 84734a805092..83c39e2858bf 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -1521,11 +1521,7 @@ static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
1521 cosa_putstatus(cosa, 0); 1521 cosa_putstatus(cosa, 0);
1522 cosa_getdata8(cosa); 1522 cosa_getdata8(cosa);
1523 cosa_putstatus(cosa, SR_RST); 1523 cosa_putstatus(cosa, SR_RST);
1524#ifdef MODULE
1525 msleep(500); 1524 msleep(500);
1526#else
1527 udelay(5*100000);
1528#endif
1529 /* Disable all IRQs from the card */ 1525 /* Disable all IRQs from the card */
1530 cosa_putstatus(cosa, 0); 1526 cosa_putstatus(cosa, 0);
1531 1527
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index a0398fe3eb28..be3eb2a8d602 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -86,7 +86,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
86 int irq; 86 int irq;
87 int ret = 0; 87 int ret = 0;
88 struct ath_hw *ah; 88 struct ath_hw *ah;
89 struct ath_common *common;
90 char hw_name[64]; 89 char hw_name[64];
91 90
92 if (!dev_get_platdata(&pdev->dev)) { 91 if (!dev_get_platdata(&pdev->dev)) {
@@ -146,9 +145,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
146 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", 145 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
147 hw_name, (unsigned long)mem, irq); 146 hw_name, (unsigned long)mem, irq);
148 147
149 common = ath9k_hw_common(sc->sc_ah);
150 /* Will be cleared in ath9k_start() */
151 set_bit(ATH_OP_INVALID, &common->op_flags);
152 return 0; 148 return 0;
153 149
154 err_irq: 150 err_irq:
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 6d47783f2e5b..ba502a2d199b 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -155,6 +155,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
155 ATH9K_ANI_RSSI_THR_LOW, 155 ATH9K_ANI_RSSI_THR_LOW,
156 ATH9K_ANI_RSSI_THR_HIGH); 156 ATH9K_ANI_RSSI_THR_HIGH);
157 157
158 if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_OFDM_DEF_LEVEL)
159 immunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
160
158 if (!scan) 161 if (!scan)
159 aniState->ofdmNoiseImmunityLevel = immunityLevel; 162 aniState->ofdmNoiseImmunityLevel = immunityLevel;
160 163
@@ -235,6 +238,9 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
235 BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW, 238 BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW,
236 ATH9K_ANI_RSSI_THR_HIGH); 239 ATH9K_ANI_RSSI_THR_HIGH);
237 240
241 if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_CCK_DEF_LEVEL)
242 immunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
243
238 if (ah->opmode == NL80211_IFTYPE_STATION && 244 if (ah->opmode == NL80211_IFTYPE_STATION &&
239 BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW && 245 BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW &&
240 immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI) 246 immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 44d74495c4de..3ba03dde4215 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -251,7 +251,6 @@ struct ath_atx_tid {
251 251
252 s8 bar_index; 252 s8 bar_index;
253 bool sched; 253 bool sched;
254 bool paused;
255 bool active; 254 bool active;
256}; 255};
257 256
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index d76e6e0120d2..ffca918ff16a 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -72,7 +72,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
72 ath_txq_lock(sc, txq); 72 ath_txq_lock(sc, txq);
73 if (tid->active) { 73 if (tid->active) {
74 len += scnprintf(buf + len, size - len, 74 len += scnprintf(buf + len, size - len,
75 "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n", 75 "%3d%11d%10d%10d%10d%10d%9d%6d\n",
76 tid->tidno, 76 tid->tidno,
77 tid->seq_start, 77 tid->seq_start,
78 tid->seq_next, 78 tid->seq_next,
@@ -80,8 +80,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
80 tid->baw_head, 80 tid->baw_head,
81 tid->baw_tail, 81 tid->baw_tail,
82 tid->bar_index, 82 tid->bar_index,
83 tid->sched, 83 tid->sched);
84 tid->paused);
85 } 84 }
86 ath_txq_unlock(sc, txq); 85 ath_txq_unlock(sc, txq);
87 } 86 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f46cd0250e48..5627917c5ff7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
95 95
96 if ((vif->type == NL80211_IFTYPE_AP || 96 if ((vif->type == NL80211_IFTYPE_AP ||
97 vif->type == NL80211_IFTYPE_MESH_POINT) && 97 vif->type == NL80211_IFTYPE_MESH_POINT) &&
98 bss_conf->enable_beacon) 98 bss_conf->enable_beacon) {
99 priv->reconfig_beacon = true; 99 priv->reconfig_beacon = true;
100 priv->rearm_ani = true;
101 }
100 102
101 if (bss_conf->assoc) { 103 if (bss_conf->assoc) {
102 priv->rearm_ani = true; 104 priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
257 259
258 ath9k_htc_ps_wakeup(priv); 260 ath9k_htc_ps_wakeup(priv);
259 261
262 ath9k_htc_stop_ani(priv);
260 del_timer_sync(&priv->tx.cleanup_timer); 263 del_timer_sync(&priv->tx.cleanup_timer);
261 ath9k_htc_tx_drain(priv); 264 ath9k_htc_tx_drain(priv);
262 265
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index cbbb02a6b13b..36ae6490e554 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -783,6 +783,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
783 common = ath9k_hw_common(ah); 783 common = ath9k_hw_common(ah);
784 ath9k_set_hw_capab(sc, hw); 784 ath9k_set_hw_capab(sc, hw);
785 785
786 /* Will be cleared in ath9k_start() */
787 set_bit(ATH_OP_INVALID, &common->op_flags);
788
786 /* Initialize regulatory */ 789 /* Initialize regulatory */
787 error = ath_regd_init(&common->regulatory, sc->hw->wiphy, 790 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
788 ath9k_reg_notifier); 791 ath9k_reg_notifier);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 25304adece57..914dbc6b1720 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -784,7 +784,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
784{ 784{
785 struct ath_softc *sc; 785 struct ath_softc *sc;
786 struct ieee80211_hw *hw; 786 struct ieee80211_hw *hw;
787 struct ath_common *common;
788 u8 csz; 787 u8 csz;
789 u32 val; 788 u32 val;
790 int ret = 0; 789 int ret = 0;
@@ -877,10 +876,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
877 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", 876 wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
878 hw_name, (unsigned long)sc->mem, pdev->irq); 877 hw_name, (unsigned long)sc->mem, pdev->irq);
879 878
880 /* Will be cleared in ath9k_start() */
881 common = ath9k_hw_common(sc->sc_ah);
882 set_bit(ATH_OP_INVALID, &common->op_flags);
883
884 return 0; 879 return 0;
885 880
886err_init: 881err_init:
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 6c9accdb52e4..19df969ec909 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -975,6 +975,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
975 u64 tsf = 0; 975 u64 tsf = 0;
976 unsigned long flags; 976 unsigned long flags;
977 dma_addr_t new_buf_addr; 977 dma_addr_t new_buf_addr;
978 unsigned int budget = 512;
978 979
979 if (edma) 980 if (edma)
980 dma_type = DMA_BIDIRECTIONAL; 981 dma_type = DMA_BIDIRECTIONAL;
@@ -1113,15 +1114,17 @@ requeue_drop_frag:
1113 } 1114 }
1114requeue: 1115requeue:
1115 list_add_tail(&bf->list, &sc->rx.rxbuf); 1116 list_add_tail(&bf->list, &sc->rx.rxbuf);
1116 if (flush)
1117 continue;
1118 1117
1119 if (edma) { 1118 if (edma) {
1120 ath_rx_edma_buf_link(sc, qtype); 1119 ath_rx_edma_buf_link(sc, qtype);
1121 } else { 1120 } else {
1122 ath_rx_buf_relink(sc, bf); 1121 ath_rx_buf_relink(sc, bf);
1123 ath9k_hw_rxena(ah); 1122 if (!flush)
1123 ath9k_hw_rxena(ah);
1124 } 1124 }
1125
1126 if (!budget--)
1127 break;
1125 } while (1); 1128 } while (1);
1126 1129
1127 if (!(ah->imask & ATH9K_INT_RXEOL)) { 1130 if (!(ah->imask & ATH9K_INT_RXEOL)) {
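
The recv.c hunks put a cap of 512 frames on a single pass through ath_rx_tasklet() so one invocation cannot run unbounded under heavy receive load. The general shape of such a budgeted drain loop, reduced to plain C with placeholder item handling:

#include <stddef.h>

/* Handle at most `budget` entries per call; the caller re-arms itself
 * (tasklet, poll routine, timer, ...) when work is left over. */
static size_t process_budgeted(int *items, size_t count, size_t budget,
                               void (*handle)(int item))
{
    size_t done = 0;

    while (done < count && budget--) {
        handle(items[done]);
        done++;
    }
    return done;    /* done < count means more work remains */
}
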
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 87cbec47fb48..66acb2cbd9df 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -107,9 +107,6 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
107{ 107{
108 struct ath_atx_ac *ac = tid->ac; 108 struct ath_atx_ac *ac = tid->ac;
109 109
110 if (tid->paused)
111 return;
112
113 if (tid->sched) 110 if (tid->sched)
114 return; 111 return;
115 112
@@ -1407,7 +1404,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1407 ath_tx_tid_change_state(sc, txtid); 1404 ath_tx_tid_change_state(sc, txtid);
1408 1405
1409 txtid->active = true; 1406 txtid->active = true;
1410 txtid->paused = true;
1411 *ssn = txtid->seq_start = txtid->seq_next; 1407 *ssn = txtid->seq_start = txtid->seq_next;
1412 txtid->bar_index = -1; 1408 txtid->bar_index = -1;
1413 1409
@@ -1427,7 +1423,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1427 1423
1428 ath_txq_lock(sc, txq); 1424 ath_txq_lock(sc, txq);
1429 txtid->active = false; 1425 txtid->active = false;
1430 txtid->paused = false;
1431 ath_tx_flush_tid(sc, txtid); 1426 ath_tx_flush_tid(sc, txtid);
1432 ath_tx_tid_change_state(sc, txtid); 1427 ath_tx_tid_change_state(sc, txtid);
1433 ath_txq_unlock_complete(sc, txq); 1428 ath_txq_unlock_complete(sc, txq);
@@ -1487,7 +1482,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1487 ath_txq_lock(sc, txq); 1482 ath_txq_lock(sc, txq);
1488 ac->clear_ps_filter = true; 1483 ac->clear_ps_filter = true;
1489 1484
1490 if (!tid->paused && ath_tid_has_buffered(tid)) { 1485 if (ath_tid_has_buffered(tid)) {
1491 ath_tx_queue_tid(txq, tid); 1486 ath_tx_queue_tid(txq, tid);
1492 ath_txq_schedule(sc, txq); 1487 ath_txq_schedule(sc, txq);
1493 } 1488 }
@@ -1510,7 +1505,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
1510 ath_txq_lock(sc, txq); 1505 ath_txq_lock(sc, txq);
1511 1506
1512 tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; 1507 tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
1513 tid->paused = false;
1514 1508
1515 if (ath_tid_has_buffered(tid)) { 1509 if (ath_tid_has_buffered(tid)) {
1516 ath_tx_queue_tid(txq, tid); 1510 ath_tx_queue_tid(txq, tid);
@@ -1544,8 +1538,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
1544 continue; 1538 continue;
1545 1539
1546 tid = ATH_AN_2_TID(an, i); 1540 tid = ATH_AN_2_TID(an, i);
1547 if (tid->paused)
1548 continue;
1549 1541
1550 ath_txq_lock(sc, tid->ac->txq); 1542 ath_txq_lock(sc, tid->ac->txq);
1551 while (nframes > 0) { 1543 while (nframes > 0) {
@@ -1844,9 +1836,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1844 list_del(&tid->list); 1836 list_del(&tid->list);
1845 tid->sched = false; 1837 tid->sched = false;
1846 1838
1847 if (tid->paused)
1848 continue;
1849
1850 if (ath_tx_sched_aggr(sc, txq, tid, &stop)) 1839 if (ath_tx_sched_aggr(sc, txq, tid, &stop))
1851 sent = true; 1840 sent = true;
1852 1841
@@ -2698,7 +2687,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2698 tid->baw_size = WME_MAX_BA; 2687 tid->baw_size = WME_MAX_BA;
2699 tid->baw_head = tid->baw_tail = 0; 2688 tid->baw_head = tid->baw_tail = 0;
2700 tid->sched = false; 2689 tid->sched = false;
2701 tid->paused = false;
2702 tid->active = false; 2690 tid->active = false;
2703 __skb_queue_head_init(&tid->buf_q); 2691 __skb_queue_head_init(&tid->buf_q);
2704 __skb_queue_head_init(&tid->retry_q); 2692 __skb_queue_head_init(&tid->retry_q);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index df130ef53d1c..c7c9f15c0fe0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -303,10 +303,10 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
303 303
304 ci = core->chip; 304 ci = core->chip;
305 305
306 /* if core is already in reset, just return */ 306 /* if core is already in reset, skip reset */
307 regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL); 307 regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
308 if ((regdata & BCMA_RESET_CTL_RESET) != 0) 308 if ((regdata & BCMA_RESET_CTL_RESET) != 0)
309 return; 309 goto in_reset_configure;
310 310
311 /* configure reset */ 311 /* configure reset */
312 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL, 312 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
@@ -322,6 +322,7 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
322 SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) != 322 SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
323 BCMA_RESET_CTL_RESET, 300); 323 BCMA_RESET_CTL_RESET, 300);
324 324
325in_reset_configure:
325 /* in-reset configure */ 326 /* in-reset configure */
326 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL, 327 ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
327 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK); 328 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index afb3d15e38ff..be1985296bdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
4948 if (!err) { 4948 if (!err) {
4949 /* only set 2G bandwidth using bw_cap command */ 4949 /* only set 2G bandwidth using bw_cap command */
4950 band_bwcap.band = cpu_to_le32(WLC_BAND_2G); 4950 band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
4951 band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT); 4951 band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
4952 err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap, 4952 err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
4953 sizeof(band_bwcap)); 4953 sizeof(band_bwcap));
4954 } else { 4954 } else {
diff --git a/drivers/net/wireless/cw1200/debug.c b/drivers/net/wireless/cw1200/debug.c
index e323b4d54338..34f97c31eecf 100644
--- a/drivers/net/wireless/cw1200/debug.c
+++ b/drivers/net/wireless/cw1200/debug.c
@@ -41,6 +41,8 @@ static const char * const cw1200_debug_link_id[] = {
41 "REQ", 41 "REQ",
42 "SOFT", 42 "SOFT",
43 "HARD", 43 "HARD",
44 "RESET",
45 "RESET_REMAP",
44}; 46};
45 47
46static const char *cw1200_debug_mode(int mode) 48static const char *cw1200_debug_mode(int mode)
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 003a546571d4..4c2d4ef28b22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -67,8 +67,8 @@
67#include "iwl-agn-hw.h" 67#include "iwl-agn-hw.h"
68 68
69/* Highest firmware API version supported */ 69/* Highest firmware API version supported */
70#define IWL7260_UCODE_API_MAX 8 70#define IWL7260_UCODE_API_MAX 9
71#define IWL3160_UCODE_API_MAX 8 71#define IWL3160_UCODE_API_MAX 9
72 72
73/* Oldest version we won't warn about */ 73/* Oldest version we won't warn about */
74#define IWL7260_UCODE_API_OK 8 74#define IWL7260_UCODE_API_OK 8
@@ -244,3 +244,4 @@ const struct iwl_cfg iwl7265_n_cfg = {
244 244
245MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 245MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
246MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); 246MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
247MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 685f7e8e6943..0489314425cb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -190,7 +190,7 @@ static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
190 cpu_to_le32(0xcc00aaaa), 190 cpu_to_le32(0xcc00aaaa),
191 cpu_to_le32(0x0000aaaa), 191 cpu_to_le32(0x0000aaaa),
192 cpu_to_le32(0xc0004000), 192 cpu_to_le32(0xc0004000),
193 cpu_to_le32(0x00000000), 193 cpu_to_le32(0x00004000),
194 cpu_to_le32(0xf0005000), 194 cpu_to_le32(0xf0005000),
195 cpu_to_le32(0xf0005000), 195 cpu_to_le32(0xf0005000),
196 }, 196 },
@@ -213,16 +213,16 @@ static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
213 /* Tx Tx disabled */ 213 /* Tx Tx disabled */
214 cpu_to_le32(0xaaaaaaaa), 214 cpu_to_le32(0xaaaaaaaa),
215 cpu_to_le32(0xaaaaaaaa), 215 cpu_to_le32(0xaaaaaaaa),
216 cpu_to_le32(0xaaaaaaaa), 216 cpu_to_le32(0xeeaaaaaa),
217 cpu_to_le32(0xaaaaaaaa), 217 cpu_to_le32(0xaaaaaaaa),
218 cpu_to_le32(0xcc00ff28), 218 cpu_to_le32(0xcc00ff28),
219 cpu_to_le32(0x0000aaaa), 219 cpu_to_le32(0x0000aaaa),
220 cpu_to_le32(0xcc00aaaa), 220 cpu_to_le32(0xcc00aaaa),
221 cpu_to_le32(0x0000aaaa), 221 cpu_to_le32(0x0000aaaa),
222 cpu_to_le32(0xC0004000), 222 cpu_to_le32(0xc0004000),
223 cpu_to_le32(0xC0004000), 223 cpu_to_le32(0xc0004000),
224 cpu_to_le32(0xF0005000), 224 cpu_to_le32(0xf0005000),
225 cpu_to_le32(0xF0005000), 225 cpu_to_le32(0xf0005000),
226 }, 226 },
227}; 227};
228 228
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
611 bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); 611 bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
612 612
613 if (IWL_MVM_BT_COEX_CORUNNING) { 613 if (IWL_MVM_BT_COEX_CORUNNING) {
614 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 | 614 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
615 BT_VALID_CORUN_LUT_40); 615 BT_VALID_CORUN_LUT_40);
616 bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); 616 bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
617 } 617 }
618 618
619 if (IWL_MVM_BT_COEX_MPLUT) { 619 if (IWL_MVM_BT_COEX_MPLUT) {
620 bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT); 620 bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
621 bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); 621 bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
622 } 622 }
623 623
624 if (mvm->cfg->bt_shared_single_ant) 624 if (mvm->cfg->bt_shared_single_ant)
@@ -1262,6 +1262,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
1262 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1262 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1263 u32 ant_isolation = le32_to_cpup((void *)pkt->data); 1263 u32 ant_isolation = le32_to_cpup((void *)pkt->data);
1264 u8 __maybe_unused lower_bound, upper_bound; 1264 u8 __maybe_unused lower_bound, upper_bound;
1265 int ret;
1265 u8 lut; 1266 u8 lut;
1266 1267
1267 struct iwl_bt_coex_cmd *bt_cmd; 1268 struct iwl_bt_coex_cmd *bt_cmd;
@@ -1318,5 +1319,8 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
1318 memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20, 1319 memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
1319 sizeof(bt_cmd->bt4_corun_lut40)); 1320 sizeof(bt_cmd->bt4_corun_lut40));
1320 1321
1321 return 0; 1322 ret = iwl_mvm_send_cmd(mvm, &cmd);
1323
1324 kfree(bt_cmd);
1325 return ret;
1322} 1326}
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 9426905de6b2..d73a89ecd78a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -183,9 +183,9 @@ enum iwl_scan_type {
183 * this number of packets were received (typically 1) 183 * this number of packets were received (typically 1)
184 * @passive2active: is auto switching from passive to active during scan allowed 184 * @passive2active: is auto switching from passive to active during scan allowed
185 * @rxchain_sel_flags: RXON_RX_CHAIN_* 185 * @rxchain_sel_flags: RXON_RX_CHAIN_*
186 * @max_out_time: in usecs, max out of serving channel time 186 * @max_out_time: in TUs, max out of serving channel time
187 * @suspend_time: how long to pause scan when returning to service channel: 187 * @suspend_time: how long to pause scan when returning to service channel:
188 * bits 0-19: beacon interval in usecs (suspend before executing) 188 * bits 0-19: beacon interval in TUs (suspend before executing)
189 * bits 20-23: reserved 189 * bits 20-23: reserved
190 * bits 24-31: number of beacons (suspend between channels) 190 * bits 24-31: number of beacons (suspend between channels)
191 * @rxon_flags: RXON_FLG_* 191 * @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
383 * @quiet_plcp_th: quiet channel num of packets threshold 383 * @quiet_plcp_th: quiet channel num of packets threshold
384 * @good_CRC_th: passive to active promotion threshold 384 * @good_CRC_th: passive to active promotion threshold
385 * @rx_chain: RXON rx chain. 385 * @rx_chain: RXON rx chain.
386 * @max_out_time: max uSec to be out of associated channel 386 * @max_out_time: max TUs to be out of associated channel
387 * @suspend_time: pause scan this long when returning to service channel 387 * @suspend_time: pause scan this many TUs when returning to service channel
388 * @flags: RXON flags 388 * @flags: RXON flags
389 * @filter_flags: RXON filter 389 * @filter_flags: RXON filter
390 * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz. 390 * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
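The kernel-doc above changes the units of max_out_time and suspend_time from microseconds to TUs; suspend_time itself stays a packed word (bits 0-19: beacon interval, bits 20-23: reserved, bits 24-31: number of beacons). A minimal sketch of that packing follows; the helper name is a made-up illustration and is not part of this patch or of the iwlwifi API.

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Illustration only: pack a suspend_time word using the bit layout
 * documented in fw-api-scan.h above.  The helper name is invented.
 */
static inline __le32 iwl_pack_suspend_time_sketch(u32 beacon_interval_tu,
						   u8 n_beacons)
{
	u32 val;

	val = beacon_interval_tu & 0xFFFFF;	/* bits 0-19: beacon interval */
	val |= (u32)n_beacons << 24;		/* bits 24-31: number of beacons */

	return cpu_to_le32(val);		/* firmware fields are little endian */
}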
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 4dd9ff43b8b6..b41dc84e9431 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1007 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); 1007 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1008 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); 1008 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1009 1009
1010 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd); 1010 ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
1011 if (ret) 1011 if (ret)
1012 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); 1012 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1013} 1013}
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1023 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) 1023 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1024 return; 1024 return;
1025 1025
1026 ieee80211_iterate_active_interfaces( 1026 ieee80211_iterate_active_interfaces_atomic(
1027 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1027 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1028 iwl_mvm_mc_iface_iterator, &iter_data); 1028 iwl_mvm_mc_iface_iterator, &iter_data);
1029} 1029}
@@ -1332,6 +1332,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1332 */ 1332 */
1333 iwl_mvm_remove_time_event(mvm, mvmvif, 1333 iwl_mvm_remove_time_event(mvm, mvmvif,
1334 &mvmvif->time_event_data); 1334 &mvmvif->time_event_data);
1335 iwl_mvm_sf_update(mvm, vif, false);
1335 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC)); 1336 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
1336 } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | 1337 } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
1337 BSS_CHANGED_QOS)) { 1338 BSS_CHANGED_QOS)) {
@@ -1806,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
1806 1807
1807 mutex_lock(&mvm->mutex); 1808 mutex_lock(&mvm->mutex);
1808 1809
1810 if (!iwl_mvm_is_idle(mvm)) {
1811 ret = -EBUSY;
1812 goto out;
1813 }
1814
1809 switch (mvm->scan_status) { 1815 switch (mvm->scan_status) {
1810 case IWL_MVM_SCAN_OS: 1816 case IWL_MVM_SCAN_OS:
1811 IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n"); 1817 IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d564233a65da..f1ec0986c3c9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
1003 return mvmvif->low_latency; 1003 return mvmvif->low_latency;
1004} 1004}
1005 1005
1006/* Assoc status */
1007bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
1008
1006/* Thermal management and CT-kill */ 1009/* Thermal management and CT-kill */
1007void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); 1010void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
1008void iwl_mvm_tt_handler(struct iwl_mvm *mvm); 1011void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 568abd61b14f..e1c838899363 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -59,7 +59,7 @@
59/* max allowed rate miss before sync LQ cmd */ 59/* max allowed rate miss before sync LQ cmd */
60#define IWL_MISSED_RATE_MAX 15 60#define IWL_MISSED_RATE_MAX 15
61#define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ) 61#define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ)
62 62#define RS_IDLE_TIMEOUT (5*HZ)
63 63
64static u8 rs_ht_to_legacy[] = { 64static u8 rs_ht_to_legacy[] = {
65 [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX, 65 [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
@@ -142,7 +142,7 @@ enum rs_column_mode {
142 RS_MIMO2, 142 RS_MIMO2,
143}; 143};
144 144
145#define MAX_NEXT_COLUMNS 5 145#define MAX_NEXT_COLUMNS 7
146#define MAX_COLUMN_CHECKS 3 146#define MAX_COLUMN_CHECKS 3
147 147
148typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, 148typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
@@ -212,8 +212,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
212 RS_COLUMN_LEGACY_ANT_B, 212 RS_COLUMN_LEGACY_ANT_B,
213 RS_COLUMN_SISO_ANT_A, 213 RS_COLUMN_SISO_ANT_A,
214 RS_COLUMN_SISO_ANT_B, 214 RS_COLUMN_SISO_ANT_B,
215 RS_COLUMN_MIMO2, 215 RS_COLUMN_INVALID,
216 RS_COLUMN_MIMO2_SGI, 216 RS_COLUMN_INVALID,
217 RS_COLUMN_INVALID,
218 RS_COLUMN_INVALID,
217 }, 219 },
218 }, 220 },
219 [RS_COLUMN_LEGACY_ANT_B] = { 221 [RS_COLUMN_LEGACY_ANT_B] = {
@@ -223,8 +225,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
223 RS_COLUMN_LEGACY_ANT_A, 225 RS_COLUMN_LEGACY_ANT_A,
224 RS_COLUMN_SISO_ANT_A, 226 RS_COLUMN_SISO_ANT_A,
225 RS_COLUMN_SISO_ANT_B, 227 RS_COLUMN_SISO_ANT_B,
226 RS_COLUMN_MIMO2, 228 RS_COLUMN_INVALID,
227 RS_COLUMN_MIMO2_SGI, 229 RS_COLUMN_INVALID,
230 RS_COLUMN_INVALID,
231 RS_COLUMN_INVALID,
228 }, 232 },
229 }, 233 },
230 [RS_COLUMN_SISO_ANT_A] = { 234 [RS_COLUMN_SISO_ANT_A] = {
@@ -235,7 +239,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
235 RS_COLUMN_MIMO2, 239 RS_COLUMN_MIMO2,
236 RS_COLUMN_SISO_ANT_A_SGI, 240 RS_COLUMN_SISO_ANT_A_SGI,
237 RS_COLUMN_SISO_ANT_B_SGI, 241 RS_COLUMN_SISO_ANT_B_SGI,
238 RS_COLUMN_MIMO2_SGI, 242 RS_COLUMN_LEGACY_ANT_A,
243 RS_COLUMN_LEGACY_ANT_B,
244 RS_COLUMN_INVALID,
239 }, 245 },
240 .checks = { 246 .checks = {
241 rs_siso_allow, 247 rs_siso_allow,
@@ -249,7 +255,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
249 RS_COLUMN_MIMO2, 255 RS_COLUMN_MIMO2,
250 RS_COLUMN_SISO_ANT_B_SGI, 256 RS_COLUMN_SISO_ANT_B_SGI,
251 RS_COLUMN_SISO_ANT_A_SGI, 257 RS_COLUMN_SISO_ANT_A_SGI,
252 RS_COLUMN_MIMO2_SGI, 258 RS_COLUMN_LEGACY_ANT_A,
259 RS_COLUMN_LEGACY_ANT_B,
260 RS_COLUMN_INVALID,
253 }, 261 },
254 .checks = { 262 .checks = {
255 rs_siso_allow, 263 rs_siso_allow,
@@ -265,6 +273,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
265 RS_COLUMN_SISO_ANT_A, 273 RS_COLUMN_SISO_ANT_A,
266 RS_COLUMN_SISO_ANT_B, 274 RS_COLUMN_SISO_ANT_B,
267 RS_COLUMN_MIMO2, 275 RS_COLUMN_MIMO2,
276 RS_COLUMN_LEGACY_ANT_A,
277 RS_COLUMN_LEGACY_ANT_B,
268 }, 278 },
269 .checks = { 279 .checks = {
270 rs_siso_allow, 280 rs_siso_allow,
@@ -281,6 +291,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
281 RS_COLUMN_SISO_ANT_B, 291 RS_COLUMN_SISO_ANT_B,
282 RS_COLUMN_SISO_ANT_A, 292 RS_COLUMN_SISO_ANT_A,
283 RS_COLUMN_MIMO2, 293 RS_COLUMN_MIMO2,
294 RS_COLUMN_LEGACY_ANT_A,
295 RS_COLUMN_LEGACY_ANT_B,
284 }, 296 },
285 .checks = { 297 .checks = {
286 rs_siso_allow, 298 rs_siso_allow,
@@ -296,6 +308,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
296 RS_COLUMN_SISO_ANT_A_SGI, 308 RS_COLUMN_SISO_ANT_A_SGI,
297 RS_COLUMN_SISO_ANT_B_SGI, 309 RS_COLUMN_SISO_ANT_B_SGI,
298 RS_COLUMN_MIMO2_SGI, 310 RS_COLUMN_MIMO2_SGI,
311 RS_COLUMN_LEGACY_ANT_A,
312 RS_COLUMN_LEGACY_ANT_B,
299 }, 313 },
300 .checks = { 314 .checks = {
301 rs_mimo_allow, 315 rs_mimo_allow,
@@ -311,6 +325,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
311 RS_COLUMN_SISO_ANT_A, 325 RS_COLUMN_SISO_ANT_A,
312 RS_COLUMN_SISO_ANT_B, 326 RS_COLUMN_SISO_ANT_B,
313 RS_COLUMN_MIMO2, 327 RS_COLUMN_MIMO2,
328 RS_COLUMN_LEGACY_ANT_A,
329 RS_COLUMN_LEGACY_ANT_B,
314 }, 330 },
315 .checks = { 331 .checks = {
316 rs_mimo_allow, 332 rs_mimo_allow,
@@ -503,10 +519,12 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
503 window->average_tpt = IWL_INVALID_VALUE; 519 window->average_tpt = IWL_INVALID_VALUE;
504} 520}
505 521
506static void rs_rate_scale_clear_tbl_windows(struct iwl_scale_tbl_info *tbl) 522static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
523 struct iwl_scale_tbl_info *tbl)
507{ 524{
508 int i; 525 int i;
509 526
527 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
510 for (i = 0; i < IWL_RATE_COUNT; i++) 528 for (i = 0; i < IWL_RATE_COUNT; i++)
511 rs_rate_scale_clear_window(&tbl->win[i]); 529 rs_rate_scale_clear_window(&tbl->win[i]);
512} 530}
@@ -992,6 +1010,13 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
992 return; 1010 return;
993 } 1011 }
994 1012
1013#ifdef CONFIG_MAC80211_DEBUGFS
1014 /* Disable last tx check if we are debugging with fixed rate */
1015 if (lq_sta->dbg_fixed_rate) {
1016 IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
1017 return;
1018 }
1019#endif
995 if (!ieee80211_is_data(hdr->frame_control) || 1020 if (!ieee80211_is_data(hdr->frame_control) ||
996 info->flags & IEEE80211_TX_CTL_NO_ACK) 1021 info->flags & IEEE80211_TX_CTL_NO_ACK)
997 return; 1022 return;
@@ -1034,6 +1059,18 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
1034 mac_index++; 1059 mac_index++;
1035 } 1060 }
1036 1061
1062 if (time_after(jiffies,
1063 (unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) {
1064 int tid;
1065 IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
1066 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
1067 ieee80211_stop_tx_ba_session(sta, tid);
1068
1069 iwl_mvm_rs_rate_init(mvm, sta, sband->band, false);
1070 return;
1071 }
1072 lq_sta->last_tx = jiffies;
1073
1037 /* Here we actually compare this rate to the latest LQ command */ 1074 /* Here we actually compare this rate to the latest LQ command */
1038 if ((mac_index < 0) || 1075 if ((mac_index < 0) ||
1039 (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || 1076 (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
@@ -1186,9 +1223,26 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
1186 lq_sta->visited_columns = 0; 1223 lq_sta->visited_columns = 0;
1187} 1224}
1188 1225
1226static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta,
1227 const struct rs_tx_column *column)
1228{
1229 switch (column->mode) {
1230 case RS_LEGACY:
1231 return lq_sta->max_legacy_rate_idx;
1232 case RS_SISO:
1233 return lq_sta->max_siso_rate_idx;
1234 case RS_MIMO2:
1235 return lq_sta->max_mimo2_rate_idx;
1236 default:
1237 WARN_ON_ONCE(1);
1238 }
1239
1240 return lq_sta->max_legacy_rate_idx;
1241}
1242
1189static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta, 1243static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1190 const struct rs_tx_column *column, 1244 const struct rs_tx_column *column,
1191 u32 bw) 1245 u32 bw)
1192{ 1246{
1193 /* Used to choose among HT tables */ 1247 /* Used to choose among HT tables */
1194 const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT]; 1248 const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
@@ -1438,7 +1492,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1438 1492
1439 IWL_DEBUG_RATE(mvm, 1493 IWL_DEBUG_RATE(mvm,
1440 "LQ: stay in table clear win\n"); 1494 "LQ: stay in table clear win\n");
1441 rs_rate_scale_clear_tbl_windows(tbl); 1495 rs_rate_scale_clear_tbl_windows(mvm, tbl);
1442 } 1496 }
1443 } 1497 }
1444 1498
@@ -1446,8 +1500,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1446 * bitmaps and stats in active table (this will become the new 1500 * bitmaps and stats in active table (this will become the new
1447 * "search" table). */ 1501 * "search" table). */
1448 if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) { 1502 if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
1449 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n"); 1503 rs_rate_scale_clear_tbl_windows(mvm, tbl);
1450 rs_rate_scale_clear_tbl_windows(tbl);
1451 } 1504 }
1452 } 1505 }
1453} 1506}
@@ -1485,14 +1538,14 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1485 struct ieee80211_sta *sta, 1538 struct ieee80211_sta *sta,
1486 struct iwl_scale_tbl_info *tbl) 1539 struct iwl_scale_tbl_info *tbl)
1487{ 1540{
1488 int i, j, n; 1541 int i, j, max_rate;
1489 enum rs_column next_col_id; 1542 enum rs_column next_col_id;
1490 const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column]; 1543 const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
1491 const struct rs_tx_column *next_col; 1544 const struct rs_tx_column *next_col;
1492 allow_column_func_t allow_func; 1545 allow_column_func_t allow_func;
1493 u8 valid_ants = mvm->fw->valid_tx_ant; 1546 u8 valid_ants = mvm->fw->valid_tx_ant;
1494 const u16 *expected_tpt_tbl; 1547 const u16 *expected_tpt_tbl;
1495 s32 tpt, max_expected_tpt; 1548 u16 tpt, max_expected_tpt;
1496 1549
1497 for (i = 0; i < MAX_NEXT_COLUMNS; i++) { 1550 for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
1498 next_col_id = curr_col->next_columns[i]; 1551 next_col_id = curr_col->next_columns[i];
@@ -1535,11 +1588,11 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1535 if (WARN_ON_ONCE(!expected_tpt_tbl)) 1588 if (WARN_ON_ONCE(!expected_tpt_tbl))
1536 continue; 1589 continue;
1537 1590
1538 max_expected_tpt = 0; 1591 max_rate = rs_get_max_allowed_rate(lq_sta, next_col);
1539 for (n = 0; n < IWL_RATE_COUNT; n++) 1592 if (WARN_ON_ONCE(max_rate == IWL_RATE_INVALID))
1540 if (expected_tpt_tbl[n] > max_expected_tpt) 1593 continue;
1541 max_expected_tpt = expected_tpt_tbl[n];
1542 1594
1595 max_expected_tpt = expected_tpt_tbl[max_rate];
1543 if (tpt >= max_expected_tpt) { 1596 if (tpt >= max_expected_tpt) {
1544 IWL_DEBUG_RATE(mvm, 1597 IWL_DEBUG_RATE(mvm,
1545 "Skip column %d: can't beat current TPT. Max expected %d current %d\n", 1598 "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
@@ -1547,14 +1600,15 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1547 continue; 1600 continue;
1548 } 1601 }
1549 1602
1603 IWL_DEBUG_RATE(mvm,
1604 "Found potential column %d. Max expected %d current %d\n",
1605 next_col_id, max_expected_tpt, tpt);
1550 break; 1606 break;
1551 } 1607 }
1552 1608
1553 if (i == MAX_NEXT_COLUMNS) 1609 if (i == MAX_NEXT_COLUMNS)
1554 return RS_COLUMN_INVALID; 1610 return RS_COLUMN_INVALID;
1555 1611
1556 IWL_DEBUG_RATE(mvm, "Found potential column %d\n", next_col_id);
1557
1558 return next_col_id; 1612 return next_col_id;
1559} 1613}
1560 1614
@@ -1640,85 +1694,76 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
1640{ 1694{
1641 enum rs_action action = RS_ACTION_STAY; 1695 enum rs_action action = RS_ACTION_STAY;
1642 1696
1643 /* Too many failures, decrease rate */
1644 if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) { 1697 if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) {
1645 IWL_DEBUG_RATE(mvm, 1698 IWL_DEBUG_RATE(mvm,
1646 "decrease rate because of low SR\n"); 1699 "Decrease rate because of low SR\n");
1647 action = RS_ACTION_DOWNSCALE; 1700 return RS_ACTION_DOWNSCALE;
1648 /* No throughput measured yet for adjacent rates; try increase. */
1649 } else if ((low_tpt == IWL_INVALID_VALUE) &&
1650 (high_tpt == IWL_INVALID_VALUE)) {
1651 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) {
1652 IWL_DEBUG_RATE(mvm,
1653 "Good SR and no high rate measurement. "
1654 "Increase rate\n");
1655 action = RS_ACTION_UPSCALE;
1656 } else if (low != IWL_RATE_INVALID) {
1657 IWL_DEBUG_RATE(mvm,
1658 "Remain in current rate\n");
1659 action = RS_ACTION_STAY;
1660 }
1661 } 1701 }
1662 1702
1663 /* Both adjacent throughputs are measured, but neither one has better 1703 if ((low_tpt == IWL_INVALID_VALUE) &&
1664 * throughput; we're using the best rate, don't change it! 1704 (high_tpt == IWL_INVALID_VALUE) &&
1665 */ 1705 (high != IWL_RATE_INVALID)) {
1666 else if ((low_tpt != IWL_INVALID_VALUE) &&
1667 (high_tpt != IWL_INVALID_VALUE) &&
1668 (low_tpt < current_tpt) &&
1669 (high_tpt < current_tpt)) {
1670 IWL_DEBUG_RATE(mvm, 1706 IWL_DEBUG_RATE(mvm,
1671 "Both high and low are worse. " 1707 "No data about high/low rates. Increase rate\n");
1672 "Maintain rate\n"); 1708 return RS_ACTION_UPSCALE;
1673 action = RS_ACTION_STAY;
1674 } 1709 }
1675 1710
1676 /* At least one adjacent rate's throughput is measured, 1711 if ((high_tpt == IWL_INVALID_VALUE) &&
1677 * and may have better performance. 1712 (high != IWL_RATE_INVALID) &&
1678 */ 1713 (low_tpt != IWL_INVALID_VALUE) &&
1679 else { 1714 (low_tpt < current_tpt)) {
1680 /* Higher adjacent rate's throughput is measured */ 1715 IWL_DEBUG_RATE(mvm,
1681 if (high_tpt != IWL_INVALID_VALUE) { 1716 "No data about high rate and low rate is worse. Increase rate\n");
1682 /* Higher rate has better throughput */ 1717 return RS_ACTION_UPSCALE;
1683 if (high_tpt > current_tpt && 1718 }
1684 sr >= IWL_RATE_INCREASE_TH) {
1685 IWL_DEBUG_RATE(mvm,
1686 "Higher rate is better and good "
1687 "SR. Increate rate\n");
1688 action = RS_ACTION_UPSCALE;
1689 } else {
1690 IWL_DEBUG_RATE(mvm,
1691 "Higher rate isn't better OR "
1692 "no good SR. Maintain rate\n");
1693 action = RS_ACTION_STAY;
1694 }
1695 1719
1696 /* Lower adjacent rate's throughput is measured */ 1720 if ((high_tpt != IWL_INVALID_VALUE) &&
1697 } else if (low_tpt != IWL_INVALID_VALUE) { 1721 (high_tpt > current_tpt)) {
1698 /* Lower rate has better throughput */ 1722 IWL_DEBUG_RATE(mvm,
1699 if (low_tpt > current_tpt) { 1723 "Higher rate is better. Increase rate\n"
1700 IWL_DEBUG_RATE(mvm, 1724 return RS_ACTION_UPSCALE;
1701 "Lower rate is better. "
1702 "Decrease rate\n");
1703 action = RS_ACTION_DOWNSCALE;
1704 } else if (sr >= IWL_RATE_INCREASE_TH) {
1705 IWL_DEBUG_RATE(mvm,
1706 "Lower rate isn't better and "
1707 "good SR. Increase rate\n");
1708 action = RS_ACTION_UPSCALE;
1709 }
1710 }
1711 } 1725 }
1712 1726
1713 /* Sanity check; asked for decrease, but success rate or throughput 1727 if ((low_tpt != IWL_INVALID_VALUE) &&
1714 * has been good at old rate. Don't change it. 1728 (high_tpt != IWL_INVALID_VALUE) &&
1715 */ 1729 (low_tpt < current_tpt) &&
1716 if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID) && 1730 (high_tpt < current_tpt)) {
1717 ((sr > IWL_RATE_HIGH_TH) || 1731 IWL_DEBUG_RATE(mvm,
1718 (current_tpt > (100 * tbl->expected_tpt[low])))) { 1732 "Both high and low are worse. Maintain rate\n");
1733 return RS_ACTION_STAY;
1734 }
1735
1736 if ((low_tpt != IWL_INVALID_VALUE) &&
1737 (low_tpt > current_tpt)) {
1738 IWL_DEBUG_RATE(mvm,
1739 "Lower rate is better\n");
1740 action = RS_ACTION_DOWNSCALE;
1741 goto out;
1742 }
1743
1744 if ((low_tpt == IWL_INVALID_VALUE) &&
1745 (low != IWL_RATE_INVALID)) {
1719 IWL_DEBUG_RATE(mvm, 1746 IWL_DEBUG_RATE(mvm,
1720 "Sanity check failed. Maintain rate\n"); 1747 "No data about lower rate\n");
1721 action = RS_ACTION_STAY; 1748 action = RS_ACTION_DOWNSCALE;
1749 goto out;
1750 }
1751
1752 IWL_DEBUG_RATE(mvm, "Maintain rate\n");
1753
1754out:
1755 if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) {
1756 if (sr >= RS_SR_NO_DECREASE) {
1757 IWL_DEBUG_RATE(mvm,
1758 "SR is above NO DECREASE. Avoid downscale\n");
1759 action = RS_ACTION_STAY;
1760 } else if (current_tpt > (100 * tbl->expected_tpt[low])) {
1761 IWL_DEBUG_RATE(mvm,
1762 "Current TPT is higher than max expected in low rate. Avoid downscale\n");
1763 action = RS_ACTION_STAY;
1764 } else {
1765 IWL_DEBUG_RATE(mvm, "Decrease rate\n");
1766 }
1722 } 1767 }
1723 1768
1724 return action; 1769 return action;
@@ -1792,6 +1837,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
1792 "Aggregation changed: prev %d current %d. Update expected TPT table\n", 1837 "Aggregation changed: prev %d current %d. Update expected TPT table\n",
1793 prev_agg, lq_sta->is_agg); 1838 prev_agg, lq_sta->is_agg);
1794 rs_set_expected_tpt_table(lq_sta, tbl); 1839 rs_set_expected_tpt_table(lq_sta, tbl);
1840 rs_rate_scale_clear_tbl_windows(mvm, tbl);
1795 } 1841 }
1796 1842
1797 /* current tx rate */ 1843 /* current tx rate */
@@ -2021,7 +2067,7 @@ lq_update:
2021 if (lq_sta->search_better_tbl) { 2067 if (lq_sta->search_better_tbl) {
2022 /* Access the "search" table, clear its history. */ 2068 /* Access the "search" table, clear its history. */
2023 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 2069 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2024 rs_rate_scale_clear_tbl_windows(tbl); 2070 rs_rate_scale_clear_tbl_windows(mvm, tbl);
2025 2071
2026 /* Use new "search" start rate */ 2072 /* Use new "search" start rate */
2027 index = tbl->rate.index; 2073 index = tbl->rate.index;
@@ -2042,8 +2088,18 @@ lq_update:
2042 * stay with best antenna legacy modulation for a while 2088 * stay with best antenna legacy modulation for a while
2043 * before next round of mode comparisons. */ 2089 * before next round of mode comparisons. */
2044 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); 2090 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2045 if (is_legacy(&tbl1->rate) && !sta->ht_cap.ht_supported) { 2091 if (is_legacy(&tbl1->rate)) {
2046 IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n"); 2092 IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
2093
2094 if (tid != IWL_MAX_TID_COUNT) {
2095 tid_data = &sta_priv->tid_data[tid];
2096 if (tid_data->state != IWL_AGG_OFF) {
2097 IWL_DEBUG_RATE(mvm,
2098 "Stop aggregation on tid %d\n",
2099 tid);
2100 ieee80211_stop_tx_ba_session(sta, tid);
2101 }
2102 }
2047 rs_set_stay_in_table(mvm, 1, lq_sta); 2103 rs_set_stay_in_table(mvm, 1, lq_sta);
2048 } else { 2104 } else {
2049 /* If we're in an HT mode, and all 3 mode switch actions 2105 /* If we're in an HT mode, and all 3 mode switch actions
@@ -2342,9 +2398,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2342 lq_sta->lq.sta_id = sta_priv->sta_id; 2398 lq_sta->lq.sta_id = sta_priv->sta_id;
2343 2399
2344 for (j = 0; j < LQ_SIZE; j++) 2400 for (j = 0; j < LQ_SIZE; j++)
2345 rs_rate_scale_clear_tbl_windows(&lq_sta->lq_info[j]); 2401 rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
2346 2402
2347 lq_sta->flush_timer = 0; 2403 lq_sta->flush_timer = 0;
2404 lq_sta->last_tx = jiffies;
2348 2405
2349 IWL_DEBUG_RATE(mvm, 2406 IWL_DEBUG_RATE(mvm,
2350 "LQ: *** rate scale station global init for station %d ***\n", 2407 "LQ: *** rate scale station global init for station %d ***\n",
@@ -2388,11 +2445,22 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2388 lq_sta->is_vht = true; 2445 lq_sta->is_vht = true;
2389 } 2446 }
2390 2447
2391 IWL_DEBUG_RATE(mvm, 2448 lq_sta->max_legacy_rate_idx = find_last_bit(&lq_sta->active_legacy_rate,
2392 "SISO-RATE=%X MIMO2-RATE=%X VHT=%d\n", 2449 BITS_PER_LONG);
2450 lq_sta->max_siso_rate_idx = find_last_bit(&lq_sta->active_siso_rate,
2451 BITS_PER_LONG);
2452 lq_sta->max_mimo2_rate_idx = find_last_bit(&lq_sta->active_mimo2_rate,
2453 BITS_PER_LONG);
2454
2455 IWL_DEBUG_RATE(mvm, "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d\n",
2456 lq_sta->active_legacy_rate,
2393 lq_sta->active_siso_rate, 2457 lq_sta->active_siso_rate,
2394 lq_sta->active_mimo2_rate, 2458 lq_sta->active_mimo2_rate,
2395 lq_sta->is_vht); 2459 lq_sta->is_vht);
2460 IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
2461 lq_sta->max_legacy_rate_idx,
2462 lq_sta->max_siso_rate_idx,
2463 lq_sta->max_mimo2_rate_idx);
2396 2464
2397 /* These values will be overridden later */ 2465 /* These values will be overridden later */
2398 lq_sta->lq.single_stream_ant_msk = 2466 lq_sta->lq.single_stream_ant_msk =
@@ -2547,6 +2615,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
2547 if (is_siso(&rate)) { 2615 if (is_siso(&rate)) {
2548 num_rates = RS_SECONDARY_SISO_NUM_RATES; 2616 num_rates = RS_SECONDARY_SISO_NUM_RATES;
2549 num_retries = RS_SECONDARY_SISO_RETRIES; 2617 num_retries = RS_SECONDARY_SISO_RETRIES;
2618 lq_cmd->mimo_delim = index;
2550 } else if (is_legacy(&rate)) { 2619 } else if (is_legacy(&rate)) {
2551 num_rates = RS_SECONDARY_LEGACY_NUM_RATES; 2620 num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
2552 num_retries = RS_LEGACY_RETRIES_PER_RATE; 2621 num_retries = RS_LEGACY_RETRIES_PER_RATE;
@@ -2749,7 +2818,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2749 return -ENOMEM; 2818 return -ENOMEM;
2750 2819
2751 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); 2820 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2752 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", 2821 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n",
2753 lq_sta->total_failed, lq_sta->total_success, 2822 lq_sta->total_failed, lq_sta->total_success,
2754 lq_sta->active_legacy_rate); 2823 lq_sta->active_legacy_rate);
2755 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 2824 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 3332b396011e..0acfac96a56c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -156,6 +156,7 @@ enum {
156#define IWL_RATE_HIGH_TH 10880 /* 85% */ 156#define IWL_RATE_HIGH_TH 10880 /* 85% */
157#define IWL_RATE_INCREASE_TH 6400 /* 50% */ 157#define IWL_RATE_INCREASE_TH 6400 /* 50% */
158#define RS_SR_FORCE_DECREASE 1920 /* 15% */ 158#define RS_SR_FORCE_DECREASE 1920 /* 15% */
159#define RS_SR_NO_DECREASE 10880 /* 85% */
159 160
160#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ 161#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
161#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) 162#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
@@ -310,13 +311,20 @@ struct iwl_lq_sta {
310 u32 visited_columns; /* Bitmask marking which Tx columns were 311 u32 visited_columns; /* Bitmask marking which Tx columns were
311 * explored during a search cycle 312 * explored during a search cycle
312 */ 313 */
314 u64 last_tx;
313 bool is_vht; 315 bool is_vht;
314 enum ieee80211_band band; 316 enum ieee80211_band band;
315 317
316 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 318 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
317 u16 active_legacy_rate; 319 unsigned long active_legacy_rate;
318 u16 active_siso_rate; 320 unsigned long active_siso_rate;
319 u16 active_mimo2_rate; 321 unsigned long active_mimo2_rate;
322
323 /* Highest rate per Tx mode */
324 u8 max_legacy_rate_idx;
325 u8 max_siso_rate_idx;
326 u8 max_mimo2_rate_idx;
327
320 s8 max_rate_idx; /* Max rate set by user */ 328 s8 max_rate_idx; /* Max rate set by user */
321 u8 missed_rate_counter; 329 u8 missed_rate_counter;
322 330
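The rate bitmaps above widen from u16 to unsigned long because the rs.c hunk earlier in this patch derives max_legacy_rate_idx, max_siso_rate_idx and max_mimo2_rate_idx with find_last_bit(), which walks unsigned long words. A minimal sketch of that lookup semantics, with made-up values and names:

#include <linux/bitops.h>
#include <linux/printk.h>

/*
 * Illustration only: find_last_bit() returns the index of the highest
 * set bit, or its 'size' argument when the bitmap is empty, which is
 * why an all-zero rate mask has to be treated as "no rate allowed".
 */
static void rate_mask_sketch(void)
{
	unsigned long active_siso_rate = 0x1f0;	/* rates 4..8 enabled (made up) */
	unsigned long max_idx;

	max_idx = find_last_bit(&active_siso_rate, BITS_PER_LONG);
	pr_debug("highest enabled rate index: %lu\n", max_idx);	/* prints 8 */

	/* find_last_bit(&empty_mask, BITS_PER_LONG) would return BITS_PER_LONG */
}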
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c91dc8498852..c28de54c75d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
277 IEEE80211_IFACE_ITER_NORMAL, 277 IEEE80211_IFACE_ITER_NORMAL,
278 iwl_mvm_scan_condition_iterator, 278 iwl_mvm_scan_condition_iterator,
279 &global_bound); 279 &global_bound);
280 /*
281 * Under low latency traffic passive scan is fragmented meaning
282 * that dwell on a particular channel will be fragmented. Each fragment
283 * dwell time is 20ms and fragments period is 105ms. Skipping to next
284 * channel will be delayed by the same period - 105ms. So suspend_time
285 * parameter describing both fragments and channels skipping periods is
286 * set to 105ms. This value is chosen so that overall passive scan
287 * duration will not be too long. Max_out_time in this case is set to
288 * 70ms, so for active scanning operating channel will be left for 70ms
289 * while for passive still for 20ms (fragment dwell).
290 */
291 if (global_bound) {
292 if (!iwl_mvm_low_latency(mvm)) {
293 params->suspend_time = ieee80211_tu_to_usec(100);
294 params->max_out_time = ieee80211_tu_to_usec(600);
295 } else {
296 params->suspend_time = ieee80211_tu_to_usec(105);
297 /* P2P doesn't support fragmented passive scan, so
298 * configure max_out_time to be at least longest dwell
299 * time for passive scan.
300 */
301 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
302 params->max_out_time = ieee80211_tu_to_usec(70);
303 params->passive_fragmented = true;
304 } else {
305 u32 passive_dwell;
306 280
307 /* 281 if (!global_bound)
308 * Use band G so that passive channel dwell time 282 goto not_bound;
309 * will be assigned with maximum value. 283
310 */ 284 params->suspend_time = 100;
311 band = IEEE80211_BAND_2GHZ; 285 params->max_out_time = 600;
312 passive_dwell = iwl_mvm_get_passive_dwell(band); 286
313 params->max_out_time = 287 if (iwl_mvm_low_latency(mvm)) {
314 ieee80211_tu_to_usec(passive_dwell); 288 params->suspend_time = 250;
315 } 289 params->max_out_time = 250;
316 }
317 } 290 }
318 291
292not_bound:
293
319 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { 294 for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
320 if (params->passive_fragmented) 295 params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
321 params->dwell[band].passive = 20;
322 else
323 params->dwell[band].passive =
324 iwl_mvm_get_passive_dwell(band);
325 params->dwell[band].active = iwl_mvm_get_active_dwell(band, 296 params->dwell[band].active = iwl_mvm_get_active_dwell(band,
326 n_ssids); 297 n_ssids);
327 } 298 }
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
761 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; 732 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
762 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; 733 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
763 int head = 0; 734 int head = 0;
764 int tail = band_2ghz + band_5ghz; 735 int tail = band_2ghz + band_5ghz - 1;
765 u32 ssid_bitmap; 736 u32 ssid_bitmap;
766 int cmd_len; 737 int cmd_len;
767 int ret; 738 int ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
index 8401627c0030..88809b2d1654 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -274,7 +274,8 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
274 return -EINVAL; 274 return -EINVAL;
275 if (changed_vif->type != NL80211_IFTYPE_STATION) { 275 if (changed_vif->type != NL80211_IFTYPE_STATION) {
276 new_state = SF_UNINIT; 276 new_state = SF_UNINIT;
277 } else if (changed_vif->bss_conf.assoc) { 277 } else if (changed_vif->bss_conf.assoc &&
278 changed_vif->bss_conf.dtim_period) {
278 mvmvif = iwl_mvm_vif_from_mac80211(changed_vif); 279 mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
279 sta_id = mvmvif->ap_sta_id; 280 sta_id = mvmvif->ap_sta_id;
280 new_state = SF_FULL_ON; 281 new_state = SF_FULL_ON;
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index d619851745a1..2180902266ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
644 644
645 return result; 645 return result;
646} 646}
647
648static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
649{
650 bool *idle = _data;
651
652 if (!vif->bss_conf.idle)
653 *idle = false;
654}
655
656bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
657{
658 bool idle = true;
659
660 ieee80211_iterate_active_interfaces_atomic(
661 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
662 iwl_mvm_idle_iter, &idle);
663
664 return idle;
665}
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index edb015c99049..3d1d57f9f5bc 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -373,12 +373,14 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
373 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)}, 373 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
374 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, 374 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
375 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, 375 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
376 {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
376 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, 377 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
377 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, 378 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
378 {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, 379 {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
379 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, 380 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
380 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
381 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, 382 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
383 {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)},
382 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, 384 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
383 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, 385 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
384 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, 386 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dcfd6d866d09..2365553f1ef7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1749 * PCI Tx retries from interfering with C3 CPU state */ 1749 * PCI Tx retries from interfering with C3 CPU state */
1750 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 1750 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
1751 1751
1752 trans->dev = &pdev->dev;
1753 trans_pcie->pci_dev = pdev;
1754 iwl_disable_interrupts(trans);
1755
1752 err = pci_enable_msi(pdev); 1756 err = pci_enable_msi(pdev);
1753 if (err) { 1757 if (err) {
1754 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); 1758 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1760 } 1764 }
1761 } 1765 }
1762 1766
1763 trans->dev = &pdev->dev;
1764 trans_pcie->pci_dev = pdev;
1765 trans->hw_rev = iwl_read32(trans, CSR_HW_REV); 1767 trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
1766 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 1768 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
1767 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 1769 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1787 goto out_pci_disable_msi; 1789 goto out_pci_disable_msi;
1788 } 1790 }
1789 1791
1790 trans_pcie->inta_mask = CSR_INI_SET_MASK;
1791
1792 if (iwl_pcie_alloc_ict(trans)) 1792 if (iwl_pcie_alloc_ict(trans))
1793 goto out_free_cmd_pool; 1793 goto out_free_cmd_pool;
1794 1794
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1800 goto out_free_ict; 1800 goto out_free_ict;
1801 } 1801 }
1802 1802
1803 trans_pcie->inta_mask = CSR_INI_SET_MASK;
1804
1803 return trans; 1805 return trans;
1804 1806
1805out_free_ict: 1807out_free_ict:
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 77db0886c6e2..9c771b3e9918 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -292,6 +292,12 @@ process_start:
292 while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) 292 while ((skb = skb_dequeue(&adapter->usb_rx_data_q)))
293 mwifiex_handle_rx_packet(adapter, skb); 293 mwifiex_handle_rx_packet(adapter, skb);
294 294
295 /* Check for event */
296 if (adapter->event_received) {
297 adapter->event_received = false;
298 mwifiex_process_event(adapter);
299 }
300
295 /* Check for Cmd Resp */ 301 /* Check for Cmd Resp */
296 if (adapter->cmd_resp_received) { 302 if (adapter->cmd_resp_received) {
297 adapter->cmd_resp_received = false; 303 adapter->cmd_resp_received = false;
@@ -304,12 +310,6 @@ process_start:
304 } 310 }
305 } 311 }
306 312
307 /* Check for event */
308 if (adapter->event_received) {
309 adapter->event_received = false;
310 mwifiex_process_event(adapter);
311 }
312
313 /* Check if we need to confirm Sleep Request 313 /* Check if we need to confirm Sleep Request
314 received previously */ 314 received previously */
315 if (adapter->ps_state == PS_STATE_PRE_SLEEP) { 315 if (adapter->ps_state == PS_STATE_PRE_SLEEP) {
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 894270611f2c..536c14aa71f3 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -60,9 +60,10 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
60 int status; 60 int status;
61 61
62 /* Wait for completion */ 62 /* Wait for completion */
63 status = wait_event_interruptible(adapter->cmd_wait_q.wait, 63 status = wait_event_interruptible_timeout(adapter->cmd_wait_q.wait,
64 *(cmd_queued->condition)); 64 *(cmd_queued->condition),
65 if (status) { 65 (12 * HZ));
66 if (status <= 0) {
66 dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status); 67 dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
67 mwifiex_cancel_all_pending_cmd(adapter); 68 mwifiex_cancel_all_pending_cmd(adapter);
68 return status; 69 return status;
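The switch to wait_event_interruptible_timeout() is also why the error test changes from "if (status)" to "if (status <= 0)": the timeout variant returns a negative value (-ERESTARTSYS) when a signal interrupts the wait, 0 when the 12 second timeout expires with the condition still false, and the remaining jiffies (at least 1) on success. A minimal sketch of that return convention, using illustrative names that are not taken from the mwifiex driver:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

/*
 * Sketch only: shows how the three possible outcomes of
 * wait_event_interruptible_timeout() map onto error codes.
 */
static int wait_for_done_sketch(wait_queue_head_t *wq, bool *done)
{
	long status = wait_event_interruptible_timeout(*wq, *done, 12 * HZ);

	if (status < 0)			/* interrupted by a signal */
		return status;
	if (status == 0)		/* timed out, condition still false */
		return -ETIMEDOUT;
	return 0;			/* condition met with 'status' jiffies left */
}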
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index 1a8d32138593..cf61d6e3eaa7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -88,7 +88,7 @@ static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
88 bool recontend_queue = false; 88 bool recontend_queue = false;
89 u32 q_len = 0; 89 u32 q_len = 0;
90 u8 q_num = INVALID_QUEUE; 90 u8 q_num = INVALID_QUEUE;
91 u8 ii, min = 0; 91 u8 ii = 0, min = 0;
92 92
93 if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) { 93 if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
94 if (!common->mgmt_q_block) 94 if (!common->mgmt_q_block)
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 73694295648f..1b28cda6ca88 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -841,16 +841,6 @@ int rsi_set_channel(struct rsi_common *common, u16 channel)
841 rsi_dbg(MGMT_TX_ZONE, 841 rsi_dbg(MGMT_TX_ZONE,
842 "%s: Sending scan req frame\n", __func__); 842 "%s: Sending scan req frame\n", __func__);
843 843
844 skb = dev_alloc_skb(FRAME_DESC_SZ);
845 if (!skb) {
846 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
847 __func__);
848 return -ENOMEM;
849 }
850
851 memset(skb->data, 0, FRAME_DESC_SZ);
852 mgmt_frame = (struct rsi_mac_frame *)skb->data;
853
854 if (common->band == IEEE80211_BAND_5GHZ) { 844 if (common->band == IEEE80211_BAND_5GHZ) {
855 if ((channel >= 36) && (channel <= 64)) 845 if ((channel >= 36) && (channel <= 64))
856 channel = ((channel - 32) / 4); 846 channel = ((channel - 32) / 4);
@@ -868,6 +858,16 @@ int rsi_set_channel(struct rsi_common *common, u16 channel)
868 } 858 }
869 } 859 }
870 860
861 skb = dev_alloc_skb(FRAME_DESC_SZ);
862 if (!skb) {
863 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
864 __func__);
865 return -ENOMEM;
866 }
867
868 memset(skb->data, 0, FRAME_DESC_SZ);
869 mgmt_frame = (struct rsi_mac_frame *)skb->data;
870
871 mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); 871 mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
872 mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST); 872 mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST);
873 mgmt_frame->desc_word[4] = cpu_to_le16(channel); 873 mgmt_frame->desc_word[4] = cpu_to_le16(channel);
@@ -966,6 +966,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common)
966 if (!selected_rates) { 966 if (!selected_rates) {
967 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n", 967 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n",
968 __func__); 968 __func__);
969 dev_kfree_skb(skb);
969 return -ENOMEM; 970 return -ENOMEM;
970 } 971 }
971 972
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index ddeb5a709aa3..a87ee9b6585a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -621,20 +621,18 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
621 bss_conf->bssid); 621 bss_conf->bssid);
622 622
623 /* 623 /*
624 * Update the beacon. This is only required on USB devices. PCI
625 * devices fetch beacons periodically.
626 */
627 if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
628 rt2x00queue_update_beacon(rt2x00dev, vif);
629
630 /*
631 * Start/stop beaconing. 624 * Start/stop beaconing.
632 */ 625 */
633 if (changes & BSS_CHANGED_BEACON_ENABLED) { 626 if (changes & BSS_CHANGED_BEACON_ENABLED) {
634 if (!bss_conf->enable_beacon && intf->enable_beacon) { 627 if (!bss_conf->enable_beacon && intf->enable_beacon) {
635 rt2x00queue_clear_beacon(rt2x00dev, vif);
636 rt2x00dev->intf_beaconing--; 628 rt2x00dev->intf_beaconing--;
637 intf->enable_beacon = false; 629 intf->enable_beacon = false;
630 /*
631 * Clear beacon in the H/W for this vif. This is needed
632 * to disable beaconing on this particular interface
633 * and keep it running on other interfaces.
634 */
635 rt2x00queue_clear_beacon(rt2x00dev, vif);
638 636
639 if (rt2x00dev->intf_beaconing == 0) { 637 if (rt2x00dev->intf_beaconing == 0) {
640 /* 638 /*
@@ -645,11 +643,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
645 rt2x00queue_stop_queue(rt2x00dev->bcn); 643 rt2x00queue_stop_queue(rt2x00dev->bcn);
646 mutex_unlock(&intf->beacon_skb_mutex); 644 mutex_unlock(&intf->beacon_skb_mutex);
647 } 645 }
648
649
650 } else if (bss_conf->enable_beacon && !intf->enable_beacon) { 646 } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
651 rt2x00dev->intf_beaconing++; 647 rt2x00dev->intf_beaconing++;
652 intf->enable_beacon = true; 648 intf->enable_beacon = true;
649 /*
650 * Upload beacon to the H/W. This is only required on
651 * USB devices. PCI devices fetch beacons periodically.
652 */
653 if (rt2x00_is_usb(rt2x00dev))
654 rt2x00queue_update_beacon(rt2x00dev, vif);
653 655
654 if (rt2x00dev->intf_beaconing == 1) { 656 if (rt2x00dev->intf_beaconing == 1) {
655 /* 657 /*
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index 06ef47cd6203..5b4c225396f2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -293,7 +293,7 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
293 u8 *psaddr; 293 u8 *psaddr;
294 __le16 fc; 294 __le16 fc;
295 u16 type, ufc; 295 u16 type, ufc;
296 bool match_bssid, packet_toself, packet_beacon, addr; 296 bool match_bssid, packet_toself, packet_beacon = false, addr;
297 297
298 tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; 298 tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
299 299
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 68b5c7e92cfb..07cb06da6729 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1001,7 +1001,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
1001 err = _rtl92cu_init_mac(hw); 1001 err = _rtl92cu_init_mac(hw);
1002 if (err) { 1002 if (err) {
1003 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n"); 1003 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
1004 return err; 1004 goto exit;
1005 } 1005 }
1006 err = rtl92c_download_fw(hw); 1006 err = rtl92c_download_fw(hw);
1007 if (err) { 1007 if (err) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 36b48be8329c..2b3c78baa9f8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -49,6 +49,12 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
49 if (ieee80211_is_nullfunc(fc)) 49 if (ieee80211_is_nullfunc(fc))
50 return QSLT_HIGH; 50 return QSLT_HIGH;
51 51
52 /* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use
53 * queue VO at priority 7; however, the RTL8192SE appears to have
54 * that queue at priority 6
55 */
56 if (skb->priority == 7)
57 return QSLT_VO;
52 return skb->priority; 58 return skb->priority;
53} 59}
54 60
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index 398f3d2c0a6c..a76e98eb8372 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -68,6 +68,26 @@ struct wl18xx_event_mailbox {
68 68
69 /* bitmap of inactive stations (by HLID) */ 69 /* bitmap of inactive stations (by HLID) */
70 __le32 inactive_sta_bitmap; 70 __le32 inactive_sta_bitmap;
71
72 /* rx BA win size indicated by RX_BA_WIN_SIZE_CHANGE_EVENT_ID */
73 u8 rx_ba_role_id;
74 u8 rx_ba_link_id;
75 u8 rx_ba_win_size;
76 u8 padding;
77
78 /* smart config */
79 u8 sc_ssid_len;
80 u8 sc_pwd_len;
81 u8 sc_token_len;
82 u8 padding1;
83 u8 sc_ssid[32];
84 u8 sc_pwd[32];
85 u8 sc_token[32];
86
87 /* smart config sync channel */
88 u8 sc_sync_channel;
89 u8 sc_sync_band;
90 u8 padding2[2];
71} __packed; 91} __packed;
72 92
73int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, 93int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 1f9a36031b06..16d10281798d 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -158,6 +158,11 @@ EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
158 158
159void wlcore_event_dummy_packet(struct wl1271 *wl) 159void wlcore_event_dummy_packet(struct wl1271 *wl)
160{ 160{
161 if (wl->plt) {
162 wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring.");
163 return;
164 }
165
161 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); 166 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
162 wl1271_tx_dummy_packet(wl); 167 wl1271_tx_dummy_packet(wl);
163} 168}
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 630a3fcf65bc..0d4a285cbd7e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
226 grant_ref_t rx_ring_ref); 226 grant_ref_t rx_ring_ref);
227 227
228/* Check for SKBs from frontend and schedule backend processing */ 228/* Check for SKBs from frontend and schedule backend processing */
229void xenvif_check_rx_xenvif(struct xenvif *vif); 229void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
230 230
231/* Prevent the device from generating any further traffic. */ 231/* Prevent the device from generating any further traffic. */
232void xenvif_carrier_off(struct xenvif *vif); 232void xenvif_carrier_off(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ef05c5c49d41..20e9defa1060 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
75 work_done = xenvif_tx_action(vif, budget); 75 work_done = xenvif_tx_action(vif, budget);
76 76
77 if (work_done < budget) { 77 if (work_done < budget) {
78 int more_to_do = 0; 78 napi_complete(napi);
79 unsigned long flags; 79 xenvif_napi_schedule_or_enable_events(vif);
80
81 /* It is necessary to disable IRQ before calling
82 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
83 * lose an event from the frontend.
84 *
85 * Consider:
86 * RING_HAS_UNCONSUMED_REQUESTS
87 * <frontend generates event to trigger napi_schedule>
88 * __napi_complete
89 *
90 * This handler is still in scheduled state so the
91 * event has no effect at all. After __napi_complete
92 * this handler is descheduled and cannot get
93 * scheduled again. We lose the event in this case and the ring
94 * will be completely stalled.
95 */
96
97 local_irq_save(flags);
98
99 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
100 if (!more_to_do)
101 __napi_complete(napi);
102
103 local_irq_restore(flags);
104 } 80 }
105 81
106 return work_done; 82 return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
194 enable_irq(vif->tx_irq); 170 enable_irq(vif->tx_irq);
195 if (vif->tx_irq != vif->rx_irq) 171 if (vif->tx_irq != vif->rx_irq)
196 enable_irq(vif->rx_irq); 172 enable_irq(vif->rx_irq);
197 xenvif_check_rx_xenvif(vif); 173 xenvif_napi_schedule_or_enable_events(vif);
198} 174}
199 175
200static void xenvif_down(struct xenvif *vif) 176static void xenvif_down(struct xenvif *vif)
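The comment deleted from xenvif_poll() above explained why __napi_complete() had to be paired with an IRQ-disabled final check of the ring; that check now happens after napi_complete() inside the renamed helper xenvif_napi_schedule_or_enable_events() (the netback.c hunk below shows only its first lines). A plausible sketch of its body, presumed from the old xenvif_check_rx_xenvif() rather than taken from this diff:

/*
 * Presumed body, not shown in this diff: re-check the Tx ring and either
 * reschedule NAPI or leave ring event delivery enabled so the frontend
 * can raise a fresh interrupt.  Relies on the driver's common.h for
 * struct xenvif and on xen/interface/io/ring.h for the ring macro.
 */
void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}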
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 76665405c5aa..7367208ee8cd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
104 104
105/* Find the containing VIF's structure from a pointer in pending_tx_info array 105/* Find the containing VIF's structure from a pointer in pending_tx_info array
106 */ 106 */
107static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf) 107static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
108{ 108{
109 u16 pending_idx = ubuf->desc; 109 u16 pending_idx = ubuf->desc;
110 struct pending_tx_info *temp = 110 struct pending_tx_info *temp =
@@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
323} 323}
324 324
325/* 325/*
326 * Find the grant ref for a given frag in a chain of struct ubuf_info's
327 * skb: the skb itself
328 * i: the frag's number
329 * ubuf: a pointer to an element in the chain. It should not be NULL
330 *
331 * Returns a pointer to the element in the chain where the page was found. If
332 * not found, returns NULL.
333 * See the definition of callback_struct in common.h for more details about
334 * the chain.
335 */
336static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
337 const int i,
338 const struct ubuf_info *ubuf)
339{
340 struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
341
342 do {
343 u16 pending_idx = ubuf->desc;
344
345 if (skb_shinfo(skb)->frags[i].page.p ==
346 foreign_vif->mmap_pages[pending_idx])
347 break;
348 ubuf = (struct ubuf_info *) ubuf->ctx;
349 } while (ubuf);
350
351 return ubuf;
352}
353
354/*
326 * Prepare an SKB to be transmitted to the frontend. 355 * Prepare an SKB to be transmitted to the frontend.
327 * 356 *
328 * This function is responsible for allocating grant operations, meta 357 * This function is responsible for allocating grant operations, meta
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
346 int head = 1; 375 int head = 1;
347 int old_meta_prod; 376 int old_meta_prod;
348 int gso_type; 377 int gso_type;
349 struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; 378 const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
350 grant_ref_t foreign_grefs[MAX_SKB_FRAGS]; 379 const struct ubuf_info *const head_ubuf = ubuf;
351 struct xenvif *foreign_vif = NULL;
352 380
353 old_meta_prod = npo->meta_prod; 381 old_meta_prod = npo->meta_prod;
354 382
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
386 npo->copy_off = 0; 414 npo->copy_off = 0;
387 npo->copy_gref = req->gref; 415 npo->copy_gref = req->gref;
388 416
389 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
390 (ubuf->callback == &xenvif_zerocopy_callback)) {
391 int i = 0;
392 foreign_vif = ubuf_to_vif(ubuf);
393
394 do {
395 u16 pending_idx = ubuf->desc;
396 foreign_grefs[i++] =
397 foreign_vif->pending_tx_info[pending_idx].req.gref;
398 ubuf = (struct ubuf_info *) ubuf->ctx;
399 } while (ubuf);
400 }
401
402 data = skb->data; 417 data = skb->data;
403 while (data < skb_tail_pointer(skb)) { 418 while (data < skb_tail_pointer(skb)) {
404 unsigned int offset = offset_in_page(data); 419 unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
415 } 430 }
416 431
417 for (i = 0; i < nr_frags; i++) { 432 for (i = 0; i < nr_frags; i++) {
433 /* This variable also signals whether foreign_gref has a real
434 * value or not.
435 */
436 struct xenvif *foreign_vif = NULL;
437 grant_ref_t foreign_gref;
438
439 if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
440 (ubuf->callback == &xenvif_zerocopy_callback)) {
441 const struct ubuf_info *const startpoint = ubuf;
442
443 /* Ideally ubuf points to the chain element which
444 * belongs to this frag. Or if frags were removed from
445 * the beginning, then shortly before it.
446 */
447 ubuf = xenvif_find_gref(skb, i, ubuf);
448
449 /* Try again from the beginning of the list, if we
450 * haven't tried from there yet. This only makes sense
451 * in the unlikely event that the original frags were
452 * reordered; for injected local pages it is an
453 * unnecessary second pass.
454 */
455 if (unlikely(!ubuf) && startpoint != head_ubuf)
456 ubuf = xenvif_find_gref(skb, i, head_ubuf);
457
458 if (likely(ubuf)) {
459 u16 pending_idx = ubuf->desc;
460
461 foreign_vif = ubuf_to_vif(ubuf);
462 foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
463 /* Just a safety measure. If this was the last
464 * element on the list, the for loop will iterate
465 * again if a local page was added to the end.
466 * Using head_ubuf here avoids a second search of
467 * the chain. The original frags could also have
468 * changed order, but that's less likely. In any
469 * case, ubuf shouldn't be NULL.
470 */
471 ubuf = ubuf->ctx ?
472 (struct ubuf_info *) ubuf->ctx :
473 head_ubuf;
474 } else
475 /* This frag was a local page, added to the
476 * array after the skb left netback.
477 */
478 ubuf = head_ubuf;
479 }
418 xenvif_gop_frag_copy(vif, skb, npo, 480 xenvif_gop_frag_copy(vif, skb, npo,
419 skb_frag_page(&skb_shinfo(skb)->frags[i]), 481 skb_frag_page(&skb_shinfo(skb)->frags[i]),
420 skb_frag_size(&skb_shinfo(skb)->frags[i]), 482 skb_frag_size(&skb_shinfo(skb)->frags[i]),
421 skb_shinfo(skb)->frags[i].page_offset, 483 skb_shinfo(skb)->frags[i].page_offset,
422 &head, 484 &head,
423 foreign_vif, 485 foreign_vif,
424 foreign_grefs[i]); 486 foreign_vif ? foreign_gref : UINT_MAX);
425 } 487 }
426 488
427 return npo->meta_prod - old_meta_prod; 489 return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
654 notify_remote_via_irq(vif->rx_irq); 716 notify_remote_via_irq(vif->rx_irq);
655} 717}
656 718
657void xenvif_check_rx_xenvif(struct xenvif *vif) 719void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
658{ 720{
659 int more_to_do; 721 int more_to_do;
660 722
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
688{ 750{
689 struct xenvif *vif = (struct xenvif *)data; 751 struct xenvif *vif = (struct xenvif *)data;
690 tx_add_credit(vif); 752 tx_add_credit(vif);
691 xenvif_check_rx_xenvif(vif); 753 xenvif_napi_schedule_or_enable_events(vif);
692} 754}
693 755
694static void xenvif_tx_err(struct xenvif *vif, 756static void xenvif_tx_err(struct xenvif *vif,
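Note on the frag lookup added above: xenvif_find_gref() is a plain linear walk of the ubuf_info chain, comparing the page backing frag i against the page mapped for each element's pending_idx, and the retry logic in xenvif_gop_skb() falls back to head_ubuf when the page is not found (a locally injected frag). A self-contained userspace sketch of the same walk, using hypothetical simplified stand-ins (fake_ubuf, mmap_pages) rather than the real kernel structures, might look like this:

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the kernel structures. */
struct fake_ubuf {
	unsigned short desc;      /* pending_idx, as in ubuf_info->desc */
	struct fake_ubuf *ctx;    /* next element in the chain */
};

#define NPAGES 4
static void *mmap_pages[NPAGES];  /* page mapped for each pending_idx */

/* Walk the chain until an element maps the same page as the frag. */
static struct fake_ubuf *find_gref(void *frag_page, struct fake_ubuf *ubuf)
{
	while (ubuf) {
		if (mmap_pages[ubuf->desc] == frag_page)
			return ubuf;
		ubuf = ubuf->ctx;
	}
	return NULL;   /* frag is a locally injected page */
}

int main(void)
{
	static char pages[NPAGES][16];
	struct fake_ubuf c = { .desc = 2, .ctx = NULL };
	struct fake_ubuf b = { .desc = 1, .ctx = &c };
	struct fake_ubuf a = { .desc = 0, .ctx = &b };
	int i;

	for (i = 0; i < NPAGES; i++)
		mmap_pages[i] = pages[i];

	/* Page in the chain: found at pending_idx 1. */
	printf("pending_idx %u\n", (unsigned)find_gref(pages[1], &a)->desc);
	/* Page not referenced by any chain element: treated as local. */
	printf("local page? %s\n", find_gref(pages[3], &a) ? "no" : "yes");
	return 0;
}

Running it prints pending_idx 1 for the page that is in the chain and reports the unreferenced page as local, which mirrors the head_ubuf fallback case in xenvif_gop_skb().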
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f72d19b7e5d2..32e969d95319 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1828,17 +1828,17 @@ int of_update_property(struct device_node *np, struct property *newprop)
1828 next = &(*next)->next; 1828 next = &(*next)->next;
1829 } 1829 }
1830 raw_spin_unlock_irqrestore(&devtree_lock, flags); 1830 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1831 if (rc) 1831 if (!found)
1832 return rc; 1832 return -ENODEV;
1833
1834 /* At early boot, bail out and defer setup to of_init() */
1835 if (!of_kset)
1836 return found ? 0 : -ENODEV;
1833 1837
1834 /* Update the sysfs attribute */ 1838 /* Update the sysfs attribute */
1835 if (oldprop) 1839 sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
1836 sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
1837 __of_add_property_sysfs(np, newprop); 1840 __of_add_property_sysfs(np, newprop);
1838 1841
1839 if (!found)
1840 return -ENODEV;
1841
1842 return 0; 1842 return 0;
1843} 1843}
1844 1844
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index fa16a912a927..7a2ef7bb8022 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -491,7 +491,7 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
491 * in /reserved-memory matches the values supported by the current implementation, 491 * in /reserved-memory matches the values supported by the current implementation,
492 * also check if ranges property has been provided 492 * also check if ranges property has been provided
493 */ 493 */
494static int __reserved_mem_check_root(unsigned long node) 494static int __init __reserved_mem_check_root(unsigned long node)
495{ 495{
496 __be32 *prop; 496 __be32 *prop;
497 497
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 9bcf2cf19357..5aeb89411350 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -364,7 +364,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
364 364
365 memset(r, 0, sizeof(*r)); 365 memset(r, 0, sizeof(*r));
366 /* 366 /*
367 * Get optional "interrupts-names" property to add a name 367 * Get optional "interrupt-names" property to add a name
368 * to the resource. 368 * to the resource.
369 */ 369 */
370 of_property_read_string_index(dev, "interrupt-names", index, 370 of_property_read_string_index(dev, "interrupt-names", index,
@@ -380,6 +380,32 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
380EXPORT_SYMBOL_GPL(of_irq_to_resource); 380EXPORT_SYMBOL_GPL(of_irq_to_resource);
381 381
382/** 382/**
383 * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
384 * @dev: pointer to device tree node
385 * @index: zero-based index of the irq
386 *
387 * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
388 * is not yet created.
389 *
390 */
391int of_irq_get(struct device_node *dev, int index)
392{
393 int rc;
394 struct of_phandle_args oirq;
395 struct irq_domain *domain;
396
397 rc = of_irq_parse_one(dev, index, &oirq);
398 if (rc)
399 return rc;
400
401 domain = irq_find_host(oirq.np);
402 if (!domain)
403 return -EPROBE_DEFER;
404
405 return irq_create_of_mapping(&oirq);
406}
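A minimal, illustrative consumer sketch for the new return convention (the "vendor,foo" compatible string and all foo_* names are made up here, not part of this series): when the interrupt parent's irq domain has not been registered yet, of_irq_get() returns -EPROBE_DEFER, and the selftest added further down expects platform_get_irq() to hand that same value back to the driver, so a probe routine should simply propagate it rather than treat it as a hard failure.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq == -EPROBE_DEFER)
		return irq;	/* irq domain not there yet, retry later */
	if (irq < 0)
		return irq;	/* real parsing/mapping error */

	dev_info(&pdev->dev, "using irq %d\n", irq);
	return 0;
}

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct platform_driver foo_driver = {
	.probe = foo_probe,
	.driver = {
		.name = "foo",
		.of_match_table = foo_of_match,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");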
407
408/**
383 * of_irq_count - Count the number of IRQs a node uses 409 * of_irq_count - Count the number of IRQs a node uses
384 * @dev: pointer to device tree node 410 * @dev: pointer to device tree node
385 */ 411 */
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 404d1daebefa..bd47fbc53dc9 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -168,7 +168,9 @@ struct platform_device *of_device_alloc(struct device_node *np,
168 rc = of_address_to_resource(np, i, res); 168 rc = of_address_to_resource(np, i, res);
169 WARN_ON(rc); 169 WARN_ON(rc);
170 } 170 }
171 WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq); 171 if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
172 pr_debug("not all legacy IRQ resources mapped for %s\n",
173 np->name);
172 } 174 }
173 175
174 dev->dev.of_node = of_node_get(np); 176 dev->dev.of_node = of_node_get(np);
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index ae4450070503..fe70b86bcffb 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -10,6 +10,7 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/of.h> 11#include <linux/of.h>
12#include <linux/of_irq.h> 12#include <linux/of_irq.h>
13#include <linux/of_platform.h>
13#include <linux/list.h> 14#include <linux/list.h>
14#include <linux/mutex.h> 15#include <linux/mutex.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
@@ -427,6 +428,36 @@ static void __init of_selftest_match_node(void)
427 } 428 }
428} 429}
429 430
431static void __init of_selftest_platform_populate(void)
432{
433 int irq;
434 struct device_node *np;
435 struct platform_device *pdev;
436
437 np = of_find_node_by_path("/testcase-data");
438 of_platform_populate(np, of_default_bus_match_table, NULL, NULL);
439
440 /* Test that a missing irq domain returns -EPROBE_DEFER */
441 np = of_find_node_by_path("/testcase-data/testcase-device1");
442 pdev = of_find_device_by_node(np);
443 if (!pdev)
444 selftest(0, "device 1 creation failed\n");
445 irq = platform_get_irq(pdev, 0);
446 if (irq != -EPROBE_DEFER)
447 selftest(0, "device deferred probe failed - %d\n", irq);
448
449 /* Test that a parsing failure does not return -EPROBE_DEFER */
450 np = of_find_node_by_path("/testcase-data/testcase-device2");
451 pdev = of_find_device_by_node(np);
452 if (!pdev)
453 selftest(0, "device 2 creation failed\n");
454 irq = platform_get_irq(pdev, 0);
455 if (irq >= 0 || irq == -EPROBE_DEFER)
456 selftest(0, "device parsing error failed - %d\n", irq);
457
458 selftest(1, "passed");
459}
460
430static int __init of_selftest(void) 461static int __init of_selftest(void)
431{ 462{
432 struct device_node *np; 463 struct device_node *np;
@@ -445,6 +476,7 @@ static int __init of_selftest(void)
445 of_selftest_parse_interrupts(); 476 of_selftest_parse_interrupts();
446 of_selftest_parse_interrupts_extended(); 477 of_selftest_parse_interrupts_extended();
447 of_selftest_match_node(); 478 of_selftest_match_node();
479 of_selftest_platform_populate();
448 pr_info("end of selftest - %i passed, %i failed\n", 480 pr_info("end of selftest - %i passed, %i failed\n",
449 selftest_results.passed, selftest_results.failed); 481 selftest_results.passed, selftest_results.failed);
450 return 0; 482 return 0;
diff --git a/drivers/of/testcase-data/tests-interrupts.dtsi b/drivers/of/testcase-data/tests-interrupts.dtsi
index c843720bd3e5..da4695f60351 100644
--- a/drivers/of/testcase-data/tests-interrupts.dtsi
+++ b/drivers/of/testcase-data/tests-interrupts.dtsi
@@ -54,5 +54,18 @@
54 <&test_intmap1 1 2>; 54 <&test_intmap1 1 2>;
55 }; 55 };
56 }; 56 };
57
58 testcase-device1 {
59 compatible = "testcase-device";
60 interrupt-parent = <&test_intc0>;
61 interrupts = <1>;
62 };
63
64 testcase-device2 {
65 compatible = "testcase-device";
66 interrupt-parent = <&test_intc2>;
67 interrupts = <1>; /* invalid specifier - too short */
68 };
57 }; 69 };
70
58}; 71};
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index d3d1cfd51e09..e384e2534594 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -293,6 +293,58 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
293 return PCIBIOS_SUCCESSFUL; 293 return PCIBIOS_SUCCESSFUL;
294} 294}
295 295
296/*
297 * Remove windows, starting from the largest ones to the smallest
298 * ones.
299 */
300static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
301 phys_addr_t base, size_t size)
302{
303 while (size) {
304 size_t sz = 1 << (fls(size) - 1);
305
306 mvebu_mbus_del_window(base, sz);
307 base += sz;
308 size -= sz;
309 }
310}
311
312/*
313 * MBus windows can only have a power of two size, but PCI BARs do not
314 * have this constraint. Therefore, we have to split the PCI BAR into
315 * areas each having a power of two size. We start from the largest
316 * one (i.e. the highest order bit set in the size).
317 */
318static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
319 unsigned int target, unsigned int attribute,
320 phys_addr_t base, size_t size,
321 phys_addr_t remap)
322{
323 size_t size_mapped = 0;
324
325 while (size) {
326 size_t sz = 1 << (fls(size) - 1);
327 int ret;
328
329 ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
330 sz, remap);
331 if (ret) {
332 dev_err(&port->pcie->pdev->dev,
333 "Could not create MBus window at 0x%x, size 0x%x: %d\n",
334 base, sz, ret);
335 mvebu_pcie_del_windows(port, base - size_mapped,
336 size_mapped);
337 return;
338 }
339
340 size -= sz;
341 size_mapped += sz;
342 base += sz;
343 if (remap != MVEBU_MBUS_NO_REMAP)
344 remap += sz;
345 }
346}
347
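The splitting done by mvebu_pcie_add_windows() above is driven purely by fls(): each step peels off the largest power-of-two chunk that still fits, so a non-power-of-two BAR ends up covered by several MBus windows. A small, self-contained userspace sketch of that decomposition (an illustration, not the driver code; top_pow2() is a local helper standing in for 1 << (fls(size) - 1)):

#include <stdio.h>

/* Largest power of two less than or equal to v (v > 0). */
static unsigned long top_pow2(unsigned long v)
{
	unsigned long p = 1;

	while (v >>= 1)
		p <<= 1;
	return p;
}

/* Peel off power-of-two chunks, largest first, like
 * mvebu_pcie_add_windows()/mvebu_pcie_del_windows() do.
 */
static void split_windows(unsigned long base, unsigned long size)
{
	while (size) {
		unsigned long sz = top_pow2(size);

		printf("  window at 0x%lx, size 0x%lx\n", base, sz);
		base += sz;
		size -= sz;
	}
}

int main(void)
{
	/* A 1.25 MB region becomes a 1 MB window plus a 256 KB window. */
	split_windows(0xe0000000UL, 0x140000UL);
	return 0;
}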
296static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) 348static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
297{ 349{
298 phys_addr_t iobase; 350 phys_addr_t iobase;
@@ -304,8 +356,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
304 356
305 /* If a window was configured, remove it */ 357 /* If a window was configured, remove it */
306 if (port->iowin_base) { 358 if (port->iowin_base) {
307 mvebu_mbus_del_window(port->iowin_base, 359 mvebu_pcie_del_windows(port, port->iowin_base,
308 port->iowin_size); 360 port->iowin_size);
309 port->iowin_base = 0; 361 port->iowin_base = 0;
310 port->iowin_size = 0; 362 port->iowin_size = 0;
311 } 363 }
@@ -331,11 +383,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
331 port->iowin_base = port->pcie->io.start + iobase; 383 port->iowin_base = port->pcie->io.start + iobase;
332 port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | 384 port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
333 (port->bridge.iolimitupper << 16)) - 385 (port->bridge.iolimitupper << 16)) -
334 iobase); 386 iobase) + 1;
335 387
336 mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr, 388 mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
337 port->iowin_base, port->iowin_size, 389 port->iowin_base, port->iowin_size,
338 iobase); 390 iobase);
339} 391}
340 392
341static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) 393static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
@@ -346,8 +398,8 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
346 398
347 /* If a window was configured, remove it */ 399 /* If a window was configured, remove it */
348 if (port->memwin_base) { 400 if (port->memwin_base) {
349 mvebu_mbus_del_window(port->memwin_base, 401 mvebu_pcie_del_windows(port, port->memwin_base,
350 port->memwin_size); 402 port->memwin_size);
351 port->memwin_base = 0; 403 port->memwin_base = 0;
352 port->memwin_size = 0; 404 port->memwin_size = 0;
353 } 405 }
@@ -364,10 +416,11 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
364 port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16); 416 port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
365 port->memwin_size = 417 port->memwin_size =
366 (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - 418 (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
367 port->memwin_base; 419 port->memwin_base + 1;
368 420
369 mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr, 421 mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
370 port->memwin_base, port->memwin_size); 422 port->memwin_base, port->memwin_size,
423 MVEBU_MBUS_NO_REMAP);
371} 424}
372 425
373/* 426/*
@@ -743,14 +796,21 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
743 796
744 /* 797 /*
745 * On the PCI-to-PCI bridge side, the I/O windows must have at 798 * On the PCI-to-PCI bridge side, the I/O windows must have at
746 * least a 64 KB size and be aligned on their size, and the 799 * least a 64 KB size and the memory windows must have at
747 * memory windows must have at least a 1 MB size and be 800 * least a 1 MB size. Moreover, MBus windows need to have a
748 * aligned on their size 801 * base address aligned on their size, and their size must be
802 * a power of two. This means that if the BAR doesn't have a
803 * power of two size, several MBus windows will actually be
804 * created. We need to ensure that the biggest MBus window
805 * (which will be the first one) is aligned on its size, which
806 * explains the rounddown_pow_of_two() being done here.
749 */ 807 */
750 if (res->flags & IORESOURCE_IO) 808 if (res->flags & IORESOURCE_IO)
751 return round_up(start, max_t(resource_size_t, SZ_64K, size)); 809 return round_up(start, max_t(resource_size_t, SZ_64K,
810 rounddown_pow_of_two(size)));
752 else if (res->flags & IORESOURCE_MEM) 811 else if (res->flags & IORESOURCE_MEM)
753 return round_up(start, max_t(resource_size_t, SZ_1M, size)); 812 return round_up(start, max_t(resource_size_t, SZ_1M,
813 rounddown_pow_of_two(size)));
754 else 814 else
755 return start; 815 return start;
756} 816}
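The alignment hook above rounds the resource start up to the size of the biggest MBus window rather than to the full, possibly non-power-of-two BAR size. A userspace sketch of that arithmetic (the helpers below are simplified local equivalents of the kernel's rounddown_pow_of_two() and round_up(), valid only for power-of-two alignments; the start and size values are arbitrary examples):

#include <stdio.h>

#define SZ_1M 0x100000UL

static unsigned long rounddown_p2(unsigned long v)
{
	unsigned long p = 1;

	while (v >>= 1)
		p <<= 1;
	return p;
}

/* Power-of-two round up, like the kernel's round_up(). */
static unsigned long round_up_to(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	unsigned long start = 0xe0050000UL;	/* example resource start */
	unsigned long size  = 0x140000UL;	/* 1.25 MB memory BAR */
	unsigned long align = rounddown_p2(size);	/* 0x100000 */

	if (align < SZ_1M)
		align = SZ_1M;

	/* The biggest (1 MB) MBus window must be aligned on its own size. */
	printf("aligned start: 0x%lx\n", round_up_to(start, align));
	return 0;
}

For the 1.25 MB example the alignment used is 1 MB, so 0xe0050000 is pushed up to 0xe0100000, keeping the first and largest window naturally aligned.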
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index fd3e3ab56509..4fe349dcaf59 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -15,6 +15,7 @@
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/of_pci.h>
18#include <linux/pci.h> 19#include <linux/pci.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
20#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
@@ -180,8 +181,13 @@ static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
180{ 181{
181 struct pci_sys_data *sys = dev->bus->sysdata; 182 struct pci_sys_data *sys = dev->bus->sysdata;
182 struct rcar_pci_priv *priv = sys->private_data; 183 struct rcar_pci_priv *priv = sys->private_data;
184 int irq;
185
186 irq = of_irq_parse_and_map_pci(dev, slot, pin);
187 if (!irq)
188 irq = priv->irq;
183 189
184 return priv->irq; 190 return irq;
185} 191}
186 192
187#ifdef CONFIG_PCI_DEBUG 193#ifdef CONFIG_PCI_DEBUG
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 330f7e3a32dd..083cf37ca047 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -639,10 +639,15 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
639static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) 639static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
640{ 640{
641 struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata); 641 struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
642 int irq;
642 643
643 tegra_cpuidle_pcie_irqs_in_use(); 644 tegra_cpuidle_pcie_irqs_in_use();
644 645
645 return pcie->irq; 646 irq = of_irq_parse_and_map_pci(pdev, slot, pin);
647 if (!irq)
648 irq = pcie->irq;
649
650 return irq;
646} 651}
647 652
648static void tegra_pcie_add_bus(struct pci_bus *bus) 653static void tegra_pcie_add_bus(struct pci_bus *bus)
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 509a29d84509..c4e373294476 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/msi.h> 18#include <linux/msi.h>
19#include <linux/of_address.h> 19#include <linux/of_address.h>
20#include <linux/of_pci.h>
20#include <linux/pci.h> 21#include <linux/pci.h>
21#include <linux/pci_regs.h> 22#include <linux/pci_regs.h>
22#include <linux/types.h> 23#include <linux/types.h>
@@ -490,7 +491,7 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
490 dw_pci.nr_controllers = 1; 491 dw_pci.nr_controllers = 1;
491 dw_pci.private_data = (void **)&pp; 492 dw_pci.private_data = (void **)&pp;
492 493
493 pci_common_init(&dw_pci); 494 pci_common_init_dev(pp->dev, &dw_pci);
494 pci_assign_unassigned_resources(); 495 pci_assign_unassigned_resources();
495#ifdef CONFIG_PCI_DOMAINS 496#ifdef CONFIG_PCI_DOMAINS
496 dw_pci.domain++; 497 dw_pci.domain++;
@@ -520,13 +521,13 @@ static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
520 dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, 521 dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
521 PCIE_ATU_VIEWPORT); 522 PCIE_ATU_VIEWPORT);
522 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); 523 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
523 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
524 dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE); 524 dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE);
525 dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE); 525 dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE);
526 dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1, 526 dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
527 PCIE_ATU_LIMIT); 527 PCIE_ATU_LIMIT);
528 dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); 528 dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
529 dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); 529 dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
530 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
530} 531}
531 532
532static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) 533static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
@@ -535,7 +536,6 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
535 dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, 536 dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
536 PCIE_ATU_VIEWPORT); 537 PCIE_ATU_VIEWPORT);
537 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); 538 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
538 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
539 dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE); 539 dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE);
540 dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE); 540 dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE);
541 dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1, 541 dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
@@ -543,6 +543,7 @@ static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
543 dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET); 543 dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
544 dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr), 544 dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
545 PCIE_ATU_UPPER_TARGET); 545 PCIE_ATU_UPPER_TARGET);
546 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
546} 547}
547 548
548static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) 549static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
@@ -551,7 +552,6 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
551 dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, 552 dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
552 PCIE_ATU_VIEWPORT); 553 PCIE_ATU_VIEWPORT);
553 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); 554 dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
554 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
555 dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE); 555 dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE);
556 dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE); 556 dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE);
557 dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1, 557 dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
@@ -559,6 +559,7 @@ static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
559 dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET); 559 dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
560 dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr), 560 dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
561 PCIE_ATU_UPPER_TARGET); 561 PCIE_ATU_UPPER_TARGET);
562 dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
562} 563}
563 564
564static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, 565static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
@@ -723,7 +724,7 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
723 724
724 if (pp) { 725 if (pp) {
725 pp->root_bus_nr = sys->busnr; 726 pp->root_bus_nr = sys->busnr;
726 bus = pci_scan_root_bus(NULL, sys->busnr, &dw_pcie_ops, 727 bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops,
727 sys, &sys->resources); 728 sys, &sys->resources);
728 } else { 729 } else {
729 bus = NULL; 730 bus = NULL;
@@ -736,8 +737,13 @@ static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
736static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 737static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
737{ 738{
738 struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata); 739 struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
740 int irq;
741
742 irq = of_irq_parse_and_map_pci(dev, slot, pin);
743 if (!irq)
744 irq = pp->irq;
739 745
740 return pp->irq; 746 return irq;
741} 747}
742 748
743static void dw_pcie_add_bus(struct pci_bus *bus) 749static void dw_pcie_add_bus(struct pci_bus *bus)
@@ -764,7 +770,7 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
764 u32 membase; 770 u32 membase;
765 u32 memlimit; 771 u32 memlimit;
766 772
767 /* set the number of lines as 4 */ 773 /* set the number of lanes */
768 dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val); 774 dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
769 val &= ~PORT_LINK_MODE_MASK; 775 val &= ~PORT_LINK_MODE_MASK;
770 switch (pp->lanes) { 776 switch (pp->lanes) {
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 58499277903a..6efc2ec5e4db 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot)
282 return WRONG_BUS_FREQUENCY; 282 return WRONG_BUS_FREQUENCY;
283 } 283 }
284 284
285 bsp = ctrl->pci_dev->bus->cur_bus_speed; 285 bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
286 msp = ctrl->pci_dev->bus->max_bus_speed; 286 msp = ctrl->pci_dev->subordinate->max_bus_speed;
287 287
288 /* Check if there are other slots or devices on the same bus */ 288 /* Check if there are other slots or devices on the same bus */
289 if (!list_empty(&ctrl->pci_dev->subordinate->devices)) 289 if (!list_empty(&ctrl->pci_dev->subordinate->devices))
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7325d43bf030..759475ef6ff3 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3067,7 +3067,8 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
3067 if (!pci_is_pcie(dev)) 3067 if (!pci_is_pcie(dev))
3068 return 1; 3068 return 1;
3069 3069
3070 return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND); 3070 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
3071 PCI_EXP_DEVSTA_TRPND);
3071} 3072}
3072EXPORT_SYMBOL(pci_wait_for_pending_transaction); 3073EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3073 3074
@@ -3109,7 +3110,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
3109 return 0; 3110 return 0;
3110 3111
3111 /* Wait for Transaction Pending bit clean */ 3112 /* Wait for Transaction Pending bit clean */
3112 if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP)) 3113 if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
3113 goto clear; 3114 goto clear;
3114 3115
3115 dev_err(&dev->dev, "transaction is not cleared; " 3116 dev_err(&dev->dev, "transaction is not cleared; "
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 3bb05f17b9b4..4906c27fa3bd 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -33,6 +33,7 @@ config PHY_MVEBU_SATA
33 33
34config OMAP_CONTROL_PHY 34config OMAP_CONTROL_PHY
35 tristate "OMAP CONTROL PHY Driver" 35 tristate "OMAP CONTROL PHY Driver"
36 depends on ARCH_OMAP2PLUS || COMPILE_TEST
36 help 37 help
37 Enable this to add support for the PHY part present in the control 38 Enable this to add support for the PHY part present in the control
38 module. This driver has API to power on the USB2 PHY and to write to 39 module. This driver has API to power on the USB2 PHY and to write to
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 2faf78edc864..7728518572a4 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -13,8 +13,9 @@ obj-$(CONFIG_TI_PIPE3) += phy-ti-pipe3.o
13obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o 13obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
14obj-$(CONFIG_PHY_EXYNOS5250_SATA) += phy-exynos5250-sata.o 14obj-$(CONFIG_PHY_EXYNOS5250_SATA) += phy-exynos5250-sata.o
15obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o 15obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o
16obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-samsung-usb2.o 16obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-exynos-usb2.o
17obj-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o 17phy-exynos-usb2-y += phy-samsung-usb2.o
18obj-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o 18phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o
19obj-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o 19phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o
20phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o
20obj-$(CONFIG_PHY_XGENE) += phy-xgene.o 21obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 623b71c54b3e..c64a2f3b2d62 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -64,6 +64,9 @@ static struct phy *phy_lookup(struct device *device, const char *port)
64 class_dev_iter_init(&iter, phy_class, NULL, NULL); 64 class_dev_iter_init(&iter, phy_class, NULL, NULL);
65 while ((dev = class_dev_iter_next(&iter))) { 65 while ((dev = class_dev_iter_next(&iter))) {
66 phy = to_phy(dev); 66 phy = to_phy(dev);
67
68 if (!phy->init_data)
69 continue;
67 count = phy->init_data->num_consumers; 70 count = phy->init_data->num_consumers;
68 consumers = phy->init_data->consumers; 71 consumers = phy->init_data->consumers;
69 while (count--) { 72 while (count--) {
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index e49324032611..e00c02d0a094 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -104,16 +104,16 @@ config PINCTRL_BCM2835
104 select PINMUX 104 select PINMUX
105 select PINCONF 105 select PINCONF
106 106
107config PINCTRL_CAPRI 107config PINCTRL_BCM281XX
108 bool "Broadcom Capri pinctrl driver" 108 bool "Broadcom BCM281xx pinctrl driver"
109 depends on OF 109 depends on OF
110 select PINMUX 110 select PINMUX
111 select PINCONF 111 select PINCONF
112 select GENERIC_PINCONF 112 select GENERIC_PINCONF
113 select REGMAP_MMIO 113 select REGMAP_MMIO
114 help 114 help
115 Say Y here to support Broadcom Capri pinctrl driver, which is used for 115 Say Y here to support Broadcom BCM281xx pinctrl driver, which is used
116 the BCM281xx SoC family, including BCM11130, BCM11140, BCM11351, 116 for the BCM281xx SoC family, including BCM11130, BCM11140, BCM11351,
117 BCM28145, and BCM28155 SoCs. This driver requires the pinctrl 117 BCM28145, and BCM28155 SoCs. This driver requires the pinctrl
118 framework. GPIO is provided by a separate GPIO driver. 118 framework. GPIO is provided by a separate GPIO driver.
119 119
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 4b835880cf80..6d3fd62b9ae8 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -21,7 +21,7 @@ obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
21obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o 21obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
22obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o 22obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
23obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o 23obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
24obj-$(CONFIG_PINCTRL_CAPRI) += pinctrl-capri.o 24obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
25obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o 25obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o
26obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o 26obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o
27obj-$(CONFIG_PINCTRL_IMX27) += pinctrl-imx27.o 27obj-$(CONFIG_PINCTRL_IMX27) += pinctrl-imx27.o
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 92ed4b2e3c07..c862f9c0e9ce 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -64,7 +64,6 @@ struct as3722_pin_function {
64}; 64};
65 65
66struct as3722_gpio_pin_control { 66struct as3722_gpio_pin_control {
67 bool enable_gpio_invert;
68 unsigned mode_prop; 67 unsigned mode_prop;
69 int io_function; 68 int io_function;
70}; 69};
@@ -320,10 +319,8 @@ static int as3722_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev,
320 return mode; 319 return mode;
321 } 320 }
322 321
323 if (as_pci->gpio_control[offset].enable_gpio_invert) 322 return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset),
324 mode |= AS3722_GPIO_INV; 323 AS3722_GPIO_MODE_MASK, mode);
325
326 return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode);
327} 324}
328 325
329static const struct pinmux_ops as3722_pinmux_ops = { 326static const struct pinmux_ops as3722_pinmux_ops = {
@@ -496,10 +493,18 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
496{ 493{
497 struct as3722_pctrl_info *as_pci = to_as_pci(chip); 494 struct as3722_pctrl_info *as_pci = to_as_pci(chip);
498 struct as3722 *as3722 = as_pci->as3722; 495 struct as3722 *as3722 = as_pci->as3722;
499 int en_invert = as_pci->gpio_control[offset].enable_gpio_invert; 496 int en_invert;
500 u32 val; 497 u32 val;
501 int ret; 498 int ret;
502 499
500 ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val);
501 if (ret < 0) {
502 dev_err(as_pci->dev,
503 "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret);
504 return;
505 }
506 en_invert = !!(val & AS3722_GPIO_INV);
507
503 if (value) 508 if (value)
504 val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset); 509 val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset);
505 else 510 else
diff --git a/drivers/pinctrl/pinctrl-bcm281xx.c b/drivers/pinctrl/pinctrl-bcm281xx.c
new file mode 100644
index 000000000000..3bed792b2c03
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-bcm281xx.c
@@ -0,0 +1,1461 @@
1/*
2 * Copyright (C) 2013 Broadcom Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#include <linux/err.h>
14#include <linux/io.h>
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/platform_device.h>
18#include <linux/pinctrl/pinctrl.h>
19#include <linux/pinctrl/pinmux.h>
20#include <linux/pinctrl/pinconf.h>
21#include <linux/pinctrl/pinconf-generic.h>
22#include <linux/regmap.h>
23#include <linux/slab.h>
24#include "core.h"
25#include "pinctrl-utils.h"
26
27/* BCM281XX Pin Control Registers Definitions */
28
29/* Function Select bits are the same for all pin control registers */
30#define BCM281XX_PIN_REG_F_SEL_MASK 0x0700
31#define BCM281XX_PIN_REG_F_SEL_SHIFT 8
32
33/* Standard pin register */
34#define BCM281XX_STD_PIN_REG_DRV_STR_MASK 0x0007
35#define BCM281XX_STD_PIN_REG_DRV_STR_SHIFT 0
36#define BCM281XX_STD_PIN_REG_INPUT_DIS_MASK 0x0008
37#define BCM281XX_STD_PIN_REG_INPUT_DIS_SHIFT 3
38#define BCM281XX_STD_PIN_REG_SLEW_MASK 0x0010
39#define BCM281XX_STD_PIN_REG_SLEW_SHIFT 4
40#define BCM281XX_STD_PIN_REG_PULL_UP_MASK 0x0020
41#define BCM281XX_STD_PIN_REG_PULL_UP_SHIFT 5
42#define BCM281XX_STD_PIN_REG_PULL_DN_MASK 0x0040
43#define BCM281XX_STD_PIN_REG_PULL_DN_SHIFT 6
44#define BCM281XX_STD_PIN_REG_HYST_MASK 0x0080
45#define BCM281XX_STD_PIN_REG_HYST_SHIFT 7
46
47/* I2C pin register */
48#define BCM281XX_I2C_PIN_REG_INPUT_DIS_MASK 0x0004
49#define BCM281XX_I2C_PIN_REG_INPUT_DIS_SHIFT 2
50#define BCM281XX_I2C_PIN_REG_SLEW_MASK 0x0008
51#define BCM281XX_I2C_PIN_REG_SLEW_SHIFT 3
52#define BCM281XX_I2C_PIN_REG_PULL_UP_STR_MASK 0x0070
53#define BCM281XX_I2C_PIN_REG_PULL_UP_STR_SHIFT 4
54
55/* HDMI pin register */
56#define BCM281XX_HDMI_PIN_REG_INPUT_DIS_MASK 0x0008
57#define BCM281XX_HDMI_PIN_REG_INPUT_DIS_SHIFT 3
58#define BCM281XX_HDMI_PIN_REG_MODE_MASK 0x0010
59#define BCM281XX_HDMI_PIN_REG_MODE_SHIFT 4
60
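The *_MASK/*_SHIFT pairs above describe the pad-control register layout that the driver programs through the regmap it sets up (see struct bcm281xx_pinctrl_data below). As a quick, generic illustration of how such a pair is used (a userspace sketch, not code from this driver; set_field() and the 0x00a5 starting value are made up for the example), selecting pin function 3 in a standard pad register is a read-modify-write of that one field:

#include <stdio.h>

#define BCM281XX_PIN_REG_F_SEL_MASK	0x0700
#define BCM281XX_PIN_REG_F_SEL_SHIFT	8

/* Generic read-modify-write of one register field. */
static unsigned int set_field(unsigned int reg, unsigned int mask,
			      unsigned int shift, unsigned int val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	unsigned int reg = 0x00a5;	/* arbitrary example old value */

	reg = set_field(reg, BCM281XX_PIN_REG_F_SEL_MASK,
			BCM281XX_PIN_REG_F_SEL_SHIFT, 3);
	printf("reg = 0x%04x\n", reg);	/* prints 0x03a5 */
	return 0;
}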
61/**
62 * bcm281xx_pin_type - types of pin register
63 */
64enum bcm281xx_pin_type {
65 BCM281XX_PIN_TYPE_UNKNOWN = 0,
66 BCM281XX_PIN_TYPE_STD,
67 BCM281XX_PIN_TYPE_I2C,
68 BCM281XX_PIN_TYPE_HDMI,
69};
70
71static enum bcm281xx_pin_type std_pin = BCM281XX_PIN_TYPE_STD;
72static enum bcm281xx_pin_type i2c_pin = BCM281XX_PIN_TYPE_I2C;
73static enum bcm281xx_pin_type hdmi_pin = BCM281XX_PIN_TYPE_HDMI;
74
75/**
76 * bcm281xx_pin_function - define pin function
77 */
78struct bcm281xx_pin_function {
79 const char *name;
80 const char * const *groups;
81 const unsigned ngroups;
82};
83
84/**
85 * bcm281xx_pinctrl_data - Broadcom-specific pinctrl data
86 * @reg_base - base of pinctrl registers
87 */
88struct bcm281xx_pinctrl_data {
89 void __iomem *reg_base;
90
91 /* List of all pins */
92 const struct pinctrl_pin_desc *pins;
93 const unsigned npins;
94
95 const struct bcm281xx_pin_function *functions;
96 const unsigned nfunctions;
97
98 struct regmap *regmap;
99};
100
101/*
102 * Pin number definition. The order here must be the same as defined in the
103 * PADCTRLREG block in the RDB.
104 */
105#define BCM281XX_PIN_ADCSYNC 0
106#define BCM281XX_PIN_BAT_RM 1
107#define BCM281XX_PIN_BSC1_SCL 2
108#define BCM281XX_PIN_BSC1_SDA 3
109#define BCM281XX_PIN_BSC2_SCL 4
110#define BCM281XX_PIN_BSC2_SDA 5
111#define BCM281XX_PIN_CLASSGPWR 6
112#define BCM281XX_PIN_CLK_CX8 7
113#define BCM281XX_PIN_CLKOUT_0 8
114#define BCM281XX_PIN_CLKOUT_1 9
115#define BCM281XX_PIN_CLKOUT_2 10
116#define BCM281XX_PIN_CLKOUT_3 11
117#define BCM281XX_PIN_CLKREQ_IN_0 12
118#define BCM281XX_PIN_CLKREQ_IN_1 13
119#define BCM281XX_PIN_CWS_SYS_REQ1 14
120#define BCM281XX_PIN_CWS_SYS_REQ2 15
121#define BCM281XX_PIN_CWS_SYS_REQ3 16
122#define BCM281XX_PIN_DIGMIC1_CLK 17
123#define BCM281XX_PIN_DIGMIC1_DQ 18
124#define BCM281XX_PIN_DIGMIC2_CLK 19
125#define BCM281XX_PIN_DIGMIC2_DQ 20
126#define BCM281XX_PIN_GPEN13 21
127#define BCM281XX_PIN_GPEN14 22
128#define BCM281XX_PIN_GPEN15 23
129#define BCM281XX_PIN_GPIO00 24
130#define BCM281XX_PIN_GPIO01 25
131#define BCM281XX_PIN_GPIO02 26
132#define BCM281XX_PIN_GPIO03 27
133#define BCM281XX_PIN_GPIO04 28
134#define BCM281XX_PIN_GPIO05 29
135#define BCM281XX_PIN_GPIO06 30
136#define BCM281XX_PIN_GPIO07 31
137#define BCM281XX_PIN_GPIO08 32
138#define BCM281XX_PIN_GPIO09 33
139#define BCM281XX_PIN_GPIO10 34
140#define BCM281XX_PIN_GPIO11 35
141#define BCM281XX_PIN_GPIO12 36
142#define BCM281XX_PIN_GPIO13 37
143#define BCM281XX_PIN_GPIO14 38
144#define BCM281XX_PIN_GPS_PABLANK 39
145#define BCM281XX_PIN_GPS_TMARK 40
146#define BCM281XX_PIN_HDMI_SCL 41
147#define BCM281XX_PIN_HDMI_SDA 42
148#define BCM281XX_PIN_IC_DM 43
149#define BCM281XX_PIN_IC_DP 44
150#define BCM281XX_PIN_KP_COL_IP_0 45
151#define BCM281XX_PIN_KP_COL_IP_1 46
152#define BCM281XX_PIN_KP_COL_IP_2 47
153#define BCM281XX_PIN_KP_COL_IP_3 48
154#define BCM281XX_PIN_KP_ROW_OP_0 49
155#define BCM281XX_PIN_KP_ROW_OP_1 50
156#define BCM281XX_PIN_KP_ROW_OP_2 51
157#define BCM281XX_PIN_KP_ROW_OP_3 52
158#define BCM281XX_PIN_LCD_B_0 53
159#define BCM281XX_PIN_LCD_B_1 54
160#define BCM281XX_PIN_LCD_B_2 55
161#define BCM281XX_PIN_LCD_B_3 56
162#define BCM281XX_PIN_LCD_B_4 57
163#define BCM281XX_PIN_LCD_B_5 58
164#define BCM281XX_PIN_LCD_B_6 59
165#define BCM281XX_PIN_LCD_B_7 60
166#define BCM281XX_PIN_LCD_G_0 61
167#define BCM281XX_PIN_LCD_G_1 62
168#define BCM281XX_PIN_LCD_G_2 63
169#define BCM281XX_PIN_LCD_G_3 64
170#define BCM281XX_PIN_LCD_G_4 65
171#define BCM281XX_PIN_LCD_G_5 66
172#define BCM281XX_PIN_LCD_G_6 67
173#define BCM281XX_PIN_LCD_G_7 68
174#define BCM281XX_PIN_LCD_HSYNC 69
175#define BCM281XX_PIN_LCD_OE 70
176#define BCM281XX_PIN_LCD_PCLK 71
177#define BCM281XX_PIN_LCD_R_0 72
178#define BCM281XX_PIN_LCD_R_1 73
179#define BCM281XX_PIN_LCD_R_2 74
180#define BCM281XX_PIN_LCD_R_3 75
181#define BCM281XX_PIN_LCD_R_4 76
182#define BCM281XX_PIN_LCD_R_5 77
183#define BCM281XX_PIN_LCD_R_6 78
184#define BCM281XX_PIN_LCD_R_7 79
185#define BCM281XX_PIN_LCD_VSYNC 80
186#define BCM281XX_PIN_MDMGPIO0 81
187#define BCM281XX_PIN_MDMGPIO1 82
188#define BCM281XX_PIN_MDMGPIO2 83
189#define BCM281XX_PIN_MDMGPIO3 84
190#define BCM281XX_PIN_MDMGPIO4 85
191#define BCM281XX_PIN_MDMGPIO5 86
192#define BCM281XX_PIN_MDMGPIO6 87
193#define BCM281XX_PIN_MDMGPIO7 88
194#define BCM281XX_PIN_MDMGPIO8 89
195#define BCM281XX_PIN_MPHI_DATA_0 90
196#define BCM281XX_PIN_MPHI_DATA_1 91
197#define BCM281XX_PIN_MPHI_DATA_2 92
198#define BCM281XX_PIN_MPHI_DATA_3 93
199#define BCM281XX_PIN_MPHI_DATA_4 94
200#define BCM281XX_PIN_MPHI_DATA_5 95
201#define BCM281XX_PIN_MPHI_DATA_6 96
202#define BCM281XX_PIN_MPHI_DATA_7 97
203#define BCM281XX_PIN_MPHI_DATA_8 98
204#define BCM281XX_PIN_MPHI_DATA_9 99
205#define BCM281XX_PIN_MPHI_DATA_10 100
206#define BCM281XX_PIN_MPHI_DATA_11 101
207#define BCM281XX_PIN_MPHI_DATA_12 102
208#define BCM281XX_PIN_MPHI_DATA_13 103
209#define BCM281XX_PIN_MPHI_DATA_14 104
210#define BCM281XX_PIN_MPHI_DATA_15 105
211#define BCM281XX_PIN_MPHI_HA0 106
212#define BCM281XX_PIN_MPHI_HAT0 107
213#define BCM281XX_PIN_MPHI_HAT1 108
214#define BCM281XX_PIN_MPHI_HCE0_N 109
215#define BCM281XX_PIN_MPHI_HCE1_N 110
216#define BCM281XX_PIN_MPHI_HRD_N 111
217#define BCM281XX_PIN_MPHI_HWR_N 112
218#define BCM281XX_PIN_MPHI_RUN0 113
219#define BCM281XX_PIN_MPHI_RUN1 114
220#define BCM281XX_PIN_MTX_SCAN_CLK 115
221#define BCM281XX_PIN_MTX_SCAN_DATA 116
222#define BCM281XX_PIN_NAND_AD_0 117
223#define BCM281XX_PIN_NAND_AD_1 118
224#define BCM281XX_PIN_NAND_AD_2 119
225#define BCM281XX_PIN_NAND_AD_3 120
226#define BCM281XX_PIN_NAND_AD_4 121
227#define BCM281XX_PIN_NAND_AD_5 122
228#define BCM281XX_PIN_NAND_AD_6 123
229#define BCM281XX_PIN_NAND_AD_7 124
230#define BCM281XX_PIN_NAND_ALE 125
231#define BCM281XX_PIN_NAND_CEN_0 126
232#define BCM281XX_PIN_NAND_CEN_1 127
233#define BCM281XX_PIN_NAND_CLE 128
234#define BCM281XX_PIN_NAND_OEN 129
235#define BCM281XX_PIN_NAND_RDY_0 130
236#define BCM281XX_PIN_NAND_RDY_1 131
237#define BCM281XX_PIN_NAND_WEN 132
238#define BCM281XX_PIN_NAND_WP 133
239#define BCM281XX_PIN_PC1 134
240#define BCM281XX_PIN_PC2 135
241#define BCM281XX_PIN_PMU_INT 136
242#define BCM281XX_PIN_PMU_SCL 137
243#define BCM281XX_PIN_PMU_SDA 138
244#define BCM281XX_PIN_RFST2G_MTSLOTEN3G 139
245#define BCM281XX_PIN_RGMII_0_RX_CTL 140
246#define BCM281XX_PIN_RGMII_0_RXC 141
247#define BCM281XX_PIN_RGMII_0_RXD_0 142
248#define BCM281XX_PIN_RGMII_0_RXD_1 143
249#define BCM281XX_PIN_RGMII_0_RXD_2 144
250#define BCM281XX_PIN_RGMII_0_RXD_3 145
251#define BCM281XX_PIN_RGMII_0_TX_CTL 146
252#define BCM281XX_PIN_RGMII_0_TXC 147
253#define BCM281XX_PIN_RGMII_0_TXD_0 148
254#define BCM281XX_PIN_RGMII_0_TXD_1 149
255#define BCM281XX_PIN_RGMII_0_TXD_2 150
256#define BCM281XX_PIN_RGMII_0_TXD_3 151
257#define BCM281XX_PIN_RGMII_1_RX_CTL 152
258#define BCM281XX_PIN_RGMII_1_RXC 153
259#define BCM281XX_PIN_RGMII_1_RXD_0 154
260#define BCM281XX_PIN_RGMII_1_RXD_1 155
261#define BCM281XX_PIN_RGMII_1_RXD_2 156
262#define BCM281XX_PIN_RGMII_1_RXD_3 157
263#define BCM281XX_PIN_RGMII_1_TX_CTL 158
264#define BCM281XX_PIN_RGMII_1_TXC 159
265#define BCM281XX_PIN_RGMII_1_TXD_0 160
266#define BCM281XX_PIN_RGMII_1_TXD_1 161
267#define BCM281XX_PIN_RGMII_1_TXD_2 162
268#define BCM281XX_PIN_RGMII_1_TXD_3 163
269#define BCM281XX_PIN_RGMII_GPIO_0 164
270#define BCM281XX_PIN_RGMII_GPIO_1 165
271#define BCM281XX_PIN_RGMII_GPIO_2 166
272#define BCM281XX_PIN_RGMII_GPIO_3 167
273#define BCM281XX_PIN_RTXDATA2G_TXDATA3G1 168
274#define BCM281XX_PIN_RTXEN2G_TXDATA3G2 169
275#define BCM281XX_PIN_RXDATA3G0 170
276#define BCM281XX_PIN_RXDATA3G1 171
277#define BCM281XX_PIN_RXDATA3G2 172
278#define BCM281XX_PIN_SDIO1_CLK 173
279#define BCM281XX_PIN_SDIO1_CMD 174
280#define BCM281XX_PIN_SDIO1_DATA_0 175
281#define BCM281XX_PIN_SDIO1_DATA_1 176
282#define BCM281XX_PIN_SDIO1_DATA_2 177
283#define BCM281XX_PIN_SDIO1_DATA_3 178
284#define BCM281XX_PIN_SDIO4_CLK 179
285#define BCM281XX_PIN_SDIO4_CMD 180
286#define BCM281XX_PIN_SDIO4_DATA_0 181
287#define BCM281XX_PIN_SDIO4_DATA_1 182
288#define BCM281XX_PIN_SDIO4_DATA_2 183
289#define BCM281XX_PIN_SDIO4_DATA_3 184
290#define BCM281XX_PIN_SIM_CLK 185
291#define BCM281XX_PIN_SIM_DATA 186
292#define BCM281XX_PIN_SIM_DET 187
293#define BCM281XX_PIN_SIM_RESETN 188
294#define BCM281XX_PIN_SIM2_CLK 189
295#define BCM281XX_PIN_SIM2_DATA 190
296#define BCM281XX_PIN_SIM2_DET 191
297#define BCM281XX_PIN_SIM2_RESETN 192
298#define BCM281XX_PIN_SRI_C 193
299#define BCM281XX_PIN_SRI_D 194
300#define BCM281XX_PIN_SRI_E 195
301#define BCM281XX_PIN_SSP_EXTCLK 196
302#define BCM281XX_PIN_SSP0_CLK 197
303#define BCM281XX_PIN_SSP0_FS 198
304#define BCM281XX_PIN_SSP0_RXD 199
305#define BCM281XX_PIN_SSP0_TXD 200
306#define BCM281XX_PIN_SSP2_CLK 201
307#define BCM281XX_PIN_SSP2_FS_0 202
308#define BCM281XX_PIN_SSP2_FS_1 203
309#define BCM281XX_PIN_SSP2_FS_2 204
310#define BCM281XX_PIN_SSP2_FS_3 205
311#define BCM281XX_PIN_SSP2_RXD_0 206
312#define BCM281XX_PIN_SSP2_RXD_1 207
313#define BCM281XX_PIN_SSP2_TXD_0 208
314#define BCM281XX_PIN_SSP2_TXD_1 209
315#define BCM281XX_PIN_SSP3_CLK 210
316#define BCM281XX_PIN_SSP3_FS 211
317#define BCM281XX_PIN_SSP3_RXD 212
318#define BCM281XX_PIN_SSP3_TXD 213
319#define BCM281XX_PIN_SSP4_CLK 214
320#define BCM281XX_PIN_SSP4_FS 215
321#define BCM281XX_PIN_SSP4_RXD 216
322#define BCM281XX_PIN_SSP4_TXD 217
323#define BCM281XX_PIN_SSP5_CLK 218
324#define BCM281XX_PIN_SSP5_FS 219
325#define BCM281XX_PIN_SSP5_RXD 220
326#define BCM281XX_PIN_SSP5_TXD 221
327#define BCM281XX_PIN_SSP6_CLK 222
328#define BCM281XX_PIN_SSP6_FS 223
329#define BCM281XX_PIN_SSP6_RXD 224
330#define BCM281XX_PIN_SSP6_TXD 225
331#define BCM281XX_PIN_STAT_1 226
332#define BCM281XX_PIN_STAT_2 227
333#define BCM281XX_PIN_SYSCLKEN 228
334#define BCM281XX_PIN_TRACECLK 229
335#define BCM281XX_PIN_TRACEDT00 230
336#define BCM281XX_PIN_TRACEDT01 231
337#define BCM281XX_PIN_TRACEDT02 232
338#define BCM281XX_PIN_TRACEDT03 233
339#define BCM281XX_PIN_TRACEDT04 234
340#define BCM281XX_PIN_TRACEDT05 235
341#define BCM281XX_PIN_TRACEDT06 236
342#define BCM281XX_PIN_TRACEDT07 237
343#define BCM281XX_PIN_TRACEDT08 238
344#define BCM281XX_PIN_TRACEDT09 239
345#define BCM281XX_PIN_TRACEDT10 240
346#define BCM281XX_PIN_TRACEDT11 241
347#define BCM281XX_PIN_TRACEDT12 242
348#define BCM281XX_PIN_TRACEDT13 243
349#define BCM281XX_PIN_TRACEDT14 244
350#define BCM281XX_PIN_TRACEDT15 245
351#define BCM281XX_PIN_TXDATA3G0 246
352#define BCM281XX_PIN_TXPWRIND 247
353#define BCM281XX_PIN_UARTB1_UCTS 248
354#define BCM281XX_PIN_UARTB1_URTS 249
355#define BCM281XX_PIN_UARTB1_URXD 250
356#define BCM281XX_PIN_UARTB1_UTXD 251
357#define BCM281XX_PIN_UARTB2_URXD 252
358#define BCM281XX_PIN_UARTB2_UTXD 253
359#define BCM281XX_PIN_UARTB3_UCTS 254
360#define BCM281XX_PIN_UARTB3_URTS 255
361#define BCM281XX_PIN_UARTB3_URXD 256
362#define BCM281XX_PIN_UARTB3_UTXD 257
363#define BCM281XX_PIN_UARTB4_UCTS 258
364#define BCM281XX_PIN_UARTB4_URTS 259
365#define BCM281XX_PIN_UARTB4_URXD 260
366#define BCM281XX_PIN_UARTB4_UTXD 261
367#define BCM281XX_PIN_VC_CAM1_SCL 262
368#define BCM281XX_PIN_VC_CAM1_SDA 263
369#define BCM281XX_PIN_VC_CAM2_SCL 264
370#define BCM281XX_PIN_VC_CAM2_SDA 265
371#define BCM281XX_PIN_VC_CAM3_SCL 266
372#define BCM281XX_PIN_VC_CAM3_SDA 267
373
374#define BCM281XX_PIN_DESC(a, b, c) \
375 { .number = a, .name = b, .drv_data = &c##_pin }
376
377/*
378 * Pin description definition. The order here must be the same as defined in
379 * the PADCTRLREG block in the RDB, since the pin number is used as an index
380 * into this array.
381 */
382static const struct pinctrl_pin_desc bcm281xx_pinctrl_pins[] = {
383 BCM281XX_PIN_DESC(BCM281XX_PIN_ADCSYNC, "adcsync", std),
384 BCM281XX_PIN_DESC(BCM281XX_PIN_BAT_RM, "bat_rm", std),
385 BCM281XX_PIN_DESC(BCM281XX_PIN_BSC1_SCL, "bsc1_scl", i2c),
386 BCM281XX_PIN_DESC(BCM281XX_PIN_BSC1_SDA, "bsc1_sda", i2c),
387 BCM281XX_PIN_DESC(BCM281XX_PIN_BSC2_SCL, "bsc2_scl", i2c),
388 BCM281XX_PIN_DESC(BCM281XX_PIN_BSC2_SDA, "bsc2_sda", i2c),
389 BCM281XX_PIN_DESC(BCM281XX_PIN_CLASSGPWR, "classgpwr", std),
390 BCM281XX_PIN_DESC(BCM281XX_PIN_CLK_CX8, "clk_cx8", std),
391 BCM281XX_PIN_DESC(BCM281XX_PIN_CLKOUT_0, "clkout_0", std),
392 BCM281XX_PIN_DESC(BCM281XX_PIN_CLKOUT_1, "clkout_1", std),
393 BCM281XX_PIN_DESC(BCM281XX_PIN_CLKOUT_2, "clkout_2", std),
394 BCM281XX_PIN_DESC(BCM281XX_PIN_CLKOUT_3, "clkout_3", std),
395 BCM281XX_PIN_DESC(BCM281XX_PIN_CLKREQ_IN_0, "clkreq_in_0", std),
396 BCM281XX_PIN_DESC(BCM281XX_PIN_CLKREQ_IN_1, "clkreq_in_1", std),
397 BCM281XX_PIN_DESC(BCM281XX_PIN_CWS_SYS_REQ1, "cws_sys_req1", std),
398 BCM281XX_PIN_DESC(BCM281XX_PIN_CWS_SYS_REQ2, "cws_sys_req2", std),
399 BCM281XX_PIN_DESC(BCM281XX_PIN_CWS_SYS_REQ3, "cws_sys_req3", std),
400 BCM281XX_PIN_DESC(BCM281XX_PIN_DIGMIC1_CLK, "digmic1_clk", std),
401 BCM281XX_PIN_DESC(BCM281XX_PIN_DIGMIC1_DQ, "digmic1_dq", std),
402 BCM281XX_PIN_DESC(BCM281XX_PIN_DIGMIC2_CLK, "digmic2_clk", std),
403 BCM281XX_PIN_DESC(BCM281XX_PIN_DIGMIC2_DQ, "digmic2_dq", std),
404 BCM281XX_PIN_DESC(BCM281XX_PIN_GPEN13, "gpen13", std),
405 BCM281XX_PIN_DESC(BCM281XX_PIN_GPEN14, "gpen14", std),
406 BCM281XX_PIN_DESC(BCM281XX_PIN_GPEN15, "gpen15", std),
407 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO00, "gpio00", std),
408 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO01, "gpio01", std),
409 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO02, "gpio02", std),
410 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO03, "gpio03", std),
411 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO04, "gpio04", std),
412 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO05, "gpio05", std),
413 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO06, "gpio06", std),
414 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO07, "gpio07", std),
415 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO08, "gpio08", std),
416 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO09, "gpio09", std),
417 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO10, "gpio10", std),
418 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO11, "gpio11", std),
419 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO12, "gpio12", std),
420 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO13, "gpio13", std),
421 BCM281XX_PIN_DESC(BCM281XX_PIN_GPIO14, "gpio14", std),
422 BCM281XX_PIN_DESC(BCM281XX_PIN_GPS_PABLANK, "gps_pablank", std),
423 BCM281XX_PIN_DESC(BCM281XX_PIN_GPS_TMARK, "gps_tmark", std),
424 BCM281XX_PIN_DESC(BCM281XX_PIN_HDMI_SCL, "hdmi_scl", hdmi),
425 BCM281XX_PIN_DESC(BCM281XX_PIN_HDMI_SDA, "hdmi_sda", hdmi),
426 BCM281XX_PIN_DESC(BCM281XX_PIN_IC_DM, "ic_dm", std),
427 BCM281XX_PIN_DESC(BCM281XX_PIN_IC_DP, "ic_dp", std),
428 BCM281XX_PIN_DESC(BCM281XX_PIN_KP_COL_IP_0, "kp_col_ip_0", std),
429 BCM281XX_PIN_DESC(BCM281XX_PIN_KP_COL_IP_1, "kp_col_ip_1", std),
430 BCM281XX_PIN_DESC(BCM281XX_PIN_KP_COL_IP_2, "kp_col_ip_2", std),
431 BCM281XX_PIN_DESC(BCM281XX_PIN_KP_COL_IP_3, "kp_col_ip_3", std),
432 BCM281XX_PIN_DESC(BCM281XX_PIN_KP_ROW_OP_0, "kp_row_op_0", std),
433 BCM281XX_PIN_DESC(BCM281XX_PIN_KP_ROW_OP_1, "kp_row_op_1", std),
434 BCM281XX_PIN_DESC(BCM281XX_PIN_KP_ROW_OP_2, "kp_row_op_2", std),
435 BCM281XX_PIN_DESC(BCM281XX_PIN_KP_ROW_OP_3, "kp_row_op_3", std),
436 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_0, "lcd_b_0", std),
437 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_1, "lcd_b_1", std),
438 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_2, "lcd_b_2", std),
439 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_3, "lcd_b_3", std),
440 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_4, "lcd_b_4", std),
441 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_5, "lcd_b_5", std),
442 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_6, "lcd_b_6", std),
443 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_B_7, "lcd_b_7", std),
444 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_0, "lcd_g_0", std),
445 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_1, "lcd_g_1", std),
446 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_2, "lcd_g_2", std),
447 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_3, "lcd_g_3", std),
448 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_4, "lcd_g_4", std),
449 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_5, "lcd_g_5", std),
450 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_6, "lcd_g_6", std),
451 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_G_7, "lcd_g_7", std),
452 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_HSYNC, "lcd_hsync", std),
453 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_OE, "lcd_oe", std),
454 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_PCLK, "lcd_pclk", std),
455 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_0, "lcd_r_0", std),
456 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_1, "lcd_r_1", std),
457 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_2, "lcd_r_2", std),
458 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_3, "lcd_r_3", std),
459 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_4, "lcd_r_4", std),
460 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_5, "lcd_r_5", std),
461 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_6, "lcd_r_6", std),
462 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_R_7, "lcd_r_7", std),
463 BCM281XX_PIN_DESC(BCM281XX_PIN_LCD_VSYNC, "lcd_vsync", std),
464 BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO0, "mdmgpio0", std),
465 BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO1, "mdmgpio1", std),
466 BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO2, "mdmgpio2", std),
467 BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO3, "mdmgpio3", std),
468 BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO4, "mdmgpio4", std),
469 BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO5, "mdmgpio5", std),
470 BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO6, "mdmgpio6", std),
471 BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO7, "mdmgpio7", std),
472 BCM281XX_PIN_DESC(BCM281XX_PIN_MDMGPIO8, "mdmgpio8", std),
473 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_0, "mphi_data_0", std),
474 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_1, "mphi_data_1", std),
475 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_2, "mphi_data_2", std),
476 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_3, "mphi_data_3", std),
477 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_4, "mphi_data_4", std),
478 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_5, "mphi_data_5", std),
479 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_6, "mphi_data_6", std),
480 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_7, "mphi_data_7", std),
481 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_8, "mphi_data_8", std),
482 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_9, "mphi_data_9", std),
483 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_10, "mphi_data_10", std),
484 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_11, "mphi_data_11", std),
485 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_12, "mphi_data_12", std),
486 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_13, "mphi_data_13", std),
487 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_14, "mphi_data_14", std),
488 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_DATA_15, "mphi_data_15", std),
489 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HA0, "mphi_ha0", std),
490 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HAT0, "mphi_hat0", std),
491 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HAT1, "mphi_hat1", std),
492 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HCE0_N, "mphi_hce0_n", std),
493 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HCE1_N, "mphi_hce1_n", std),
494 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HRD_N, "mphi_hrd_n", std),
495 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_HWR_N, "mphi_hwr_n", std),
496 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_RUN0, "mphi_run0", std),
497 BCM281XX_PIN_DESC(BCM281XX_PIN_MPHI_RUN1, "mphi_run1", std),
498 BCM281XX_PIN_DESC(BCM281XX_PIN_MTX_SCAN_CLK, "mtx_scan_clk", std),
499 BCM281XX_PIN_DESC(BCM281XX_PIN_MTX_SCAN_DATA, "mtx_scan_data", std),
500 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_0, "nand_ad_0", std),
501 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_1, "nand_ad_1", std),
502 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_2, "nand_ad_2", std),
503 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_3, "nand_ad_3", std),
504 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_4, "nand_ad_4", std),
505 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_5, "nand_ad_5", std),
506 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_6, "nand_ad_6", std),
507 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_AD_7, "nand_ad_7", std),
508 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_ALE, "nand_ale", std),
509 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_CEN_0, "nand_cen_0", std),
510 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_CEN_1, "nand_cen_1", std),
511 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_CLE, "nand_cle", std),
512 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_OEN, "nand_oen", std),
513 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_RDY_0, "nand_rdy_0", std),
514 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_RDY_1, "nand_rdy_1", std),
515 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_WEN, "nand_wen", std),
516 BCM281XX_PIN_DESC(BCM281XX_PIN_NAND_WP, "nand_wp", std),
517 BCM281XX_PIN_DESC(BCM281XX_PIN_PC1, "pc1", std),
518 BCM281XX_PIN_DESC(BCM281XX_PIN_PC2, "pc2", std),
519 BCM281XX_PIN_DESC(BCM281XX_PIN_PMU_INT, "pmu_int", std),
520 BCM281XX_PIN_DESC(BCM281XX_PIN_PMU_SCL, "pmu_scl", i2c),
521 BCM281XX_PIN_DESC(BCM281XX_PIN_PMU_SDA, "pmu_sda", i2c),
522 BCM281XX_PIN_DESC(BCM281XX_PIN_RFST2G_MTSLOTEN3G, "rfst2g_mtsloten3g",
523 std),
524 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_RX_CTL, "rgmii_0_rx_ctl", std),
525 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_RXC, "rgmii_0_rxc", std),
526 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_RXD_0, "rgmii_0_rxd_0", std),
527 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_RXD_1, "rgmii_0_rxd_1", std),
528 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_RXD_2, "rgmii_0_rxd_2", std),
529 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_RXD_3, "rgmii_0_rxd_3", std),
530 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_TX_CTL, "rgmii_0_tx_ctl", std),
531 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_TXC, "rgmii_0_txc", std),
532 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_TXD_0, "rgmii_0_txd_0", std),
533 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_TXD_1, "rgmii_0_txd_1", std),
534 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_TXD_2, "rgmii_0_txd_2", std),
535 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_0_TXD_3, "rgmii_0_txd_3", std),
536 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_RX_CTL, "rgmii_1_rx_ctl", std),
537 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_RXC, "rgmii_1_rxc", std),
538 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_RXD_0, "rgmii_1_rxd_0", std),
539 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_RXD_1, "rgmii_1_rxd_1", std),
540 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_RXD_2, "rgmii_1_rxd_2", std),
541 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_RXD_3, "rgmii_1_rxd_3", std),
542 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_TX_CTL, "rgmii_1_tx_ctl", std),
543 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_TXC, "rgmii_1_txc", std),
544 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_TXD_0, "rgmii_1_txd_0", std),
545 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_TXD_1, "rgmii_1_txd_1", std),
546 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_TXD_2, "rgmii_1_txd_2", std),
547 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_1_TXD_3, "rgmii_1_txd_3", std),
548 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_GPIO_0, "rgmii_gpio_0", std),
549 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_GPIO_1, "rgmii_gpio_1", std),
550 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_GPIO_2, "rgmii_gpio_2", std),
551 BCM281XX_PIN_DESC(BCM281XX_PIN_RGMII_GPIO_3, "rgmii_gpio_3", std),
552 BCM281XX_PIN_DESC(BCM281XX_PIN_RTXDATA2G_TXDATA3G1,
553 "rtxdata2g_txdata3g1", std),
554 BCM281XX_PIN_DESC(BCM281XX_PIN_RTXEN2G_TXDATA3G2, "rtxen2g_txdata3g2",
555 std),
556 BCM281XX_PIN_DESC(BCM281XX_PIN_RXDATA3G0, "rxdata3g0", std),
557 BCM281XX_PIN_DESC(BCM281XX_PIN_RXDATA3G1, "rxdata3g1", std),
558 BCM281XX_PIN_DESC(BCM281XX_PIN_RXDATA3G2, "rxdata3g2", std),
559 BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO1_CLK, "sdio1_clk", std),
560 BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO1_CMD, "sdio1_cmd", std),
561 BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO1_DATA_0, "sdio1_data_0", std),
562 BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO1_DATA_1, "sdio1_data_1", std),
563 BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO1_DATA_2, "sdio1_data_2", std),
564 BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO1_DATA_3, "sdio1_data_3", std),
565 BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO4_CLK, "sdio4_clk", std),
566 BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO4_CMD, "sdio4_cmd", std),
567 BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO4_DATA_0, "sdio4_data_0", std),
568 BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO4_DATA_1, "sdio4_data_1", std),
569 BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO4_DATA_2, "sdio4_data_2", std),
570 BCM281XX_PIN_DESC(BCM281XX_PIN_SDIO4_DATA_3, "sdio4_data_3", std),
571 BCM281XX_PIN_DESC(BCM281XX_PIN_SIM_CLK, "sim_clk", std),
572 BCM281XX_PIN_DESC(BCM281XX_PIN_SIM_DATA, "sim_data", std),
573 BCM281XX_PIN_DESC(BCM281XX_PIN_SIM_DET, "sim_det", std),
574 BCM281XX_PIN_DESC(BCM281XX_PIN_SIM_RESETN, "sim_resetn", std),
575 BCM281XX_PIN_DESC(BCM281XX_PIN_SIM2_CLK, "sim2_clk", std),
576 BCM281XX_PIN_DESC(BCM281XX_PIN_SIM2_DATA, "sim2_data", std),
577 BCM281XX_PIN_DESC(BCM281XX_PIN_SIM2_DET, "sim2_det", std),
578 BCM281XX_PIN_DESC(BCM281XX_PIN_SIM2_RESETN, "sim2_resetn", std),
579 BCM281XX_PIN_DESC(BCM281XX_PIN_SRI_C, "sri_c", std),
580 BCM281XX_PIN_DESC(BCM281XX_PIN_SRI_D, "sri_d", std),
581 BCM281XX_PIN_DESC(BCM281XX_PIN_SRI_E, "sri_e", std),
582 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP_EXTCLK, "ssp_extclk", std),
583 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP0_CLK, "ssp0_clk", std),
584 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP0_FS, "ssp0_fs", std),
585 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP0_RXD, "ssp0_rxd", std),
586 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP0_TXD, "ssp0_txd", std),
587 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_CLK, "ssp2_clk", std),
588 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_FS_0, "ssp2_fs_0", std),
589 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_FS_1, "ssp2_fs_1", std),
590 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_FS_2, "ssp2_fs_2", std),
591 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_FS_3, "ssp2_fs_3", std),
592 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_RXD_0, "ssp2_rxd_0", std),
593 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_RXD_1, "ssp2_rxd_1", std),
594 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_TXD_0, "ssp2_txd_0", std),
595 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP2_TXD_1, "ssp2_txd_1", std),
596 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP3_CLK, "ssp3_clk", std),
597 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP3_FS, "ssp3_fs", std),
598 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP3_RXD, "ssp3_rxd", std),
599 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP3_TXD, "ssp3_txd", std),
600 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP4_CLK, "ssp4_clk", std),
601 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP4_FS, "ssp4_fs", std),
602 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP4_RXD, "ssp4_rxd", std),
603 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP4_TXD, "ssp4_txd", std),
604 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP5_CLK, "ssp5_clk", std),
605 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP5_FS, "ssp5_fs", std),
606 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP5_RXD, "ssp5_rxd", std),
607 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP5_TXD, "ssp5_txd", std),
608 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP6_CLK, "ssp6_clk", std),
609 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP6_FS, "ssp6_fs", std),
610 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP6_RXD, "ssp6_rxd", std),
611 BCM281XX_PIN_DESC(BCM281XX_PIN_SSP6_TXD, "ssp6_txd", std),
612 BCM281XX_PIN_DESC(BCM281XX_PIN_STAT_1, "stat_1", std),
613 BCM281XX_PIN_DESC(BCM281XX_PIN_STAT_2, "stat_2", std),
614 BCM281XX_PIN_DESC(BCM281XX_PIN_SYSCLKEN, "sysclken", std),
615 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACECLK, "traceclk", std),
616 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT00, "tracedt00", std),
617 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT01, "tracedt01", std),
618 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT02, "tracedt02", std),
619 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT03, "tracedt03", std),
620 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT04, "tracedt04", std),
621 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT05, "tracedt05", std),
622 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT06, "tracedt06", std),
623 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT07, "tracedt07", std),
624 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT08, "tracedt08", std),
625 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT09, "tracedt09", std),
626 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT10, "tracedt10", std),
627 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT11, "tracedt11", std),
628 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT12, "tracedt12", std),
629 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT13, "tracedt13", std),
630 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT14, "tracedt14", std),
631 BCM281XX_PIN_DESC(BCM281XX_PIN_TRACEDT15, "tracedt15", std),
632 BCM281XX_PIN_DESC(BCM281XX_PIN_TXDATA3G0, "txdata3g0", std),
633 BCM281XX_PIN_DESC(BCM281XX_PIN_TXPWRIND, "txpwrind", std),
634 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB1_UCTS, "uartb1_ucts", std),
635 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB1_URTS, "uartb1_urts", std),
636 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB1_URXD, "uartb1_urxd", std),
637 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB1_UTXD, "uartb1_utxd", std),
638 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB2_URXD, "uartb2_urxd", std),
639 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB2_UTXD, "uartb2_utxd", std),
640 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB3_UCTS, "uartb3_ucts", std),
641 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB3_URTS, "uartb3_urts", std),
642 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB3_URXD, "uartb3_urxd", std),
643 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB3_UTXD, "uartb3_utxd", std),
644 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB4_UCTS, "uartb4_ucts", std),
645 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB4_URTS, "uartb4_urts", std),
646 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB4_URXD, "uartb4_urxd", std),
647 BCM281XX_PIN_DESC(BCM281XX_PIN_UARTB4_UTXD, "uartb4_utxd", std),
648 BCM281XX_PIN_DESC(BCM281XX_PIN_VC_CAM1_SCL, "vc_cam1_scl", i2c),
649 BCM281XX_PIN_DESC(BCM281XX_PIN_VC_CAM1_SDA, "vc_cam1_sda", i2c),
650 BCM281XX_PIN_DESC(BCM281XX_PIN_VC_CAM2_SCL, "vc_cam2_scl", i2c),
651 BCM281XX_PIN_DESC(BCM281XX_PIN_VC_CAM2_SDA, "vc_cam2_sda", i2c),
652 BCM281XX_PIN_DESC(BCM281XX_PIN_VC_CAM3_SCL, "vc_cam3_scl", i2c),
653 BCM281XX_PIN_DESC(BCM281XX_PIN_VC_CAM3_SDA, "vc_cam3_sda", i2c),
654};
655
656static const char * const bcm281xx_alt_groups[] = {
657 "adcsync",
658 "bat_rm",
659 "bsc1_scl",
660 "bsc1_sda",
661 "bsc2_scl",
662 "bsc2_sda",
663 "classgpwr",
664 "clk_cx8",
665 "clkout_0",
666 "clkout_1",
667 "clkout_2",
668 "clkout_3",
669 "clkreq_in_0",
670 "clkreq_in_1",
671 "cws_sys_req1",
672 "cws_sys_req2",
673 "cws_sys_req3",
674 "digmic1_clk",
675 "digmic1_dq",
676 "digmic2_clk",
677 "digmic2_dq",
678 "gpen13",
679 "gpen14",
680 "gpen15",
681 "gpio00",
682 "gpio01",
683 "gpio02",
684 "gpio03",
685 "gpio04",
686 "gpio05",
687 "gpio06",
688 "gpio07",
689 "gpio08",
690 "gpio09",
691 "gpio10",
692 "gpio11",
693 "gpio12",
694 "gpio13",
695 "gpio14",
696 "gps_pablank",
697 "gps_tmark",
698 "hdmi_scl",
699 "hdmi_sda",
700 "ic_dm",
701 "ic_dp",
702 "kp_col_ip_0",
703 "kp_col_ip_1",
704 "kp_col_ip_2",
705 "kp_col_ip_3",
706 "kp_row_op_0",
707 "kp_row_op_1",
708 "kp_row_op_2",
709 "kp_row_op_3",
710 "lcd_b_0",
711 "lcd_b_1",
712 "lcd_b_2",
713 "lcd_b_3",
714 "lcd_b_4",
715 "lcd_b_5",
716 "lcd_b_6",
717 "lcd_b_7",
718 "lcd_g_0",
719 "lcd_g_1",
720 "lcd_g_2",
721 "lcd_g_3",
722 "lcd_g_4",
723 "lcd_g_5",
724 "lcd_g_6",
725 "lcd_g_7",
726 "lcd_hsync",
727 "lcd_oe",
728 "lcd_pclk",
729 "lcd_r_0",
730 "lcd_r_1",
731 "lcd_r_2",
732 "lcd_r_3",
733 "lcd_r_4",
734 "lcd_r_5",
735 "lcd_r_6",
736 "lcd_r_7",
737 "lcd_vsync",
738 "mdmgpio0",
739 "mdmgpio1",
740 "mdmgpio2",
741 "mdmgpio3",
742 "mdmgpio4",
743 "mdmgpio5",
744 "mdmgpio6",
745 "mdmgpio7",
746 "mdmgpio8",
747 "mphi_data_0",
748 "mphi_data_1",
749 "mphi_data_2",
750 "mphi_data_3",
751 "mphi_data_4",
752 "mphi_data_5",
753 "mphi_data_6",
754 "mphi_data_7",
755 "mphi_data_8",
756 "mphi_data_9",
757 "mphi_data_10",
758 "mphi_data_11",
759 "mphi_data_12",
760 "mphi_data_13",
761 "mphi_data_14",
762 "mphi_data_15",
763 "mphi_ha0",
764 "mphi_hat0",
765 "mphi_hat1",
766 "mphi_hce0_n",
767 "mphi_hce1_n",
768 "mphi_hrd_n",
769 "mphi_hwr_n",
770 "mphi_run0",
771 "mphi_run1",
772 "mtx_scan_clk",
773 "mtx_scan_data",
774 "nand_ad_0",
775 "nand_ad_1",
776 "nand_ad_2",
777 "nand_ad_3",
778 "nand_ad_4",
779 "nand_ad_5",
780 "nand_ad_6",
781 "nand_ad_7",
782 "nand_ale",
783 "nand_cen_0",
784 "nand_cen_1",
785 "nand_cle",
786 "nand_oen",
787 "nand_rdy_0",
788 "nand_rdy_1",
789 "nand_wen",
790 "nand_wp",
791 "pc1",
792 "pc2",
793 "pmu_int",
794 "pmu_scl",
795 "pmu_sda",
796 "rfst2g_mtsloten3g",
797 "rgmii_0_rx_ctl",
798 "rgmii_0_rxc",
799 "rgmii_0_rxd_0",
800 "rgmii_0_rxd_1",
801 "rgmii_0_rxd_2",
802 "rgmii_0_rxd_3",
803 "rgmii_0_tx_ctl",
804 "rgmii_0_txc",
805 "rgmii_0_txd_0",
806 "rgmii_0_txd_1",
807 "rgmii_0_txd_2",
808 "rgmii_0_txd_3",
809 "rgmii_1_rx_ctl",
810 "rgmii_1_rxc",
811 "rgmii_1_rxd_0",
812 "rgmii_1_rxd_1",
813 "rgmii_1_rxd_2",
814 "rgmii_1_rxd_3",
815 "rgmii_1_tx_ctl",
816 "rgmii_1_txc",
817 "rgmii_1_txd_0",
818 "rgmii_1_txd_1",
819 "rgmii_1_txd_2",
820 "rgmii_1_txd_3",
821 "rgmii_gpio_0",
822 "rgmii_gpio_1",
823 "rgmii_gpio_2",
824 "rgmii_gpio_3",
825 "rtxdata2g_txdata3g1",
826 "rtxen2g_txdata3g2",
827 "rxdata3g0",
828 "rxdata3g1",
829 "rxdata3g2",
830 "sdio1_clk",
831 "sdio1_cmd",
832 "sdio1_data_0",
833 "sdio1_data_1",
834 "sdio1_data_2",
835 "sdio1_data_3",
836 "sdio4_clk",
837 "sdio4_cmd",
838 "sdio4_data_0",
839 "sdio4_data_1",
840 "sdio4_data_2",
841 "sdio4_data_3",
842 "sim_clk",
843 "sim_data",
844 "sim_det",
845 "sim_resetn",
846 "sim2_clk",
847 "sim2_data",
848 "sim2_det",
849 "sim2_resetn",
850 "sri_c",
851 "sri_d",
852 "sri_e",
853 "ssp_extclk",
854 "ssp0_clk",
855 "ssp0_fs",
856 "ssp0_rxd",
857 "ssp0_txd",
858 "ssp2_clk",
859 "ssp2_fs_0",
860 "ssp2_fs_1",
861 "ssp2_fs_2",
862 "ssp2_fs_3",
863 "ssp2_rxd_0",
864 "ssp2_rxd_1",
865 "ssp2_txd_0",
866 "ssp2_txd_1",
867 "ssp3_clk",
868 "ssp3_fs",
869 "ssp3_rxd",
870 "ssp3_txd",
871 "ssp4_clk",
872 "ssp4_fs",
873 "ssp4_rxd",
874 "ssp4_txd",
875 "ssp5_clk",
876 "ssp5_fs",
877 "ssp5_rxd",
878 "ssp5_txd",
879 "ssp6_clk",
880 "ssp6_fs",
881 "ssp6_rxd",
882 "ssp6_txd",
883 "stat_1",
884 "stat_2",
885 "sysclken",
886 "traceclk",
887 "tracedt00",
888 "tracedt01",
889 "tracedt02",
890 "tracedt03",
891 "tracedt04",
892 "tracedt05",
893 "tracedt06",
894 "tracedt07",
895 "tracedt08",
896 "tracedt09",
897 "tracedt10",
898 "tracedt11",
899 "tracedt12",
900 "tracedt13",
901 "tracedt14",
902 "tracedt15",
903 "txdata3g0",
904 "txpwrind",
905 "uartb1_ucts",
906 "uartb1_urts",
907 "uartb1_urxd",
908 "uartb1_utxd",
909 "uartb2_urxd",
910 "uartb2_utxd",
911 "uartb3_ucts",
912 "uartb3_urts",
913 "uartb3_urxd",
914 "uartb3_utxd",
915 "uartb4_ucts",
916 "uartb4_urts",
917 "uartb4_urxd",
918 "uartb4_utxd",
919 "vc_cam1_scl",
920 "vc_cam1_sda",
921 "vc_cam2_scl",
922 "vc_cam2_sda",
923 "vc_cam3_scl",
924 "vc_cam3_sda",
925};
926
927/* Every pin can implement all ALT1-ALT4 functions */
928#define BCM281XX_PIN_FUNCTION(fcn_name) \
929{ \
930 .name = #fcn_name, \
931 .groups = bcm281xx_alt_groups, \
932 .ngroups = ARRAY_SIZE(bcm281xx_alt_groups), \
933}
934
935static const struct bcm281xx_pin_function bcm281xx_functions[] = {
936 BCM281XX_PIN_FUNCTION(alt1),
937 BCM281XX_PIN_FUNCTION(alt2),
938 BCM281XX_PIN_FUNCTION(alt3),
939 BCM281XX_PIN_FUNCTION(alt4),
940};
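/*
 * Note (illustrative, not part of the original source): every group in
 * bcm281xx_alt_groups contains exactly one pin (see
 * bcm281xx_pinctrl_get_group_pins() below), and all four functions share
 * the same group list, so any pin may be muxed to any of alt1..alt4.
 */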
941
942static struct bcm281xx_pinctrl_data bcm281xx_pinctrl = {
943 .pins = bcm281xx_pinctrl_pins,
944 .npins = ARRAY_SIZE(bcm281xx_pinctrl_pins),
945 .functions = bcm281xx_functions,
946 .nfunctions = ARRAY_SIZE(bcm281xx_functions),
947};
948
949static inline enum bcm281xx_pin_type pin_type_get(struct pinctrl_dev *pctldev,
950 unsigned pin)
951{
952 struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
953
954 if (pin >= pdata->npins)
955 return BCM281XX_PIN_TYPE_UNKNOWN;
956
957 return *(enum bcm281xx_pin_type *)(pdata->pins[pin].drv_data);
958}
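/*
 * Illustrative note (not part of the original source): the drv_data of each
 * entry in bcm281xx_pinctrl_pins points at an enum bcm281xx_pin_type value,
 * which pin_type_get() simply dereferences.  For example:
 *
 *	pin_type_get(pctldev, BCM281XX_PIN_PMU_SCL)  returns BCM281XX_PIN_TYPE_I2C
 *	pin_type_get(pctldev, BCM281XX_PIN_NAND_ALE) returns BCM281XX_PIN_TYPE_STD
 */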
959
960#define BCM281XX_PIN_SHIFT(type, param) \
961 (BCM281XX_ ## type ## _PIN_REG_ ## param ## _SHIFT)
962
963#define BCM281XX_PIN_MASK(type, param) \
964 (BCM281XX_ ## type ## _PIN_REG_ ## param ## _MASK)
965
966/*
 967 * This helper builds up the value and mask used to write to a pin register;
 968 * it does not actually write the register itself.
969 */
970static inline void bcm281xx_pin_update(u32 *reg_val, u32 *reg_mask,
971 u32 param_val, u32 param_shift,
972 u32 param_mask)
973{
974 *reg_val &= ~param_mask;
975 *reg_val |= (param_val << param_shift) & param_mask;
976 *reg_mask |= param_mask;
977}
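/*
 * Illustrative sketch (not part of the original driver): callers invoke
 * bcm281xx_pin_update() once per configuration parameter so that a single
 * value/mask pair accumulates across parameters, e.g.:
 *
 *	u32 val = 0, mask = 0;
 *
 *	bcm281xx_pin_update(&val, &mask, 1,
 *			    BCM281XX_PIN_SHIFT(STD, PULL_UP),
 *			    BCM281XX_PIN_MASK(STD, PULL_UP));
 *	bcm281xx_pin_update(&val, &mask, 0,
 *			    BCM281XX_PIN_SHIFT(STD, PULL_DN),
 *			    BCM281XX_PIN_MASK(STD, PULL_DN));
 *
 * The accumulated pair is later written in one go with
 * regmap_update_bits(pdata->regmap, offset, mask, val), as done in
 * bcm281xx_pinctrl_pin_config_set() below.
 */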
978
979static struct regmap_config bcm281xx_pinctrl_regmap_config = {
980 .reg_bits = 32,
981 .reg_stride = 4,
982 .val_bits = 32,
983 .max_register = BCM281XX_PIN_VC_CAM3_SDA,
984};
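/*
 * Note (illustrative, not part of the original source): with .val_bits = 32
 * and .reg_stride = 4 there is one 32-bit control register per pin, so the
 * register offset used throughout this driver is simply 4 * pin number
 * (e.g. pin 24 lives at byte offset 0x60).
 */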
985
986static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
987{
988 struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
989
990 return pdata->npins;
991}
992
993static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
994 unsigned group)
995{
996 struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
997
998 return pdata->pins[group].name;
999}
1000
1001static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
1002 unsigned group,
1003 const unsigned **pins,
1004 unsigned *num_pins)
1005{
1006 struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1007
1008 *pins = &pdata->pins[group].number;
1009 *num_pins = 1;
1010
1011 return 0;
1012}
1013
1014static void bcm281xx_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
1015 struct seq_file *s,
1016 unsigned offset)
1017{
1018 seq_printf(s, " %s", dev_name(pctldev->dev));
1019}
1020
1021static struct pinctrl_ops bcm281xx_pinctrl_ops = {
1022 .get_groups_count = bcm281xx_pinctrl_get_groups_count,
1023 .get_group_name = bcm281xx_pinctrl_get_group_name,
1024 .get_group_pins = bcm281xx_pinctrl_get_group_pins,
1025 .pin_dbg_show = bcm281xx_pinctrl_pin_dbg_show,
1026 .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
1027 .dt_free_map = pinctrl_utils_dt_free_map,
1028};
1029
1030static int bcm281xx_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev)
1031{
1032 struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1033
1034 return pdata->nfunctions;
1035}
1036
1037static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev,
1038 unsigned function)
1039{
1040 struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1041
1042 return pdata->functions[function].name;
1043}
1044
1045static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev,
1046 unsigned function,
1047 const char * const **groups,
1048 unsigned * const num_groups)
1049{
1050 struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1051
1052 *groups = pdata->functions[function].groups;
1053 *num_groups = pdata->functions[function].ngroups;
1054
1055 return 0;
1056}
1057
1058static int bcm281xx_pinmux_enable(struct pinctrl_dev *pctldev,
1059 unsigned function,
1060 unsigned group)
1061{
1062 struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1063 const struct bcm281xx_pin_function *f = &pdata->functions[function];
1064 u32 offset = 4 * pdata->pins[group].number;
1065 int rc = 0;
1066
1067 dev_dbg(pctldev->dev,
1068 "%s(): Enable function %s (%d) of pin %s (%d) @offset 0x%x.\n",
1069 __func__, f->name, function, pdata->pins[group].name,
1070 pdata->pins[group].number, offset);
1071
1072 rc = regmap_update_bits(pdata->regmap, offset,
1073 BCM281XX_PIN_REG_F_SEL_MASK,
1074 function << BCM281XX_PIN_REG_F_SEL_SHIFT);
1075 if (rc)
1076 dev_err(pctldev->dev,
1077 "Error updating register for pin %s (%d).\n",
1078 pdata->pins[group].name, pdata->pins[group].number);
1079
1080 return rc;
1081}
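/*
 * Worked example (illustrative, not part of the original code): selecting
 * function "alt2" (index 1) for a group whose pin number is N computes
 * offset = 4 * N and issues
 *
 *	regmap_update_bits(pdata->regmap, 4 * N,
 *			   BCM281XX_PIN_REG_F_SEL_MASK,
 *			   1 << BCM281XX_PIN_REG_F_SEL_SHIFT);
 *
 * so only the function-select field of the pin register changes; all other
 * configuration bits are preserved.
 */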
1082
1083static struct pinmux_ops bcm281xx_pinctrl_pinmux_ops = {
1084 .get_functions_count = bcm281xx_pinctrl_get_fcns_count,
1085 .get_function_name = bcm281xx_pinctrl_get_fcn_name,
1086 .get_function_groups = bcm281xx_pinctrl_get_fcn_groups,
1087 .enable = bcm281xx_pinmux_enable,
1088};
1089
1090static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
1091 unsigned pin,
1092 unsigned long *config)
1093{
1094 return -ENOTSUPP;
1095}
1096
1097
1098/* Goes through the configs and updates the register val/mask */
1099static int bcm281xx_std_pin_update(struct pinctrl_dev *pctldev,
1100 unsigned pin,
1101 unsigned long *configs,
1102 unsigned num_configs,
1103 u32 *val,
1104 u32 *mask)
1105{
1106 struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1107 int i;
1108 enum pin_config_param param;
1109 u16 arg;
1110
1111 for (i = 0; i < num_configs; i++) {
1112 param = pinconf_to_config_param(configs[i]);
1113 arg = pinconf_to_config_argument(configs[i]);
1114
1115 switch (param) {
1116 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
1117 arg = (arg >= 1 ? 1 : 0);
1118 bcm281xx_pin_update(val, mask, arg,
1119 BCM281XX_PIN_SHIFT(STD, HYST),
1120 BCM281XX_PIN_MASK(STD, HYST));
1121 break;
1122 /*
1123 * The pin bias can only be one of pull-up, pull-down, or
1124 * disable. The user does not need to specify a value for the
1125 * property, and the default value from pinconf-generic is
1126 * ignored.
1127 */
1128 case PIN_CONFIG_BIAS_DISABLE:
1129 bcm281xx_pin_update(val, mask, 0,
1130 BCM281XX_PIN_SHIFT(STD, PULL_UP),
1131 BCM281XX_PIN_MASK(STD, PULL_UP));
1132 bcm281xx_pin_update(val, mask, 0,
1133 BCM281XX_PIN_SHIFT(STD, PULL_DN),
1134 BCM281XX_PIN_MASK(STD, PULL_DN));
1135 break;
1136
1137 case PIN_CONFIG_BIAS_PULL_UP:
1138 bcm281xx_pin_update(val, mask, 1,
1139 BCM281XX_PIN_SHIFT(STD, PULL_UP),
1140 BCM281XX_PIN_MASK(STD, PULL_UP));
1141 bcm281xx_pin_update(val, mask, 0,
1142 BCM281XX_PIN_SHIFT(STD, PULL_DN),
1143 BCM281XX_PIN_MASK(STD, PULL_DN));
1144 break;
1145
1146 case PIN_CONFIG_BIAS_PULL_DOWN:
1147 bcm281xx_pin_update(val, mask, 0,
1148 BCM281XX_PIN_SHIFT(STD, PULL_UP),
1149 BCM281XX_PIN_MASK(STD, PULL_UP));
1150 bcm281xx_pin_update(val, mask, 1,
1151 BCM281XX_PIN_SHIFT(STD, PULL_DN),
1152 BCM281XX_PIN_MASK(STD, PULL_DN));
1153 break;
1154
1155 case PIN_CONFIG_SLEW_RATE:
1156 arg = (arg >= 1 ? 1 : 0);
1157 bcm281xx_pin_update(val, mask, arg,
1158 BCM281XX_PIN_SHIFT(STD, SLEW),
1159 BCM281XX_PIN_MASK(STD, SLEW));
1160 break;
1161
1162 case PIN_CONFIG_INPUT_ENABLE:
1163			/* inverted, since the register bit is for input _disable_ */
1164 arg = (arg >= 1 ? 0 : 1);
1165 bcm281xx_pin_update(val, mask, arg,
1166 BCM281XX_PIN_SHIFT(STD, INPUT_DIS),
1167 BCM281XX_PIN_MASK(STD, INPUT_DIS));
1168 break;
1169
1170 case PIN_CONFIG_DRIVE_STRENGTH:
1171 /* Valid range is 2-16 mA, even numbers only */
1172 if ((arg < 2) || (arg > 16) || (arg % 2)) {
1173 dev_err(pctldev->dev,
1174 "Invalid Drive Strength value (%d) for "
1175 "pin %s (%d). Valid values are "
1176 "(2..16) mA, even numbers only.\n",
1177 arg, pdata->pins[pin].name, pin);
1178 return -EINVAL;
1179 }
1180 bcm281xx_pin_update(val, mask, (arg/2)-1,
1181 BCM281XX_PIN_SHIFT(STD, DRV_STR),
1182 BCM281XX_PIN_MASK(STD, DRV_STR));
1183 break;
1184
1185 default:
1186 dev_err(pctldev->dev,
1187 "Unrecognized pin config %d for pin %s (%d).\n",
1188 param, pdata->pins[pin].name, pin);
1189 return -EINVAL;
1190
1191 } /* switch config */
1192 } /* for each config */
1193
1194 return 0;
1195}
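/*
 * Example of the drive-strength encoding above (illustrative, not part of
 * the original source): a requested strength of 8 mA passes the range check
 * (2..16 mA, even values only) and is stored as (8 / 2) - 1 = 3 in the
 * DRV_STR field; 2 mA maps to 0 and 16 mA maps to 7.
 */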
1196
1197/*
1198 * The pull-up strength for an I2C pin is represented by bits 4-6 in the
1199 * register with the following mapping:
1200 * 0b000: No pull-up
1201 * 0b001: 1200 Ohm
1202 * 0b010: 1800 Ohm
1203 * 0b011: 720 Ohm
1204 * 0b100: 2700 Ohm
1205 * 0b101: 831 Ohm
1206 * 0b110: 1080 Ohm
1207 * 0b111: 568 Ohm
1208 * The array below maps pull-up strength in Ohms to register value (array index + 1).
1209 */
1210static const u16 bcm281xx_pullup_map[] = {
1211 1200, 1800, 720, 2700, 831, 1080, 568
1212};
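/*
 * Example (illustrative, not part of the original source): a request for a
 * 1200 Ohm pull-up matches index 0 of the table above, so
 * bcm281xx_i2c_pin_update() below writes 0 + 1 = 1 (0b001) into the
 * PULL_UP_STR field, in line with the register mapping documented above.
 */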
1213
1214/* Goes through the configs and updates the register val/mask */
1215static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev,
1216 unsigned pin,
1217 unsigned long *configs,
1218 unsigned num_configs,
1219 u32 *val,
1220 u32 *mask)
1221{
1222 struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1223 int i, j;
1224 enum pin_config_param param;
1225 u16 arg;
1226
1227 for (i = 0; i < num_configs; i++) {
1228 param = pinconf_to_config_param(configs[i]);
1229 arg = pinconf_to_config_argument(configs[i]);
1230
1231 switch (param) {
1232 case PIN_CONFIG_BIAS_PULL_UP:
1233 for (j = 0; j < ARRAY_SIZE(bcm281xx_pullup_map); j++)
1234 if (bcm281xx_pullup_map[j] == arg)
1235 break;
1236
1237 if (j == ARRAY_SIZE(bcm281xx_pullup_map)) {
1238 dev_err(pctldev->dev,
1239 "Invalid pull-up value (%d) for pin %s "
1240 "(%d). Valid values are 568, 720, 831, "
1241 "1080, 1200, 1800, 2700 Ohms.\n",
1242 arg, pdata->pins[pin].name, pin);
1243 return -EINVAL;
1244 }
1245
1246 bcm281xx_pin_update(val, mask, j+1,
1247 BCM281XX_PIN_SHIFT(I2C, PULL_UP_STR),
1248 BCM281XX_PIN_MASK(I2C, PULL_UP_STR));
1249 break;
1250
1251 case PIN_CONFIG_BIAS_DISABLE:
1252 bcm281xx_pin_update(val, mask, 0,
1253 BCM281XX_PIN_SHIFT(I2C, PULL_UP_STR),
1254 BCM281XX_PIN_MASK(I2C, PULL_UP_STR));
1255 break;
1256
1257 case PIN_CONFIG_SLEW_RATE:
1258 arg = (arg >= 1 ? 1 : 0);
1259 bcm281xx_pin_update(val, mask, arg,
1260 BCM281XX_PIN_SHIFT(I2C, SLEW),
1261 BCM281XX_PIN_MASK(I2C, SLEW));
1262 break;
1263
1264 case PIN_CONFIG_INPUT_ENABLE:
1265			/* inverted, since the register bit is for input _disable_ */
1266 arg = (arg >= 1 ? 0 : 1);
1267 bcm281xx_pin_update(val, mask, arg,
1268 BCM281XX_PIN_SHIFT(I2C, INPUT_DIS),
1269 BCM281XX_PIN_MASK(I2C, INPUT_DIS));
1270 break;
1271
1272 default:
1273 dev_err(pctldev->dev,
1274 "Unrecognized pin config %d for pin %s (%d).\n",
1275 param, pdata->pins[pin].name, pin);
1276 return -EINVAL;
1277
1278 } /* switch config */
1279 } /* for each config */
1280
1281 return 0;
1282}
1283
1284/* Goes through the configs and updates the register val/mask */
1285static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev,
1286 unsigned pin,
1287 unsigned long *configs,
1288 unsigned num_configs,
1289 u32 *val,
1290 u32 *mask)
1291{
1292 struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1293 int i;
1294 enum pin_config_param param;
1295 u16 arg;
1296
1297 for (i = 0; i < num_configs; i++) {
1298 param = pinconf_to_config_param(configs[i]);
1299 arg = pinconf_to_config_argument(configs[i]);
1300
1301 switch (param) {
1302 case PIN_CONFIG_SLEW_RATE:
1303 arg = (arg >= 1 ? 1 : 0);
1304 bcm281xx_pin_update(val, mask, arg,
1305 BCM281XX_PIN_SHIFT(HDMI, MODE),
1306 BCM281XX_PIN_MASK(HDMI, MODE));
1307 break;
1308
1309 case PIN_CONFIG_INPUT_ENABLE:
1310			/* inverted, since the register bit is for input _disable_ */
1311 arg = (arg >= 1 ? 0 : 1);
1312 bcm281xx_pin_update(val, mask, arg,
1313 BCM281XX_PIN_SHIFT(HDMI, INPUT_DIS),
1314 BCM281XX_PIN_MASK(HDMI, INPUT_DIS));
1315 break;
1316
1317 default:
1318 dev_err(pctldev->dev,
1319 "Unrecognized pin config %d for pin %s (%d).\n",
1320 param, pdata->pins[pin].name, pin);
1321 return -EINVAL;
1322
1323 } /* switch config */
1324 } /* for each config */
1325
1326 return 0;
1327}
1328
1329static int bcm281xx_pinctrl_pin_config_set(struct pinctrl_dev *pctldev,
1330 unsigned pin,
1331 unsigned long *configs,
1332 unsigned num_configs)
1333{
1334 struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1335 enum bcm281xx_pin_type pin_type;
1336 u32 offset = 4 * pin;
1337 u32 cfg_val, cfg_mask;
1338 int rc;
1339
1340 cfg_val = 0;
1341 cfg_mask = 0;
1342 pin_type = pin_type_get(pctldev, pin);
1343
1344 /* Different pins have different configuration options */
1345 switch (pin_type) {
1346 case BCM281XX_PIN_TYPE_STD:
1347 rc = bcm281xx_std_pin_update(pctldev, pin, configs,
1348 num_configs, &cfg_val, &cfg_mask);
1349 break;
1350
1351 case BCM281XX_PIN_TYPE_I2C:
1352 rc = bcm281xx_i2c_pin_update(pctldev, pin, configs,
1353 num_configs, &cfg_val, &cfg_mask);
1354 break;
1355
1356 case BCM281XX_PIN_TYPE_HDMI:
1357 rc = bcm281xx_hdmi_pin_update(pctldev, pin, configs,
1358 num_configs, &cfg_val, &cfg_mask);
1359 break;
1360
1361 default:
1362 dev_err(pctldev->dev, "Unknown pin type for pin %s (%d).\n",
1363 pdata->pins[pin].name, pin);
1364 return -EINVAL;
1365
1366 } /* switch pin type */
1367
1368 if (rc)
1369 return rc;
1370
1371 dev_dbg(pctldev->dev,
1372 "%s(): Set pin %s (%d) with config 0x%x, mask 0x%x\n",
1373 __func__, pdata->pins[pin].name, pin, cfg_val, cfg_mask);
1374
1375 rc = regmap_update_bits(pdata->regmap, offset, cfg_mask, cfg_val);
1376 if (rc) {
1377 dev_err(pctldev->dev,
1378 "Error updating register for pin %s (%d).\n",
1379 pdata->pins[pin].name, pin);
1380 return rc;
1381 }
1382
1383 return 0;
1384}
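/*
 * Usage sketch (illustrative; node and property values are examples only):
 * because .dt_node_to_map is pinconf_generic_dt_node_to_map_pin, a board
 * device tree can configure pins with the generic pinconf properties, e.g.:
 *
 *	uartb1_pins: uartb1 {
 *		pins = "uartb1_urxd", "uartb1_utxd";
 *		function = "alt1";
 *		bias-pull-up;
 *		slew-rate = <1>;
 *	};
 *
 * pinconf-generic translates each property into the PIN_CONFIG_* parameters
 * handled by the *_pin_update() functions above.
 */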
1385
1386static struct pinconf_ops bcm281xx_pinctrl_pinconf_ops = {
1387 .pin_config_get = bcm281xx_pinctrl_pin_config_get,
1388 .pin_config_set = bcm281xx_pinctrl_pin_config_set,
1389};
1390
1391static struct pinctrl_desc bcm281xx_pinctrl_desc = {
1392 /* name, pins, npins members initialized in probe function */
1393 .pctlops = &bcm281xx_pinctrl_ops,
1394 .pmxops = &bcm281xx_pinctrl_pinmux_ops,
1395 .confops = &bcm281xx_pinctrl_pinconf_ops,
1396 .owner = THIS_MODULE,
1397};
1398
1399int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
1400{
1401 struct bcm281xx_pinctrl_data *pdata = &bcm281xx_pinctrl;
1402 struct resource *res;
1403 struct pinctrl_dev *pctl;
1404
1405	/* So far we can assume there is only one bank of registers */
1406 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1407 if (!res) {
1408 dev_err(&pdev->dev, "Missing MEM resource\n");
1409 return -ENODEV;
1410 }
1411
1412 pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
1413 if (IS_ERR(pdata->reg_base)) {
1414 dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
1415 return -ENODEV;
1416 }
1417
1418 /* Initialize the dynamic part of pinctrl_desc */
1419 pdata->regmap = devm_regmap_init_mmio(&pdev->dev, pdata->reg_base,
1420 &bcm281xx_pinctrl_regmap_config);
1421 if (IS_ERR(pdata->regmap)) {
1422 dev_err(&pdev->dev, "Regmap MMIO init failed.\n");
1423 return -ENODEV;
1424 }
1425
1426 bcm281xx_pinctrl_desc.name = dev_name(&pdev->dev);
1427 bcm281xx_pinctrl_desc.pins = bcm281xx_pinctrl.pins;
1428 bcm281xx_pinctrl_desc.npins = bcm281xx_pinctrl.npins;
1429
1430 pctl = pinctrl_register(&bcm281xx_pinctrl_desc,
1431 &pdev->dev,
1432 pdata);
1433 if (!pctl) {
1434 dev_err(&pdev->dev, "Failed to register pinctrl\n");
1435 return -ENODEV;
1436 }
1437
1438 platform_set_drvdata(pdev, pdata);
1439
1440 return 0;
1441}
1442
1443static struct of_device_id bcm281xx_pinctrl_of_match[] = {
1444 { .compatible = "brcm,bcm11351-pinctrl", },
1445 { },
1446};
1447
1448static struct platform_driver bcm281xx_pinctrl_driver = {
1449 .driver = {
1450 .name = "bcm281xx-pinctrl",
1451 .owner = THIS_MODULE,
1452 .of_match_table = bcm281xx_pinctrl_of_match,
1453 },
1454};
1455
1456module_platform_driver_probe(bcm281xx_pinctrl_driver, bcm281xx_pinctrl_probe);
1457
1458MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
1459MODULE_AUTHOR("Sherman Yin <syin@broadcom.com>");
1460MODULE_DESCRIPTION("Broadcom BCM281xx pinctrl driver");
1461MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-capri.c b/drivers/pinctrl/pinctrl-capri.c
deleted file mode 100644
index eb2500212147..000000000000
--- a/drivers/pinctrl/pinctrl-capri.c
+++ /dev/null
@@ -1,1454 +0,0 @@
1/*
2 * Copyright (C) 2013 Broadcom Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#include <linux/err.h>
14#include <linux/io.h>
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/platform_device.h>
18#include <linux/pinctrl/pinctrl.h>
19#include <linux/pinctrl/pinmux.h>
20#include <linux/pinctrl/pinconf.h>
21#include <linux/pinctrl/pinconf-generic.h>
22#include <linux/regmap.h>
23#include <linux/slab.h>
24#include "core.h"
25#include "pinctrl-utils.h"
26
27/* Capri Pin Control Registers Definitions */
28
29/* Function Select bits are the same for all pin control registers */
30#define CAPRI_PIN_REG_F_SEL_MASK 0x0700
31#define CAPRI_PIN_REG_F_SEL_SHIFT 8
32
33/* Standard pin register */
34#define CAPRI_STD_PIN_REG_DRV_STR_MASK 0x0007
35#define CAPRI_STD_PIN_REG_DRV_STR_SHIFT 0
36#define CAPRI_STD_PIN_REG_INPUT_DIS_MASK 0x0008
37#define CAPRI_STD_PIN_REG_INPUT_DIS_SHIFT 3
38#define CAPRI_STD_PIN_REG_SLEW_MASK 0x0010
39#define CAPRI_STD_PIN_REG_SLEW_SHIFT 4
40#define CAPRI_STD_PIN_REG_PULL_UP_MASK 0x0020
41#define CAPRI_STD_PIN_REG_PULL_UP_SHIFT 5
42#define CAPRI_STD_PIN_REG_PULL_DN_MASK 0x0040
43#define CAPRI_STD_PIN_REG_PULL_DN_SHIFT 6
44#define CAPRI_STD_PIN_REG_HYST_MASK 0x0080
45#define CAPRI_STD_PIN_REG_HYST_SHIFT 7
46
47/* I2C pin register */
48#define CAPRI_I2C_PIN_REG_INPUT_DIS_MASK 0x0004
49#define CAPRI_I2C_PIN_REG_INPUT_DIS_SHIFT 2
50#define CAPRI_I2C_PIN_REG_SLEW_MASK 0x0008
51#define CAPRI_I2C_PIN_REG_SLEW_SHIFT 3
52#define CAPRI_I2C_PIN_REG_PULL_UP_STR_MASK 0x0070
53#define CAPRI_I2C_PIN_REG_PULL_UP_STR_SHIFT 4
54
55/* HDMI pin register */
56#define CAPRI_HDMI_PIN_REG_INPUT_DIS_MASK 0x0008
57#define CAPRI_HDMI_PIN_REG_INPUT_DIS_SHIFT 3
58#define CAPRI_HDMI_PIN_REG_MODE_MASK 0x0010
59#define CAPRI_HDMI_PIN_REG_MODE_SHIFT 4
60
61/**
62 * capri_pin_type - types of pin register
63 */
64enum capri_pin_type {
65 CAPRI_PIN_TYPE_UNKNOWN = 0,
66 CAPRI_PIN_TYPE_STD,
67 CAPRI_PIN_TYPE_I2C,
68 CAPRI_PIN_TYPE_HDMI,
69};
70
71static enum capri_pin_type std_pin = CAPRI_PIN_TYPE_STD;
72static enum capri_pin_type i2c_pin = CAPRI_PIN_TYPE_I2C;
73static enum capri_pin_type hdmi_pin = CAPRI_PIN_TYPE_HDMI;
74
75/**
76 * capri_pin_function- define pin function
77 */
78struct capri_pin_function {
79 const char *name;
80 const char * const *groups;
81 const unsigned ngroups;
82};
83
84/**
85 * capri_pinctrl_data - Broadcom-specific pinctrl data
86 * @reg_base - base of pinctrl registers
87 */
88struct capri_pinctrl_data {
89 void __iomem *reg_base;
90
91 /* List of all pins */
92 const struct pinctrl_pin_desc *pins;
93 const unsigned npins;
94
95 const struct capri_pin_function *functions;
96 const unsigned nfunctions;
97
98 struct regmap *regmap;
99};
100
101/*
102 * Pin number definition. The order here must be the same as defined in the
103 * PADCTRLREG block in the RDB.
104 */
105#define CAPRI_PIN_ADCSYNC 0
106#define CAPRI_PIN_BAT_RM 1
107#define CAPRI_PIN_BSC1_SCL 2
108#define CAPRI_PIN_BSC1_SDA 3
109#define CAPRI_PIN_BSC2_SCL 4
110#define CAPRI_PIN_BSC2_SDA 5
111#define CAPRI_PIN_CLASSGPWR 6
112#define CAPRI_PIN_CLK_CX8 7
113#define CAPRI_PIN_CLKOUT_0 8
114#define CAPRI_PIN_CLKOUT_1 9
115#define CAPRI_PIN_CLKOUT_2 10
116#define CAPRI_PIN_CLKOUT_3 11
117#define CAPRI_PIN_CLKREQ_IN_0 12
118#define CAPRI_PIN_CLKREQ_IN_1 13
119#define CAPRI_PIN_CWS_SYS_REQ1 14
120#define CAPRI_PIN_CWS_SYS_REQ2 15
121#define CAPRI_PIN_CWS_SYS_REQ3 16
122#define CAPRI_PIN_DIGMIC1_CLK 17
123#define CAPRI_PIN_DIGMIC1_DQ 18
124#define CAPRI_PIN_DIGMIC2_CLK 19
125#define CAPRI_PIN_DIGMIC2_DQ 20
126#define CAPRI_PIN_GPEN13 21
127#define CAPRI_PIN_GPEN14 22
128#define CAPRI_PIN_GPEN15 23
129#define CAPRI_PIN_GPIO00 24
130#define CAPRI_PIN_GPIO01 25
131#define CAPRI_PIN_GPIO02 26
132#define CAPRI_PIN_GPIO03 27
133#define CAPRI_PIN_GPIO04 28
134#define CAPRI_PIN_GPIO05 29
135#define CAPRI_PIN_GPIO06 30
136#define CAPRI_PIN_GPIO07 31
137#define CAPRI_PIN_GPIO08 32
138#define CAPRI_PIN_GPIO09 33
139#define CAPRI_PIN_GPIO10 34
140#define CAPRI_PIN_GPIO11 35
141#define CAPRI_PIN_GPIO12 36
142#define CAPRI_PIN_GPIO13 37
143#define CAPRI_PIN_GPIO14 38
144#define CAPRI_PIN_GPS_PABLANK 39
145#define CAPRI_PIN_GPS_TMARK 40
146#define CAPRI_PIN_HDMI_SCL 41
147#define CAPRI_PIN_HDMI_SDA 42
148#define CAPRI_PIN_IC_DM 43
149#define CAPRI_PIN_IC_DP 44
150#define CAPRI_PIN_KP_COL_IP_0 45
151#define CAPRI_PIN_KP_COL_IP_1 46
152#define CAPRI_PIN_KP_COL_IP_2 47
153#define CAPRI_PIN_KP_COL_IP_3 48
154#define CAPRI_PIN_KP_ROW_OP_0 49
155#define CAPRI_PIN_KP_ROW_OP_1 50
156#define CAPRI_PIN_KP_ROW_OP_2 51
157#define CAPRI_PIN_KP_ROW_OP_3 52
158#define CAPRI_PIN_LCD_B_0 53
159#define CAPRI_PIN_LCD_B_1 54
160#define CAPRI_PIN_LCD_B_2 55
161#define CAPRI_PIN_LCD_B_3 56
162#define CAPRI_PIN_LCD_B_4 57
163#define CAPRI_PIN_LCD_B_5 58
164#define CAPRI_PIN_LCD_B_6 59
165#define CAPRI_PIN_LCD_B_7 60
166#define CAPRI_PIN_LCD_G_0 61
167#define CAPRI_PIN_LCD_G_1 62
168#define CAPRI_PIN_LCD_G_2 63
169#define CAPRI_PIN_LCD_G_3 64
170#define CAPRI_PIN_LCD_G_4 65
171#define CAPRI_PIN_LCD_G_5 66
172#define CAPRI_PIN_LCD_G_6 67
173#define CAPRI_PIN_LCD_G_7 68
174#define CAPRI_PIN_LCD_HSYNC 69
175#define CAPRI_PIN_LCD_OE 70
176#define CAPRI_PIN_LCD_PCLK 71
177#define CAPRI_PIN_LCD_R_0 72
178#define CAPRI_PIN_LCD_R_1 73
179#define CAPRI_PIN_LCD_R_2 74
180#define CAPRI_PIN_LCD_R_3 75
181#define CAPRI_PIN_LCD_R_4 76
182#define CAPRI_PIN_LCD_R_5 77
183#define CAPRI_PIN_LCD_R_6 78
184#define CAPRI_PIN_LCD_R_7 79
185#define CAPRI_PIN_LCD_VSYNC 80
186#define CAPRI_PIN_MDMGPIO0 81
187#define CAPRI_PIN_MDMGPIO1 82
188#define CAPRI_PIN_MDMGPIO2 83
189#define CAPRI_PIN_MDMGPIO3 84
190#define CAPRI_PIN_MDMGPIO4 85
191#define CAPRI_PIN_MDMGPIO5 86
192#define CAPRI_PIN_MDMGPIO6 87
193#define CAPRI_PIN_MDMGPIO7 88
194#define CAPRI_PIN_MDMGPIO8 89
195#define CAPRI_PIN_MPHI_DATA_0 90
196#define CAPRI_PIN_MPHI_DATA_1 91
197#define CAPRI_PIN_MPHI_DATA_2 92
198#define CAPRI_PIN_MPHI_DATA_3 93
199#define CAPRI_PIN_MPHI_DATA_4 94
200#define CAPRI_PIN_MPHI_DATA_5 95
201#define CAPRI_PIN_MPHI_DATA_6 96
202#define CAPRI_PIN_MPHI_DATA_7 97
203#define CAPRI_PIN_MPHI_DATA_8 98
204#define CAPRI_PIN_MPHI_DATA_9 99
205#define CAPRI_PIN_MPHI_DATA_10 100
206#define CAPRI_PIN_MPHI_DATA_11 101
207#define CAPRI_PIN_MPHI_DATA_12 102
208#define CAPRI_PIN_MPHI_DATA_13 103
209#define CAPRI_PIN_MPHI_DATA_14 104
210#define CAPRI_PIN_MPHI_DATA_15 105
211#define CAPRI_PIN_MPHI_HA0 106
212#define CAPRI_PIN_MPHI_HAT0 107
213#define CAPRI_PIN_MPHI_HAT1 108
214#define CAPRI_PIN_MPHI_HCE0_N 109
215#define CAPRI_PIN_MPHI_HCE1_N 110
216#define CAPRI_PIN_MPHI_HRD_N 111
217#define CAPRI_PIN_MPHI_HWR_N 112
218#define CAPRI_PIN_MPHI_RUN0 113
219#define CAPRI_PIN_MPHI_RUN1 114
220#define CAPRI_PIN_MTX_SCAN_CLK 115
221#define CAPRI_PIN_MTX_SCAN_DATA 116
222#define CAPRI_PIN_NAND_AD_0 117
223#define CAPRI_PIN_NAND_AD_1 118
224#define CAPRI_PIN_NAND_AD_2 119
225#define CAPRI_PIN_NAND_AD_3 120
226#define CAPRI_PIN_NAND_AD_4 121
227#define CAPRI_PIN_NAND_AD_5 122
228#define CAPRI_PIN_NAND_AD_6 123
229#define CAPRI_PIN_NAND_AD_7 124
230#define CAPRI_PIN_NAND_ALE 125
231#define CAPRI_PIN_NAND_CEN_0 126
232#define CAPRI_PIN_NAND_CEN_1 127
233#define CAPRI_PIN_NAND_CLE 128
234#define CAPRI_PIN_NAND_OEN 129
235#define CAPRI_PIN_NAND_RDY_0 130
236#define CAPRI_PIN_NAND_RDY_1 131
237#define CAPRI_PIN_NAND_WEN 132
238#define CAPRI_PIN_NAND_WP 133
239#define CAPRI_PIN_PC1 134
240#define CAPRI_PIN_PC2 135
241#define CAPRI_PIN_PMU_INT 136
242#define CAPRI_PIN_PMU_SCL 137
243#define CAPRI_PIN_PMU_SDA 138
244#define CAPRI_PIN_RFST2G_MTSLOTEN3G 139
245#define CAPRI_PIN_RGMII_0_RX_CTL 140
246#define CAPRI_PIN_RGMII_0_RXC 141
247#define CAPRI_PIN_RGMII_0_RXD_0 142
248#define CAPRI_PIN_RGMII_0_RXD_1 143
249#define CAPRI_PIN_RGMII_0_RXD_2 144
250#define CAPRI_PIN_RGMII_0_RXD_3 145
251#define CAPRI_PIN_RGMII_0_TX_CTL 146
252#define CAPRI_PIN_RGMII_0_TXC 147
253#define CAPRI_PIN_RGMII_0_TXD_0 148
254#define CAPRI_PIN_RGMII_0_TXD_1 149
255#define CAPRI_PIN_RGMII_0_TXD_2 150
256#define CAPRI_PIN_RGMII_0_TXD_3 151
257#define CAPRI_PIN_RGMII_1_RX_CTL 152
258#define CAPRI_PIN_RGMII_1_RXC 153
259#define CAPRI_PIN_RGMII_1_RXD_0 154
260#define CAPRI_PIN_RGMII_1_RXD_1 155
261#define CAPRI_PIN_RGMII_1_RXD_2 156
262#define CAPRI_PIN_RGMII_1_RXD_3 157
263#define CAPRI_PIN_RGMII_1_TX_CTL 158
264#define CAPRI_PIN_RGMII_1_TXC 159
265#define CAPRI_PIN_RGMII_1_TXD_0 160
266#define CAPRI_PIN_RGMII_1_TXD_1 161
267#define CAPRI_PIN_RGMII_1_TXD_2 162
268#define CAPRI_PIN_RGMII_1_TXD_3 163
269#define CAPRI_PIN_RGMII_GPIO_0 164
270#define CAPRI_PIN_RGMII_GPIO_1 165
271#define CAPRI_PIN_RGMII_GPIO_2 166
272#define CAPRI_PIN_RGMII_GPIO_3 167
273#define CAPRI_PIN_RTXDATA2G_TXDATA3G1 168
274#define CAPRI_PIN_RTXEN2G_TXDATA3G2 169
275#define CAPRI_PIN_RXDATA3G0 170
276#define CAPRI_PIN_RXDATA3G1 171
277#define CAPRI_PIN_RXDATA3G2 172
278#define CAPRI_PIN_SDIO1_CLK 173
279#define CAPRI_PIN_SDIO1_CMD 174
280#define CAPRI_PIN_SDIO1_DATA_0 175
281#define CAPRI_PIN_SDIO1_DATA_1 176
282#define CAPRI_PIN_SDIO1_DATA_2 177
283#define CAPRI_PIN_SDIO1_DATA_3 178
284#define CAPRI_PIN_SDIO4_CLK 179
285#define CAPRI_PIN_SDIO4_CMD 180
286#define CAPRI_PIN_SDIO4_DATA_0 181
287#define CAPRI_PIN_SDIO4_DATA_1 182
288#define CAPRI_PIN_SDIO4_DATA_2 183
289#define CAPRI_PIN_SDIO4_DATA_3 184
290#define CAPRI_PIN_SIM_CLK 185
291#define CAPRI_PIN_SIM_DATA 186
292#define CAPRI_PIN_SIM_DET 187
293#define CAPRI_PIN_SIM_RESETN 188
294#define CAPRI_PIN_SIM2_CLK 189
295#define CAPRI_PIN_SIM2_DATA 190
296#define CAPRI_PIN_SIM2_DET 191
297#define CAPRI_PIN_SIM2_RESETN 192
298#define CAPRI_PIN_SRI_C 193
299#define CAPRI_PIN_SRI_D 194
300#define CAPRI_PIN_SRI_E 195
301#define CAPRI_PIN_SSP_EXTCLK 196
302#define CAPRI_PIN_SSP0_CLK 197
303#define CAPRI_PIN_SSP0_FS 198
304#define CAPRI_PIN_SSP0_RXD 199
305#define CAPRI_PIN_SSP0_TXD 200
306#define CAPRI_PIN_SSP2_CLK 201
307#define CAPRI_PIN_SSP2_FS_0 202
308#define CAPRI_PIN_SSP2_FS_1 203
309#define CAPRI_PIN_SSP2_FS_2 204
310#define CAPRI_PIN_SSP2_FS_3 205
311#define CAPRI_PIN_SSP2_RXD_0 206
312#define CAPRI_PIN_SSP2_RXD_1 207
313#define CAPRI_PIN_SSP2_TXD_0 208
314#define CAPRI_PIN_SSP2_TXD_1 209
315#define CAPRI_PIN_SSP3_CLK 210
316#define CAPRI_PIN_SSP3_FS 211
317#define CAPRI_PIN_SSP3_RXD 212
318#define CAPRI_PIN_SSP3_TXD 213
319#define CAPRI_PIN_SSP4_CLK 214
320#define CAPRI_PIN_SSP4_FS 215
321#define CAPRI_PIN_SSP4_RXD 216
322#define CAPRI_PIN_SSP4_TXD 217
323#define CAPRI_PIN_SSP5_CLK 218
324#define CAPRI_PIN_SSP5_FS 219
325#define CAPRI_PIN_SSP5_RXD 220
326#define CAPRI_PIN_SSP5_TXD 221
327#define CAPRI_PIN_SSP6_CLK 222
328#define CAPRI_PIN_SSP6_FS 223
329#define CAPRI_PIN_SSP6_RXD 224
330#define CAPRI_PIN_SSP6_TXD 225
331#define CAPRI_PIN_STAT_1 226
332#define CAPRI_PIN_STAT_2 227
333#define CAPRI_PIN_SYSCLKEN 228
334#define CAPRI_PIN_TRACECLK 229
335#define CAPRI_PIN_TRACEDT00 230
336#define CAPRI_PIN_TRACEDT01 231
337#define CAPRI_PIN_TRACEDT02 232
338#define CAPRI_PIN_TRACEDT03 233
339#define CAPRI_PIN_TRACEDT04 234
340#define CAPRI_PIN_TRACEDT05 235
341#define CAPRI_PIN_TRACEDT06 236
342#define CAPRI_PIN_TRACEDT07 237
343#define CAPRI_PIN_TRACEDT08 238
344#define CAPRI_PIN_TRACEDT09 239
345#define CAPRI_PIN_TRACEDT10 240
346#define CAPRI_PIN_TRACEDT11 241
347#define CAPRI_PIN_TRACEDT12 242
348#define CAPRI_PIN_TRACEDT13 243
349#define CAPRI_PIN_TRACEDT14 244
350#define CAPRI_PIN_TRACEDT15 245
351#define CAPRI_PIN_TXDATA3G0 246
352#define CAPRI_PIN_TXPWRIND 247
353#define CAPRI_PIN_UARTB1_UCTS 248
354#define CAPRI_PIN_UARTB1_URTS 249
355#define CAPRI_PIN_UARTB1_URXD 250
356#define CAPRI_PIN_UARTB1_UTXD 251
357#define CAPRI_PIN_UARTB2_URXD 252
358#define CAPRI_PIN_UARTB2_UTXD 253
359#define CAPRI_PIN_UARTB3_UCTS 254
360#define CAPRI_PIN_UARTB3_URTS 255
361#define CAPRI_PIN_UARTB3_URXD 256
362#define CAPRI_PIN_UARTB3_UTXD 257
363#define CAPRI_PIN_UARTB4_UCTS 258
364#define CAPRI_PIN_UARTB4_URTS 259
365#define CAPRI_PIN_UARTB4_URXD 260
366#define CAPRI_PIN_UARTB4_UTXD 261
367#define CAPRI_PIN_VC_CAM1_SCL 262
368#define CAPRI_PIN_VC_CAM1_SDA 263
369#define CAPRI_PIN_VC_CAM2_SCL 264
370#define CAPRI_PIN_VC_CAM2_SDA 265
371#define CAPRI_PIN_VC_CAM3_SCL 266
372#define CAPRI_PIN_VC_CAM3_SDA 267
373
374#define CAPRI_PIN_DESC(a, b, c) \
375 { .number = a, .name = b, .drv_data = &c##_pin }
376
377/*
378 * Pin description definition. The order here must be the same as defined in
379 * the PADCTRLREG block in the RDB, since the pin number is used as an index
380 * into this array.
381 */
382static const struct pinctrl_pin_desc capri_pinctrl_pins[] = {
383 CAPRI_PIN_DESC(CAPRI_PIN_ADCSYNC, "adcsync", std),
384 CAPRI_PIN_DESC(CAPRI_PIN_BAT_RM, "bat_rm", std),
385 CAPRI_PIN_DESC(CAPRI_PIN_BSC1_SCL, "bsc1_scl", i2c),
386 CAPRI_PIN_DESC(CAPRI_PIN_BSC1_SDA, "bsc1_sda", i2c),
387 CAPRI_PIN_DESC(CAPRI_PIN_BSC2_SCL, "bsc2_scl", i2c),
388 CAPRI_PIN_DESC(CAPRI_PIN_BSC2_SDA, "bsc2_sda", i2c),
389 CAPRI_PIN_DESC(CAPRI_PIN_CLASSGPWR, "classgpwr", std),
390 CAPRI_PIN_DESC(CAPRI_PIN_CLK_CX8, "clk_cx8", std),
391 CAPRI_PIN_DESC(CAPRI_PIN_CLKOUT_0, "clkout_0", std),
392 CAPRI_PIN_DESC(CAPRI_PIN_CLKOUT_1, "clkout_1", std),
393 CAPRI_PIN_DESC(CAPRI_PIN_CLKOUT_2, "clkout_2", std),
394 CAPRI_PIN_DESC(CAPRI_PIN_CLKOUT_3, "clkout_3", std),
395 CAPRI_PIN_DESC(CAPRI_PIN_CLKREQ_IN_0, "clkreq_in_0", std),
396 CAPRI_PIN_DESC(CAPRI_PIN_CLKREQ_IN_1, "clkreq_in_1", std),
397 CAPRI_PIN_DESC(CAPRI_PIN_CWS_SYS_REQ1, "cws_sys_req1", std),
398 CAPRI_PIN_DESC(CAPRI_PIN_CWS_SYS_REQ2, "cws_sys_req2", std),
399 CAPRI_PIN_DESC(CAPRI_PIN_CWS_SYS_REQ3, "cws_sys_req3", std),
400 CAPRI_PIN_DESC(CAPRI_PIN_DIGMIC1_CLK, "digmic1_clk", std),
401 CAPRI_PIN_DESC(CAPRI_PIN_DIGMIC1_DQ, "digmic1_dq", std),
402 CAPRI_PIN_DESC(CAPRI_PIN_DIGMIC2_CLK, "digmic2_clk", std),
403 CAPRI_PIN_DESC(CAPRI_PIN_DIGMIC2_DQ, "digmic2_dq", std),
404 CAPRI_PIN_DESC(CAPRI_PIN_GPEN13, "gpen13", std),
405 CAPRI_PIN_DESC(CAPRI_PIN_GPEN14, "gpen14", std),
406 CAPRI_PIN_DESC(CAPRI_PIN_GPEN15, "gpen15", std),
407 CAPRI_PIN_DESC(CAPRI_PIN_GPIO00, "gpio00", std),
408 CAPRI_PIN_DESC(CAPRI_PIN_GPIO01, "gpio01", std),
409 CAPRI_PIN_DESC(CAPRI_PIN_GPIO02, "gpio02", std),
410 CAPRI_PIN_DESC(CAPRI_PIN_GPIO03, "gpio03", std),
411 CAPRI_PIN_DESC(CAPRI_PIN_GPIO04, "gpio04", std),
412 CAPRI_PIN_DESC(CAPRI_PIN_GPIO05, "gpio05", std),
413 CAPRI_PIN_DESC(CAPRI_PIN_GPIO06, "gpio06", std),
414 CAPRI_PIN_DESC(CAPRI_PIN_GPIO07, "gpio07", std),
415 CAPRI_PIN_DESC(CAPRI_PIN_GPIO08, "gpio08", std),
416 CAPRI_PIN_DESC(CAPRI_PIN_GPIO09, "gpio09", std),
417 CAPRI_PIN_DESC(CAPRI_PIN_GPIO10, "gpio10", std),
418 CAPRI_PIN_DESC(CAPRI_PIN_GPIO11, "gpio11", std),
419 CAPRI_PIN_DESC(CAPRI_PIN_GPIO12, "gpio12", std),
420 CAPRI_PIN_DESC(CAPRI_PIN_GPIO13, "gpio13", std),
421 CAPRI_PIN_DESC(CAPRI_PIN_GPIO14, "gpio14", std),
422 CAPRI_PIN_DESC(CAPRI_PIN_GPS_PABLANK, "gps_pablank", std),
423 CAPRI_PIN_DESC(CAPRI_PIN_GPS_TMARK, "gps_tmark", std),
424 CAPRI_PIN_DESC(CAPRI_PIN_HDMI_SCL, "hdmi_scl", hdmi),
425 CAPRI_PIN_DESC(CAPRI_PIN_HDMI_SDA, "hdmi_sda", hdmi),
426 CAPRI_PIN_DESC(CAPRI_PIN_IC_DM, "ic_dm", std),
427 CAPRI_PIN_DESC(CAPRI_PIN_IC_DP, "ic_dp", std),
428 CAPRI_PIN_DESC(CAPRI_PIN_KP_COL_IP_0, "kp_col_ip_0", std),
429 CAPRI_PIN_DESC(CAPRI_PIN_KP_COL_IP_1, "kp_col_ip_1", std),
430 CAPRI_PIN_DESC(CAPRI_PIN_KP_COL_IP_2, "kp_col_ip_2", std),
431 CAPRI_PIN_DESC(CAPRI_PIN_KP_COL_IP_3, "kp_col_ip_3", std),
432 CAPRI_PIN_DESC(CAPRI_PIN_KP_ROW_OP_0, "kp_row_op_0", std),
433 CAPRI_PIN_DESC(CAPRI_PIN_KP_ROW_OP_1, "kp_row_op_1", std),
434 CAPRI_PIN_DESC(CAPRI_PIN_KP_ROW_OP_2, "kp_row_op_2", std),
435 CAPRI_PIN_DESC(CAPRI_PIN_KP_ROW_OP_3, "kp_row_op_3", std),
436 CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_0, "lcd_b_0", std),
437 CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_1, "lcd_b_1", std),
438 CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_2, "lcd_b_2", std),
439 CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_3, "lcd_b_3", std),
440 CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_4, "lcd_b_4", std),
441 CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_5, "lcd_b_5", std),
442 CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_6, "lcd_b_6", std),
443 CAPRI_PIN_DESC(CAPRI_PIN_LCD_B_7, "lcd_b_7", std),
444 CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_0, "lcd_g_0", std),
445 CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_1, "lcd_g_1", std),
446 CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_2, "lcd_g_2", std),
447 CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_3, "lcd_g_3", std),
448 CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_4, "lcd_g_4", std),
449 CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_5, "lcd_g_5", std),
450 CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_6, "lcd_g_6", std),
451 CAPRI_PIN_DESC(CAPRI_PIN_LCD_G_7, "lcd_g_7", std),
452 CAPRI_PIN_DESC(CAPRI_PIN_LCD_HSYNC, "lcd_hsync", std),
453 CAPRI_PIN_DESC(CAPRI_PIN_LCD_OE, "lcd_oe", std),
454 CAPRI_PIN_DESC(CAPRI_PIN_LCD_PCLK, "lcd_pclk", std),
455 CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_0, "lcd_r_0", std),
456 CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_1, "lcd_r_1", std),
457 CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_2, "lcd_r_2", std),
458 CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_3, "lcd_r_3", std),
459 CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_4, "lcd_r_4", std),
460 CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_5, "lcd_r_5", std),
461 CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_6, "lcd_r_6", std),
462 CAPRI_PIN_DESC(CAPRI_PIN_LCD_R_7, "lcd_r_7", std),
463 CAPRI_PIN_DESC(CAPRI_PIN_LCD_VSYNC, "lcd_vsync", std),
464 CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO0, "mdmgpio0", std),
465 CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO1, "mdmgpio1", std),
466 CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO2, "mdmgpio2", std),
467 CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO3, "mdmgpio3", std),
468 CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO4, "mdmgpio4", std),
469 CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO5, "mdmgpio5", std),
470 CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO6, "mdmgpio6", std),
471 CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO7, "mdmgpio7", std),
472 CAPRI_PIN_DESC(CAPRI_PIN_MDMGPIO8, "mdmgpio8", std),
473 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_0, "mphi_data_0", std),
474 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_1, "mphi_data_1", std),
475 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_2, "mphi_data_2", std),
476 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_3, "mphi_data_3", std),
477 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_4, "mphi_data_4", std),
478 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_5, "mphi_data_5", std),
479 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_6, "mphi_data_6", std),
480 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_7, "mphi_data_7", std),
481 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_8, "mphi_data_8", std),
482 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_9, "mphi_data_9", std),
483 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_10, "mphi_data_10", std),
484 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_11, "mphi_data_11", std),
485 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_12, "mphi_data_12", std),
486 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_13, "mphi_data_13", std),
487 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_14, "mphi_data_14", std),
488 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_DATA_15, "mphi_data_15", std),
489 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HA0, "mphi_ha0", std),
490 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HAT0, "mphi_hat0", std),
491 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HAT1, "mphi_hat1", std),
492 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HCE0_N, "mphi_hce0_n", std),
493 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HCE1_N, "mphi_hce1_n", std),
494 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HRD_N, "mphi_hrd_n", std),
495 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_HWR_N, "mphi_hwr_n", std),
496 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_RUN0, "mphi_run0", std),
497 CAPRI_PIN_DESC(CAPRI_PIN_MPHI_RUN1, "mphi_run1", std),
498 CAPRI_PIN_DESC(CAPRI_PIN_MTX_SCAN_CLK, "mtx_scan_clk", std),
499 CAPRI_PIN_DESC(CAPRI_PIN_MTX_SCAN_DATA, "mtx_scan_data", std),
500 CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_0, "nand_ad_0", std),
501 CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_1, "nand_ad_1", std),
502 CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_2, "nand_ad_2", std),
503 CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_3, "nand_ad_3", std),
504 CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_4, "nand_ad_4", std),
505 CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_5, "nand_ad_5", std),
506 CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_6, "nand_ad_6", std),
507 CAPRI_PIN_DESC(CAPRI_PIN_NAND_AD_7, "nand_ad_7", std),
508 CAPRI_PIN_DESC(CAPRI_PIN_NAND_ALE, "nand_ale", std),
509 CAPRI_PIN_DESC(CAPRI_PIN_NAND_CEN_0, "nand_cen_0", std),
510 CAPRI_PIN_DESC(CAPRI_PIN_NAND_CEN_1, "nand_cen_1", std),
511 CAPRI_PIN_DESC(CAPRI_PIN_NAND_CLE, "nand_cle", std),
512 CAPRI_PIN_DESC(CAPRI_PIN_NAND_OEN, "nand_oen", std),
513 CAPRI_PIN_DESC(CAPRI_PIN_NAND_RDY_0, "nand_rdy_0", std),
514 CAPRI_PIN_DESC(CAPRI_PIN_NAND_RDY_1, "nand_rdy_1", std),
515 CAPRI_PIN_DESC(CAPRI_PIN_NAND_WEN, "nand_wen", std),
516 CAPRI_PIN_DESC(CAPRI_PIN_NAND_WP, "nand_wp", std),
517 CAPRI_PIN_DESC(CAPRI_PIN_PC1, "pc1", std),
518 CAPRI_PIN_DESC(CAPRI_PIN_PC2, "pc2", std),
519 CAPRI_PIN_DESC(CAPRI_PIN_PMU_INT, "pmu_int", std),
520 CAPRI_PIN_DESC(CAPRI_PIN_PMU_SCL, "pmu_scl", i2c),
521 CAPRI_PIN_DESC(CAPRI_PIN_PMU_SDA, "pmu_sda", i2c),
522 CAPRI_PIN_DESC(CAPRI_PIN_RFST2G_MTSLOTEN3G, "rfst2g_mtsloten3g", std),
523 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RX_CTL, "rgmii_0_rx_ctl", std),
524 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXC, "rgmii_0_rxc", std),
525 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXD_0, "rgmii_0_rxd_0", std),
526 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXD_1, "rgmii_0_rxd_1", std),
527 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXD_2, "rgmii_0_rxd_2", std),
528 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_RXD_3, "rgmii_0_rxd_3", std),
529 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TX_CTL, "rgmii_0_tx_ctl", std),
530 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXC, "rgmii_0_txc", std),
531 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXD_0, "rgmii_0_txd_0", std),
532 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXD_1, "rgmii_0_txd_1", std),
533 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXD_2, "rgmii_0_txd_2", std),
534 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_0_TXD_3, "rgmii_0_txd_3", std),
535 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RX_CTL, "rgmii_1_rx_ctl", std),
536 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXC, "rgmii_1_rxc", std),
537 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXD_0, "rgmii_1_rxd_0", std),
538 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXD_1, "rgmii_1_rxd_1", std),
539 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXD_2, "rgmii_1_rxd_2", std),
540 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_RXD_3, "rgmii_1_rxd_3", std),
541 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TX_CTL, "rgmii_1_tx_ctl", std),
542 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXC, "rgmii_1_txc", std),
543 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXD_0, "rgmii_1_txd_0", std),
544 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXD_1, "rgmii_1_txd_1", std),
545 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXD_2, "rgmii_1_txd_2", std),
546 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_1_TXD_3, "rgmii_1_txd_3", std),
547 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_GPIO_0, "rgmii_gpio_0", std),
548 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_GPIO_1, "rgmii_gpio_1", std),
549 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_GPIO_2, "rgmii_gpio_2", std),
550 CAPRI_PIN_DESC(CAPRI_PIN_RGMII_GPIO_3, "rgmii_gpio_3", std),
551 CAPRI_PIN_DESC(CAPRI_PIN_RTXDATA2G_TXDATA3G1, "rtxdata2g_txdata3g1",
552 std),
553 CAPRI_PIN_DESC(CAPRI_PIN_RTXEN2G_TXDATA3G2, "rtxen2g_txdata3g2", std),
554 CAPRI_PIN_DESC(CAPRI_PIN_RXDATA3G0, "rxdata3g0", std),
555 CAPRI_PIN_DESC(CAPRI_PIN_RXDATA3G1, "rxdata3g1", std),
556 CAPRI_PIN_DESC(CAPRI_PIN_RXDATA3G2, "rxdata3g2", std),
557 CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_CLK, "sdio1_clk", std),
558 CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_CMD, "sdio1_cmd", std),
559 CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_DATA_0, "sdio1_data_0", std),
560 CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_DATA_1, "sdio1_data_1", std),
561 CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_DATA_2, "sdio1_data_2", std),
562 CAPRI_PIN_DESC(CAPRI_PIN_SDIO1_DATA_3, "sdio1_data_3", std),
563 CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_CLK, "sdio4_clk", std),
564 CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_CMD, "sdio4_cmd", std),
565 CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_DATA_0, "sdio4_data_0", std),
566 CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_DATA_1, "sdio4_data_1", std),
567 CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_DATA_2, "sdio4_data_2", std),
568 CAPRI_PIN_DESC(CAPRI_PIN_SDIO4_DATA_3, "sdio4_data_3", std),
569 CAPRI_PIN_DESC(CAPRI_PIN_SIM_CLK, "sim_clk", std),
570 CAPRI_PIN_DESC(CAPRI_PIN_SIM_DATA, "sim_data", std),
571 CAPRI_PIN_DESC(CAPRI_PIN_SIM_DET, "sim_det", std),
572 CAPRI_PIN_DESC(CAPRI_PIN_SIM_RESETN, "sim_resetn", std),
573 CAPRI_PIN_DESC(CAPRI_PIN_SIM2_CLK, "sim2_clk", std),
574 CAPRI_PIN_DESC(CAPRI_PIN_SIM2_DATA, "sim2_data", std),
575 CAPRI_PIN_DESC(CAPRI_PIN_SIM2_DET, "sim2_det", std),
576 CAPRI_PIN_DESC(CAPRI_PIN_SIM2_RESETN, "sim2_resetn", std),
577 CAPRI_PIN_DESC(CAPRI_PIN_SRI_C, "sri_c", std),
578 CAPRI_PIN_DESC(CAPRI_PIN_SRI_D, "sri_d", std),
579 CAPRI_PIN_DESC(CAPRI_PIN_SRI_E, "sri_e", std),
580 CAPRI_PIN_DESC(CAPRI_PIN_SSP_EXTCLK, "ssp_extclk", std),
581 CAPRI_PIN_DESC(CAPRI_PIN_SSP0_CLK, "ssp0_clk", std),
582 CAPRI_PIN_DESC(CAPRI_PIN_SSP0_FS, "ssp0_fs", std),
583 CAPRI_PIN_DESC(CAPRI_PIN_SSP0_RXD, "ssp0_rxd", std),
584 CAPRI_PIN_DESC(CAPRI_PIN_SSP0_TXD, "ssp0_txd", std),
585 CAPRI_PIN_DESC(CAPRI_PIN_SSP2_CLK, "ssp2_clk", std),
586 CAPRI_PIN_DESC(CAPRI_PIN_SSP2_FS_0, "ssp2_fs_0", std),
587 CAPRI_PIN_DESC(CAPRI_PIN_SSP2_FS_1, "ssp2_fs_1", std),
588 CAPRI_PIN_DESC(CAPRI_PIN_SSP2_FS_2, "ssp2_fs_2", std),
589 CAPRI_PIN_DESC(CAPRI_PIN_SSP2_FS_3, "ssp2_fs_3", std),
590 CAPRI_PIN_DESC(CAPRI_PIN_SSP2_RXD_0, "ssp2_rxd_0", std),
591 CAPRI_PIN_DESC(CAPRI_PIN_SSP2_RXD_1, "ssp2_rxd_1", std),
592 CAPRI_PIN_DESC(CAPRI_PIN_SSP2_TXD_0, "ssp2_txd_0", std),
593 CAPRI_PIN_DESC(CAPRI_PIN_SSP2_TXD_1, "ssp2_txd_1", std),
594 CAPRI_PIN_DESC(CAPRI_PIN_SSP3_CLK, "ssp3_clk", std),
595 CAPRI_PIN_DESC(CAPRI_PIN_SSP3_FS, "ssp3_fs", std),
596 CAPRI_PIN_DESC(CAPRI_PIN_SSP3_RXD, "ssp3_rxd", std),
597 CAPRI_PIN_DESC(CAPRI_PIN_SSP3_TXD, "ssp3_txd", std),
598 CAPRI_PIN_DESC(CAPRI_PIN_SSP4_CLK, "ssp4_clk", std),
599 CAPRI_PIN_DESC(CAPRI_PIN_SSP4_FS, "ssp4_fs", std),
600 CAPRI_PIN_DESC(CAPRI_PIN_SSP4_RXD, "ssp4_rxd", std),
601 CAPRI_PIN_DESC(CAPRI_PIN_SSP4_TXD, "ssp4_txd", std),
602 CAPRI_PIN_DESC(CAPRI_PIN_SSP5_CLK, "ssp5_clk", std),
603 CAPRI_PIN_DESC(CAPRI_PIN_SSP5_FS, "ssp5_fs", std),
604 CAPRI_PIN_DESC(CAPRI_PIN_SSP5_RXD, "ssp5_rxd", std),
605 CAPRI_PIN_DESC(CAPRI_PIN_SSP5_TXD, "ssp5_txd", std),
606 CAPRI_PIN_DESC(CAPRI_PIN_SSP6_CLK, "ssp6_clk", std),
607 CAPRI_PIN_DESC(CAPRI_PIN_SSP6_FS, "ssp6_fs", std),
608 CAPRI_PIN_DESC(CAPRI_PIN_SSP6_RXD, "ssp6_rxd", std),
609 CAPRI_PIN_DESC(CAPRI_PIN_SSP6_TXD, "ssp6_txd", std),
610 CAPRI_PIN_DESC(CAPRI_PIN_STAT_1, "stat_1", std),
611 CAPRI_PIN_DESC(CAPRI_PIN_STAT_2, "stat_2", std),
612 CAPRI_PIN_DESC(CAPRI_PIN_SYSCLKEN, "sysclken", std),
613 CAPRI_PIN_DESC(CAPRI_PIN_TRACECLK, "traceclk", std),
614 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT00, "tracedt00", std),
615 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT01, "tracedt01", std),
616 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT02, "tracedt02", std),
617 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT03, "tracedt03", std),
618 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT04, "tracedt04", std),
619 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT05, "tracedt05", std),
620 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT06, "tracedt06", std),
621 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT07, "tracedt07", std),
622 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT08, "tracedt08", std),
623 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT09, "tracedt09", std),
624 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT10, "tracedt10", std),
625 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT11, "tracedt11", std),
626 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT12, "tracedt12", std),
627 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT13, "tracedt13", std),
628 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT14, "tracedt14", std),
629 CAPRI_PIN_DESC(CAPRI_PIN_TRACEDT15, "tracedt15", std),
630 CAPRI_PIN_DESC(CAPRI_PIN_TXDATA3G0, "txdata3g0", std),
631 CAPRI_PIN_DESC(CAPRI_PIN_TXPWRIND, "txpwrind", std),
632 CAPRI_PIN_DESC(CAPRI_PIN_UARTB1_UCTS, "uartb1_ucts", std),
633 CAPRI_PIN_DESC(CAPRI_PIN_UARTB1_URTS, "uartb1_urts", std),
634 CAPRI_PIN_DESC(CAPRI_PIN_UARTB1_URXD, "uartb1_urxd", std),
635 CAPRI_PIN_DESC(CAPRI_PIN_UARTB1_UTXD, "uartb1_utxd", std),
636 CAPRI_PIN_DESC(CAPRI_PIN_UARTB2_URXD, "uartb2_urxd", std),
637 CAPRI_PIN_DESC(CAPRI_PIN_UARTB2_UTXD, "uartb2_utxd", std),
638 CAPRI_PIN_DESC(CAPRI_PIN_UARTB3_UCTS, "uartb3_ucts", std),
639 CAPRI_PIN_DESC(CAPRI_PIN_UARTB3_URTS, "uartb3_urts", std),
640 CAPRI_PIN_DESC(CAPRI_PIN_UARTB3_URXD, "uartb3_urxd", std),
641 CAPRI_PIN_DESC(CAPRI_PIN_UARTB3_UTXD, "uartb3_utxd", std),
642 CAPRI_PIN_DESC(CAPRI_PIN_UARTB4_UCTS, "uartb4_ucts", std),
643 CAPRI_PIN_DESC(CAPRI_PIN_UARTB4_URTS, "uartb4_urts", std),
644 CAPRI_PIN_DESC(CAPRI_PIN_UARTB4_URXD, "uartb4_urxd", std),
645 CAPRI_PIN_DESC(CAPRI_PIN_UARTB4_UTXD, "uartb4_utxd", std),
646 CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM1_SCL, "vc_cam1_scl", i2c),
647 CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM1_SDA, "vc_cam1_sda", i2c),
648 CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM2_SCL, "vc_cam2_scl", i2c),
649 CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM2_SDA, "vc_cam2_sda", i2c),
650 CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM3_SCL, "vc_cam3_scl", i2c),
651 CAPRI_PIN_DESC(CAPRI_PIN_VC_CAM3_SDA, "vc_cam3_sda", i2c),
652};
653
654static const char * const capri_alt_groups[] = {
655 "adcsync",
656 "bat_rm",
657 "bsc1_scl",
658 "bsc1_sda",
659 "bsc2_scl",
660 "bsc2_sda",
661 "classgpwr",
662 "clk_cx8",
663 "clkout_0",
664 "clkout_1",
665 "clkout_2",
666 "clkout_3",
667 "clkreq_in_0",
668 "clkreq_in_1",
669 "cws_sys_req1",
670 "cws_sys_req2",
671 "cws_sys_req3",
672 "digmic1_clk",
673 "digmic1_dq",
674 "digmic2_clk",
675 "digmic2_dq",
676 "gpen13",
677 "gpen14",
678 "gpen15",
679 "gpio00",
680 "gpio01",
681 "gpio02",
682 "gpio03",
683 "gpio04",
684 "gpio05",
685 "gpio06",
686 "gpio07",
687 "gpio08",
688 "gpio09",
689 "gpio10",
690 "gpio11",
691 "gpio12",
692 "gpio13",
693 "gpio14",
694 "gps_pablank",
695 "gps_tmark",
696 "hdmi_scl",
697 "hdmi_sda",
698 "ic_dm",
699 "ic_dp",
700 "kp_col_ip_0",
701 "kp_col_ip_1",
702 "kp_col_ip_2",
703 "kp_col_ip_3",
704 "kp_row_op_0",
705 "kp_row_op_1",
706 "kp_row_op_2",
707 "kp_row_op_3",
708 "lcd_b_0",
709 "lcd_b_1",
710 "lcd_b_2",
711 "lcd_b_3",
712 "lcd_b_4",
713 "lcd_b_5",
714 "lcd_b_6",
715 "lcd_b_7",
716 "lcd_g_0",
717 "lcd_g_1",
718 "lcd_g_2",
719 "lcd_g_3",
720 "lcd_g_4",
721 "lcd_g_5",
722 "lcd_g_6",
723 "lcd_g_7",
724 "lcd_hsync",
725 "lcd_oe",
726 "lcd_pclk",
727 "lcd_r_0",
728 "lcd_r_1",
729 "lcd_r_2",
730 "lcd_r_3",
731 "lcd_r_4",
732 "lcd_r_5",
733 "lcd_r_6",
734 "lcd_r_7",
735 "lcd_vsync",
736 "mdmgpio0",
737 "mdmgpio1",
738 "mdmgpio2",
739 "mdmgpio3",
740 "mdmgpio4",
741 "mdmgpio5",
742 "mdmgpio6",
743 "mdmgpio7",
744 "mdmgpio8",
745 "mphi_data_0",
746 "mphi_data_1",
747 "mphi_data_2",
748 "mphi_data_3",
749 "mphi_data_4",
750 "mphi_data_5",
751 "mphi_data_6",
752 "mphi_data_7",
753 "mphi_data_8",
754 "mphi_data_9",
755 "mphi_data_10",
756 "mphi_data_11",
757 "mphi_data_12",
758 "mphi_data_13",
759 "mphi_data_14",
760 "mphi_data_15",
761 "mphi_ha0",
762 "mphi_hat0",
763 "mphi_hat1",
764 "mphi_hce0_n",
765 "mphi_hce1_n",
766 "mphi_hrd_n",
767 "mphi_hwr_n",
768 "mphi_run0",
769 "mphi_run1",
770 "mtx_scan_clk",
771 "mtx_scan_data",
772 "nand_ad_0",
773 "nand_ad_1",
774 "nand_ad_2",
775 "nand_ad_3",
776 "nand_ad_4",
777 "nand_ad_5",
778 "nand_ad_6",
779 "nand_ad_7",
780 "nand_ale",
781 "nand_cen_0",
782 "nand_cen_1",
783 "nand_cle",
784 "nand_oen",
785 "nand_rdy_0",
786 "nand_rdy_1",
787 "nand_wen",
788 "nand_wp",
789 "pc1",
790 "pc2",
791 "pmu_int",
792 "pmu_scl",
793 "pmu_sda",
794 "rfst2g_mtsloten3g",
795 "rgmii_0_rx_ctl",
796 "rgmii_0_rxc",
797 "rgmii_0_rxd_0",
798 "rgmii_0_rxd_1",
799 "rgmii_0_rxd_2",
800 "rgmii_0_rxd_3",
801 "rgmii_0_tx_ctl",
802 "rgmii_0_txc",
803 "rgmii_0_txd_0",
804 "rgmii_0_txd_1",
805 "rgmii_0_txd_2",
806 "rgmii_0_txd_3",
807 "rgmii_1_rx_ctl",
808 "rgmii_1_rxc",
809 "rgmii_1_rxd_0",
810 "rgmii_1_rxd_1",
811 "rgmii_1_rxd_2",
812 "rgmii_1_rxd_3",
813 "rgmii_1_tx_ctl",
814 "rgmii_1_txc",
815 "rgmii_1_txd_0",
816 "rgmii_1_txd_1",
817 "rgmii_1_txd_2",
818 "rgmii_1_txd_3",
819 "rgmii_gpio_0",
820 "rgmii_gpio_1",
821 "rgmii_gpio_2",
822 "rgmii_gpio_3",
823 "rtxdata2g_txdata3g1",
824 "rtxen2g_txdata3g2",
825 "rxdata3g0",
826 "rxdata3g1",
827 "rxdata3g2",
828 "sdio1_clk",
829 "sdio1_cmd",
830 "sdio1_data_0",
831 "sdio1_data_1",
832 "sdio1_data_2",
833 "sdio1_data_3",
834 "sdio4_clk",
835 "sdio4_cmd",
836 "sdio4_data_0",
837 "sdio4_data_1",
838 "sdio4_data_2",
839 "sdio4_data_3",
840 "sim_clk",
841 "sim_data",
842 "sim_det",
843 "sim_resetn",
844 "sim2_clk",
845 "sim2_data",
846 "sim2_det",
847 "sim2_resetn",
848 "sri_c",
849 "sri_d",
850 "sri_e",
851 "ssp_extclk",
852 "ssp0_clk",
853 "ssp0_fs",
854 "ssp0_rxd",
855 "ssp0_txd",
856 "ssp2_clk",
857 "ssp2_fs_0",
858 "ssp2_fs_1",
859 "ssp2_fs_2",
860 "ssp2_fs_3",
861 "ssp2_rxd_0",
862 "ssp2_rxd_1",
863 "ssp2_txd_0",
864 "ssp2_txd_1",
865 "ssp3_clk",
866 "ssp3_fs",
867 "ssp3_rxd",
868 "ssp3_txd",
869 "ssp4_clk",
870 "ssp4_fs",
871 "ssp4_rxd",
872 "ssp4_txd",
873 "ssp5_clk",
874 "ssp5_fs",
875 "ssp5_rxd",
876 "ssp5_txd",
877 "ssp6_clk",
878 "ssp6_fs",
879 "ssp6_rxd",
880 "ssp6_txd",
881 "stat_1",
882 "stat_2",
883 "sysclken",
884 "traceclk",
885 "tracedt00",
886 "tracedt01",
887 "tracedt02",
888 "tracedt03",
889 "tracedt04",
890 "tracedt05",
891 "tracedt06",
892 "tracedt07",
893 "tracedt08",
894 "tracedt09",
895 "tracedt10",
896 "tracedt11",
897 "tracedt12",
898 "tracedt13",
899 "tracedt14",
900 "tracedt15",
901 "txdata3g0",
902 "txpwrind",
903 "uartb1_ucts",
904 "uartb1_urts",
905 "uartb1_urxd",
906 "uartb1_utxd",
907 "uartb2_urxd",
908 "uartb2_utxd",
909 "uartb3_ucts",
910 "uartb3_urts",
911 "uartb3_urxd",
912 "uartb3_utxd",
913 "uartb4_ucts",
914 "uartb4_urts",
915 "uartb4_urxd",
916 "uartb4_utxd",
917 "vc_cam1_scl",
918 "vc_cam1_sda",
919 "vc_cam2_scl",
920 "vc_cam2_sda",
921 "vc_cam3_scl",
922 "vc_cam3_sda",
923};
924
925/* Every pin can implement all ALT1-ALT4 functions */
926#define CAPRI_PIN_FUNCTION(fcn_name) \
927{ \
928 .name = #fcn_name, \
929 .groups = capri_alt_groups, \
930 .ngroups = ARRAY_SIZE(capri_alt_groups), \
931}
932
933static const struct capri_pin_function capri_functions[] = {
934 CAPRI_PIN_FUNCTION(alt1),
935 CAPRI_PIN_FUNCTION(alt2),
936 CAPRI_PIN_FUNCTION(alt3),
937 CAPRI_PIN_FUNCTION(alt4),
938};
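/*
 * Editor's note (illustrative, not part of the driver): each
 * CAPRI_PIN_FUNCTION(altN) entry above expands to a designated initializer
 * of the form
 *
 *	{
 *		.name = "altN",
 *		.groups = capri_alt_groups,
 *		.ngroups = ARRAY_SIZE(capri_alt_groups),
 *	}
 *
 * so all four ALT function selectors expose the same full group list.
 */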
939
940static struct capri_pinctrl_data capri_pinctrl = {
941 .pins = capri_pinctrl_pins,
942 .npins = ARRAY_SIZE(capri_pinctrl_pins),
943 .functions = capri_functions,
944 .nfunctions = ARRAY_SIZE(capri_functions),
945};
946
947static inline enum capri_pin_type pin_type_get(struct pinctrl_dev *pctldev,
948 unsigned pin)
949{
950 struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
951
952 if (pin >= pdata->npins)
953 return CAPRI_PIN_TYPE_UNKNOWN;
954
955 return *(enum capri_pin_type *)(pdata->pins[pin].drv_data);
956}
957
958#define CAPRI_PIN_SHIFT(type, param) \
959 (CAPRI_ ## type ## _PIN_REG_ ## param ## _SHIFT)
960
961#define CAPRI_PIN_MASK(type, param) \
962 (CAPRI_ ## type ## _PIN_REG_ ## param ## _MASK)
963
964/*
965 * This helper builds up the value and mask used to write to a pin register,
966 * but does not actually write to the register.
967 */
968static inline void capri_pin_update(u32 *reg_val, u32 *reg_mask, u32 param_val,
969 u32 param_shift, u32 param_mask)
970{
971 *reg_val &= ~param_mask;
972 *reg_val |= (param_val << param_shift) & param_mask;
973 *reg_mask |= param_mask;
974}
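/*
 * Editor's note (illustrative usage, not part of the driver): callers such as
 * capri_pinctrl_pin_config_set() below accumulate several fields into one
 * value/mask pair and then issue a single register write, e.g. for a
 * pull-down configuration (assuming pdata and pin are in scope):
 *
 *	u32 val = 0, mask = 0;
 *
 *	capri_pin_update(&val, &mask, 0, CAPRI_PIN_SHIFT(STD, PULL_UP),
 *			 CAPRI_PIN_MASK(STD, PULL_UP));
 *	capri_pin_update(&val, &mask, 1, CAPRI_PIN_SHIFT(STD, PULL_DN),
 *			 CAPRI_PIN_MASK(STD, PULL_DN));
 *	regmap_update_bits(pdata->regmap, 4 * pin, mask, val);
 */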
975
976static struct regmap_config capri_pinctrl_regmap_config = {
977 .reg_bits = 32,
978 .reg_stride = 4,
979 .val_bits = 32,
980 .max_register = CAPRI_PIN_VC_CAM3_SDA,
981};
982
983static int capri_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
984{
985 struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
986
987 return pdata->npins;
988}
989
990static const char *capri_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
991 unsigned group)
992{
993 struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
994
995 return pdata->pins[group].name;
996}
997
998static int capri_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
999 unsigned group,
1000 const unsigned **pins,
1001 unsigned *num_pins)
1002{
1003 struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1004
1005 *pins = &pdata->pins[group].number;
1006 *num_pins = 1;
1007
1008 return 0;
1009}
1010
1011static void capri_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
1012 struct seq_file *s,
1013 unsigned offset)
1014{
1015 seq_printf(s, " %s", dev_name(pctldev->dev));
1016}
1017
1018static struct pinctrl_ops capri_pinctrl_ops = {
1019 .get_groups_count = capri_pinctrl_get_groups_count,
1020 .get_group_name = capri_pinctrl_get_group_name,
1021 .get_group_pins = capri_pinctrl_get_group_pins,
1022 .pin_dbg_show = capri_pinctrl_pin_dbg_show,
1023 .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
1024 .dt_free_map = pinctrl_utils_dt_free_map,
1025};
1026
1027static int capri_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev)
1028{
1029 struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1030
1031 return pdata->nfunctions;
1032}
1033
1034static const char *capri_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev,
1035 unsigned function)
1036{
1037 struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1038
1039 return pdata->functions[function].name;
1040}
1041
1042static int capri_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev,
1043 unsigned function,
1044 const char * const **groups,
1045 unsigned * const num_groups)
1046{
1047 struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1048
1049 *groups = pdata->functions[function].groups;
1050 *num_groups = pdata->functions[function].ngroups;
1051
1052 return 0;
1053}
1054
1055static int capri_pinmux_enable(struct pinctrl_dev *pctldev,
1056 unsigned function,
1057 unsigned group)
1058{
1059 struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1060 const struct capri_pin_function *f = &pdata->functions[function];
1061 u32 offset = 4 * pdata->pins[group].number;
1062 int rc = 0;
1063
1064 dev_dbg(pctldev->dev,
1065 "%s(): Enable function %s (%d) of pin %s (%d) @offset 0x%x.\n",
1066 __func__, f->name, function, pdata->pins[group].name,
1067 pdata->pins[group].number, offset);
1068
1069 rc = regmap_update_bits(pdata->regmap, offset, CAPRI_PIN_REG_F_SEL_MASK,
1070 function << CAPRI_PIN_REG_F_SEL_SHIFT);
1071 if (rc)
1072 dev_err(pctldev->dev,
1073 "Error updating register for pin %s (%d).\n",
1074 pdata->pins[group].name, pdata->pins[group].number);
1075
1076 return rc;
1077}
1078
1079static struct pinmux_ops capri_pinctrl_pinmux_ops = {
1080 .get_functions_count = capri_pinctrl_get_fcns_count,
1081 .get_function_name = capri_pinctrl_get_fcn_name,
1082 .get_function_groups = capri_pinctrl_get_fcn_groups,
1083 .enable = capri_pinmux_enable,
1084};
1085
1086static int capri_pinctrl_pin_config_get(struct pinctrl_dev *pctldev,
1087 unsigned pin,
1088 unsigned long *config)
1089{
1090 return -ENOTSUPP;
1091}
1092
1093
1094/* Go through the configs and update the register val/mask */
1095static int capri_std_pin_update(struct pinctrl_dev *pctldev,
1096 unsigned pin,
1097 unsigned long *configs,
1098 unsigned num_configs,
1099 u32 *val,
1100 u32 *mask)
1101{
1102 struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1103 int i;
1104 enum pin_config_param param;
1105 u16 arg;
1106
1107 for (i = 0; i < num_configs; i++) {
1108 param = pinconf_to_config_param(configs[i]);
1109 arg = pinconf_to_config_argument(configs[i]);
1110
1111 switch (param) {
1112 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
1113 arg = (arg >= 1 ? 1 : 0);
1114 capri_pin_update(val, mask, arg,
1115 CAPRI_PIN_SHIFT(STD, HYST),
1116 CAPRI_PIN_MASK(STD, HYST));
1117 break;
1118 /*
1119 * The pin bias can only be one of pull-up, pull-down, or
1120 * disable. The user does not need to specify a value for the
1121 * property, and the default value from pinconf-generic is
1122 * ignored.
1123 */
1124 case PIN_CONFIG_BIAS_DISABLE:
1125 capri_pin_update(val, mask, 0,
1126 CAPRI_PIN_SHIFT(STD, PULL_UP),
1127 CAPRI_PIN_MASK(STD, PULL_UP));
1128 capri_pin_update(val, mask, 0,
1129 CAPRI_PIN_SHIFT(STD, PULL_DN),
1130 CAPRI_PIN_MASK(STD, PULL_DN));
1131 break;
1132
1133 case PIN_CONFIG_BIAS_PULL_UP:
1134 capri_pin_update(val, mask, 1,
1135 CAPRI_PIN_SHIFT(STD, PULL_UP),
1136 CAPRI_PIN_MASK(STD, PULL_UP));
1137 capri_pin_update(val, mask, 0,
1138 CAPRI_PIN_SHIFT(STD, PULL_DN),
1139 CAPRI_PIN_MASK(STD, PULL_DN));
1140 break;
1141
1142 case PIN_CONFIG_BIAS_PULL_DOWN:
1143 capri_pin_update(val, mask, 0,
1144 CAPRI_PIN_SHIFT(STD, PULL_UP),
1145 CAPRI_PIN_MASK(STD, PULL_UP));
1146 capri_pin_update(val, mask, 1,
1147 CAPRI_PIN_SHIFT(STD, PULL_DN),
1148 CAPRI_PIN_MASK(STD, PULL_DN));
1149 break;
1150
1151 case PIN_CONFIG_SLEW_RATE:
1152 arg = (arg >= 1 ? 1 : 0);
1153 capri_pin_update(val, mask, arg,
1154 CAPRI_PIN_SHIFT(STD, SLEW),
1155 CAPRI_PIN_MASK(STD, SLEW));
1156 break;
1157
1158 case PIN_CONFIG_INPUT_ENABLE:
1159 /* inverted since the register is for input _disable_ */
1160 arg = (arg >= 1 ? 0 : 1);
1161 capri_pin_update(val, mask, arg,
1162 CAPRI_PIN_SHIFT(STD, INPUT_DIS),
1163 CAPRI_PIN_MASK(STD, INPUT_DIS));
1164 break;
1165
1166 case PIN_CONFIG_DRIVE_STRENGTH:
1167 /* Valid range is 2-16 mA, even numbers only */
1168 if ((arg < 2) || (arg > 16) || (arg % 2)) {
1169 dev_err(pctldev->dev,
1170 "Invalid Drive Strength value (%d) for "
1171 "pin %s (%d). Valid values are "
1172 "(2..16) mA, even numbers only.\n",
1173 arg, pdata->pins[pin].name, pin);
1174 return -EINVAL;
1175 }
1176 capri_pin_update(val, mask, (arg/2)-1,
1177 CAPRI_PIN_SHIFT(STD, DRV_STR),
1178 CAPRI_PIN_MASK(STD, DRV_STR));
1179 break;
1180
1181 default:
1182 dev_err(pctldev->dev,
1183 "Unrecognized pin config %d for pin %s (%d).\n",
1184 param, pdata->pins[pin].name, pin);
1185 return -EINVAL;
1186
1187 } /* switch config */
1188 } /* for each config */
1189
1190 return 0;
1191}
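/*
 * Editor's note (worked example, not part of the driver): with the encoding
 * in capri_std_pin_update() above, a drive-strength request of 8 mA passes
 * the range check and is written to the DRV_STR field as (8 / 2) - 1 = 3;
 * 2 mA maps to 0 and 16 mA to 7, so the field covers the whole 2..16 mA
 * range in 2 mA steps.
 */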
1192
1193/*
1194 * The pull-up strength for an I2C pin is represented by bits 4-6 in the
1195 * register with the following mapping:
1196 * 0b000: No pull-up
1197 * 0b001: 1200 Ohm
1198 * 0b010: 1800 Ohm
1199 * 0b011: 720 Ohm
1200 * 0b100: 2700 Ohm
1201 * 0b101: 831 Ohm
1202 * 0b110: 1080 Ohm
1203 * 0b111: 568 Ohm
1204 * This array maps pull-up strength in Ohms to register values (1+index).
1205 */
1206static const u16 capri_pullup_map[] = {1200, 1800, 720, 2700, 831, 1080, 568};
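/*
 * Editor's note (worked example, not part of the driver): a
 * PIN_CONFIG_BIAS_PULL_UP request with argument 1200 (e.g. from a
 * "bias-pull-up = <1200>" devicetree property) matches capri_pullup_map[0],
 * so capri_i2c_pin_update() below writes 0 + 1 = 1, the 0b001/1200 Ohm
 * encoding listed above; an unlisted value such as 1000 is rejected with
 * -EINVAL.
 */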
1207
1208/* Go through the configs and update the register val/mask */
1209static int capri_i2c_pin_update(struct pinctrl_dev *pctldev,
1210 unsigned pin,
1211 unsigned long *configs,
1212 unsigned num_configs,
1213 u32 *val,
1214 u32 *mask)
1215{
1216 struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1217 int i, j;
1218 enum pin_config_param param;
1219 u16 arg;
1220
1221 for (i = 0; i < num_configs; i++) {
1222 param = pinconf_to_config_param(configs[i]);
1223 arg = pinconf_to_config_argument(configs[i]);
1224
1225 switch (param) {
1226 case PIN_CONFIG_BIAS_PULL_UP:
1227 for (j = 0; j < ARRAY_SIZE(capri_pullup_map); j++)
1228 if (capri_pullup_map[j] == arg)
1229 break;
1230
1231 if (j == ARRAY_SIZE(capri_pullup_map)) {
1232 dev_err(pctldev->dev,
1233 "Invalid pull-up value (%d) for pin %s "
1234 "(%d). Valid values are 568, 720, 831, "
1235 "1080, 1200, 1800, 2700 Ohms.\n",
1236 arg, pdata->pins[pin].name, pin);
1237 return -EINVAL;
1238 }
1239
1240 capri_pin_update(val, mask, j+1,
1241 CAPRI_PIN_SHIFT(I2C, PULL_UP_STR),
1242 CAPRI_PIN_MASK(I2C, PULL_UP_STR));
1243 break;
1244
1245 case PIN_CONFIG_BIAS_DISABLE:
1246 capri_pin_update(val, mask, 0,
1247 CAPRI_PIN_SHIFT(I2C, PULL_UP_STR),
1248 CAPRI_PIN_MASK(I2C, PULL_UP_STR));
1249 break;
1250
1251 case PIN_CONFIG_SLEW_RATE:
1252 arg = (arg >= 1 ? 1 : 0);
1253 capri_pin_update(val, mask, arg,
1254 CAPRI_PIN_SHIFT(I2C, SLEW),
1255 CAPRI_PIN_MASK(I2C, SLEW));
1256 break;
1257
1258 case PIN_CONFIG_INPUT_ENABLE:
1259 /* inverted since the register is for input _disable_ */
1260 arg = (arg >= 1 ? 0 : 1);
1261 capri_pin_update(val, mask, arg,
1262 CAPRI_PIN_SHIFT(I2C, INPUT_DIS),
1263 CAPRI_PIN_MASK(I2C, INPUT_DIS));
1264 break;
1265
1266 default:
1267 dev_err(pctldev->dev,
1268 "Unrecognized pin config %d for pin %s (%d).\n",
1269 param, pdata->pins[pin].name, pin);
1270 return -EINVAL;
1271
1272 } /* switch config */
1273 } /* for each config */
1274
1275 return 0;
1276}
1277
1278/* Go through the configs and update the register val/mask */
1279static int capri_hdmi_pin_update(struct pinctrl_dev *pctldev,
1280 unsigned pin,
1281 unsigned long *configs,
1282 unsigned num_configs,
1283 u32 *val,
1284 u32 *mask)
1285{
1286 struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1287 int i;
1288 enum pin_config_param param;
1289 u16 arg;
1290
1291 for (i = 0; i < num_configs; i++) {
1292 param = pinconf_to_config_param(configs[i]);
1293 arg = pinconf_to_config_argument(configs[i]);
1294
1295 switch (param) {
1296 case PIN_CONFIG_SLEW_RATE:
1297 arg = (arg >= 1 ? 1 : 0);
1298 capri_pin_update(val, mask, arg,
1299 CAPRI_PIN_SHIFT(HDMI, MODE),
1300 CAPRI_PIN_MASK(HDMI, MODE));
1301 break;
1302
1303 case PIN_CONFIG_INPUT_ENABLE:
1304 /* inverted since the register is for input _disable_ */
1305 arg = (arg >= 1 ? 0 : 1);
1306 capri_pin_update(val, mask, arg,
1307 CAPRI_PIN_SHIFT(HDMI, INPUT_DIS),
1308 CAPRI_PIN_MASK(HDMI, INPUT_DIS));
1309 break;
1310
1311 default:
1312 dev_err(pctldev->dev,
1313 "Unrecognized pin config %d for pin %s (%d).\n",
1314 param, pdata->pins[pin].name, pin);
1315 return -EINVAL;
1316
1317 } /* switch config */
1318 } /* for each config */
1319
1320 return 0;
1321}
1322
1323static int capri_pinctrl_pin_config_set(struct pinctrl_dev *pctldev,
1324 unsigned pin,
1325 unsigned long *configs,
1326 unsigned num_configs)
1327{
1328 struct capri_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev);
1329 enum capri_pin_type pin_type;
1330 u32 offset = 4 * pin;
1331 u32 cfg_val, cfg_mask;
1332 int rc;
1333
1334 cfg_val = 0;
1335 cfg_mask = 0;
1336 pin_type = pin_type_get(pctldev, pin);
1337
1338 /* Different pins have different configuration options */
1339 switch (pin_type) {
1340 case CAPRI_PIN_TYPE_STD:
1341 rc = capri_std_pin_update(pctldev, pin, configs, num_configs,
1342 &cfg_val, &cfg_mask);
1343 break;
1344
1345 case CAPRI_PIN_TYPE_I2C:
1346 rc = capri_i2c_pin_update(pctldev, pin, configs, num_configs,
1347 &cfg_val, &cfg_mask);
1348 break;
1349
1350 case CAPRI_PIN_TYPE_HDMI:
1351 rc = capri_hdmi_pin_update(pctldev, pin, configs, num_configs,
1352 &cfg_val, &cfg_mask);
1353 break;
1354
1355 default:
1356 dev_err(pctldev->dev, "Unknown pin type for pin %s (%d).\n",
1357 pdata->pins[pin].name, pin);
1358 return -EINVAL;
1359
1360 } /* switch pin type */
1361
1362 if (rc)
1363 return rc;
1364
1365 dev_dbg(pctldev->dev,
1366 "%s(): Set pin %s (%d) with config 0x%x, mask 0x%x\n",
1367 __func__, pdata->pins[pin].name, pin, cfg_val, cfg_mask);
1368
1369 rc = regmap_update_bits(pdata->regmap, offset, cfg_mask, cfg_val);
1370 if (rc) {
1371 dev_err(pctldev->dev,
1372 "Error updating register for pin %s (%d).\n",
1373 pdata->pins[pin].name, pin);
1374 return rc;
1375 }
1376
1377 return 0;
1378}
1379
1380static struct pinconf_ops capri_pinctrl_pinconf_ops = {
1381 .pin_config_get = capri_pinctrl_pin_config_get,
1382 .pin_config_set = capri_pinctrl_pin_config_set,
1383};
1384
1385static struct pinctrl_desc capri_pinctrl_desc = {
1386 /* name, pins, npins members initialized in probe function */
1387 .pctlops = &capri_pinctrl_ops,
1388 .pmxops = &capri_pinctrl_pinmux_ops,
1389 .confops = &capri_pinctrl_pinconf_ops,
1390 .owner = THIS_MODULE,
1391};
1392
1393int __init capri_pinctrl_probe(struct platform_device *pdev)
1394{
1395 struct capri_pinctrl_data *pdata = &capri_pinctrl;
1396 struct resource *res;
1397 struct pinctrl_dev *pctl;
1398
1399 /* So far we can assume there is only one bank of registers */
1400 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1401 if (!res) {
1402 dev_err(&pdev->dev, "Missing MEM resource\n");
1403 return -ENODEV;
1404 }
1405
1406 pdata->reg_base = devm_ioremap_resource(&pdev->dev, res);
1407 if (IS_ERR(pdata->reg_base)) {
1408 dev_err(&pdev->dev, "Failed to ioremap MEM resource\n");
1409 return -ENODEV;
1410 }
1411
1412 /* Set up regmap-based access to the pin registers */
1413 pdata->regmap = devm_regmap_init_mmio(&pdev->dev, pdata->reg_base,
1414 &capri_pinctrl_regmap_config);
1415 if (IS_ERR(pdata->regmap)) {
1416 dev_err(&pdev->dev, "Regmap MMIO init failed.\n");
1417 return -ENODEV;
1418 }
1419
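	/* Initialize the dynamic part of pinctrl_desc */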
1420 capri_pinctrl_desc.name = dev_name(&pdev->dev);
1421 capri_pinctrl_desc.pins = capri_pinctrl.pins;
1422 capri_pinctrl_desc.npins = capri_pinctrl.npins;
1423
1424 pctl = pinctrl_register(&capri_pinctrl_desc,
1425 &pdev->dev,
1426 pdata);
1427 if (!pctl) {
1428 dev_err(&pdev->dev, "Failed to register pinctrl\n");
1429 return -ENODEV;
1430 }
1431
1432 platform_set_drvdata(pdev, pdata);
1433
1434 return 0;
1435}
1436
1437static struct of_device_id capri_pinctrl_of_match[] = {
1438 { .compatible = "brcm,bcm11351-pinctrl", },
1439 { },
1440};
1441
1442static struct platform_driver capri_pinctrl_driver = {
1443 .driver = {
1444 .name = "bcm-capri-pinctrl",
1445 .owner = THIS_MODULE,
1446 .of_match_table = capri_pinctrl_of_match,
1447 },
1448};
1449
1450module_platform_driver_probe(capri_pinctrl_driver, capri_pinctrl_probe);
1451
1452MODULE_AUTHOR("Sherman Yin <syin@broadcom.com>");
1453MODULE_DESCRIPTION("Broadcom Capri pinctrl driver");
1454MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-msm.c b/drivers/pinctrl/pinctrl-msm.c
index 38d579b47f31..e43fbce56598 100644
--- a/drivers/pinctrl/pinctrl-msm.c
+++ b/drivers/pinctrl/pinctrl-msm.c
@@ -665,7 +665,10 @@ static void msm_gpio_irq_ack(struct irq_data *d)
665 spin_lock_irqsave(&pctrl->lock, flags); 665 spin_lock_irqsave(&pctrl->lock, flags);
666 666
667 val = readl(pctrl->regs + g->intr_status_reg); 667 val = readl(pctrl->regs + g->intr_status_reg);
668 val &= ~BIT(g->intr_status_bit); 668 if (g->intr_ack_high)
669 val |= BIT(g->intr_status_bit);
670 else
671 val &= ~BIT(g->intr_status_bit);
669 writel(val, pctrl->regs + g->intr_status_reg); 672 writel(val, pctrl->regs + g->intr_status_reg);
670 673
671 if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) 674 if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
@@ -744,6 +747,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
744 break; 747 break;
745 case IRQ_TYPE_EDGE_BOTH: 748 case IRQ_TYPE_EDGE_BOTH:
746 val |= BIT(g->intr_detection_bit); 749 val |= BIT(g->intr_detection_bit);
750 val |= BIT(g->intr_polarity_bit);
747 break; 751 break;
748 case IRQ_TYPE_LEVEL_LOW: 752 case IRQ_TYPE_LEVEL_LOW:
749 break; 753 break;
diff --git a/drivers/pinctrl/pinctrl-msm.h b/drivers/pinctrl/pinctrl-msm.h
index 8fbe9fb19f36..6e26f1b676d7 100644
--- a/drivers/pinctrl/pinctrl-msm.h
+++ b/drivers/pinctrl/pinctrl-msm.h
@@ -84,6 +84,7 @@ struct msm_pingroup {
84 84
85 unsigned intr_enable_bit:5; 85 unsigned intr_enable_bit:5;
86 unsigned intr_status_bit:5; 86 unsigned intr_status_bit:5;
87 unsigned intr_ack_high:1;
87 88
88 unsigned intr_target_bit:5; 89 unsigned intr_target_bit:5;
89 unsigned intr_raw_status_bit:5; 90 unsigned intr_raw_status_bit:5;
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 208341fd57d2..8f6f16ef73f3 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -877,7 +877,6 @@ static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
877 struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip); 877 struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
878 u32 status; 878 u32 status;
879 879
880 pr_err("PLONK IRQ %d\n", irq);
881 clk_enable(nmk_chip->clk); 880 clk_enable(nmk_chip->clk);
882 status = readl(nmk_chip->addr + NMK_GPIO_IS); 881 status = readl(nmk_chip->addr + NMK_GPIO_IS);
883 clk_disable(nmk_chip->clk); 882 clk_disable(nmk_chip->clk);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 46dddc159286..96c60d230c13 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -342,7 +342,7 @@ static const struct pinctrl_ops rockchip_pctrl_ops = {
342 * @pin: pin to change 342 * @pin: pin to change
343 * @mux: new mux function to set 343 * @mux: new mux function to set
344 */ 344 */
345static void rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux) 345static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
346{ 346{
347 struct rockchip_pinctrl *info = bank->drvdata; 347 struct rockchip_pinctrl *info = bank->drvdata;
348 void __iomem *reg = info->reg_base + info->ctrl->mux_offset; 348 void __iomem *reg = info->reg_base + info->ctrl->mux_offset;
@@ -350,6 +350,20 @@ static void rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
350 u8 bit; 350 u8 bit;
351 u32 data; 351 u32 data;
352 352
353 /*
354 * The first 16 pins of rk3188_bank0 are always gpios and do not have
355 * a mux register at all.
356 */
357 if (bank->bank_type == RK3188_BANK0 && pin < 16) {
358 if (mux != RK_FUNC_GPIO) {
359 dev_err(info->dev,
360 "pin %d only supports a gpio mux\n", pin);
361 return -ENOTSUPP;
362 } else {
363 return 0;
364 }
365 }
366
353 dev_dbg(info->dev, "setting mux of GPIO%d-%d to %d\n", 367 dev_dbg(info->dev, "setting mux of GPIO%d-%d to %d\n",
354 bank->bank_num, pin, mux); 368 bank->bank_num, pin, mux);
355 369
@@ -365,6 +379,8 @@ static void rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
365 writel(data, reg); 379 writel(data, reg);
366 380
367 spin_unlock_irqrestore(&bank->slock, flags); 381 spin_unlock_irqrestore(&bank->slock, flags);
382
383 return 0;
368} 384}
369 385
370#define RK2928_PULL_OFFSET 0x118 386#define RK2928_PULL_OFFSET 0x118
@@ -560,7 +576,7 @@ static int rockchip_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
560 const unsigned int *pins = info->groups[group].pins; 576 const unsigned int *pins = info->groups[group].pins;
561 const struct rockchip_pin_config *data = info->groups[group].data; 577 const struct rockchip_pin_config *data = info->groups[group].data;
562 struct rockchip_pin_bank *bank; 578 struct rockchip_pin_bank *bank;
563 int cnt; 579 int cnt, ret = 0;
564 580
565 dev_dbg(info->dev, "enable function %s group %s\n", 581 dev_dbg(info->dev, "enable function %s group %s\n",
566 info->functions[selector].name, info->groups[group].name); 582 info->functions[selector].name, info->groups[group].name);
@@ -571,8 +587,18 @@ static int rockchip_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
571 */ 587 */
572 for (cnt = 0; cnt < info->groups[group].npins; cnt++) { 588 for (cnt = 0; cnt < info->groups[group].npins; cnt++) {
573 bank = pin_to_bank(info, pins[cnt]); 589 bank = pin_to_bank(info, pins[cnt]);
574 rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 590 ret = rockchip_set_mux(bank, pins[cnt] - bank->pin_base,
575 data[cnt].func); 591 data[cnt].func);
592 if (ret)
593 break;
594 }
595
596 if (ret) {
597 /* revert the already done pin settings */
598 for (cnt--; cnt >= 0; cnt--)
599 rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0);
600
601 return ret;
576 } 602 }
577 603
578 return 0; 604 return 0;
@@ -607,7 +633,7 @@ static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
607 struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); 633 struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
608 struct rockchip_pin_bank *bank; 634 struct rockchip_pin_bank *bank;
609 struct gpio_chip *chip; 635 struct gpio_chip *chip;
610 int pin; 636 int pin, ret;
611 u32 data; 637 u32 data;
612 638
613 chip = range->gc; 639 chip = range->gc;
@@ -617,7 +643,9 @@ static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
617 dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n", 643 dev_dbg(info->dev, "gpio_direction for pin %u as %s-%d to %s\n",
618 offset, range->name, pin, input ? "input" : "output"); 644 offset, range->name, pin, input ? "input" : "output");
619 645
620 rockchip_set_mux(bank, pin, RK_FUNC_GPIO); 646 ret = rockchip_set_mux(bank, pin, RK_FUNC_GPIO);
647 if (ret < 0)
648 return ret;
621 649
622 data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR); 650 data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
623 /* set bit to 1 for output, 0 for input */ 651 /* set bit to 1 for output, 0 for input */
@@ -1144,9 +1172,13 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
1144 u32 polarity; 1172 u32 polarity;
1145 u32 level; 1173 u32 level;
1146 u32 data; 1174 u32 data;
1175 int ret;
1147 1176
1148 /* make sure the pin is configured as gpio input */ 1177 /* make sure the pin is configured as gpio input */
1149 rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO); 1178 ret = rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO);
1179 if (ret < 0)
1180 return ret;
1181
1150 data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR); 1182 data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
1151 data &= ~mask; 1183 data &= ~mask;
1152 writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR); 1184 writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
@@ -1534,7 +1566,7 @@ static struct rockchip_pin_ctrl rk3188_pin_ctrl = {
1534 .nr_banks = ARRAY_SIZE(rk3188_pin_banks), 1566 .nr_banks = ARRAY_SIZE(rk3188_pin_banks),
1535 .label = "RK3188-GPIO", 1567 .label = "RK3188-GPIO",
1536 .type = RK3188, 1568 .type = RK3188,
1537 .mux_offset = 0x68, 1569 .mux_offset = 0x60,
1538 .pull_calc_reg = rk3188_calc_pull_reg_and_bit, 1570 .pull_calc_reg = rk3188_calc_pull_reg_and_bit,
1539}; 1571};
1540 1572
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 81075f2a1d3f..2960557bfed9 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -810,6 +810,7 @@ static const struct pinconf_ops pcs_pinconf_ops = {
810static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, 810static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
811 unsigned pin_pos) 811 unsigned pin_pos)
812{ 812{
813 struct pcs_soc_data *pcs_soc = &pcs->socdata;
813 struct pinctrl_pin_desc *pin; 814 struct pinctrl_pin_desc *pin;
814 struct pcs_name *pn; 815 struct pcs_name *pn;
815 int i; 816 int i;
@@ -821,6 +822,18 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
821 return -ENOMEM; 822 return -ENOMEM;
822 } 823 }
823 824
825 if (pcs_soc->irq_enable_mask) {
826 unsigned val;
827
828 val = pcs->read(pcs->base + offset);
829 if (val & pcs_soc->irq_enable_mask) {
830 dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n",
831 (unsigned long)pcs->res->start + offset, val);
832 val &= ~pcs_soc->irq_enable_mask;
833 pcs->write(val, pcs->base + offset);
834 }
835 }
836
824 pin = &pcs->pins.pa[i]; 837 pin = &pcs->pins.pa[i];
825 pn = &pcs->names[i]; 838 pn = &pcs->names[i];
826 sprintf(pn->name, "%lx.%d", 839 sprintf(pn->name, "%lx.%d",
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index c5e0f6973a3b..26ca6855f478 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -629,9 +629,8 @@ static int tb10x_gpio_request_enable(struct pinctrl_dev *pctl,
629 */ 629 */
630 for (i = 0; i < state->pinfuncgrpcnt; i++) { 630 for (i = 0; i < state->pinfuncgrpcnt; i++) {
631 const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i]; 631 const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i];
632 unsigned int port = pfg->port;
633 unsigned int mode = pfg->mode; 632 unsigned int mode = pfg->mode;
634 int j; 633 int j, port = pfg->port;
635 634
636 /* 635 /*
637 * Skip pin groups which are always mapped and don't need 636 * Skip pin groups which are always mapped and don't need
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 48093719167a..f5cd3f961808 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -4794,8 +4794,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
4794 FN_MSIOF0_SCK_B, 0, 4794 FN_MSIOF0_SCK_B, 0,
4795 /* IP5_23_21 [3] */ 4795 /* IP5_23_21 [3] */
4796 FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4, 4796 FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4,
4797 FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, 4797 FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, FN_IERX_C,
4798 FN_IERX_C, 0,
4799 /* IP5_20_18 [3] */ 4798 /* IP5_20_18 [3] */
4800 FN_WE0_N, FN_IECLK, FN_CAN_CLK, 4799 FN_WE0_N, FN_IECLK, FN_CAN_CLK,
4801 FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0, 4800 FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 5186d70c49d4..7868bf3a0f91 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -5288,7 +5288,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
5288 /* SEL_SCIF3 [2] */ 5288 /* SEL_SCIF3 [2] */
5289 FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3, 5289 FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
5290 /* SEL_IEB [2] */ 5290 /* SEL_IEB [2] */
5291 FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 5291 FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
5292 /* SEL_MMC [1] */ 5292 /* SEL_MMC [1] */
5293 FN_SEL_MMC_0, FN_SEL_MMC_1, 5293 FN_SEL_MMC_0, FN_SEL_MMC_1,
5294 /* SEL_SCIF5 [1] */ 5294 /* SEL_SCIF5 [1] */
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 9802b67040cc..2c61281bebd7 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
523 return GPIOF_DIR_IN; 523 return GPIOF_DIR_IN;
524} 524}
525 525
526static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
527{
528 return pinctrl_gpio_direction_input(chip->base + offset);
529}
530
531static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
532 int value)
533{
534 return pinctrl_gpio_direction_output(chip->base + offset);
535}
536
537static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset) 526static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset)
538{ 527{
539 struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev); 528 struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev);
@@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
568 wmt_clearbits(data, reg_data_out, BIT(bit)); 557 wmt_clearbits(data, reg_data_out, BIT(bit));
569} 558}
570 559
560static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
561{
562 return pinctrl_gpio_direction_input(chip->base + offset);
563}
564
565static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
566 int value)
567{
568 wmt_gpio_set_value(chip, offset, value);
569 return pinctrl_gpio_direction_output(chip->base + offset);
570}
571
571static struct gpio_chip wmt_gpio_chip = { 572static struct gpio_chip wmt_gpio_chip = {
572 .label = "gpio-wmt", 573 .label = "gpio-wmt",
573 .owner = THIS_MODULE, 574 .owner = THIS_MODULE,
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 9f611cbbc294..c31aa07b3ba5 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -83,8 +83,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
83{ 83{
84 struct acpi_device *acpi_dev; 84 struct acpi_device *acpi_dev;
85 acpi_handle handle; 85 acpi_handle handle;
86 struct acpi_buffer buffer; 86 int ret = 0;
87 int ret;
88 87
89 pnp_dbg(&dev->dev, "set resources\n"); 88 pnp_dbg(&dev->dev, "set resources\n");
90 89
@@ -97,19 +96,26 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
97 if (WARN_ON_ONCE(acpi_dev != dev->data)) 96 if (WARN_ON_ONCE(acpi_dev != dev->data))
98 dev->data = acpi_dev; 97 dev->data = acpi_dev;
99 98
100 ret = pnpacpi_build_resource_template(dev, &buffer); 99 if (acpi_has_method(handle, METHOD_NAME__SRS)) {
101 if (ret) 100 struct acpi_buffer buffer;
102 return ret; 101
103 ret = pnpacpi_encode_resources(dev, &buffer); 102 ret = pnpacpi_build_resource_template(dev, &buffer);
104 if (ret) { 103 if (ret)
104 return ret;
105
106 ret = pnpacpi_encode_resources(dev, &buffer);
107 if (!ret) {
108 acpi_status status;
109
110 status = acpi_set_current_resources(handle, &buffer);
111 if (ACPI_FAILURE(status))
112 ret = -EIO;
113 }
105 kfree(buffer.pointer); 114 kfree(buffer.pointer);
106 return ret;
107 } 115 }
108 if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer))) 116 if (!ret && acpi_bus_power_manageable(handle))
109 ret = -EINVAL;
110 else if (acpi_bus_power_manageable(handle))
111 ret = acpi_bus_set_power(handle, ACPI_STATE_D0); 117 ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
112 kfree(buffer.pointer); 118
113 return ret; 119 return ret;
114} 120}
115 121
@@ -117,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
117{ 123{
118 struct acpi_device *acpi_dev; 124 struct acpi_device *acpi_dev;
119 acpi_handle handle; 125 acpi_handle handle;
120 int ret; 126 acpi_status status;
121 127
122 dev_dbg(&dev->dev, "disable resources\n"); 128 dev_dbg(&dev->dev, "disable resources\n");
123 129
@@ -128,13 +134,15 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
128 } 134 }
129 135
130 /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ 136 /* acpi_unregister_gsi(pnp_irq(dev, 0)); */
131 ret = 0;
132 if (acpi_bus_power_manageable(handle)) 137 if (acpi_bus_power_manageable(handle))
133 acpi_bus_set_power(handle, ACPI_STATE_D3_COLD); 138 acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
134 /* continue even if acpi_bus_set_power() fails */ 139
135 if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL))) 140 /* continue even if acpi_bus_set_power() fails */
136 ret = -ENODEV; 141 status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
137 return ret; 142 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
143 return -ENODEV;
144
145 return 0;
138} 146}
139 147
140#ifdef CONFIG_ACPI_SLEEP 148#ifdef CONFIG_ACPI_SLEEP
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index deb7f4bcdb7b..438d4c72c7b3 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -37,7 +37,7 @@ __visible struct {
37 * kernel begins at offset 3GB... 37 * kernel begins at offset 3GB...
38 */ 38 */
39 39
40asmlinkage void pnp_bios_callfunc(void); 40asmlinkage __visible void pnp_bios_callfunc(void);
41 41
42__asm__(".text \n" 42__asm__(".text \n"
43 __ALIGN_STR "\n" 43 __ALIGN_STR "\n"
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 258fef272ea7..ebf0d6710b5a 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/pci.h>
18#include <linux/string.h> 19#include <linux/string.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/pnp.h> 21#include <linux/pnp.h>
@@ -334,6 +335,81 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
334} 335}
335#endif 336#endif
336 337
338#ifdef CONFIG_PCI
339/* Device IDs of parts that have 32KB MCH space */
340static const unsigned int mch_quirk_devices[] = {
341 0x0154, /* Ivy Bridge */
342 0x0c00, /* Haswell */
343};
344
345static struct pci_dev *get_intel_host(void)
346{
347 int i;
348 struct pci_dev *host;
349
350 for (i = 0; i < ARRAY_SIZE(mch_quirk_devices); i++) {
351 host = pci_get_device(PCI_VENDOR_ID_INTEL, mch_quirk_devices[i],
352 NULL);
353 if (host)
354 return host;
355 }
356 return NULL;
357}
358
359static void quirk_intel_mch(struct pnp_dev *dev)
360{
361 struct pci_dev *host;
362 u32 addr_lo, addr_hi;
363 struct pci_bus_region region;
364 struct resource mch;
365 struct pnp_resource *pnp_res;
366 struct resource *res;
367
368 host = get_intel_host();
369 if (!host)
370 return;
371
372 /*
373 * MCHBAR is not an architected PCI BAR, so MCH space is usually
374 * reported as a PNP0C02 resource. The MCH space was originally
375 * 16KB, but is 32KB in newer parts. Some BIOSes still report a
376 * PNP0C02 resource that is only 16KB, which means the rest of the
377 * MCH space is consumed but unreported.
378 */
379
380 /*
381 * Read MCHBAR for Host Member Mapped Register Range Base
382 * https://www-ssl.intel.com/content/www/us/en/processors/core/4th-gen-core-family-desktop-vol-2-datasheet
383 * Sec 3.1.12.
384 */
385 pci_read_config_dword(host, 0x48, &addr_lo);
386 region.start = addr_lo & ~0x7fff;
387 pci_read_config_dword(host, 0x4c, &addr_hi);
388 region.start |= (u64) addr_hi << 32;
389 region.end = region.start + 32*1024 - 1;
390
391 memset(&mch, 0, sizeof(mch));
392 mch.flags = IORESOURCE_MEM;
393 pcibios_bus_to_resource(host->bus, &mch, &region);
394
395 list_for_each_entry(pnp_res, &dev->resources, list) {
396 res = &pnp_res->res;
397 if (res->end < mch.start || res->start > mch.end)
398 continue; /* no overlap */
399 if (res->start == mch.start && res->end == mch.end)
400 continue; /* exact match */
401
402 dev_info(&dev->dev, FW_BUG "PNP resource %pR covers only part of %s Intel MCH; extending to %pR\n",
403 res, pci_name(host), &mch);
404 res->start = mch.start;
405 res->end = mch.end;
406 break;
407 }
408
409 pci_dev_put(host);
410}
411#endif
412
337/* 413/*
338 * PnP Quirks 414 * PnP Quirks
339 * Cards or devices that need some tweaking due to incomplete resource info 415 * Cards or devices that need some tweaking due to incomplete resource info
@@ -364,6 +440,9 @@ static struct pnp_fixup pnp_fixups[] = {
364#ifdef CONFIG_AMD_NB 440#ifdef CONFIG_AMD_NB
365 {"PNP0c01", quirk_amd_mmconfig_area}, 441 {"PNP0c01", quirk_amd_mmconfig_area},
366#endif 442#endif
443#ifdef CONFIG_PCI
444 {"PNP0c02", quirk_intel_mch},
445#endif
367 {""} 446 {""}
368}; 447};
369 448
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 476aa495c110..b95cf71ed695 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -11,7 +11,7 @@
11 * Copyright (C) 2012 ARM Limited 11 * Copyright (C) 2012 ARM Limited
12 */ 12 */
13 13
14#include <linux/jiffies.h> 14#include <linux/delay.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_device.h> 16#include <linux/of_device.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
@@ -23,17 +23,12 @@
23static void vexpress_reset_do(struct device *dev, const char *what) 23static void vexpress_reset_do(struct device *dev, const char *what)
24{ 24{
25 int err = -ENOENT; 25 int err = -ENOENT;
26 struct vexpress_config_func *func = 26 struct vexpress_config_func *func = dev_get_drvdata(dev);
27 vexpress_config_func_get_by_dev(dev);
28 27
29 if (func) { 28 if (func) {
30 unsigned long timeout;
31
32 err = vexpress_config_write(func, 0, 0); 29 err = vexpress_config_write(func, 0, 0);
33 30 if (!err)
34 timeout = jiffies + HZ; 31 mdelay(1000);
35 while (time_before(jiffies, timeout))
36 cpu_relax();
37 } 32 }
38 33
39 dev_emerg(dev, "Unable to %s (%d)\n", what, err); 34 dev_emerg(dev, "Unable to %s (%d)\n", what, err);
@@ -96,12 +91,18 @@ static int vexpress_reset_probe(struct platform_device *pdev)
96 enum vexpress_reset_func func; 91 enum vexpress_reset_func func;
97 const struct of_device_id *match = 92 const struct of_device_id *match =
98 of_match_device(vexpress_reset_of_match, &pdev->dev); 93 of_match_device(vexpress_reset_of_match, &pdev->dev);
94 struct vexpress_config_func *config_func;
99 95
100 if (match) 96 if (match)
101 func = (enum vexpress_reset_func)match->data; 97 func = (enum vexpress_reset_func)match->data;
102 else 98 else
103 func = pdev->id_entry->driver_data; 99 func = pdev->id_entry->driver_data;
104 100
101 config_func = vexpress_config_func_get_by_dev(&pdev->dev);
102 if (!config_func)
103 return -EINVAL;
104 dev_set_drvdata(&pdev->dev, config_func);
105
105 switch (func) { 106 switch (func) {
106 case FUNC_SHUTDOWN: 107 case FUNC_SHUTDOWN:
107 vexpress_power_off_device = &pdev->dev; 108 vexpress_power_off_device = &pdev->dev;
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 6963bdf54175..6aea373547f6 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -6,6 +6,7 @@ menu "PTP clock support"
6 6
7config PTP_1588_CLOCK 7config PTP_1588_CLOCK
8 tristate "PTP clock support" 8 tristate "PTP clock support"
9 depends on NET
9 select PPS 10 select PPS
10 select NET_PTP_CLASSIFY 11 select NET_PTP_CLASSIFY
11 help 12 help
@@ -74,7 +75,7 @@ config DP83640_PHY
74config PTP_1588_CLOCK_PCH 75config PTP_1588_CLOCK_PCH
75 tristate "Intel PCH EG20T as PTP clock" 76 tristate "Intel PCH EG20T as PTP clock"
76 depends on X86 || COMPILE_TEST 77 depends on X86 || COMPILE_TEST
77 depends on HAS_IOMEM 78 depends on HAS_IOMEM && NET
78 select PTP_1588_CLOCK 79 select PTP_1588_CLOCK
79 help 80 help
80 This driver adds support for using the PCH EG20T as a PTP 81 This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index 8ad26b8bf418..cb2d4f0f9711 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -2,7 +2,7 @@
2 * ST Microelectronics SPEAr Pulse Width Modulator driver 2 * ST Microelectronics SPEAr Pulse Width Modulator driver
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Shiraz Hashim <shiraz.hashim@st.com> 5 * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -264,6 +264,6 @@ static struct platform_driver spear_pwm_driver = {
264module_platform_driver(spear_pwm_driver); 264module_platform_driver(spear_pwm_driver);
265 265
266MODULE_LICENSE("GPL"); 266MODULE_LICENSE("GPL");
267MODULE_AUTHOR("Shiraz Hashim <shiraz.hashim@st.com>"); 267MODULE_AUTHOR("Shiraz Hashim <shiraz.linux.kernel@gmail.com>");
268MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.com>"); 268MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.com>");
269MODULE_ALIAS("platform:spear-pwm"); 269MODULE_ALIAS("platform:spear-pwm");
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index ded3b3574209..6d38be3d970c 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -38,66 +38,24 @@ struct pbias_reg_info {
38struct pbias_regulator_data { 38struct pbias_regulator_data {
39 struct regulator_desc desc; 39 struct regulator_desc desc;
40 void __iomem *pbias_addr; 40 void __iomem *pbias_addr;
41 unsigned int pbias_reg;
42 struct regulator_dev *dev; 41 struct regulator_dev *dev;
43 struct regmap *syscon; 42 struct regmap *syscon;
44 const struct pbias_reg_info *info; 43 const struct pbias_reg_info *info;
45 int voltage; 44 int voltage;
46}; 45};
47 46
48static int pbias_regulator_set_voltage(struct regulator_dev *dev, 47static const unsigned int pbias_volt_table[] = {
49 int min_uV, int max_uV, unsigned *selector) 48 1800000,
50{ 49 3000000
51 struct pbias_regulator_data *data = rdev_get_drvdata(dev); 50};
52 const struct pbias_reg_info *info = data->info;
53 int ret, vmode;
54
55 if (min_uV <= 1800000)
56 vmode = 0;
57 else if (min_uV > 1800000)
58 vmode = info->vmode;
59
60 ret = regmap_update_bits(data->syscon, data->pbias_reg,
61 info->vmode, vmode);
62
63 return ret;
64}
65
66static int pbias_regulator_get_voltage(struct regulator_dev *rdev)
67{
68 struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
69 const struct pbias_reg_info *info = data->info;
70 int value, voltage;
71
72 regmap_read(data->syscon, data->pbias_reg, &value);
73 value &= info->vmode;
74
75 voltage = value ? 3000000 : 1800000;
76
77 return voltage;
78}
79 51
80static int pbias_regulator_enable(struct regulator_dev *rdev) 52static int pbias_regulator_enable(struct regulator_dev *rdev)
81{ 53{
82 struct pbias_regulator_data *data = rdev_get_drvdata(rdev); 54 struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
83 const struct pbias_reg_info *info = data->info; 55 const struct pbias_reg_info *info = data->info;
84 int ret;
85
86 ret = regmap_update_bits(data->syscon, data->pbias_reg,
87 info->enable_mask, info->enable);
88
89 return ret;
90}
91
92static int pbias_regulator_disable(struct regulator_dev *rdev)
93{
94 struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
95 const struct pbias_reg_info *info = data->info;
96 int ret;
97 56
98 ret = regmap_update_bits(data->syscon, data->pbias_reg, 57 return regmap_update_bits(data->syscon, rdev->desc->enable_reg,
99 info->enable_mask, 0); 58 info->enable_mask, info->enable);
100 return ret;
101} 59}
102 60
103static int pbias_regulator_is_enable(struct regulator_dev *rdev) 61static int pbias_regulator_is_enable(struct regulator_dev *rdev)
@@ -106,17 +64,18 @@ static int pbias_regulator_is_enable(struct regulator_dev *rdev)
106 const struct pbias_reg_info *info = data->info; 64 const struct pbias_reg_info *info = data->info;
107 int value; 65 int value;
108 66
109 regmap_read(data->syscon, data->pbias_reg, &value); 67 regmap_read(data->syscon, rdev->desc->enable_reg, &value);
110 68
111 return (value & info->enable_mask) == info->enable_mask; 69 return (value & info->enable_mask) == info->enable;
112} 70}
113 71
114static struct regulator_ops pbias_regulator_voltage_ops = { 72static struct regulator_ops pbias_regulator_voltage_ops = {
115 .set_voltage = pbias_regulator_set_voltage, 73 .list_voltage = regulator_list_voltage_table,
116 .get_voltage = pbias_regulator_get_voltage, 74 .get_voltage_sel = regulator_get_voltage_sel_regmap,
117 .enable = pbias_regulator_enable, 75 .set_voltage_sel = regulator_set_voltage_sel_regmap,
118 .disable = pbias_regulator_disable, 76 .enable = pbias_regulator_enable,
119 .is_enabled = pbias_regulator_is_enable, 77 .disable = regulator_disable_regmap,
78 .is_enabled = pbias_regulator_is_enable,
120}; 79};
121 80
122static const struct pbias_reg_info pbias_mmc_omap2430 = { 81static const struct pbias_reg_info pbias_mmc_omap2430 = {
@@ -192,6 +151,7 @@ static int pbias_regulator_probe(struct platform_device *pdev)
192 if (IS_ERR(syscon)) 151 if (IS_ERR(syscon))
193 return PTR_ERR(syscon); 152 return PTR_ERR(syscon);
194 153
154 cfg.regmap = syscon;
195 cfg.dev = &pdev->dev; 155 cfg.dev = &pdev->dev;
196 156
197 for (idx = 0; idx < PBIAS_NUM_REGS && data_idx < count; idx++) { 157 for (idx = 0; idx < PBIAS_NUM_REGS && data_idx < count; idx++) {
@@ -207,15 +167,19 @@ static int pbias_regulator_probe(struct platform_device *pdev)
207 if (!res) 167 if (!res)
208 return -EINVAL; 168 return -EINVAL;
209 169
210 drvdata[data_idx].pbias_reg = res->start;
211 drvdata[data_idx].syscon = syscon; 170 drvdata[data_idx].syscon = syscon;
212 drvdata[data_idx].info = info; 171 drvdata[data_idx].info = info;
213 drvdata[data_idx].desc.name = info->name; 172 drvdata[data_idx].desc.name = info->name;
214 drvdata[data_idx].desc.owner = THIS_MODULE; 173 drvdata[data_idx].desc.owner = THIS_MODULE;
215 drvdata[data_idx].desc.type = REGULATOR_VOLTAGE; 174 drvdata[data_idx].desc.type = REGULATOR_VOLTAGE;
216 drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops; 175 drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops;
176 drvdata[data_idx].desc.volt_table = pbias_volt_table;
217 drvdata[data_idx].desc.n_voltages = 2; 177 drvdata[data_idx].desc.n_voltages = 2;
218 drvdata[data_idx].desc.enable_time = info->enable_time; 178 drvdata[data_idx].desc.enable_time = info->enable_time;
179 drvdata[data_idx].desc.vsel_reg = res->start;
180 drvdata[data_idx].desc.vsel_mask = info->vmode;
181 drvdata[data_idx].desc.enable_reg = res->start;
182 drvdata[data_idx].desc.enable_mask = info->enable_mask;
219 183
220 cfg.init_data = pbias_matches[idx].init_data; 184 cfg.init_data = pbias_matches[idx].init_data;
221 cfg.driver_data = &drvdata[data_idx]; 185 cfg.driver_data = &drvdata[data_idx];
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index bd628a6f981d..e5f13c4310fe 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -569,6 +569,9 @@ static int hym8563_probe(struct i2c_client *client,
569 if (IS_ERR(hym8563->rtc)) 569 if (IS_ERR(hym8563->rtc))
570 return PTR_ERR(hym8563->rtc); 570 return PTR_ERR(hym8563->rtc);
571 571
572 /* the hym8563 alarm only supports a minute accuracy */
573 hym8563->rtc->uie_unsupported = 1;
574
572#ifdef CONFIG_COMMON_CLK 575#ifdef CONFIG_COMMON_CLK
573 hym8563_clkout_register_clk(hym8563); 576 hym8563_clkout_register_clk(hym8563);
574#endif 577#endif
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 5c8f8226c848..4cdb64be061b 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -206,7 +206,7 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
206 tm->tm_hour = bcd2bin(regs[2] & 0x3f); 206 tm->tm_hour = bcd2bin(regs[2] & 0x3f);
207 tm->tm_mday = bcd2bin(regs[3] & 0x3f); 207 tm->tm_mday = bcd2bin(regs[3] & 0x3f);
208 tm->tm_wday = regs[4] & 0x7; 208 tm->tm_wday = regs[4] & 0x7;
209 tm->tm_mon = bcd2bin(regs[5] & 0x1f); 209 tm->tm_mon = bcd2bin(regs[5] & 0x1f) - 1;
210 tm->tm_year = bcd2bin(regs[6]) + 100; 210 tm->tm_year = bcd2bin(regs[6]) + 100;
211 211
212 return rtc_valid_tm(tm); 212 return rtc_valid_tm(tm);
@@ -229,7 +229,7 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
229 regs[3] = bin2bcd(tm->tm_hour); 229 regs[3] = bin2bcd(tm->tm_hour);
230 regs[4] = bin2bcd(tm->tm_mday); 230 regs[4] = bin2bcd(tm->tm_mday);
231 regs[5] = tm->tm_wday; 231 regs[5] = tm->tm_wday;
232 regs[6] = bin2bcd(tm->tm_mon); 232 regs[6] = bin2bcd(tm->tm_mon + 1);
233 regs[7] = bin2bcd(tm->tm_year - 100); 233 regs[7] = bin2bcd(tm->tm_year - 100);
234 234
235 msg.addr = client->addr; 235 msg.addr = client->addr;
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 1990285296c6..c316051d9bda 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1252,7 +1252,7 @@ static __init int sclp_initcall(void)
1252 return rc; 1252 return rc;
1253 1253
1254 sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0); 1254 sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
1255 rc = PTR_RET(sclp_pdev); 1255 rc = PTR_ERR_OR_ZERO(sclp_pdev);
1256 if (rc) 1256 if (rc)
1257 goto fail_platform_driver_unregister; 1257 goto fail_platform_driver_unregister;
1258 1258
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 6e8f90f84e49..6e14999f9e8f 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -515,7 +515,7 @@ static int __init sclp_detect_standby_memory(void)
515 if (rc) 515 if (rc)
516 goto out; 516 goto out;
517 sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0); 517 sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
518 rc = PTR_RET(sclp_pdev); 518 rc = PTR_ERR_OR_ZERO(sclp_pdev);
519 if (rc) 519 if (rc)
520 goto out_driver; 520 goto out_driver;
521 sclp_add_standby_memory(); 521 sclp_add_standby_memory();
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 4eed38cd0af6..cd9c91909596 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -97,13 +97,16 @@ static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
97static int __sclp_vt220_emit(struct sclp_vt220_request *request); 97static int __sclp_vt220_emit(struct sclp_vt220_request *request);
98static void sclp_vt220_emit_current(void); 98static void sclp_vt220_emit_current(void);
99 99
100/* Registration structure for our interest in SCLP event buffers */ 100/* Registration structure for SCLP output event buffers */
101static struct sclp_register sclp_vt220_register = { 101static struct sclp_register sclp_vt220_register = {
102 .send_mask = EVTYP_VT220MSG_MASK, 102 .send_mask = EVTYP_VT220MSG_MASK,
103 .pm_event_fn = sclp_vt220_pm_event_fn,
104};
105
106/* Registration structure for SCLP input event buffers */
107static struct sclp_register sclp_vt220_register_input = {
103 .receive_mask = EVTYP_VT220MSG_MASK, 108 .receive_mask = EVTYP_VT220MSG_MASK,
104 .state_change_fn = NULL,
105 .receiver_fn = sclp_vt220_receiver_fn, 109 .receiver_fn = sclp_vt220_receiver_fn,
106 .pm_event_fn = sclp_vt220_pm_event_fn,
107}; 110};
108 111
109 112
@@ -715,9 +718,14 @@ static int __init sclp_vt220_tty_init(void)
715 rc = tty_register_driver(driver); 718 rc = tty_register_driver(driver);
716 if (rc) 719 if (rc)
717 goto out_init; 720 goto out_init;
721 rc = sclp_register(&sclp_vt220_register_input);
722 if (rc)
723 goto out_reg;
718 sclp_vt220_driver = driver; 724 sclp_vt220_driver = driver;
719 return 0; 725 return 0;
720 726
727out_reg:
728 tty_unregister_driver(driver);
721out_init: 729out_init:
722 __sclp_vt220_cleanup(); 730 __sclp_vt220_cleanup();
723out_driver: 731out_driver:
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 9f0ea6cb6922..e3bf885f4a6c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -541,18 +541,27 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
541 541
542static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) 542static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
543{ 543{
544 do { 544 static int ntsm_unsupported;
545
546 while (true) {
545 memset(sei, 0, sizeof(*sei)); 547 memset(sei, 0, sizeof(*sei));
546 sei->request.length = 0x0010; 548 sei->request.length = 0x0010;
547 sei->request.code = 0x000e; 549 sei->request.code = 0x000e;
548 sei->ntsm = ntsm; 550 if (!ntsm_unsupported)
551 sei->ntsm = ntsm;
549 552
550 if (chsc(sei)) 553 if (chsc(sei))
551 break; 554 break;
552 555
553 if (sei->response.code != 0x0001) { 556 if (sei->response.code != 0x0001) {
554 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", 557 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
555 sei->response.code); 558 sei->response.code, sei->ntsm);
559
560 if (sei->response.code == 3 && sei->ntsm) {
561 /* Fallback for old firmware. */
562 ntsm_unsupported = 1;
563 continue;
564 }
556 break; 565 break;
557 } 566 }
558 567
@@ -568,7 +577,10 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
568 CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); 577 CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
569 break; 578 break;
570 } 579 }
571 } while (sei->u.nt0_area.flags & 0x80); 580
581 if (!(sei->u.nt0_area.flags & 0x80))
582 break;
583 }
572} 584}
573 585
574/* 586/*
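The chsc change opens up the do/while so that a response code of 3 on a request carrying an NTSM can set a one-shot fallback flag and retry without the field, accommodating firmware that predates NTSM. A reduced sketch of just that retry shape (the struct, response codes and issue_sei() are illustrative stand-ins; the real loop additionally keeps draining events while the 0x80 flag is set):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#define RSP_OK		0x0001	/* illustrative response codes */
#define RSP_BAD_NTSM	0x0003

struct sei_req {		/* reduced stand-in for struct chsc_sei */
	u64 ntsm;
	u16 response;
};

int issue_sei(struct sei_req *req);	/* hypothetical transport call */

static int store_events_with_fallback(struct sei_req *req, u64 ntsm)
{
	static bool ntsm_unsupported;

	while (true) {
		memset(req, 0, sizeof(*req));
		if (!ntsm_unsupported)
			req->ntsm = ntsm;

		if (issue_sei(req))
			return -EIO;

		if (req->response == RSP_OK)
			return 0;

		if (req->response == RSP_BAD_NTSM && req->ntsm) {
			ntsm_unsupported = true;	/* old firmware: retry without NTSM */
			continue;
		}
		return -EIO;
	}
}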
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8cf4a0c69baf..9a6e4a2cd072 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -7463,6 +7463,10 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7463 if (hpsa_simple_mode) 7463 if (hpsa_simple_mode)
7464 return; 7464 return;
7465 7465
7466 trans_support = readl(&(h->cfgtable->TransportSupport));
7467 if (!(trans_support & PERFORMANT_MODE))
7468 return;
7469
7466 /* Check for I/O accelerator mode support */ 7470 /* Check for I/O accelerator mode support */
7467 if (trans_support & CFGTBL_Trans_io_accel1) { 7471 if (trans_support & CFGTBL_Trans_io_accel1) {
7468 transMethod |= CFGTBL_Trans_io_accel1 | 7472 transMethod |= CFGTBL_Trans_io_accel1 |
@@ -7479,10 +7483,6 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7479 } 7483 }
7480 7484
7481 /* TODO, check that this next line h->nreply_queues is correct */ 7485 /* TODO, check that this next line h->nreply_queues is correct */
7482 trans_support = readl(&(h->cfgtable->TransportSupport));
7483 if (!(trans_support & PERFORMANT_MODE))
7484 return;
7485
7486 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1; 7486 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
7487 hpsa_get_max_perf_mode_cmds(h); 7487 hpsa_get_max_perf_mode_cmds(h);
7488 /* Performant mode ring buffer and supporting data structures */ 7488 /* Performant mode ring buffer and supporting data structures */
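The hpsa hunk moves the readl() of TransportSupport ahead of the first use of trans_support, so the io_accel checks no longer test a variable that has not been read yet. The underlying pattern is simply read-before-test; a minimal sketch against a hypothetical capability register (the bit names are not the hpsa ones):

#include <linux/io.h>
#include <linux/types.h>

#define MODE_BASIC	0x1	/* illustrative capability bits */
#define MODE_ACCEL	0x2

static u32 pick_transport_mode(void __iomem *support_reg)
{
	u32 support = readl(support_reg);	/* read before any test */

	if (!(support & MODE_BASIC))
		return 0;			/* nothing beyond simple mode */

	return (support & MODE_ACCEL) ? MODE_ACCEL : MODE_BASIC;
}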
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 7f0af4fcc001..6fd7d40b2c4d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -8293,7 +8293,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
8293 8293
8294 mpt2sas_base_free_resources(ioc); 8294 mpt2sas_base_free_resources(ioc);
8295 pci_save_state(pdev); 8295 pci_save_state(pdev);
8296 pci_disable_device(pdev);
8297 pci_set_power_state(pdev, device_state); 8296 pci_set_power_state(pdev, device_state);
8298 return 0; 8297 return 0;
8299} 8298}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 771c16bfdbac..f17aa7aa7879 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -189,6 +189,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
189 /* 189 /*
190 * Retry after abort failed, escalate to next level. 190 * Retry after abort failed, escalate to next level.
191 */ 191 */
192 scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED;
192 SCSI_LOG_ERROR_RECOVERY(3, 193 SCSI_LOG_ERROR_RECOVERY(3,
193 scmd_printk(KERN_INFO, scmd, 194 scmd_printk(KERN_INFO, scmd,
194 "scmd %p previous abort failed\n", scmd)); 195 "scmd %p previous abort failed\n", scmd));
@@ -920,10 +921,12 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
920 ses->prot_op = scmd->prot_op; 921 ses->prot_op = scmd->prot_op;
921 922
922 scmd->prot_op = SCSI_PROT_NORMAL; 923 scmd->prot_op = SCSI_PROT_NORMAL;
924 scmd->eh_eflags = 0;
923 scmd->cmnd = ses->eh_cmnd; 925 scmd->cmnd = ses->eh_cmnd;
924 memset(scmd->cmnd, 0, BLK_MAX_CDB); 926 memset(scmd->cmnd, 0, BLK_MAX_CDB);
925 memset(&scmd->sdb, 0, sizeof(scmd->sdb)); 927 memset(&scmd->sdb, 0, sizeof(scmd->sdb));
926 scmd->request->next_rq = NULL; 928 scmd->request->next_rq = NULL;
929 scmd->result = 0;
927 930
928 if (sense_bytes) { 931 if (sense_bytes) {
929 scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE, 932 scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1157,6 +1160,15 @@ int scsi_eh_get_sense(struct list_head *work_q,
1157 __func__)); 1160 __func__));
1158 break; 1161 break;
1159 } 1162 }
1163 if (status_byte(scmd->result) != CHECK_CONDITION)
1164 /*
1165 * don't request sense if there's no check condition
1166 * status because the error we're processing isn't one
1167 * that has a sense code (and some devices get
1168 * confused by sense requests out of the blue)
1169 */
1170 continue;
1171
1160 SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd, 1172 SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
1161 "%s: requesting sense\n", 1173 "%s: requesting sense\n",
1162 current->comm)); 1174 current->comm));
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 65a123d9c676..9db097a28a74 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -137,6 +137,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
137 * lock such that the kblockd_schedule_work() call happens 137 * lock such that the kblockd_schedule_work() call happens
138 * before blk_cleanup_queue() finishes. 138 * before blk_cleanup_queue() finishes.
139 */ 139 */
140 cmd->result = 0;
140 spin_lock_irqsave(q->queue_lock, flags); 141 spin_lock_irqsave(q->queue_lock, flags);
141 blk_requeue_request(q, cmd->request); 142 blk_requeue_request(q, cmd->request);
142 kblockd_schedule_work(q, &device->requeue_work); 143 kblockd_schedule_work(q, &device->requeue_work);
@@ -1044,6 +1045,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
1044 */ 1045 */
1045int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) 1046int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1046{ 1047{
1048 struct scsi_device *sdev = cmd->device;
1047 struct request *rq = cmd->request; 1049 struct request *rq = cmd->request;
1048 1050
1049 int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); 1051 int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
@@ -1091,7 +1093,7 @@ err_exit:
1091 scsi_release_buffers(cmd); 1093 scsi_release_buffers(cmd);
1092 cmd->request->special = NULL; 1094 cmd->request->special = NULL;
1093 scsi_put_command(cmd); 1095 scsi_put_command(cmd);
1094 put_device(&cmd->device->sdev_gendev); 1096 put_device(&sdev->sdev_gendev);
1095 return error; 1097 return error;
1096} 1098}
1097EXPORT_SYMBOL(scsi_init_io); 1099EXPORT_SYMBOL(scsi_init_io);
@@ -1273,7 +1275,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1273 struct scsi_cmnd *cmd = req->special; 1275 struct scsi_cmnd *cmd = req->special;
1274 scsi_release_buffers(cmd); 1276 scsi_release_buffers(cmd);
1275 scsi_put_command(cmd); 1277 scsi_put_command(cmd);
1276 put_device(&cmd->device->sdev_gendev); 1278 put_device(&sdev->sdev_gendev);
1277 req->special = NULL; 1279 req->special = NULL;
1278 } 1280 }
1279 break; 1281 break;
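The scsi_lib hunks cache cmd->device in a local sdev before scsi_put_command() runs, since the put may release the command and leave cmd->device unsafe to dereference for the following put_device(). A toy sketch of the idiom (types and put helpers are illustrative, not the SCSI midlayer's):

struct parent;

struct child {
	struct parent *parent;
};

void put_child(struct child *c);	/* may free c */
void put_parent(struct parent *p);

static void release_child(struct child *c)
{
	struct parent *p = c->parent;	/* cache before c can go away */

	put_child(c);
	put_parent(p);			/* must not reach p through c anymore */
}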
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index fe30ea94ffe6..109802f776ed 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -77,7 +77,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
77 goto next_msg; 77 goto next_msg;
78 } 78 }
79 79
80 if (!capable(CAP_SYS_ADMIN)) { 80 if (!netlink_capable(skb, CAP_SYS_ADMIN)) {
81 err = -EPERM; 81 err = -EPERM;
82 goto next_msg; 82 goto next_msg;
83 } 83 }
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 1b681427dde0..c341f855fadc 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
1621 list_del(&rphy->list); 1621 list_del(&rphy->list);
1622 mutex_unlock(&sas_host->lock); 1622 mutex_unlock(&sas_host->lock);
1623 1623
1624 sas_bsg_remove(shost, rphy);
1625
1626 transport_destroy_device(dev); 1624 transport_destroy_device(dev);
1627 1625
1628 put_device(dev); 1626 put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
1681 } 1679 }
1682 1680
1683 sas_rphy_unlink(rphy); 1681 sas_rphy_unlink(rphy);
1682 sas_bsg_remove(NULL, rphy);
1684 transport_remove_device(dev); 1683 transport_remove_device(dev);
1685 device_del(dev); 1684 device_del(dev);
1686} 1685}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 16bfd50cd3fe..db3b494e5926 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -750,8 +750,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
750 750
751 vscsi->affinity_hint_set = true; 751 vscsi->affinity_hint_set = true;
752 } else { 752 } else {
753 for (i = 0; i < vscsi->num_queues; i++) 753 for (i = 0; i < vscsi->num_queues; i++) {
754 if (!vscsi->req_vqs[i].vq)
755 continue;
756
754 virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); 757 virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
758 }
755 759
756 vscsi->affinity_hint_set = false; 760 vscsi->affinity_hint_set = false;
757 } 761 }
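The virtio_scsi hunk adds a NULL check so that request-queue slots whose virtqueue was never created are skipped when affinity hints are cleared. A reduced sketch of the loop (the array layout is assumed; virtqueue_set_affinity() is the call the driver itself uses):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void clear_affinity(struct virtqueue **vqs, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		if (!vqs[i])
			continue;	/* queue not initialized, nothing to clear */
		virtqueue_set_affinity(vqs[i], -1);
	}
}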
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index fc67f564f02c..788ed9b59b4e 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -1,10 +1,12 @@
1# 1#
2# Makefile for the SuperH specific drivers. 2# Makefile for the SuperH specific drivers.
3# 3#
4obj-y := intc/ 4obj-$(CONFIG_SUPERH) += intc/
5obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += intc/
6ifneq ($(CONFIG_COMMON_CLK),y)
7obj-$(CONFIG_HAVE_CLK) += clk/
8endif
9obj-$(CONFIG_MAPLE) += maple/
10obj-$(CONFIG_SUPERHYWAY) += superhyway/
5 11
6obj-$(CONFIG_HAVE_CLK) += clk/ 12obj-y += pm_runtime.o
7obj-$(CONFIG_MAPLE) += maple/
8obj-$(CONFIG_SUPERHYWAY) += superhyway/
9
10obj-y += pm_runtime.o
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 8afa5a4589f2..10c65eb51f85 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -50,8 +50,25 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
50 .con_ids = { NULL, }, 50 .con_ids = { NULL, },
51}; 51};
52 52
53static bool default_pm_on;
54
53static int __init sh_pm_runtime_init(void) 55static int __init sh_pm_runtime_init(void)
54{ 56{
57 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
58 if (!of_machine_is_compatible("renesas,emev2") &&
59 !of_machine_is_compatible("renesas,r7s72100") &&
60 !of_machine_is_compatible("renesas,r8a73a4") &&
61 !of_machine_is_compatible("renesas,r8a7740") &&
62 !of_machine_is_compatible("renesas,r8a7778") &&
63 !of_machine_is_compatible("renesas,r8a7779") &&
64 !of_machine_is_compatible("renesas,r8a7790") &&
65 !of_machine_is_compatible("renesas,r8a7791") &&
66 !of_machine_is_compatible("renesas,sh7372") &&
67 !of_machine_is_compatible("renesas,sh73a0"))
68 return 0;
69 }
70
71 default_pm_on = true;
55 pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); 72 pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
56 return 0; 73 return 0;
57} 74}
@@ -59,7 +76,8 @@ core_initcall(sh_pm_runtime_init);
59 76
60static int __init sh_pm_runtime_late_init(void) 77static int __init sh_pm_runtime_late_init(void)
61{ 78{
62 pm_genpd_poweroff_unused(); 79 if (default_pm_on)
80 pm_genpd_poweroff_unused();
63 return 0; 81 return 0;
64} 82}
65late_initcall(sh_pm_runtime_late_init); 83late_initcall(sh_pm_runtime_late_init);
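The sh/pm_runtime change keys both initcalls off one decision: the early initcall installs the clock notifier (and sets default_pm_on) only for the whitelisted machines, and the late initcall powers down unused domains only if that setup actually ran. A reduced sketch of the paired-initcall pattern, with the machine check and PM calls stubbed out as hypothetical helpers:

#include <linux/init.h>
#include <linux/types.h>

bool machine_uses_legacy_pm(void);	/* e.g. of_machine_is_compatible() whitelist */
void install_clk_notifier(void);	/* e.g. pm_clk_add_notifier() */
void poweroff_unused_domains(void);	/* e.g. pm_genpd_poweroff_unused() */

static bool default_pm_on;

static int __init early_setup(void)
{
	if (!machine_uses_legacy_pm())
		return 0;		/* not whitelisted: do nothing here */

	default_pm_on = true;
	install_clk_notifier();
	return 0;
}
core_initcall(early_setup);

static int __init late_setup(void)
{
	if (default_pm_on)		/* only if the early setup ran */
		poweroff_unused_domains();
	return 0;
}
late_initcall(late_setup);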
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 60f2b41c7310..213b5cbb9dcc 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -91,8 +91,8 @@ config SPI_BFIN5XX
91 help 91 help
92 This is the SPI controller master driver for Blackfin 5xx processor. 92 This is the SPI controller master driver for Blackfin 5xx processor.
93 93
94config SPI_BFIN_V3 94config SPI_ADI_V3
95 tristate "SPI controller v3 for Blackfin" 95 tristate "SPI controller v3 for ADI"
96 depends on BF60x 96 depends on BF60x
97 help 97 help
98 This is the SPI controller v3 master driver 98 This is the SPI controller v3 master driver
@@ -148,6 +148,13 @@ config SPI_BUTTERFLY
148 inexpensive battery powered microcontroller evaluation board. 148 inexpensive battery powered microcontroller evaluation board.
149 This same cable can be used to flash new firmware. 149 This same cable can be used to flash new firmware.
150 150
151config SPI_CADENCE
152 tristate "Cadence SPI controller"
153 depends on ARM
154 help
155 This selects the Cadence SPI controller master driver
156 used by Xilinx Zynq.
157
151config SPI_CLPS711X 158config SPI_CLPS711X
152 tristate "CLPS711X host SPI controller" 159 tristate "CLPS711X host SPI controller"
153 depends on ARCH_CLPS711X || COMPILE_TEST 160 depends on ARCH_CLPS711X || COMPILE_TEST
@@ -505,7 +512,7 @@ config SPI_TEGRA20_SLINK
505 512
506config SPI_TOPCLIFF_PCH 513config SPI_TOPCLIFF_PCH
507 tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI" 514 tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
508 depends on PCI 515 depends on PCI && (X86_32 || COMPILE_TEST)
509 help 516 help
510 SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus 517 SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
511 used in some x86 embedded processors. 518 used in some x86 embedded processors.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index bd792669e563..929c9f5eac01 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -18,10 +18,11 @@ obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
18obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o 18obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
19obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o 19obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
20obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o 20obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
21obj-$(CONFIG_SPI_BFIN_V3) += spi-bfin-v3.o 21obj-$(CONFIG_SPI_ADI_V3) += spi-adi-v3.o
22obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o 22obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
23obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o 23obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
24obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o 24obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
25obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
25obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o 26obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
26obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o 27obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
27obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o 28obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
diff --git a/drivers/spi/spi-bfin-v3.c b/drivers/spi/spi-adi-v3.c
index 4089d0e0d84e..dcb2287c7f8a 100644
--- a/drivers/spi/spi-bfin-v3.c
+++ b/drivers/spi/spi-adi-v3.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Analog Devices SPI3 controller driver 2 * Analog Devices SPI3 controller driver
3 * 3 *
4 * Copyright (c) 2013 Analog Devices Inc. 4 * Copyright (c) 2014 Analog Devices Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -13,6 +13,7 @@
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#include <linux/clk.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
17#include <linux/device.h> 18#include <linux/device.h>
18#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
@@ -26,35 +27,34 @@
26#include <linux/platform_device.h> 27#include <linux/platform_device.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/spi/adi_spi3.h>
29#include <linux/types.h> 31#include <linux/types.h>
30 32
31#include <asm/bfin_spi3.h>
32#include <asm/cacheflush.h>
33#include <asm/dma.h> 33#include <asm/dma.h>
34#include <asm/portmux.h> 34#include <asm/portmux.h>
35 35
36enum bfin_spi_state { 36enum adi_spi_state {
37 START_STATE, 37 START_STATE,
38 RUNNING_STATE, 38 RUNNING_STATE,
39 DONE_STATE, 39 DONE_STATE,
40 ERROR_STATE 40 ERROR_STATE
41}; 41};
42 42
43struct bfin_spi_master; 43struct adi_spi_master;
44 44
45struct bfin_spi_transfer_ops { 45struct adi_spi_transfer_ops {
46 void (*write) (struct bfin_spi_master *); 46 void (*write) (struct adi_spi_master *);
47 void (*read) (struct bfin_spi_master *); 47 void (*read) (struct adi_spi_master *);
48 void (*duplex) (struct bfin_spi_master *); 48 void (*duplex) (struct adi_spi_master *);
49}; 49};
50 50
51/* runtime info for spi master */ 51/* runtime info for spi master */
52struct bfin_spi_master { 52struct adi_spi_master {
53 /* SPI framework hookup */ 53 /* SPI framework hookup */
54 struct spi_master *master; 54 struct spi_master *master;
55 55
56 /* Regs base of SPI controller */ 56 /* Regs base of SPI controller */
57 struct bfin_spi_regs __iomem *regs; 57 struct adi_spi_regs __iomem *regs;
58 58
59 /* Pin request list */ 59 /* Pin request list */
60 u16 *pin_req; 60 u16 *pin_req;
@@ -65,7 +65,7 @@ struct bfin_spi_master {
65 /* Current message transfer state info */ 65 /* Current message transfer state info */
66 struct spi_message *cur_msg; 66 struct spi_message *cur_msg;
67 struct spi_transfer *cur_transfer; 67 struct spi_transfer *cur_transfer;
68 struct bfin_spi_device *cur_chip; 68 struct adi_spi_device *cur_chip;
69 unsigned transfer_len; 69 unsigned transfer_len;
70 70
71 /* transfer buffer */ 71 /* transfer buffer */
@@ -90,12 +90,12 @@ struct bfin_spi_master {
90 u32 ssel; 90 u32 ssel;
91 91
92 unsigned long sclk; 92 unsigned long sclk;
93 enum bfin_spi_state state; 93 enum adi_spi_state state;
94 94
95 const struct bfin_spi_transfer_ops *ops; 95 const struct adi_spi_transfer_ops *ops;
96}; 96};
97 97
98struct bfin_spi_device { 98struct adi_spi_device {
99 u32 control; 99 u32 control;
100 u32 clock; 100 u32 clock;
101 u32 ssel; 101 u32 ssel;
@@ -105,17 +105,25 @@ struct bfin_spi_device {
105 u32 cs_gpio; 105 u32 cs_gpio;
106 u32 tx_dummy_val; /* tx value for rx only transfer */ 106 u32 tx_dummy_val; /* tx value for rx only transfer */
107 bool enable_dma; 107 bool enable_dma;
108 const struct bfin_spi_transfer_ops *ops; 108 const struct adi_spi_transfer_ops *ops;
109}; 109};
110 110
111static void bfin_spi_enable(struct bfin_spi_master *drv_data) 111static void adi_spi_enable(struct adi_spi_master *drv_data)
112{ 112{
113 bfin_write_or(&drv_data->regs->control, SPI_CTL_EN); 113 u32 ctl;
114
115 ctl = ioread32(&drv_data->regs->control);
116 ctl |= SPI_CTL_EN;
117 iowrite32(ctl, &drv_data->regs->control);
114} 118}
115 119
116static void bfin_spi_disable(struct bfin_spi_master *drv_data) 120static void adi_spi_disable(struct adi_spi_master *drv_data)
117{ 121{
118 bfin_write_and(&drv_data->regs->control, ~SPI_CTL_EN); 122 u32 ctl;
123
124 ctl = ioread32(&drv_data->regs->control);
125 ctl &= ~SPI_CTL_EN;
126 iowrite32(ctl, &drv_data->regs->control);
119} 127}
120 128
 121/* Calculate the SPI_CLOCK register value based on input HZ */ 129/* Calculate the SPI_CLOCK register value based on input HZ */
@@ -128,35 +136,43 @@ static u32 hz_to_spi_clock(u32 sclk, u32 speed_hz)
128 return spi_clock; 136 return spi_clock;
129} 137}
130 138
131static int bfin_spi_flush(struct bfin_spi_master *drv_data) 139static int adi_spi_flush(struct adi_spi_master *drv_data)
132{ 140{
133 unsigned long limit = loops_per_jiffy << 1; 141 unsigned long limit = loops_per_jiffy << 1;
134 142
135 /* wait for stop and clear stat */ 143 /* wait for stop and clear stat */
136 while (!(bfin_read(&drv_data->regs->status) & SPI_STAT_SPIF) && --limit) 144 while (!(ioread32(&drv_data->regs->status) & SPI_STAT_SPIF) && --limit)
137 cpu_relax(); 145 cpu_relax();
138 146
139 bfin_write(&drv_data->regs->status, 0xFFFFFFFF); 147 iowrite32(0xFFFFFFFF, &drv_data->regs->status);
140 148
141 return limit; 149 return limit;
142} 150}
143 151
144/* Chip select operation functions for cs_change flag */ 152/* Chip select operation functions for cs_change flag */
145static void bfin_spi_cs_active(struct bfin_spi_master *drv_data, struct bfin_spi_device *chip) 153static void adi_spi_cs_active(struct adi_spi_master *drv_data, struct adi_spi_device *chip)
146{ 154{
147 if (likely(chip->cs < MAX_CTRL_CS)) 155 if (likely(chip->cs < MAX_CTRL_CS)) {
148 bfin_write_and(&drv_data->regs->ssel, ~chip->ssel); 156 u32 reg;
149 else 157 reg = ioread32(&drv_data->regs->ssel);
158 reg &= ~chip->ssel;
159 iowrite32(reg, &drv_data->regs->ssel);
160 } else {
150 gpio_set_value(chip->cs_gpio, 0); 161 gpio_set_value(chip->cs_gpio, 0);
162 }
151} 163}
152 164
153static void bfin_spi_cs_deactive(struct bfin_spi_master *drv_data, 165static void adi_spi_cs_deactive(struct adi_spi_master *drv_data,
154 struct bfin_spi_device *chip) 166 struct adi_spi_device *chip)
155{ 167{
156 if (likely(chip->cs < MAX_CTRL_CS)) 168 if (likely(chip->cs < MAX_CTRL_CS)) {
157 bfin_write_or(&drv_data->regs->ssel, chip->ssel); 169 u32 reg;
158 else 170 reg = ioread32(&drv_data->regs->ssel);
171 reg |= chip->ssel;
172 iowrite32(reg, &drv_data->regs->ssel);
173 } else {
159 gpio_set_value(chip->cs_gpio, 1); 174 gpio_set_value(chip->cs_gpio, 1);
175 }
160 176
161 /* Move delay here for consistency */ 177 /* Move delay here for consistency */
162 if (chip->cs_chg_udelay) 178 if (chip->cs_chg_udelay)
@@ -164,187 +180,192 @@ static void bfin_spi_cs_deactive(struct bfin_spi_master *drv_data,
164} 180}
165 181
166/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ 182/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */
167static inline void bfin_spi_cs_enable(struct bfin_spi_master *drv_data, 183static inline void adi_spi_cs_enable(struct adi_spi_master *drv_data,
168 struct bfin_spi_device *chip) 184 struct adi_spi_device *chip)
169{ 185{
170 if (chip->cs < MAX_CTRL_CS) 186 if (chip->cs < MAX_CTRL_CS) {
171 bfin_write_or(&drv_data->regs->ssel, chip->ssel >> 8); 187 u32 reg;
188 reg = ioread32(&drv_data->regs->ssel);
189 reg |= chip->ssel >> 8;
190 iowrite32(reg, &drv_data->regs->ssel);
191 }
172} 192}
173 193
174static inline void bfin_spi_cs_disable(struct bfin_spi_master *drv_data, 194static inline void adi_spi_cs_disable(struct adi_spi_master *drv_data,
175 struct bfin_spi_device *chip) 195 struct adi_spi_device *chip)
176{ 196{
177 if (chip->cs < MAX_CTRL_CS) 197 if (chip->cs < MAX_CTRL_CS) {
178 bfin_write_and(&drv_data->regs->ssel, ~(chip->ssel >> 8)); 198 u32 reg;
199 reg = ioread32(&drv_data->regs->ssel);
200 reg &= ~(chip->ssel >> 8);
201 iowrite32(reg, &drv_data->regs->ssel);
202 }
179} 203}
180 204
181/* stop controller and re-config current chip*/ 205/* stop controller and re-config current chip*/
182static void bfin_spi_restore_state(struct bfin_spi_master *drv_data) 206static void adi_spi_restore_state(struct adi_spi_master *drv_data)
183{ 207{
184 struct bfin_spi_device *chip = drv_data->cur_chip; 208 struct adi_spi_device *chip = drv_data->cur_chip;
185 209
186 /* Clear status and disable clock */ 210 /* Clear status and disable clock */
187 bfin_write(&drv_data->regs->status, 0xFFFFFFFF); 211 iowrite32(0xFFFFFFFF, &drv_data->regs->status);
188 bfin_write(&drv_data->regs->rx_control, 0x0); 212 iowrite32(0x0, &drv_data->regs->rx_control);
189 bfin_write(&drv_data->regs->tx_control, 0x0); 213 iowrite32(0x0, &drv_data->regs->tx_control);
190 bfin_spi_disable(drv_data); 214 adi_spi_disable(drv_data);
191
192 SSYNC();
193 215
194 /* Load the registers */ 216 /* Load the registers */
195 bfin_write(&drv_data->regs->control, chip->control); 217 iowrite32(chip->control, &drv_data->regs->control);
196 bfin_write(&drv_data->regs->clock, chip->clock); 218 iowrite32(chip->clock, &drv_data->regs->clock);
197 219
198 bfin_spi_enable(drv_data); 220 adi_spi_enable(drv_data);
199 drv_data->tx_num = drv_data->rx_num = 0; 221 drv_data->tx_num = drv_data->rx_num = 0;
200 /* we always choose tx transfer initiate */ 222 /* we always choose tx transfer initiate */
201 bfin_write(&drv_data->regs->rx_control, SPI_RXCTL_REN); 223 iowrite32(SPI_RXCTL_REN, &drv_data->regs->rx_control);
202 bfin_write(&drv_data->regs->tx_control, 224 iowrite32(SPI_TXCTL_TEN | SPI_TXCTL_TTI, &drv_data->regs->tx_control);
203 SPI_TXCTL_TEN | SPI_TXCTL_TTI); 225 adi_spi_cs_active(drv_data, chip);
204 bfin_spi_cs_active(drv_data, chip);
205} 226}
206 227
207/* discard invalid rx data and empty rfifo */ 228/* discard invalid rx data and empty rfifo */
208static inline void dummy_read(struct bfin_spi_master *drv_data) 229static inline void dummy_read(struct adi_spi_master *drv_data)
209{ 230{
210 while (!(bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)) 231 while (!(ioread32(&drv_data->regs->status) & SPI_STAT_RFE))
211 bfin_read(&drv_data->regs->rfifo); 232 ioread32(&drv_data->regs->rfifo);
212} 233}
213 234
214static void bfin_spi_u8_write(struct bfin_spi_master *drv_data) 235static void adi_spi_u8_write(struct adi_spi_master *drv_data)
215{ 236{
216 dummy_read(drv_data); 237 dummy_read(drv_data);
217 while (drv_data->tx < drv_data->tx_end) { 238 while (drv_data->tx < drv_data->tx_end) {
218 bfin_write(&drv_data->regs->tfifo, (*(u8 *)(drv_data->tx++))); 239 iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo);
219 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 240 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
220 cpu_relax(); 241 cpu_relax();
221 bfin_read(&drv_data->regs->rfifo); 242 ioread32(&drv_data->regs->rfifo);
222 } 243 }
223} 244}
224 245
225static void bfin_spi_u8_read(struct bfin_spi_master *drv_data) 246static void adi_spi_u8_read(struct adi_spi_master *drv_data)
226{ 247{
227 u32 tx_val = drv_data->cur_chip->tx_dummy_val; 248 u32 tx_val = drv_data->cur_chip->tx_dummy_val;
228 249
229 dummy_read(drv_data); 250 dummy_read(drv_data);
230 while (drv_data->rx < drv_data->rx_end) { 251 while (drv_data->rx < drv_data->rx_end) {
231 bfin_write(&drv_data->regs->tfifo, tx_val); 252 iowrite32(tx_val, &drv_data->regs->tfifo);
232 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 253 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
233 cpu_relax(); 254 cpu_relax();
234 *(u8 *)(drv_data->rx++) = bfin_read(&drv_data->regs->rfifo); 255 *(u8 *)(drv_data->rx++) = ioread32(&drv_data->regs->rfifo);
235 } 256 }
236} 257}
237 258
238static void bfin_spi_u8_duplex(struct bfin_spi_master *drv_data) 259static void adi_spi_u8_duplex(struct adi_spi_master *drv_data)
239{ 260{
240 dummy_read(drv_data); 261 dummy_read(drv_data);
241 while (drv_data->rx < drv_data->rx_end) { 262 while (drv_data->rx < drv_data->rx_end) {
242 bfin_write(&drv_data->regs->tfifo, (*(u8 *)(drv_data->tx++))); 263 iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo);
243 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 264 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
244 cpu_relax(); 265 cpu_relax();
245 *(u8 *)(drv_data->rx++) = bfin_read(&drv_data->regs->rfifo); 266 *(u8 *)(drv_data->rx++) = ioread32(&drv_data->regs->rfifo);
246 } 267 }
247} 268}
248 269
249static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { 270static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u8 = {
250 .write = bfin_spi_u8_write, 271 .write = adi_spi_u8_write,
251 .read = bfin_spi_u8_read, 272 .read = adi_spi_u8_read,
252 .duplex = bfin_spi_u8_duplex, 273 .duplex = adi_spi_u8_duplex,
253}; 274};
254 275
255static void bfin_spi_u16_write(struct bfin_spi_master *drv_data) 276static void adi_spi_u16_write(struct adi_spi_master *drv_data)
256{ 277{
257 dummy_read(drv_data); 278 dummy_read(drv_data);
258 while (drv_data->tx < drv_data->tx_end) { 279 while (drv_data->tx < drv_data->tx_end) {
259 bfin_write(&drv_data->regs->tfifo, (*(u16 *)drv_data->tx)); 280 iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo);
260 drv_data->tx += 2; 281 drv_data->tx += 2;
261 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 282 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
262 cpu_relax(); 283 cpu_relax();
263 bfin_read(&drv_data->regs->rfifo); 284 ioread32(&drv_data->regs->rfifo);
264 } 285 }
265} 286}
266 287
267static void bfin_spi_u16_read(struct bfin_spi_master *drv_data) 288static void adi_spi_u16_read(struct adi_spi_master *drv_data)
268{ 289{
269 u32 tx_val = drv_data->cur_chip->tx_dummy_val; 290 u32 tx_val = drv_data->cur_chip->tx_dummy_val;
270 291
271 dummy_read(drv_data); 292 dummy_read(drv_data);
272 while (drv_data->rx < drv_data->rx_end) { 293 while (drv_data->rx < drv_data->rx_end) {
273 bfin_write(&drv_data->regs->tfifo, tx_val); 294 iowrite32(tx_val, &drv_data->regs->tfifo);
274 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 295 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
275 cpu_relax(); 296 cpu_relax();
276 *(u16 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); 297 *(u16 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
277 drv_data->rx += 2; 298 drv_data->rx += 2;
278 } 299 }
279} 300}
280 301
281static void bfin_spi_u16_duplex(struct bfin_spi_master *drv_data) 302static void adi_spi_u16_duplex(struct adi_spi_master *drv_data)
282{ 303{
283 dummy_read(drv_data); 304 dummy_read(drv_data);
284 while (drv_data->rx < drv_data->rx_end) { 305 while (drv_data->rx < drv_data->rx_end) {
285 bfin_write(&drv_data->regs->tfifo, (*(u16 *)drv_data->tx)); 306 iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo);
286 drv_data->tx += 2; 307 drv_data->tx += 2;
287 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 308 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
288 cpu_relax(); 309 cpu_relax();
289 *(u16 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); 310 *(u16 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
290 drv_data->rx += 2; 311 drv_data->rx += 2;
291 } 312 }
292} 313}
293 314
294static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { 315static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u16 = {
295 .write = bfin_spi_u16_write, 316 .write = adi_spi_u16_write,
296 .read = bfin_spi_u16_read, 317 .read = adi_spi_u16_read,
297 .duplex = bfin_spi_u16_duplex, 318 .duplex = adi_spi_u16_duplex,
298}; 319};
299 320
300static void bfin_spi_u32_write(struct bfin_spi_master *drv_data) 321static void adi_spi_u32_write(struct adi_spi_master *drv_data)
301{ 322{
302 dummy_read(drv_data); 323 dummy_read(drv_data);
303 while (drv_data->tx < drv_data->tx_end) { 324 while (drv_data->tx < drv_data->tx_end) {
304 bfin_write(&drv_data->regs->tfifo, (*(u32 *)drv_data->tx)); 325 iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo);
305 drv_data->tx += 4; 326 drv_data->tx += 4;
306 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 327 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
307 cpu_relax(); 328 cpu_relax();
308 bfin_read(&drv_data->regs->rfifo); 329 ioread32(&drv_data->regs->rfifo);
309 } 330 }
310} 331}
311 332
312static void bfin_spi_u32_read(struct bfin_spi_master *drv_data) 333static void adi_spi_u32_read(struct adi_spi_master *drv_data)
313{ 334{
314 u32 tx_val = drv_data->cur_chip->tx_dummy_val; 335 u32 tx_val = drv_data->cur_chip->tx_dummy_val;
315 336
316 dummy_read(drv_data); 337 dummy_read(drv_data);
317 while (drv_data->rx < drv_data->rx_end) { 338 while (drv_data->rx < drv_data->rx_end) {
318 bfin_write(&drv_data->regs->tfifo, tx_val); 339 iowrite32(tx_val, &drv_data->regs->tfifo);
319 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 340 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
320 cpu_relax(); 341 cpu_relax();
321 *(u32 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); 342 *(u32 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
322 drv_data->rx += 4; 343 drv_data->rx += 4;
323 } 344 }
324} 345}
325 346
326static void bfin_spi_u32_duplex(struct bfin_spi_master *drv_data) 347static void adi_spi_u32_duplex(struct adi_spi_master *drv_data)
327{ 348{
328 dummy_read(drv_data); 349 dummy_read(drv_data);
329 while (drv_data->rx < drv_data->rx_end) { 350 while (drv_data->rx < drv_data->rx_end) {
330 bfin_write(&drv_data->regs->tfifo, (*(u32 *)drv_data->tx)); 351 iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo);
331 drv_data->tx += 4; 352 drv_data->tx += 4;
332 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 353 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
333 cpu_relax(); 354 cpu_relax();
334 *(u32 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); 355 *(u32 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
335 drv_data->rx += 4; 356 drv_data->rx += 4;
336 } 357 }
337} 358}
338 359
339static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u32 = { 360static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u32 = {
340 .write = bfin_spi_u32_write, 361 .write = adi_spi_u32_write,
341 .read = bfin_spi_u32_read, 362 .read = adi_spi_u32_read,
342 .duplex = bfin_spi_u32_duplex, 363 .duplex = adi_spi_u32_duplex,
343}; 364};
344 365
345 366
346/* test if there is more transfer to be done */ 367/* test if there is more transfer to be done */
347static void bfin_spi_next_transfer(struct bfin_spi_master *drv) 368static void adi_spi_next_transfer(struct adi_spi_master *drv)
348{ 369{
349 struct spi_message *msg = drv->cur_msg; 370 struct spi_message *msg = drv->cur_msg;
350 struct spi_transfer *t = drv->cur_transfer; 371 struct spi_transfer *t = drv->cur_transfer;
@@ -360,15 +381,15 @@ static void bfin_spi_next_transfer(struct bfin_spi_master *drv)
360 } 381 }
361} 382}
362 383
363static void bfin_spi_giveback(struct bfin_spi_master *drv_data) 384static void adi_spi_giveback(struct adi_spi_master *drv_data)
364{ 385{
365 struct bfin_spi_device *chip = drv_data->cur_chip; 386 struct adi_spi_device *chip = drv_data->cur_chip;
366 387
367 bfin_spi_cs_deactive(drv_data, chip); 388 adi_spi_cs_deactive(drv_data, chip);
368 spi_finalize_current_message(drv_data->master); 389 spi_finalize_current_message(drv_data->master);
369} 390}
370 391
371static int bfin_spi_setup_transfer(struct bfin_spi_master *drv) 392static int adi_spi_setup_transfer(struct adi_spi_master *drv)
372{ 393{
373 struct spi_transfer *t = drv->cur_transfer; 394 struct spi_transfer *t = drv->cur_transfer;
374 u32 cr, cr_width; 395 u32 cr, cr_width;
@@ -393,34 +414,33 @@ static int bfin_spi_setup_transfer(struct bfin_spi_master *drv)
393 switch (t->bits_per_word) { 414 switch (t->bits_per_word) {
394 case 8: 415 case 8:
395 cr_width = SPI_CTL_SIZE08; 416 cr_width = SPI_CTL_SIZE08;
396 drv->ops = &bfin_bfin_spi_transfer_ops_u8; 417 drv->ops = &adi_spi_transfer_ops_u8;
397 break; 418 break;
398 case 16: 419 case 16:
399 cr_width = SPI_CTL_SIZE16; 420 cr_width = SPI_CTL_SIZE16;
400 drv->ops = &bfin_bfin_spi_transfer_ops_u16; 421 drv->ops = &adi_spi_transfer_ops_u16;
401 break; 422 break;
402 case 32: 423 case 32:
403 cr_width = SPI_CTL_SIZE32; 424 cr_width = SPI_CTL_SIZE32;
404 drv->ops = &bfin_bfin_spi_transfer_ops_u32; 425 drv->ops = &adi_spi_transfer_ops_u32;
405 break; 426 break;
406 default: 427 default:
407 return -EINVAL; 428 return -EINVAL;
408 } 429 }
409 cr = bfin_read(&drv->regs->control) & ~SPI_CTL_SIZE; 430 cr = ioread32(&drv->regs->control) & ~SPI_CTL_SIZE;
410 cr |= cr_width; 431 cr |= cr_width;
411 bfin_write(&drv->regs->control, cr); 432 iowrite32(cr, &drv->regs->control);
412 433
413 /* speed setup */ 434 /* speed setup */
414 bfin_write(&drv->regs->clock, 435 iowrite32(hz_to_spi_clock(drv->sclk, t->speed_hz), &drv->regs->clock);
415 hz_to_spi_clock(drv->sclk, t->speed_hz));
416 return 0; 436 return 0;
417} 437}
418 438
419static int bfin_spi_dma_xfer(struct bfin_spi_master *drv_data) 439static int adi_spi_dma_xfer(struct adi_spi_master *drv_data)
420{ 440{
421 struct spi_transfer *t = drv_data->cur_transfer; 441 struct spi_transfer *t = drv_data->cur_transfer;
422 struct spi_message *msg = drv_data->cur_msg; 442 struct spi_message *msg = drv_data->cur_msg;
423 struct bfin_spi_device *chip = drv_data->cur_chip; 443 struct adi_spi_device *chip = drv_data->cur_chip;
424 u32 dma_config; 444 u32 dma_config;
425 unsigned long word_count, word_size; 445 unsigned long word_count, word_size;
426 void *tx_buf, *rx_buf; 446 void *tx_buf, *rx_buf;
@@ -498,17 +518,16 @@ static int bfin_spi_dma_xfer(struct bfin_spi_master *drv_data)
498 set_dma_config(drv_data->rx_dma, dma_config | WNR); 518 set_dma_config(drv_data->rx_dma, dma_config | WNR);
499 enable_dma(drv_data->tx_dma); 519 enable_dma(drv_data->tx_dma);
500 enable_dma(drv_data->rx_dma); 520 enable_dma(drv_data->rx_dma);
501 SSYNC();
502 521
503 bfin_write(&drv_data->regs->rx_control, SPI_RXCTL_REN | SPI_RXCTL_RDR_NE); 522 iowrite32(SPI_RXCTL_REN | SPI_RXCTL_RDR_NE,
504 SSYNC(); 523 &drv_data->regs->rx_control);
505 bfin_write(&drv_data->regs->tx_control, 524 iowrite32(SPI_TXCTL_TEN | SPI_TXCTL_TTI | SPI_TXCTL_TDR_NF,
506 SPI_TXCTL_TEN | SPI_TXCTL_TTI | SPI_TXCTL_TDR_NF); 525 &drv_data->regs->tx_control);
507 526
508 return 0; 527 return 0;
509} 528}
510 529
511static int bfin_spi_pio_xfer(struct bfin_spi_master *drv_data) 530static int adi_spi_pio_xfer(struct adi_spi_master *drv_data)
512{ 531{
513 struct spi_message *msg = drv_data->cur_msg; 532 struct spi_message *msg = drv_data->cur_msg;
514 533
@@ -529,19 +548,19 @@ static int bfin_spi_pio_xfer(struct bfin_spi_master *drv_data)
529 return -EIO; 548 return -EIO;
530 } 549 }
531 550
532 if (!bfin_spi_flush(drv_data)) 551 if (!adi_spi_flush(drv_data))
533 return -EIO; 552 return -EIO;
534 msg->actual_length += drv_data->transfer_len; 553 msg->actual_length += drv_data->transfer_len;
535 tasklet_schedule(&drv_data->pump_transfers); 554 tasklet_schedule(&drv_data->pump_transfers);
536 return 0; 555 return 0;
537} 556}
538 557
539static void bfin_spi_pump_transfers(unsigned long data) 558static void adi_spi_pump_transfers(unsigned long data)
540{ 559{
541 struct bfin_spi_master *drv_data = (struct bfin_spi_master *)data; 560 struct adi_spi_master *drv_data = (struct adi_spi_master *)data;
542 struct spi_message *msg = NULL; 561 struct spi_message *msg = NULL;
543 struct spi_transfer *t = NULL; 562 struct spi_transfer *t = NULL;
544 struct bfin_spi_device *chip = NULL; 563 struct adi_spi_device *chip = NULL;
545 int ret; 564 int ret;
546 565
547 /* Get current state information */ 566 /* Get current state information */
@@ -552,7 +571,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
552 /* Handle for abort */ 571 /* Handle for abort */
553 if (drv_data->state == ERROR_STATE) { 572 if (drv_data->state == ERROR_STATE) {
554 msg->status = -EIO; 573 msg->status = -EIO;
555 bfin_spi_giveback(drv_data); 574 adi_spi_giveback(drv_data);
556 return; 575 return;
557 } 576 }
558 577
@@ -560,14 +579,14 @@ static void bfin_spi_pump_transfers(unsigned long data)
560 if (t->delay_usecs) 579 if (t->delay_usecs)
561 udelay(t->delay_usecs); 580 udelay(t->delay_usecs);
562 if (t->cs_change) 581 if (t->cs_change)
563 bfin_spi_cs_deactive(drv_data, chip); 582 adi_spi_cs_deactive(drv_data, chip);
564 bfin_spi_next_transfer(drv_data); 583 adi_spi_next_transfer(drv_data);
565 t = drv_data->cur_transfer; 584 t = drv_data->cur_transfer;
566 } 585 }
567 /* Handle end of message */ 586 /* Handle end of message */
568 if (drv_data->state == DONE_STATE) { 587 if (drv_data->state == DONE_STATE) {
569 msg->status = 0; 588 msg->status = 0;
570 bfin_spi_giveback(drv_data); 589 adi_spi_giveback(drv_data);
571 return; 590 return;
572 } 591 }
573 592
@@ -577,34 +596,34 @@ static void bfin_spi_pump_transfers(unsigned long data)
577 return; 596 return;
578 } 597 }
579 598
580 ret = bfin_spi_setup_transfer(drv_data); 599 ret = adi_spi_setup_transfer(drv_data);
581 if (ret) { 600 if (ret) {
582 msg->status = ret; 601 msg->status = ret;
583 bfin_spi_giveback(drv_data); 602 adi_spi_giveback(drv_data);
584 } 603 }
585 604
586 bfin_write(&drv_data->regs->status, 0xFFFFFFFF); 605 iowrite32(0xFFFFFFFF, &drv_data->regs->status);
587 bfin_spi_cs_active(drv_data, chip); 606 adi_spi_cs_active(drv_data, chip);
588 drv_data->state = RUNNING_STATE; 607 drv_data->state = RUNNING_STATE;
589 608
590 if (chip->enable_dma) 609 if (chip->enable_dma)
591 ret = bfin_spi_dma_xfer(drv_data); 610 ret = adi_spi_dma_xfer(drv_data);
592 else 611 else
593 ret = bfin_spi_pio_xfer(drv_data); 612 ret = adi_spi_pio_xfer(drv_data);
594 if (ret) { 613 if (ret) {
595 msg->status = ret; 614 msg->status = ret;
596 bfin_spi_giveback(drv_data); 615 adi_spi_giveback(drv_data);
597 } 616 }
598} 617}
599 618
600static int bfin_spi_transfer_one_message(struct spi_master *master, 619static int adi_spi_transfer_one_message(struct spi_master *master,
601 struct spi_message *m) 620 struct spi_message *m)
602{ 621{
603 struct bfin_spi_master *drv_data = spi_master_get_devdata(master); 622 struct adi_spi_master *drv_data = spi_master_get_devdata(master);
604 623
605 drv_data->cur_msg = m; 624 drv_data->cur_msg = m;
606 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); 625 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
607 bfin_spi_restore_state(drv_data); 626 adi_spi_restore_state(drv_data);
608 627
609 drv_data->state = START_STATE; 628 drv_data->state = START_STATE;
610 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, 629 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
@@ -630,15 +649,15 @@ static const u16 ssel[][MAX_SPI_SSEL] = {
630 P_SPI2_SSEL6, P_SPI2_SSEL7}, 649 P_SPI2_SSEL6, P_SPI2_SSEL7},
631}; 650};
632 651
633static int bfin_spi_setup(struct spi_device *spi) 652static int adi_spi_setup(struct spi_device *spi)
634{ 653{
635 struct bfin_spi_master *drv_data = spi_master_get_devdata(spi->master); 654 struct adi_spi_master *drv_data = spi_master_get_devdata(spi->master);
636 struct bfin_spi_device *chip = spi_get_ctldata(spi); 655 struct adi_spi_device *chip = spi_get_ctldata(spi);
637 u32 bfin_ctl_reg = SPI_CTL_ODM | SPI_CTL_PSSE; 656 u32 ctl_reg = SPI_CTL_ODM | SPI_CTL_PSSE;
638 int ret = -EINVAL; 657 int ret = -EINVAL;
639 658
640 if (!chip) { 659 if (!chip) {
641 struct bfin_spi3_chip *chip_info = spi->controller_data; 660 struct adi_spi3_chip *chip_info = spi->controller_data;
642 661
643 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 662 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
644 if (!chip) { 663 if (!chip) {
@@ -646,7 +665,7 @@ static int bfin_spi_setup(struct spi_device *spi)
646 return -ENOMEM; 665 return -ENOMEM;
647 } 666 }
648 if (chip_info) { 667 if (chip_info) {
649 if (chip_info->control & ~bfin_ctl_reg) { 668 if (chip_info->control & ~ctl_reg) {
650 dev_err(&spi->dev, 669 dev_err(&spi->dev,
651 "do not set bits that the SPI framework manages\n"); 670 "do not set bits that the SPI framework manages\n");
652 goto error; 671 goto error;
@@ -657,6 +676,7 @@ static int bfin_spi_setup(struct spi_device *spi)
657 chip->enable_dma = chip_info->enable_dma; 676 chip->enable_dma = chip_info->enable_dma;
658 } 677 }
659 chip->cs = spi->chip_select; 678 chip->cs = spi->chip_select;
679
660 if (chip->cs < MAX_CTRL_CS) { 680 if (chip->cs < MAX_CTRL_CS) {
661 chip->ssel = (1 << chip->cs) << 8; 681 chip->ssel = (1 << chip->cs) << 8;
662 ret = peripheral_request(ssel[spi->master->bus_num] 682 ret = peripheral_request(ssel[spi->master->bus_num]
@@ -678,7 +698,7 @@ static int bfin_spi_setup(struct spi_device *spi)
678 } 698 }
679 699
680 /* force a default base state */ 700 /* force a default base state */
681 chip->control &= bfin_ctl_reg; 701 chip->control &= ctl_reg;
682 702
683 if (spi->mode & SPI_CPOL) 703 if (spi->mode & SPI_CPOL)
684 chip->control |= SPI_CTL_CPOL; 704 chip->control |= SPI_CTL_CPOL;
@@ -692,8 +712,8 @@ static int bfin_spi_setup(struct spi_device *spi)
692 712
693 chip->clock = hz_to_spi_clock(drv_data->sclk, spi->max_speed_hz); 713 chip->clock = hz_to_spi_clock(drv_data->sclk, spi->max_speed_hz);
694 714
695 bfin_spi_cs_enable(drv_data, chip); 715 adi_spi_cs_enable(drv_data, chip);
696 bfin_spi_cs_deactive(drv_data, chip); 716 adi_spi_cs_deactive(drv_data, chip);
697 717
698 return 0; 718 return 0;
699error: 719error:
@@ -705,10 +725,10 @@ error:
705 return ret; 725 return ret;
706} 726}
707 727
708static void bfin_spi_cleanup(struct spi_device *spi) 728static void adi_spi_cleanup(struct spi_device *spi)
709{ 729{
710 struct bfin_spi_device *chip = spi_get_ctldata(spi); 730 struct adi_spi_device *chip = spi_get_ctldata(spi);
711 struct bfin_spi_master *drv_data = spi_master_get_devdata(spi->master); 731 struct adi_spi_master *drv_data = spi_master_get_devdata(spi->master);
712 732
713 if (!chip) 733 if (!chip)
714 return; 734 return;
@@ -716,7 +736,7 @@ static void bfin_spi_cleanup(struct spi_device *spi)
716 if (chip->cs < MAX_CTRL_CS) { 736 if (chip->cs < MAX_CTRL_CS) {
717 peripheral_free(ssel[spi->master->bus_num] 737 peripheral_free(ssel[spi->master->bus_num]
718 [chip->cs-1]); 738 [chip->cs-1]);
719 bfin_spi_cs_disable(drv_data, chip); 739 adi_spi_cs_disable(drv_data, chip);
720 } else { 740 } else {
721 gpio_free(chip->cs_gpio); 741 gpio_free(chip->cs_gpio);
722 } 742 }
@@ -725,10 +745,11 @@ static void bfin_spi_cleanup(struct spi_device *spi)
725 spi_set_ctldata(spi, NULL); 745 spi_set_ctldata(spi, NULL);
726} 746}
727 747
728static irqreturn_t bfin_spi_tx_dma_isr(int irq, void *dev_id) 748static irqreturn_t adi_spi_tx_dma_isr(int irq, void *dev_id)
729{ 749{
730 struct bfin_spi_master *drv_data = dev_id; 750 struct adi_spi_master *drv_data = dev_id;
731 u32 dma_stat = get_dma_curr_irqstat(drv_data->tx_dma); 751 u32 dma_stat = get_dma_curr_irqstat(drv_data->tx_dma);
752 u32 tx_ctl;
732 753
733 clear_dma_irqstat(drv_data->tx_dma); 754 clear_dma_irqstat(drv_data->tx_dma);
734 if (dma_stat & DMA_DONE) { 755 if (dma_stat & DMA_DONE) {
@@ -739,13 +760,15 @@ static irqreturn_t bfin_spi_tx_dma_isr(int irq, void *dev_id)
739 if (drv_data->tx) 760 if (drv_data->tx)
740 drv_data->state = ERROR_STATE; 761 drv_data->state = ERROR_STATE;
741 } 762 }
742 bfin_write_and(&drv_data->regs->tx_control, ~SPI_TXCTL_TDR_NF); 763 tx_ctl = ioread32(&drv_data->regs->tx_control);
764 tx_ctl &= ~SPI_TXCTL_TDR_NF;
765 iowrite32(tx_ctl, &drv_data->regs->tx_control);
743 return IRQ_HANDLED; 766 return IRQ_HANDLED;
744} 767}
745 768
746static irqreturn_t bfin_spi_rx_dma_isr(int irq, void *dev_id) 769static irqreturn_t adi_spi_rx_dma_isr(int irq, void *dev_id)
747{ 770{
748 struct bfin_spi_master *drv_data = dev_id; 771 struct adi_spi_master *drv_data = dev_id;
749 struct spi_message *msg = drv_data->cur_msg; 772 struct spi_message *msg = drv_data->cur_msg;
750 u32 dma_stat = get_dma_curr_irqstat(drv_data->rx_dma); 773 u32 dma_stat = get_dma_curr_irqstat(drv_data->rx_dma);
751 774
@@ -760,8 +783,8 @@ static irqreturn_t bfin_spi_rx_dma_isr(int irq, void *dev_id)
760 dev_err(&drv_data->master->dev, 783 dev_err(&drv_data->master->dev,
761 "spi rx dma error: %d\n", dma_stat); 784 "spi rx dma error: %d\n", dma_stat);
762 } 785 }
763 bfin_write(&drv_data->regs->tx_control, 0); 786 iowrite32(0, &drv_data->regs->tx_control);
764 bfin_write(&drv_data->regs->rx_control, 0); 787 iowrite32(0, &drv_data->regs->rx_control);
765 if (drv_data->rx_num != drv_data->tx_num) 788 if (drv_data->rx_num != drv_data->tx_num)
766 dev_dbg(&drv_data->master->dev, 789 dev_dbg(&drv_data->master->dev,
767 "dma interrupt missing: tx=%d,rx=%d\n", 790 "dma interrupt missing: tx=%d,rx=%d\n",
@@ -770,15 +793,15 @@ static irqreturn_t bfin_spi_rx_dma_isr(int irq, void *dev_id)
770 return IRQ_HANDLED; 793 return IRQ_HANDLED;
771} 794}
772 795
773static int bfin_spi_probe(struct platform_device *pdev) 796static int adi_spi_probe(struct platform_device *pdev)
774{ 797{
775 struct device *dev = &pdev->dev; 798 struct device *dev = &pdev->dev;
776 struct bfin_spi3_master *info = dev_get_platdata(dev); 799 struct adi_spi3_master *info = dev_get_platdata(dev);
777 struct spi_master *master; 800 struct spi_master *master;
778 struct bfin_spi_master *drv_data; 801 struct adi_spi_master *drv_data;
779 struct resource *mem, *res; 802 struct resource *mem, *res;
780 unsigned int tx_dma, rx_dma; 803 unsigned int tx_dma, rx_dma;
781 unsigned long sclk; 804 struct clk *sclk;
782 int ret; 805 int ret;
783 806
784 if (!info) { 807 if (!info) {
@@ -786,10 +809,10 @@ static int bfin_spi_probe(struct platform_device *pdev)
786 return -ENODEV; 809 return -ENODEV;
787 } 810 }
788 811
789 sclk = get_sclk1(); 812 sclk = devm_clk_get(dev, "spi");
790 if (!sclk) { 813 if (IS_ERR(sclk)) {
791 dev_err(dev, "can not get sclk1\n"); 814 dev_err(dev, "can not get spi clock\n");
792 return -ENXIO; 815 return PTR_ERR(sclk);
793 } 816 }
794 817
795 res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 818 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -819,9 +842,9 @@ static int bfin_spi_probe(struct platform_device *pdev)
819 842
820 master->bus_num = pdev->id; 843 master->bus_num = pdev->id;
821 master->num_chipselect = info->num_chipselect; 844 master->num_chipselect = info->num_chipselect;
822 master->cleanup = bfin_spi_cleanup; 845 master->cleanup = adi_spi_cleanup;
823 master->setup = bfin_spi_setup; 846 master->setup = adi_spi_setup;
824 master->transfer_one_message = bfin_spi_transfer_one_message; 847 master->transfer_one_message = adi_spi_transfer_one_message;
825 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | 848 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
826 SPI_BPW_MASK(8); 849 SPI_BPW_MASK(8);
827 850
@@ -830,7 +853,7 @@ static int bfin_spi_probe(struct platform_device *pdev)
830 drv_data->tx_dma = tx_dma; 853 drv_data->tx_dma = tx_dma;
831 drv_data->rx_dma = rx_dma; 854 drv_data->rx_dma = rx_dma;
832 drv_data->pin_req = info->pin_req; 855 drv_data->pin_req = info->pin_req;
833 drv_data->sclk = sclk; 856 drv_data->sclk = clk_get_rate(sclk);
834 857
835 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 858 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
836 drv_data->regs = devm_ioremap_resource(dev, mem); 859 drv_data->regs = devm_ioremap_resource(dev, mem);
@@ -845,28 +868,28 @@ static int bfin_spi_probe(struct platform_device *pdev)
845 dev_err(dev, "can not request SPI TX DMA channel\n"); 868 dev_err(dev, "can not request SPI TX DMA channel\n");
846 goto err_put_master; 869 goto err_put_master;
847 } 870 }
848 set_dma_callback(tx_dma, bfin_spi_tx_dma_isr, drv_data); 871 set_dma_callback(tx_dma, adi_spi_tx_dma_isr, drv_data);
849 872
850 ret = request_dma(rx_dma, "SPI_RX_DMA"); 873 ret = request_dma(rx_dma, "SPI_RX_DMA");
851 if (ret) { 874 if (ret) {
852 dev_err(dev, "can not request SPI RX DMA channel\n"); 875 dev_err(dev, "can not request SPI RX DMA channel\n");
853 goto err_free_tx_dma; 876 goto err_free_tx_dma;
854 } 877 }
855 set_dma_callback(drv_data->rx_dma, bfin_spi_rx_dma_isr, drv_data); 878 set_dma_callback(drv_data->rx_dma, adi_spi_rx_dma_isr, drv_data);
856 879
857 /* request CLK, MOSI and MISO */ 880 /* request CLK, MOSI and MISO */
858 ret = peripheral_request_list(drv_data->pin_req, "bfin-spi3"); 881 ret = peripheral_request_list(drv_data->pin_req, "adi-spi3");
859 if (ret < 0) { 882 if (ret < 0) {
860 dev_err(dev, "can not request spi pins\n"); 883 dev_err(dev, "can not request spi pins\n");
861 goto err_free_rx_dma; 884 goto err_free_rx_dma;
862 } 885 }
863 886
864 bfin_write(&drv_data->regs->control, SPI_CTL_MSTR | SPI_CTL_CPHA); 887 iowrite32(SPI_CTL_MSTR | SPI_CTL_CPHA, &drv_data->regs->control);
865 bfin_write(&drv_data->regs->ssel, 0x0000FE00); 888 iowrite32(0x0000FE00, &drv_data->regs->ssel);
866 bfin_write(&drv_data->regs->delay, 0x0); 889 iowrite32(0x0, &drv_data->regs->delay);
867 890
868 tasklet_init(&drv_data->pump_transfers, 891 tasklet_init(&drv_data->pump_transfers,
869 bfin_spi_pump_transfers, (unsigned long)drv_data); 892 adi_spi_pump_transfers, (unsigned long)drv_data);
870 /* register with the SPI framework */ 893 /* register with the SPI framework */
871 ret = devm_spi_register_master(dev, master); 894 ret = devm_spi_register_master(dev, master);
872 if (ret) { 895 if (ret) {
@@ -888,43 +911,41 @@ err_put_master:
888 return ret; 911 return ret;
889} 912}
890 913
891static int bfin_spi_remove(struct platform_device *pdev) 914static int adi_spi_remove(struct platform_device *pdev)
892{ 915{
893 struct spi_master *master = platform_get_drvdata(pdev); 916 struct spi_master *master = platform_get_drvdata(pdev);
894 struct bfin_spi_master *drv_data = spi_master_get_devdata(master); 917 struct adi_spi_master *drv_data = spi_master_get_devdata(master);
895
896 bfin_spi_disable(drv_data);
897 918
919 adi_spi_disable(drv_data);
898 peripheral_free_list(drv_data->pin_req); 920 peripheral_free_list(drv_data->pin_req);
899 free_dma(drv_data->rx_dma); 921 free_dma(drv_data->rx_dma);
900 free_dma(drv_data->tx_dma); 922 free_dma(drv_data->tx_dma);
901
902 return 0; 923 return 0;
903} 924}
904 925
905#ifdef CONFIG_PM 926#ifdef CONFIG_PM
906static int bfin_spi_suspend(struct device *dev) 927static int adi_spi_suspend(struct device *dev)
907{ 928{
908 struct spi_master *master = dev_get_drvdata(dev); 929 struct spi_master *master = dev_get_drvdata(dev);
909 struct bfin_spi_master *drv_data = spi_master_get_devdata(master); 930 struct adi_spi_master *drv_data = spi_master_get_devdata(master);
910 931
911 spi_master_suspend(master); 932 spi_master_suspend(master);
912 933
913 drv_data->control = bfin_read(&drv_data->regs->control); 934 drv_data->control = ioread32(&drv_data->regs->control);
914 drv_data->ssel = bfin_read(&drv_data->regs->ssel); 935 drv_data->ssel = ioread32(&drv_data->regs->ssel);
915 936
916 bfin_write(&drv_data->regs->control, SPI_CTL_MSTR | SPI_CTL_CPHA); 937 iowrite32(SPI_CTL_MSTR | SPI_CTL_CPHA, &drv_data->regs->control);
917 bfin_write(&drv_data->regs->ssel, 0x0000FE00); 938 iowrite32(0x0000FE00, &drv_data->regs->ssel);
918 dma_disable_irq(drv_data->rx_dma); 939 dma_disable_irq(drv_data->rx_dma);
919 dma_disable_irq(drv_data->tx_dma); 940 dma_disable_irq(drv_data->tx_dma);
920 941
921 return 0; 942 return 0;
922} 943}
923 944
924static int bfin_spi_resume(struct device *dev) 945static int adi_spi_resume(struct device *dev)
925{ 946{
926 struct spi_master *master = dev_get_drvdata(dev); 947 struct spi_master *master = dev_get_drvdata(dev);
927 struct bfin_spi_master *drv_data = spi_master_get_devdata(master); 948 struct adi_spi_master *drv_data = spi_master_get_devdata(master);
928 int ret = 0; 949 int ret = 0;
929 950
930 /* bootrom may modify spi and dma status when resume in spi boot mode */ 951 /* bootrom may modify spi and dma status when resume in spi boot mode */
@@ -932,8 +953,8 @@ static int bfin_spi_resume(struct device *dev)
932 953
933 dma_enable_irq(drv_data->rx_dma); 954 dma_enable_irq(drv_data->rx_dma);
934 dma_enable_irq(drv_data->tx_dma); 955 dma_enable_irq(drv_data->tx_dma);
935 bfin_write(&drv_data->regs->control, drv_data->control); 956 iowrite32(drv_data->control, &drv_data->regs->control);
936 bfin_write(&drv_data->regs->ssel, drv_data->ssel); 957 iowrite32(drv_data->ssel, &drv_data->regs->ssel);
937 958
938 ret = spi_master_resume(master); 959 ret = spi_master_resume(master);
939 if (ret) { 960 if (ret) {
@@ -944,21 +965,21 @@ static int bfin_spi_resume(struct device *dev)
944 return ret; 965 return ret;
945} 966}
946#endif 967#endif
947static const struct dev_pm_ops bfin_spi_pm_ops = { 968static const struct dev_pm_ops adi_spi_pm_ops = {
948 SET_SYSTEM_SLEEP_PM_OPS(bfin_spi_suspend, bfin_spi_resume) 969 SET_SYSTEM_SLEEP_PM_OPS(adi_spi_suspend, adi_spi_resume)
949}; 970};
950 971
951MODULE_ALIAS("platform:bfin-spi3"); 972MODULE_ALIAS("platform:adi-spi3");
952static struct platform_driver bfin_spi_driver = { 973static struct platform_driver adi_spi_driver = {
953 .driver = { 974 .driver = {
954 .name = "bfin-spi3", 975 .name = "adi-spi3",
955 .owner = THIS_MODULE, 976 .owner = THIS_MODULE,
956 .pm = &bfin_spi_pm_ops, 977 .pm = &adi_spi_pm_ops,
957 }, 978 },
958 .remove = bfin_spi_remove, 979 .remove = adi_spi_remove,
959}; 980};
960 981
961module_platform_driver_probe(bfin_spi_driver, bfin_spi_probe); 982module_platform_driver_probe(adi_spi_driver, adi_spi_probe);
962 983
963MODULE_DESCRIPTION("Analog Devices SPI3 controller driver"); 984MODULE_DESCRIPTION("Analog Devices SPI3 controller driver");
964MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>"); 985MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 8005f9869481..92a6f0d93233 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -224,7 +224,7 @@ struct atmel_spi {
224 struct platform_device *pdev; 224 struct platform_device *pdev;
225 225
226 struct spi_transfer *current_transfer; 226 struct spi_transfer *current_transfer;
227 unsigned long current_remaining_bytes; 227 int current_remaining_bytes;
228 int done_status; 228 int done_status;
229 229
230 struct completion xfer_completion; 230 struct completion xfer_completion;
@@ -874,8 +874,9 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
874 spi_readl(as, RDR); 874 spi_readl(as, RDR);
875 } 875 }
876 if (xfer->bits_per_word > 8) { 876 if (xfer->bits_per_word > 8) {
877 as->current_remaining_bytes -= 2; 877 if (as->current_remaining_bytes > 2)
878 if (as->current_remaining_bytes < 0) 878 as->current_remaining_bytes -= 2;
879 else
879 as->current_remaining_bytes = 0; 880 as->current_remaining_bytes = 0;
880 } else { 881 } else {
881 as->current_remaining_bytes--; 882 as->current_remaining_bytes--;
@@ -1110,13 +1111,18 @@ static int atmel_spi_one_transfer(struct spi_master *master,
1110 atmel_spi_next_xfer_pio(master, xfer); 1111 atmel_spi_next_xfer_pio(master, xfer);
1111 } else { 1112 } else {
1112 as->current_remaining_bytes -= len; 1113 as->current_remaining_bytes -= len;
1114 if (as->current_remaining_bytes < 0)
1115 as->current_remaining_bytes = 0;
1113 } 1116 }
1114 } else { 1117 } else {
1115 atmel_spi_next_xfer_pio(master, xfer); 1118 atmel_spi_next_xfer_pio(master, xfer);
1116 } 1119 }
1117 1120
1121 /* interrupts are disabled, so free the lock for schedule */
1122 atmel_spi_unlock(as);
1118 ret = wait_for_completion_timeout(&as->xfer_completion, 1123 ret = wait_for_completion_timeout(&as->xfer_completion,
1119 SPI_DMA_TIMEOUT); 1124 SPI_DMA_TIMEOUT);
1125 atmel_spi_lock(as);
1120 if (WARN_ON(ret == 0)) { 1126 if (WARN_ON(ret == 0)) {
1121 dev_err(&spi->dev, 1127 dev_err(&spi->dev,
1122 "spi trasfer timeout, err %d\n", ret); 1128 "spi trasfer timeout, err %d\n", ret);
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 55e57c3eb9bd..ebf720b88a2a 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/gpio.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16#include <linux/io.h> 17#include <linux/io.h>
17#include <linux/ioport.h> 18#include <linux/ioport.h>
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
new file mode 100644
index 000000000000..bb758978465d
--- /dev/null
+++ b/drivers/spi/spi-cadence.c
@@ -0,0 +1,673 @@
1/*
2 * Cadence SPI controller driver (master mode only)
3 *
4 * Copyright (C) 2008 - 2014 Xilinx, Inc.
5 *
6 * based on Blackfin On-Chip SPI Driver (spi_bfin5xx.c)
7 *
8 * This program is free software; you can redistribute it and/or modify it under
9 * the terms of the GNU General Public License version 2 as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/of_irq.h>
20#include <linux/of_address.h>
21#include <linux/platform_device.h>
22#include <linux/spi/spi.h>
23
24/* Name of this driver */
25#define CDNS_SPI_NAME "cdns-spi"
26
27/* Register offset definitions */
28#define CDNS_SPI_CR_OFFSET 0x00 /* Configuration Register, RW */
29#define CDNS_SPI_ISR_OFFSET 0x04 /* Interrupt Status Register, RO */
30#define CDNS_SPI_IER_OFFSET 0x08 /* Interrupt Enable Register, WO */
31#define CDNS_SPI_IDR_OFFSET 0x0c /* Interrupt Disable Register, WO */
32#define CDNS_SPI_IMR_OFFSET 0x10 /* Interrupt Enabled Mask Register, RO */
33#define CDNS_SPI_ER_OFFSET 0x14 /* Enable/Disable Register, RW */
34#define CDNS_SPI_DR_OFFSET 0x18 /* Delay Register, RW */
35#define CDNS_SPI_TXD_OFFSET 0x1C /* Data Transmit Register, WO */
36#define CDNS_SPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */
37#define CDNS_SPI_SICR_OFFSET 0x24 /* Slave Idle Count Register, RW */
38#define CDNS_SPI_THLD_OFFSET 0x28 /* Transmit FIFO Watermark Register,RW */
39
40/*
41 * SPI Configuration Register bit Masks
42 *
43 * This register contains various control bits that affect the operation
44 * of the SPI controller
45 */
46#define CDNS_SPI_CR_MANSTRT_MASK 0x00010000 /* Manual TX Start */
47#define CDNS_SPI_CR_CPHA_MASK 0x00000004 /* Clock Phase Control */
48#define CDNS_SPI_CR_CPOL_MASK 0x00000002 /* Clock Polarity Control */
49#define CDNS_SPI_CR_SSCTRL_MASK 0x00003C00 /* Slave Select Mask */
50#define CDNS_SPI_CR_BAUD_DIV_MASK 0x00000038 /* Baud Rate Divisor Mask */
51#define CDNS_SPI_CR_MSTREN_MASK 0x00000001 /* Master Enable Mask */
52#define CDNS_SPI_CR_MANSTRTEN_MASK 0x00008000 /* Manual TX Enable Mask */
53#define CDNS_SPI_CR_SSFORCE_MASK 0x00004000 /* Manual SS Enable Mask */
54#define CDNS_SPI_CR_BAUD_DIV_4_MASK 0x00000008 /* Default Baud Div Mask */
55#define CDNS_SPI_CR_DEFAULT_MASK (CDNS_SPI_CR_MSTREN_MASK | \
56 CDNS_SPI_CR_SSCTRL_MASK | \
57 CDNS_SPI_CR_SSFORCE_MASK | \
58 CDNS_SPI_CR_BAUD_DIV_4_MASK)
59
60/*
61 * SPI Configuration Register - Baud rate and slave select
62 *
63 * These are the values used in the calculation of baud rate divisor and
64 * setting the slave select.
65 */
66
67#define CDNS_SPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
68#define CDNS_SPI_BAUD_DIV_MIN 1 /* Baud rate divisor minimum */
69#define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
70#define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */
71#define CDNS_SPI_SS0 0x1 /* Slave Select zero */
72
73/*
74 * SPI Interrupt Registers bit Masks
75 *
76 * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
77 * bit definitions.
78 */
79#define CDNS_SPI_IXR_TXOW_MASK 0x00000004 /* SPI TX FIFO Overwater */
80#define CDNS_SPI_IXR_MODF_MASK 0x00000002 /* SPI Mode Fault */
81#define CDNS_SPI_IXR_RXNEMTY_MASK 0x00000010 /* SPI RX FIFO Not Empty */
82#define CDNS_SPI_IXR_DEFAULT_MASK (CDNS_SPI_IXR_TXOW_MASK | \
83 CDNS_SPI_IXR_MODF_MASK)
84#define CDNS_SPI_IXR_TXFULL_MASK 0x00000008 /* SPI TX Full */
85#define CDNS_SPI_IXR_ALL_MASK 0x0000007F /* SPI all interrupts */
86
87/*
88 * SPI Enable Register bit Masks
89 *
90 * This register is used to enable or disable the SPI controller
91 */
92#define CDNS_SPI_ER_ENABLE_MASK 0x00000001 /* SPI Enable Bit Mask */
93#define CDNS_SPI_ER_DISABLE_MASK 0x0 /* SPI Disable Bit Mask */
94
95/* SPI FIFO depth in bytes */
96#define CDNS_SPI_FIFO_DEPTH 128
97
98/* Default number of chip select lines */
99#define CDNS_SPI_DEFAULT_NUM_CS 4
100
101/**
102 * struct cdns_spi - This definition defines spi driver instance
103 * @regs: Virtual address of the SPI controller registers
104 * @ref_clk: Pointer to the peripheral clock
105 * @pclk: Pointer to the APB clock
106 * @speed_hz: Current SPI bus clock speed in Hz
107 * @txbuf: Pointer to the TX buffer
108 * @rxbuf: Pointer to the RX buffer
109 * @tx_bytes: Number of bytes left to transfer
 110 * @rx_bytes: Number of bytes left to receive
111 * @dev_busy: Device busy flag
112 * @is_decoded_cs: Flag for decoder property set or not
113 */
114struct cdns_spi {
115 void __iomem *regs;
116 struct clk *ref_clk;
117 struct clk *pclk;
118 u32 speed_hz;
119 const u8 *txbuf;
120 u8 *rxbuf;
121 int tx_bytes;
122 int rx_bytes;
123 u8 dev_busy;
124 u32 is_decoded_cs;
125};
126
127/* Macros for the SPI controller read/write */
128static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset)
129{
130 return readl_relaxed(xspi->regs + offset);
131}
132
133static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val)
134{
135 writel_relaxed(val, xspi->regs + offset);
136}
137
138/**
139 * cdns_spi_init_hw - Initialize the hardware and configure the SPI controller
140 * @xspi: Pointer to the cdns_spi structure
141 *
 142 * On reset the SPI controller is configured for master mode, the baud rate
 143 * divisor is set to 4, the threshold for the TX FIFO not-full interrupt is
 144 * set to 1 and the transfer word size is 8 bits.
145 * This function initializes the SPI controller to disable and clear all the
146 * interrupts, enable manual slave select and manual start, deselect all the
147 * chip select lines, and enable the SPI controller.
148 */
149static void cdns_spi_init_hw(struct cdns_spi *xspi)
150{
151 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
152 CDNS_SPI_ER_DISABLE_MASK);
153 cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
154 CDNS_SPI_IXR_ALL_MASK);
155
156 /* Clear the RX FIFO */
157 while (cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET) &
158 CDNS_SPI_IXR_RXNEMTY_MASK)
159 cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
160
161 cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET,
162 CDNS_SPI_IXR_ALL_MASK);
163 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET,
164 CDNS_SPI_CR_DEFAULT_MASK);
165 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
166 CDNS_SPI_ER_ENABLE_MASK);
167}
168
169/**
170 * cdns_spi_chipselect - Select or deselect the chip select line
171 * @spi: Pointer to the spi_device structure
 172 * @is_high: Select (0) or deselect (1) the chip select line
173 */
174static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
175{
176 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
177 u32 ctrl_reg;
178
179 ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
180
181 if (is_high) {
182 /* Deselect the slave */
183 ctrl_reg |= CDNS_SPI_CR_SSCTRL_MASK;
184 } else {
185 /* Select the slave */
186 ctrl_reg &= ~CDNS_SPI_CR_SSCTRL_MASK;
187 if (!(xspi->is_decoded_cs))
188 ctrl_reg |= ((~(CDNS_SPI_SS0 << spi->chip_select)) <<
189 CDNS_SPI_SS_SHIFT) &
190 CDNS_SPI_CR_SSCTRL_MASK;
191 else
192 ctrl_reg |= (spi->chip_select << CDNS_SPI_SS_SHIFT) &
193 CDNS_SPI_CR_SSCTRL_MASK;
194 }
195
196 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
197}
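
For a quick sanity check of the slave-select encoding above: without an external decoder the CR field carries a one-cold pattern of the CS lines, while with is-decoded-cs set it carries the raw chip-select number. A standalone, illustrative-only calculation for chip select 2 (masks re-declared locally; this is not driver code):

#include <stdio.h>

#define CDNS_SPI_CR_SSCTRL_MASK	0x00003C00
#define CDNS_SPI_SS_SHIFT	10
#define CDNS_SPI_SS0		0x1

int main(void)
{
	unsigned int cs = 2;

	/* one-cold encoding used when no external decoder is present */
	unsigned int undecoded = ((~(CDNS_SPI_SS0 << cs)) << CDNS_SPI_SS_SHIFT) &
				 CDNS_SPI_CR_SSCTRL_MASK;

	/* raw chip-select number used when is-decoded-cs is set */
	unsigned int decoded = (cs << CDNS_SPI_SS_SHIFT) &
			       CDNS_SPI_CR_SSCTRL_MASK;

	printf("undecoded 0x%04x, decoded 0x%04x\n", undecoded, decoded);
	/* prints: undecoded 0x2c00, decoded 0x0800 */
	return 0;
}
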
198
199/**
200 * cdns_spi_config_clock_mode - Sets clock polarity and phase
201 * @spi: Pointer to the spi_device structure
202 *
203 * Sets the requested clock polarity and phase.
204 */
205static void cdns_spi_config_clock_mode(struct spi_device *spi)
206{
207 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
208 u32 ctrl_reg;
209
210 ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
211
212 /* Set the SPI clock phase and clock polarity */
213 ctrl_reg &= ~(CDNS_SPI_CR_CPHA_MASK | CDNS_SPI_CR_CPOL_MASK);
214 if (spi->mode & SPI_CPHA)
215 ctrl_reg |= CDNS_SPI_CR_CPHA_MASK;
216 if (spi->mode & SPI_CPOL)
217 ctrl_reg |= CDNS_SPI_CR_CPOL_MASK;
218
219 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
220}
221
222/**
223 * cdns_spi_config_clock_freq - Sets clock frequency
224 * @spi: Pointer to the spi_device structure
225 * @transfer: Pointer to the spi_transfer structure which provides
226 * information about next transfer setup parameters
227 *
228 * Sets the requested clock frequency.
 229 * Note: If the requested frequency cannot be matched exactly with the
 230 * available prescaler values, the driver programs the closest achievable
 231 * frequency below the requested one for the transfer. If the requested
 232 * frequency is higher or lower than the range supported by the SPI
 233 * controller, the driver clamps it to the highest or lowest frequency the
 234 * controller supports.
235 */
236static void cdns_spi_config_clock_freq(struct spi_device *spi,
237 struct spi_transfer *transfer)
238{
239 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
240 u32 ctrl_reg, baud_rate_val;
241 unsigned long frequency;
242
243 frequency = clk_get_rate(xspi->ref_clk);
244
245 ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
246
247 /* Set the clock frequency */
248 if (xspi->speed_hz != transfer->speed_hz) {
249 /* first valid value is 1 */
250 baud_rate_val = CDNS_SPI_BAUD_DIV_MIN;
251 while ((baud_rate_val < CDNS_SPI_BAUD_DIV_MAX) &&
252 (frequency / (2 << baud_rate_val)) > transfer->speed_hz)
253 baud_rate_val++;
254
255 ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV_MASK;
256 ctrl_reg |= baud_rate_val << CDNS_SPI_BAUD_DIV_SHIFT;
257
258 xspi->speed_hz = frequency / (2 << baud_rate_val);
259 }
260 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
261}
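
The loop above selects the smallest divisor field n in [1, 7] whose resulting SCK (ref_clk / (2 << n), i.e. ref_clk / 2^(n+1)) does not exceed the requested speed. A standalone, illustrative-only replay of the search with an assumed 100 MHz ref_clk and a 12.5 MHz request:

#include <stdio.h>

int main(void)
{
	unsigned long frequency = 100000000;	/* assumed ref_clk rate */
	unsigned long requested = 12500000;	/* transfer->speed_hz */
	unsigned int n = 1;			/* CDNS_SPI_BAUD_DIV_MIN */

	while (n < 7 && frequency / (2 << n) > requested)
		n++;

	printf("divisor field %u -> SCK %lu Hz\n", n, frequency / (2 << n));
	/* prints: divisor field 2 -> SCK 12500000 Hz */
	return 0;
}
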
262
263/**
264 * cdns_spi_setup_transfer - Configure SPI controller for specified transfer
265 * @spi: Pointer to the spi_device structure
266 * @transfer: Pointer to the spi_transfer structure which provides
267 * information about next transfer setup parameters
268 *
269 * Sets the operational mode of SPI controller for the next SPI transfer and
270 * sets the requested clock frequency.
271 *
272 * Return: Always 0
273 */
274static int cdns_spi_setup_transfer(struct spi_device *spi,
275 struct spi_transfer *transfer)
276{
277 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
278
279 cdns_spi_config_clock_freq(spi, transfer);
280
281 dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u clock speed\n",
282 __func__, spi->mode, spi->bits_per_word,
283 xspi->speed_hz);
284
285 return 0;
286}
287
288/**
289 * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
290 * @xspi: Pointer to the cdns_spi structure
291 */
292static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
293{
294 unsigned long trans_cnt = 0;
295
296 while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
297 (xspi->tx_bytes > 0)) {
298 if (xspi->txbuf)
299 cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET,
300 *xspi->txbuf++);
301 else
302 cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET, 0);
303
304 xspi->tx_bytes--;
305 trans_cnt++;
306 }
307}
308
309/**
310 * cdns_spi_irq - Interrupt service routine of the SPI controller
311 * @irq: IRQ number
312 * @dev_id: Pointer to the xspi structure
313 *
314 * This function handles TX empty and Mode Fault interrupts only.
315 * On TX empty interrupt this function reads the received data from RX FIFO and
316 * fills the TX FIFO if there is any data remaining to be transferred.
 317 * On a Mode Fault interrupt this function marks the transfer as completed;
 318 * the SPI subsystem will then flag the error because the count of remaining
 319 * bytes to be transferred is non-zero.
320 *
321 * Return: IRQ_HANDLED when handled; IRQ_NONE otherwise.
322 */
323static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
324{
325 struct spi_master *master = dev_id;
326 struct cdns_spi *xspi = spi_master_get_devdata(master);
327 u32 intr_status, status;
328
329 status = IRQ_NONE;
330 intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET);
331 cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET, intr_status);
332
333 if (intr_status & CDNS_SPI_IXR_MODF_MASK) {
334 /* Indicate that transfer is completed, the SPI subsystem will
335 * identify the error as the remaining bytes to be
336 * transferred is non-zero
337 */
338 cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
339 CDNS_SPI_IXR_DEFAULT_MASK);
340 spi_finalize_current_transfer(master);
341 status = IRQ_HANDLED;
342 } else if (intr_status & CDNS_SPI_IXR_TXOW_MASK) {
343 unsigned long trans_cnt;
344
345 trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
346
347 /* Read out the data from the RX FIFO */
348 while (trans_cnt) {
349 u8 data;
350
351 data = cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
352 if (xspi->rxbuf)
353 *xspi->rxbuf++ = data;
354
355 xspi->rx_bytes--;
356 trans_cnt--;
357 }
358
359 if (xspi->tx_bytes) {
360 /* There is more data to send */
361 cdns_spi_fill_tx_fifo(xspi);
362 } else {
363 /* Transfer is completed */
364 cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
365 CDNS_SPI_IXR_DEFAULT_MASK);
366 spi_finalize_current_transfer(master);
367 }
368 status = IRQ_HANDLED;
369 }
370
371 return status;
372}
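
The trans_cnt computation above relies on the invariant that every word queued into the TX FIFO eventually yields one word in the RX FIFO, so rx_bytes - tx_bytes counts the words currently in flight. A toy, illustrative-only walk-through of the bookkeeping for a 10-byte transfer that fits in the FIFO:

#include <stdio.h>

int main(void)
{
	int tx_bytes = 10, rx_bytes = 10;	/* set up by cdns_transfer_one() */
	int trans_cnt;

	tx_bytes = 0;				/* cdns_spi_fill_tx_fifo() queues all 10 */

	/* TXOW interrupt: queued-but-unread words are in flight */
	trans_cnt = rx_bytes - tx_bytes;	/* 10 */

	rx_bytes -= trans_cnt;			/* handler drains them from RXD */

	printf("in flight %d, rx left %d, tx left %d\n",
	       trans_cnt, rx_bytes, tx_bytes);
	/* prints: in flight 10, rx left 0, tx left 0 -> transfer is finalized */
	return 0;
}
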
373
374/**
375 * cdns_transfer_one - Initiates the SPI transfer
376 * @master: Pointer to spi_master structure
377 * @spi: Pointer to the spi_device structure
378 * @transfer: Pointer to the spi_transfer structure which provides
379 * information about next transfer parameters
380 *
381 * This function fills the TX FIFO, starts the SPI transfer and
382 * returns a positive transfer count so that core will wait for completion.
383 *
 384 * Return: Transfer length; a positive value makes the core wait for completion
385 */
386static int cdns_transfer_one(struct spi_master *master,
387 struct spi_device *spi,
388 struct spi_transfer *transfer)
389{
390 struct cdns_spi *xspi = spi_master_get_devdata(master);
391
392 xspi->txbuf = transfer->tx_buf;
393 xspi->rxbuf = transfer->rx_buf;
394 xspi->tx_bytes = transfer->len;
395 xspi->rx_bytes = transfer->len;
396
397 cdns_spi_setup_transfer(spi, transfer);
398
399 cdns_spi_fill_tx_fifo(xspi);
400
401 cdns_spi_write(xspi, CDNS_SPI_IER_OFFSET,
402 CDNS_SPI_IXR_DEFAULT_MASK);
403 return transfer->len;
404}
405
406/**
407 * cdns_prepare_transfer_hardware - Prepares hardware for transfer.
408 * @master: Pointer to the spi_master structure which provides
409 * information about the controller.
410 *
411 * This function enables SPI master controller.
412 *
413 * Return: 0 always
414 */
415static int cdns_prepare_transfer_hardware(struct spi_master *master)
416{
417 struct cdns_spi *xspi = spi_master_get_devdata(master);
418
419 cdns_spi_config_clock_mode(master->cur_msg->spi);
420
421 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
422 CDNS_SPI_ER_ENABLE_MASK);
423
424 return 0;
425}
426
427/**
428 * cdns_unprepare_transfer_hardware - Relaxes hardware after transfer
429 * @master: Pointer to the spi_master structure which provides
430 * information about the controller.
431 *
432 * This function disables the SPI master controller.
433 *
434 * Return: 0 always
435 */
436static int cdns_unprepare_transfer_hardware(struct spi_master *master)
437{
438 struct cdns_spi *xspi = spi_master_get_devdata(master);
439
440 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
441 CDNS_SPI_ER_DISABLE_MASK);
442
443 return 0;
444}
445
446/**
447 * cdns_spi_probe - Probe method for the SPI driver
448 * @pdev: Pointer to the platform_device structure
449 *
450 * This function initializes the driver data structures and the hardware.
451 *
452 * Return: 0 on success and error value on error
453 */
454static int cdns_spi_probe(struct platform_device *pdev)
455{
456 int ret = 0, irq;
457 struct spi_master *master;
458 struct cdns_spi *xspi;
459 struct resource *res;
460 u32 num_cs;
461
462 master = spi_alloc_master(&pdev->dev, sizeof(*xspi));
463 if (master == NULL)
464 return -ENOMEM;
465
466 xspi = spi_master_get_devdata(master);
467 master->dev.of_node = pdev->dev.of_node;
468 platform_set_drvdata(pdev, master);
469
470 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
471 xspi->regs = devm_ioremap_resource(&pdev->dev, res);
472 if (IS_ERR(xspi->regs)) {
473 ret = PTR_ERR(xspi->regs);
474 goto remove_master;
475 }
476
477 xspi->pclk = devm_clk_get(&pdev->dev, "pclk");
478 if (IS_ERR(xspi->pclk)) {
479 dev_err(&pdev->dev, "pclk clock not found.\n");
480 ret = PTR_ERR(xspi->pclk);
481 goto remove_master;
482 }
483
484 xspi->ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
485 if (IS_ERR(xspi->ref_clk)) {
486 dev_err(&pdev->dev, "ref_clk clock not found.\n");
487 ret = PTR_ERR(xspi->ref_clk);
488 goto remove_master;
489 }
490
491 ret = clk_prepare_enable(xspi->pclk);
492 if (ret) {
493 dev_err(&pdev->dev, "Unable to enable APB clock.\n");
494 goto remove_master;
495 }
496
497 ret = clk_prepare_enable(xspi->ref_clk);
498 if (ret) {
499 dev_err(&pdev->dev, "Unable to enable device clock.\n");
500 goto clk_dis_apb;
501 }
502
503 /* SPI controller initializations */
504 cdns_spi_init_hw(xspi);
505
506 irq = platform_get_irq(pdev, 0);
507 if (irq <= 0) {
508 ret = -ENXIO;
509 dev_err(&pdev->dev, "irq number is invalid\n");
510 goto remove_master;
511 }
512
513 ret = devm_request_irq(&pdev->dev, irq, cdns_spi_irq,
514 0, pdev->name, master);
515 if (ret != 0) {
516 ret = -ENXIO;
517 dev_err(&pdev->dev, "request_irq failed\n");
518 goto remove_master;
519 }
520
521 ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
522
523 if (ret < 0)
524 master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
525 else
526 master->num_chipselect = num_cs;
527
528 ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
529 &xspi->is_decoded_cs);
530
531 if (ret < 0)
532 xspi->is_decoded_cs = 0;
533
534 master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
535 master->transfer_one = cdns_transfer_one;
536 master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
537 master->set_cs = cdns_spi_chipselect;
538 master->mode_bits = SPI_CPOL | SPI_CPHA;
539
540 /* Set to default valid value */
541 master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
542 xspi->speed_hz = master->max_speed_hz;
543
544 master->bits_per_word_mask = SPI_BPW_MASK(8);
545
546 ret = spi_register_master(master);
547 if (ret) {
548 dev_err(&pdev->dev, "spi_register_master failed\n");
549 goto clk_dis_all;
550 }
551
552 return ret;
553
554clk_dis_all:
555 clk_disable_unprepare(xspi->ref_clk);
556clk_dis_apb:
557 clk_disable_unprepare(xspi->pclk);
558remove_master:
559 spi_master_put(master);
560 return ret;
561}
562
563/**
564 * cdns_spi_remove - Remove method for the SPI driver
565 * @pdev: Pointer to the platform_device structure
566 *
567 * This function is called if a device is physically removed from the system or
568 * if the driver module is being unloaded. It frees all resources allocated to
569 * the device.
570 *
571 * Return: 0 on success and error value on error
572 */
573static int cdns_spi_remove(struct platform_device *pdev)
574{
575 struct spi_master *master = platform_get_drvdata(pdev);
576 struct cdns_spi *xspi = spi_master_get_devdata(master);
577
578 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
579 CDNS_SPI_ER_DISABLE_MASK);
580
581 clk_disable_unprepare(xspi->ref_clk);
582 clk_disable_unprepare(xspi->pclk);
583
584 spi_unregister_master(master);
585
586 return 0;
587}
588
589/**
590 * cdns_spi_suspend - Suspend method for the SPI driver
591 * @dev: Address of the platform_device structure
592 *
593 * This function disables the SPI controller and
594 * changes the driver state to "suspend"
595 *
596 * Return: Always 0
597 */
598static int __maybe_unused cdns_spi_suspend(struct device *dev)
599{
600 struct platform_device *pdev = container_of(dev,
601 struct platform_device, dev);
602 struct spi_master *master = platform_get_drvdata(pdev);
603 struct cdns_spi *xspi = spi_master_get_devdata(master);
604
605 spi_master_suspend(master);
606
607 clk_disable_unprepare(xspi->ref_clk);
608
609 clk_disable_unprepare(xspi->pclk);
610
611 return 0;
612}
613
614/**
615 * cdns_spi_resume - Resume method for the SPI driver
616 * @dev: Address of the platform_device structure
617 *
618 * This function changes the driver state to "ready"
619 *
620 * Return: 0 on success and error value on error
621 */
622static int __maybe_unused cdns_spi_resume(struct device *dev)
623{
624 struct platform_device *pdev = container_of(dev,
625 struct platform_device, dev);
626 struct spi_master *master = platform_get_drvdata(pdev);
627 struct cdns_spi *xspi = spi_master_get_devdata(master);
628 int ret = 0;
629
630 ret = clk_prepare_enable(xspi->pclk);
631 if (ret) {
632 dev_err(dev, "Cannot enable APB clock.\n");
633 return ret;
634 }
635
636 ret = clk_prepare_enable(xspi->ref_clk);
637 if (ret) {
638 dev_err(dev, "Cannot enable device clock.\n");
639 clk_disable(xspi->pclk);
640 return ret;
641 }
642 spi_master_resume(master);
643
644 return 0;
645}
646
647static SIMPLE_DEV_PM_OPS(cdns_spi_dev_pm_ops, cdns_spi_suspend,
648 cdns_spi_resume);
649
650static struct of_device_id cdns_spi_of_match[] = {
651 { .compatible = "xlnx,zynq-spi-r1p6" },
652 { .compatible = "cdns,spi-r1p6" },
653 { /* end of table */ }
654};
655MODULE_DEVICE_TABLE(of, cdns_spi_of_match);
656
657/* cdns_spi_driver - This structure defines the SPI subsystem platform driver */
658static struct platform_driver cdns_spi_driver = {
659 .probe = cdns_spi_probe,
660 .remove = cdns_spi_remove,
661 .driver = {
662 .name = CDNS_SPI_NAME,
663 .owner = THIS_MODULE,
664 .of_match_table = cdns_spi_of_match,
665 .pm = &cdns_spi_dev_pm_ops,
666 },
667};
668
669module_platform_driver(cdns_spi_driver);
670
671MODULE_AUTHOR("Xilinx, Inc.");
672MODULE_DESCRIPTION("Cadence SPI driver");
673MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 1492f5ee9aaa..a5cba14ac3d2 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -16,6 +16,7 @@
16#include <linux/spi/spi.h> 16#include <linux/spi/spi.h>
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/of_gpio.h>
19 20
20#include "spi-dw.h" 21#include "spi-dw.h"
21 22
@@ -70,6 +71,27 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
70 dws->num_cs = 4; 71 dws->num_cs = 4;
71 dws->max_freq = clk_get_rate(dwsmmio->clk); 72 dws->max_freq = clk_get_rate(dwsmmio->clk);
72 73
74 if (pdev->dev.of_node) {
75 int i;
76
77 for (i = 0; i < dws->num_cs; i++) {
78 int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
79 "cs-gpios", i);
80
81 if (cs_gpio == -EPROBE_DEFER) {
82 ret = cs_gpio;
83 goto out;
84 }
85
86 if (gpio_is_valid(cs_gpio)) {
87 ret = devm_gpio_request(&pdev->dev, cs_gpio,
88 dev_name(&pdev->dev));
89 if (ret)
90 goto out;
91 }
92 }
93 }
94
73 ret = dw_spi_add_host(&pdev->dev, dws); 95 ret = dw_spi_add_host(&pdev->dev, dws);
74 if (ret) 96 if (ret)
75 goto out; 97 goto out;
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 712ac5629cd4..29f33143b795 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -24,6 +24,7 @@
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
27#include <linux/gpio.h>
27 28
28#include "spi-dw.h" 29#include "spi-dw.h"
29 30
@@ -36,12 +37,6 @@
36#define DONE_STATE ((void *)2) 37#define DONE_STATE ((void *)2)
37#define ERROR_STATE ((void *)-1) 38#define ERROR_STATE ((void *)-1)
38 39
39#define QUEUE_RUNNING 0
40#define QUEUE_STOPPED 1
41
42#define MRST_SPI_DEASSERT 0
43#define MRST_SPI_ASSERT 1
44
45/* Slave spi_dev related */ 40/* Slave spi_dev related */
46struct chip_data { 41struct chip_data {
47 u16 cr0; 42 u16 cr0;
@@ -263,28 +258,22 @@ static int map_dma_buffers(struct dw_spi *dws)
263static void giveback(struct dw_spi *dws) 258static void giveback(struct dw_spi *dws)
264{ 259{
265 struct spi_transfer *last_transfer; 260 struct spi_transfer *last_transfer;
266 unsigned long flags;
267 struct spi_message *msg; 261 struct spi_message *msg;
268 262
269 spin_lock_irqsave(&dws->lock, flags);
270 msg = dws->cur_msg; 263 msg = dws->cur_msg;
271 dws->cur_msg = NULL; 264 dws->cur_msg = NULL;
272 dws->cur_transfer = NULL; 265 dws->cur_transfer = NULL;
273 dws->prev_chip = dws->cur_chip; 266 dws->prev_chip = dws->cur_chip;
274 dws->cur_chip = NULL; 267 dws->cur_chip = NULL;
275 dws->dma_mapped = 0; 268 dws->dma_mapped = 0;
276 queue_work(dws->workqueue, &dws->pump_messages);
277 spin_unlock_irqrestore(&dws->lock, flags);
278 269
279 last_transfer = list_last_entry(&msg->transfers, struct spi_transfer, 270 last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
280 transfer_list); 271 transfer_list);
281 272
282 if (!last_transfer->cs_change && dws->cs_control) 273 if (!last_transfer->cs_change)
283 dws->cs_control(MRST_SPI_DEASSERT); 274 spi_chip_sel(dws, dws->cur_msg->spi, 0);
284 275
285 msg->state = NULL; 276 spi_finalize_current_message(dws->master);
286 if (msg->complete)
287 msg->complete(msg->context);
288} 277}
289 278
290static void int_error_stop(struct dw_spi *dws, const char *msg) 279static void int_error_stop(struct dw_spi *dws, const char *msg)
@@ -502,7 +491,7 @@ static void pump_transfers(unsigned long data)
502 dw_writew(dws, DW_SPI_CTRL0, cr0); 491 dw_writew(dws, DW_SPI_CTRL0, cr0);
503 492
504 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); 493 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
505 spi_chip_sel(dws, spi->chip_select); 494 spi_chip_sel(dws, spi, 1);
506 495
507 /* Set the interrupt mask, for poll mode just disable all int */ 496 /* Set the interrupt mask, for poll mode just disable all int */
508 spi_mask_intr(dws, 0xff); 497 spi_mask_intr(dws, 0xff);
@@ -529,30 +518,12 @@ early_exit:
529 return; 518 return;
530} 519}
531 520
532static void pump_messages(struct work_struct *work) 521static int dw_spi_transfer_one_message(struct spi_master *master,
522 struct spi_message *msg)
533{ 523{
534 struct dw_spi *dws = 524 struct dw_spi *dws = spi_master_get_devdata(master);
535 container_of(work, struct dw_spi, pump_messages);
536 unsigned long flags;
537
538 /* Lock queue and check for queue work */
539 spin_lock_irqsave(&dws->lock, flags);
540 if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
541 dws->busy = 0;
542 spin_unlock_irqrestore(&dws->lock, flags);
543 return;
544 }
545
546 /* Make sure we are not already running a message */
547 if (dws->cur_msg) {
548 spin_unlock_irqrestore(&dws->lock, flags);
549 return;
550 }
551
552 /* Extract head of queue */
553 dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
554 list_del_init(&dws->cur_msg->queue);
555 525
526 dws->cur_msg = msg;
556 /* Initial message state*/ 527 /* Initial message state*/
557 dws->cur_msg->state = START_STATE; 528 dws->cur_msg->state = START_STATE;
558 dws->cur_transfer = list_entry(dws->cur_msg->transfers.next, 529 dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
@@ -560,46 +531,9 @@ static void pump_messages(struct work_struct *work)
560 transfer_list); 531 transfer_list);
561 dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi); 532 dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
562 533
563 /* Mark as busy and launch transfers */ 534 /* Launch transfers */
564 tasklet_schedule(&dws->pump_transfers); 535 tasklet_schedule(&dws->pump_transfers);
565 536
566 dws->busy = 1;
567 spin_unlock_irqrestore(&dws->lock, flags);
568}
569
570/* spi_device use this to queue in their spi_msg */
571static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
572{
573 struct dw_spi *dws = spi_master_get_devdata(spi->master);
574 unsigned long flags;
575
576 spin_lock_irqsave(&dws->lock, flags);
577
578 if (dws->run == QUEUE_STOPPED) {
579 spin_unlock_irqrestore(&dws->lock, flags);
580 return -ESHUTDOWN;
581 }
582
583 msg->actual_length = 0;
584 msg->status = -EINPROGRESS;
585 msg->state = START_STATE;
586
587 list_add_tail(&msg->queue, &dws->queue);
588
589 if (dws->run == QUEUE_RUNNING && !dws->busy) {
590
591 if (dws->cur_transfer || dws->cur_msg)
592 queue_work(dws->workqueue,
593 &dws->pump_messages);
594 else {
595 /* If no other data transaction in air, just go */
596 spin_unlock_irqrestore(&dws->lock, flags);
597 pump_messages(&dws->pump_messages);
598 return 0;
599 }
600 }
601
602 spin_unlock_irqrestore(&dws->lock, flags);
603 return 0; 537 return 0;
604} 538}
605 539
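
This hunk replaces the driver's private workqueue-based message pump with the SPI core's message queue: transfer_one_message() just records the message and kicks the existing pump_transfers tasklet, and giveback() above now signals completion through spi_finalize_current_message(). A minimal sketch of that pairing with placeholder "example_*" names (not the actual driver code):

#include <linux/spi/spi.h>

struct example_dev {
	struct spi_master *master;
	struct spi_message *cur_msg;
};

static void example_start_hw(struct example_dev *ed)
{
	/* driver-specific: program the controller, enable interrupts, ... */
}

static int example_transfer_one_message(struct spi_master *master,
					struct spi_message *msg)
{
	struct example_dev *ed = spi_master_get_devdata(master);

	ed->cur_msg = msg;
	example_start_hw(ed);
	return 0;		/* completion is reported asynchronously */
}

/* called from the driver's interrupt/tasklet path once the message is done */
static void example_msg_done(struct example_dev *ed, int status)
{
	ed->cur_msg->status = status;
	ed->cur_msg = NULL;
	spi_finalize_current_message(ed->master);	/* core hands us the next message */
}
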
@@ -608,6 +542,7 @@ static int dw_spi_setup(struct spi_device *spi)
608{ 542{
609 struct dw_spi_chip *chip_info = NULL; 543 struct dw_spi_chip *chip_info = NULL;
610 struct chip_data *chip; 544 struct chip_data *chip;
545 int ret;
611 546
612 /* Only alloc on first setup */ 547 /* Only alloc on first setup */
613 chip = spi_get_ctldata(spi); 548 chip = spi_get_ctldata(spi);
@@ -661,81 +596,13 @@ static int dw_spi_setup(struct spi_device *spi)
661 | (spi->mode << SPI_MODE_OFFSET) 596 | (spi->mode << SPI_MODE_OFFSET)
662 | (chip->tmode << SPI_TMOD_OFFSET); 597 | (chip->tmode << SPI_TMOD_OFFSET);
663 598
664 return 0; 599 if (gpio_is_valid(spi->cs_gpio)) {
665} 600 ret = gpio_direction_output(spi->cs_gpio,
666 601 !(spi->mode & SPI_CS_HIGH));
667static int init_queue(struct dw_spi *dws) 602 if (ret)
668{ 603 return ret;
669 INIT_LIST_HEAD(&dws->queue);
670 spin_lock_init(&dws->lock);
671
672 dws->run = QUEUE_STOPPED;
673 dws->busy = 0;
674
675 tasklet_init(&dws->pump_transfers,
676 pump_transfers, (unsigned long)dws);
677
678 INIT_WORK(&dws->pump_messages, pump_messages);
679 dws->workqueue = create_singlethread_workqueue(
680 dev_name(dws->master->dev.parent));
681 if (dws->workqueue == NULL)
682 return -EBUSY;
683
684 return 0;
685}
686
687static int start_queue(struct dw_spi *dws)
688{
689 unsigned long flags;
690
691 spin_lock_irqsave(&dws->lock, flags);
692
693 if (dws->run == QUEUE_RUNNING || dws->busy) {
694 spin_unlock_irqrestore(&dws->lock, flags);
695 return -EBUSY;
696 } 604 }
697 605
698 dws->run = QUEUE_RUNNING;
699 dws->cur_msg = NULL;
700 dws->cur_transfer = NULL;
701 dws->cur_chip = NULL;
702 dws->prev_chip = NULL;
703 spin_unlock_irqrestore(&dws->lock, flags);
704
705 queue_work(dws->workqueue, &dws->pump_messages);
706
707 return 0;
708}
709
710static int stop_queue(struct dw_spi *dws)
711{
712 unsigned long flags;
713 unsigned limit = 50;
714 int status = 0;
715
716 spin_lock_irqsave(&dws->lock, flags);
717 dws->run = QUEUE_STOPPED;
718 while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
719 spin_unlock_irqrestore(&dws->lock, flags);
720 msleep(10);
721 spin_lock_irqsave(&dws->lock, flags);
722 }
723
724 if (!list_empty(&dws->queue) || dws->busy)
725 status = -EBUSY;
726 spin_unlock_irqrestore(&dws->lock, flags);
727
728 return status;
729}
730
731static int destroy_queue(struct dw_spi *dws)
732{
733 int status;
734
735 status = stop_queue(dws);
736 if (status != 0)
737 return status;
738 destroy_workqueue(dws->workqueue);
739 return 0; 606 return 0;
740} 607}
741 608
@@ -794,7 +661,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
794 master->bus_num = dws->bus_num; 661 master->bus_num = dws->bus_num;
795 master->num_chipselect = dws->num_cs; 662 master->num_chipselect = dws->num_cs;
796 master->setup = dw_spi_setup; 663 master->setup = dw_spi_setup;
797 master->transfer = dw_spi_transfer; 664 master->transfer_one_message = dw_spi_transfer_one_message;
798 master->max_speed_hz = dws->max_freq; 665 master->max_speed_hz = dws->max_freq;
799 666
800 /* Basic HW init */ 667 /* Basic HW init */
@@ -808,33 +675,21 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
808 } 675 }
809 } 676 }
810 677
811 /* Initial and start queue */ 678 tasklet_init(&dws->pump_transfers, pump_transfers, (unsigned long)dws);
812 ret = init_queue(dws);
813 if (ret) {
814 dev_err(&master->dev, "problem initializing queue\n");
815 goto err_diable_hw;
816 }
817 ret = start_queue(dws);
818 if (ret) {
819 dev_err(&master->dev, "problem starting queue\n");
820 goto err_diable_hw;
821 }
822 679
823 spi_master_set_devdata(master, dws); 680 spi_master_set_devdata(master, dws);
824 ret = devm_spi_register_master(dev, master); 681 ret = devm_spi_register_master(dev, master);
825 if (ret) { 682 if (ret) {
826 dev_err(&master->dev, "problem registering spi master\n"); 683 dev_err(&master->dev, "problem registering spi master\n");
827 goto err_queue_alloc; 684 goto err_dma_exit;
828 } 685 }
829 686
830 mrst_spi_debugfs_init(dws); 687 mrst_spi_debugfs_init(dws);
831 return 0; 688 return 0;
832 689
833err_queue_alloc: 690err_dma_exit:
834 destroy_queue(dws);
835 if (dws->dma_ops && dws->dma_ops->dma_exit) 691 if (dws->dma_ops && dws->dma_ops->dma_exit)
836 dws->dma_ops->dma_exit(dws); 692 dws->dma_ops->dma_exit(dws);
837err_diable_hw:
838 spi_enable_chip(dws, 0); 693 spi_enable_chip(dws, 0);
839err_free_master: 694err_free_master:
840 spi_master_put(master); 695 spi_master_put(master);
@@ -844,18 +699,10 @@ EXPORT_SYMBOL_GPL(dw_spi_add_host);
844 699
845void dw_spi_remove_host(struct dw_spi *dws) 700void dw_spi_remove_host(struct dw_spi *dws)
846{ 701{
847 int status = 0;
848
849 if (!dws) 702 if (!dws)
850 return; 703 return;
851 mrst_spi_debugfs_remove(dws); 704 mrst_spi_debugfs_remove(dws);
852 705
853 /* Remove the queue */
854 status = destroy_queue(dws);
855 if (status != 0)
856 dev_err(&dws->master->dev,
857 "dw_spi_remove: workqueue will not complete, message memory not freed\n");
858
859 if (dws->dma_ops && dws->dma_ops->dma_exit) 706 if (dws->dma_ops && dws->dma_ops->dma_exit)
860 dws->dma_ops->dma_exit(dws); 707 dws->dma_ops->dma_exit(dws);
861 spi_enable_chip(dws, 0); 708 spi_enable_chip(dws, 0);
@@ -868,7 +715,7 @@ int dw_spi_suspend_host(struct dw_spi *dws)
868{ 715{
869 int ret = 0; 716 int ret = 0;
870 717
871 ret = stop_queue(dws); 718 ret = spi_master_suspend(dws->master);
872 if (ret) 719 if (ret)
873 return ret; 720 return ret;
874 spi_enable_chip(dws, 0); 721 spi_enable_chip(dws, 0);
@@ -882,7 +729,7 @@ int dw_spi_resume_host(struct dw_spi *dws)
882 int ret; 729 int ret;
883 730
884 spi_hw_init(dws); 731 spi_hw_init(dws);
885 ret = start_queue(dws); 732 ret = spi_master_resume(dws->master);
886 if (ret) 733 if (ret)
887 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); 734 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
888 return ret; 735 return ret;
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 587643dae11e..6d2acad34f64 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/io.h> 4#include <linux/io.h>
5#include <linux/scatterlist.h> 5#include <linux/scatterlist.h>
6#include <linux/gpio.h>
6 7
7/* Register offsets */ 8/* Register offsets */
8#define DW_SPI_CTRL0 0x00 9#define DW_SPI_CTRL0 0x00
@@ -104,14 +105,6 @@ struct dw_spi {
104 u16 bus_num; 105 u16 bus_num;
105 u16 num_cs; /* supported slave numbers */ 106 u16 num_cs; /* supported slave numbers */
106 107
107 /* Driver message queue */
108 struct workqueue_struct *workqueue;
109 struct work_struct pump_messages;
110 spinlock_t lock;
111 struct list_head queue;
112 int busy;
113 int run;
114
115 /* Message Transfer pump */ 108 /* Message Transfer pump */
116 struct tasklet_struct pump_transfers; 109 struct tasklet_struct pump_transfers;
117 110
@@ -186,15 +179,20 @@ static inline void spi_set_clk(struct dw_spi *dws, u16 div)
186 dw_writel(dws, DW_SPI_BAUDR, div); 179 dw_writel(dws, DW_SPI_BAUDR, div);
187} 180}
188 181
189static inline void spi_chip_sel(struct dw_spi *dws, u16 cs) 182static inline void spi_chip_sel(struct dw_spi *dws, struct spi_device *spi,
183 int active)
190{ 184{
191 if (cs > dws->num_cs) 185 u16 cs = spi->chip_select;
192 return; 186 int gpio_val = active ? (spi->mode & SPI_CS_HIGH) :
187 !(spi->mode & SPI_CS_HIGH);
193 188
194 if (dws->cs_control) 189 if (dws->cs_control)
195 dws->cs_control(1); 190 dws->cs_control(active);
191 if (gpio_is_valid(spi->cs_gpio))
192 gpio_set_value(spi->cs_gpio, gpio_val);
196 193
197 dw_writel(dws, DW_SPI_SER, 1 << cs); 194 if (active)
195 dw_writel(dws, DW_SPI_SER, 1 << cs);
198} 196}
199 197
200/* Disable IRQ bits */ 198/* Disable IRQ bits */
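
The reworked spi_chip_sel() above drives an optional per-device chip-select GPIO with the correct polarity (active-low by default, inverted when SPI_CS_HIGH is set) and only writes the slave-enable register when asserting. A standalone, illustrative-only truth table of the gpio_val expression:

#include <stdio.h>

#define SPI_CS_HIGH	0x04	/* same value as in <linux/spi/spi.h> */

int main(void)
{
	for (int active = 0; active <= 1; active++)
		for (int mode = 0; mode <= SPI_CS_HIGH; mode += SPI_CS_HIGH) {
			int gpio_val = active ? (mode & SPI_CS_HIGH) :
					       !(mode & SPI_CS_HIGH);
			printf("active=%d cs_high=%d -> gpio=%d\n",
			       active, !!(mode & SPI_CS_HIGH), !!gpio_val);
		}
	/* active-low CS: line low while asserted, high while idle; SPI_CS_HIGH inverts that */
	return 0;
}
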
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index d565eeee3bd8..5021ddf03f60 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -406,7 +406,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
406 return IRQ_HANDLED; 406 return IRQ_HANDLED;
407} 407}
408 408
409static struct of_device_id fsl_dspi_dt_ids[] = { 409static const struct of_device_id fsl_dspi_dt_ids[] = {
410 { .compatible = "fsl,vf610-dspi", .data = NULL, }, 410 { .compatible = "fsl,vf610-dspi", .data = NULL, },
411 { /* sentinel */ } 411 { /* sentinel */ }
412}; 412};
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index e767f5831b9c..8ebd724e4c59 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -348,7 +348,7 @@ static void fsl_espi_cmd_trans(struct spi_message *m,
348 } 348 }
349 349
350 espi_trans->tx_buf = local_buf; 350 espi_trans->tx_buf = local_buf;
351 espi_trans->rx_buf = local_buf + espi_trans->n_tx; 351 espi_trans->rx_buf = local_buf;
352 fsl_espi_do_trans(m, espi_trans); 352 fsl_espi_do_trans(m, espi_trans);
353 353
354 espi_trans->actual_length = espi_trans->len; 354 espi_trans->actual_length = espi_trans->len;
@@ -397,7 +397,7 @@ static void fsl_espi_rw_trans(struct spi_message *m,
397 espi_trans->n_rx = trans_len; 397 espi_trans->n_rx = trans_len;
398 espi_trans->len = trans_len + n_tx; 398 espi_trans->len = trans_len + n_tx;
399 espi_trans->tx_buf = local_buf; 399 espi_trans->tx_buf = local_buf;
400 espi_trans->rx_buf = local_buf + n_tx; 400 espi_trans->rx_buf = local_buf;
401 fsl_espi_do_trans(m, espi_trans); 401 fsl_espi_do_trans(m, espi_trans);
402 402
403 memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); 403 memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len);
@@ -458,7 +458,7 @@ static int fsl_espi_setup(struct spi_device *spi)
458 return -EINVAL; 458 return -EINVAL;
459 459
460 if (!cs) { 460 if (!cs) {
461 cs = kzalloc(sizeof *cs, GFP_KERNEL); 461 cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
462 if (!cs) 462 if (!cs)
463 return -ENOMEM; 463 return -ENOMEM;
464 spi->controller_state = cs; 464 spi->controller_state = cs;
@@ -586,8 +586,10 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
586 struct spi_master *master; 586 struct spi_master *master;
587 struct mpc8xxx_spi *mpc8xxx_spi; 587 struct mpc8xxx_spi *mpc8xxx_spi;
588 struct fsl_espi_reg *reg_base; 588 struct fsl_espi_reg *reg_base;
589 u32 regval; 589 struct device_node *nc;
590 int i, ret = 0; 590 const __be32 *prop;
591 u32 regval, csmode;
592 int i, len, ret = 0;
591 593
592 master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); 594 master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
593 if (!master) { 595 if (!master) {
@@ -634,8 +636,32 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
634 mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff); 636 mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
635 637
636 /* Init eSPI CS mode register */ 638 /* Init eSPI CS mode register */
637 for (i = 0; i < pdata->max_chipselect; i++) 639 for_each_available_child_of_node(master->dev.of_node, nc) {
638 mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL); 640 /* get chip select */
641 prop = of_get_property(nc, "reg", &len);
642 if (!prop || len < sizeof(*prop))
643 continue;
644 i = be32_to_cpup(prop);
645 if (i < 0 || i >= pdata->max_chipselect)
646 continue;
647
648 csmode = CSMODE_INIT_VAL;
649 /* check if CSBEF is set in device tree */
650 prop = of_get_property(nc, "fsl,csbef", &len);
651 if (prop && len >= sizeof(*prop)) {
652 csmode &= ~(CSMODE_BEF(0xf));
653 csmode |= CSMODE_BEF(be32_to_cpup(prop));
654 }
655 /* check if CSAFT is set in device tree */
656 prop = of_get_property(nc, "fsl,csaft", &len);
657 if (prop && len >= sizeof(*prop)) {
658 csmode &= ~(CSMODE_AFT(0xf));
659 csmode |= CSMODE_AFT(be32_to_cpup(prop));
660 }
661 mpc8xxx_spi_write_reg(&reg_base->csmode[i], csmode);
662
663 dev_info(dev, "cs=%d, init_csmode=0x%x\n", i, csmode);
664 }
639 665
640 /* Enable SPI interface */ 666 /* Enable SPI interface */
641 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; 667 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index e5d45fca3551..95212ea96c8d 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -99,11 +99,6 @@ int mpc8xxx_spi_transfer(struct spi_device *spi,
99 return 0; 99 return 0;
100} 100}
101 101
102void mpc8xxx_spi_cleanup(struct spi_device *spi)
103{
104 kfree(spi->controller_state);
105}
106
107const char *mpc8xxx_spi_strmode(unsigned int flags) 102const char *mpc8xxx_spi_strmode(unsigned int flags)
108{ 103{
109 if (flags & SPI_QE_CPU_MODE) { 104 if (flags & SPI_QE_CPU_MODE) {
@@ -134,7 +129,6 @@ int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
134 | SPI_LSB_FIRST | SPI_LOOP; 129 | SPI_LSB_FIRST | SPI_LOOP;
135 130
136 master->transfer = mpc8xxx_spi_transfer; 131 master->transfer = mpc8xxx_spi_transfer;
137 master->cleanup = mpc8xxx_spi_cleanup;
138 master->dev.of_node = dev->of_node; 132 master->dev.of_node = dev->of_node;
139 133
140 mpc8xxx_spi = spi_master_get_devdata(master); 134 mpc8xxx_spi = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index 52db6936778e..2fcbfd01d109 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -124,7 +124,6 @@ extern struct mpc8xxx_spi_probe_info *to_of_pinfo(
124extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi, 124extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi,
125 struct spi_transfer *t, unsigned int len); 125 struct spi_transfer *t, unsigned int len);
126extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m); 126extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m);
127extern void mpc8xxx_spi_cleanup(struct spi_device *spi);
128extern const char *mpc8xxx_spi_strmode(unsigned int flags); 127extern const char *mpc8xxx_spi_strmode(unsigned int flags);
129extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, 128extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
130 unsigned int irq); 129 unsigned int irq);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index b3e7775034db..98ccd231bf00 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -431,7 +431,7 @@ static int fsl_spi_setup(struct spi_device *spi)
431 return -EINVAL; 431 return -EINVAL;
432 432
433 if (!cs) { 433 if (!cs) {
434 cs = kzalloc(sizeof *cs, GFP_KERNEL); 434 cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
435 if (!cs) 435 if (!cs)
436 return -ENOMEM; 436 return -ENOMEM;
437 spi->controller_state = cs; 437 spi->controller_state = cs;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 09823076df88..9f595535cf27 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -340,7 +340,7 @@ done:
340} 340}
341 341
342#ifdef CONFIG_OF 342#ifdef CONFIG_OF
343static struct of_device_id spi_gpio_dt_ids[] = { 343static const struct of_device_id spi_gpio_dt_ids[] = {
344 { .compatible = "spi-gpio" }, 344 { .compatible = "spi-gpio" },
345 {} 345 {}
346}; 346};
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 51d99779682f..66d2ae21e78e 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1111,10 +1111,8 @@ static int pl022_dma_probe(struct pl022 *pl022)
1111 } 1111 }
1112 1112
1113 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); 1113 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
1114 if (!pl022->dummypage) { 1114 if (!pl022->dummypage)
1115 dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n");
1116 goto err_no_dummypage; 1115 goto err_no_dummypage;
1117 }
1118 1116
1119 dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", 1117 dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
1120 dma_chan_name(pl022->dma_rx_channel), 1118 dma_chan_name(pl022->dma_rx_channel),
@@ -1809,11 +1807,8 @@ static int pl022_setup(struct spi_device *spi)
1809 1807
1810 if (chip == NULL) { 1808 if (chip == NULL) {
1811 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 1809 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1812 if (!chip) { 1810 if (!chip)
1813 dev_err(&spi->dev,
1814 "cannot allocate controller state\n");
1815 return -ENOMEM; 1811 return -ENOMEM;
1816 }
1817 dev_dbg(&spi->dev, 1812 dev_dbg(&spi->dev,
1818 "allocated memory for controller's runtime state\n"); 1813 "allocated memory for controller's runtime state\n");
1819 } 1814 }
@@ -2050,10 +2045,8 @@ pl022_platform_data_dt_get(struct device *dev)
2050 } 2045 }
2051 2046
2052 pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL); 2047 pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL);
2053 if (!pd) { 2048 if (!pd)
2054 dev_err(dev, "cannot allocate platform data memory\n");
2055 return NULL; 2049 return NULL;
2056 }
2057 2050
2058 pd->bus_id = -1; 2051 pd->bus_id = -1;
2059 pd->enable_dma = 1; 2052 pd->enable_dma = 1;
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 713af4806f26..f6759dc0153b 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
29 struct sg_table *sgt; 29 struct sg_table *sgt;
30 void *buf, *pbuf; 30 void *buf, *pbuf;
31 31
32 /*
33 * Some DMA controllers have problems transferring buffers that are
34 * not multiple of 4 bytes. So we truncate the transfer so that it
35 * is suitable for such controllers, and handle the trailing bytes
36 * manually after the DMA completes.
37 *
38 * REVISIT: It would be better if this information could be
39 * retrieved directly from the DMA device in a similar way than
40 * ->copy_align etc. is done.
41 */
42 len = ALIGN(drv_data->len, 4);
43
44 if (dir == DMA_TO_DEVICE) { 32 if (dir == DMA_TO_DEVICE) {
45 dmadev = drv_data->tx_chan->device->dev; 33 dmadev = drv_data->tx_chan->device->dev;
46 sgt = &drv_data->tx_sgt; 34 sgt = &drv_data->tx_sgt;
@@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
144 if (!error) { 132 if (!error) {
145 pxa2xx_spi_unmap_dma_buffers(drv_data); 133 pxa2xx_spi_unmap_dma_buffers(drv_data);
146 134
147 /* Handle the last bytes of unaligned transfer */
148 drv_data->tx += drv_data->tx_map_len; 135 drv_data->tx += drv_data->tx_map_len;
149 drv_data->write(drv_data);
150
151 drv_data->rx += drv_data->rx_map_len; 136 drv_data->rx += drv_data->rx_map_len;
152 drv_data->read(drv_data);
153 137
154 msg->actual_length += drv_data->len; 138 msg->actual_length += drv_data->len;
155 msg->state = pxa2xx_spi_next_transfer(drv_data); 139 msg->state = pxa2xx_spi_next_transfer(drv_data);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index a07b75814442..901d0e0ba3eb 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -886,11 +886,8 @@ static int setup(struct spi_device *spi)
886 chip = spi_get_ctldata(spi); 886 chip = spi_get_ctldata(spi);
887 if (!chip) { 887 if (!chip) {
888 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 888 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
889 if (!chip) { 889 if (!chip)
890 dev_err(&spi->dev,
891 "failed setup: can't allocate chip data\n");
892 return -ENOMEM; 890 return -ENOMEM;
893 }
894 891
895 if (drv_data->ssp_type == CE4100_SSP) { 892 if (drv_data->ssp_type == CE4100_SSP) {
896 if (spi->chip_select > 4) { 893 if (spi->chip_select > 4) {
@@ -1037,11 +1034,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1037 return NULL; 1034 return NULL;
1038 1035
1039 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1036 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1040 if (!pdata) { 1037 if (!pdata)
1041 dev_err(&pdev->dev,
1042 "failed to allocate memory for platform data\n");
1043 return NULL; 1038 return NULL;
1044 }
1045 1039
1046 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1040 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1047 if (!res) 1041 if (!res)
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index b032e8885e24..fc1de86d3c8a 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -287,7 +287,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
287 writel_relaxed(opflags, controller->base + QUP_OPERATIONAL); 287 writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
288 288
289 if (!xfer) { 289 if (!xfer) {
290 dev_err_ratelimited(controller->dev, "unexpected irq %x08 %x08 %x08\n", 290 dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
291 qup_err, spi_err, opflags); 291 qup_err, spi_err, opflags);
292 return IRQ_HANDLED; 292 return IRQ_HANDLED;
293 } 293 }
@@ -366,7 +366,7 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
366 n_words = xfer->len / w_size; 366 n_words = xfer->len / w_size;
367 controller->w_size = w_size; 367 controller->w_size = w_size;
368 368
369 if (n_words <= controller->in_fifo_sz) { 369 if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
370 mode = QUP_IO_M_MODE_FIFO; 370 mode = QUP_IO_M_MODE_FIFO;
371 writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT); 371 writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
372 writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT); 372 writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
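
The corrected check compares the transfer size against the FIFO capacity in words: in_fifo_sz is sized in bytes, but each FIFO entry holds one 32-bit word, so the capacity is in_fifo_sz / sizeof(u32). A standalone, illustrative-only example with an assumed 64-byte FIFO and a 20-word transfer:

#include <stdio.h>

int main(void)
{
	unsigned int in_fifo_sz = 64;		/* bytes, assumed */
	unsigned int n_words = 20;		/* transfer length in words */

	if (n_words <= in_fifo_sz / sizeof(unsigned int))	/* 20 <= 16? no */
		printf("FIFO mode\n");
	else
		printf("BLOCK mode\n");		/* printed; the old byte-based
						 * check would have picked FIFO mode */
	return 0;
}
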
@@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev)
734 int ret; 734 int ret;
735 735
736 ret = pm_runtime_get_sync(&pdev->dev); 736 ret = pm_runtime_get_sync(&pdev->dev);
737 if (ret) 737 if (ret < 0)
738 return ret; 738 return ret;
739 739
740 ret = spi_qup_set_state(controller, QUP_STATE_RESET); 740 ret = spi_qup_set_state(controller, QUP_STATE_RESET);
@@ -749,7 +749,7 @@ static int spi_qup_remove(struct platform_device *pdev)
749 return 0; 749 return 0;
750} 750}
751 751
752static struct of_device_id spi_qup_dt_match[] = { 752static const struct of_device_id spi_qup_dt_match[] = {
753 { .compatible = "qcom,spi-qup-v2.1.1", }, 753 { .compatible = "qcom,spi-qup-v2.1.1", },
754 { .compatible = "qcom,spi-qup-v2.2.1", }, 754 { .compatible = "qcom,spi-qup-v2.2.1", },
755 { } 755 { }
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 1fb0ad213324..10112745bb17 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -183,8 +183,6 @@
183#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */ 183#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
184#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */ 184#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
185 185
186#define DUMMY_DATA 0x00
187
188struct rspi_data { 186struct rspi_data {
189 void __iomem *addr; 187 void __iomem *addr;
190 u32 max_speed_hz; 188 u32 max_speed_hz;
@@ -197,11 +195,6 @@ struct rspi_data {
197 int rx_irq, tx_irq; 195 int rx_irq, tx_irq;
198 const struct spi_ops *ops; 196 const struct spi_ops *ops;
199 197
200 /* for dmaengine */
201 struct dma_chan *chan_tx;
202 struct dma_chan *chan_rx;
203
204 unsigned dma_width_16bit:1;
205 unsigned dma_callbacked:1; 198 unsigned dma_callbacked:1;
206 unsigned byte_access:1; 199 unsigned byte_access:1;
207}; 200};
@@ -253,6 +246,8 @@ struct spi_ops {
253 int (*transfer_one)(struct spi_master *master, struct spi_device *spi, 246 int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
254 struct spi_transfer *xfer); 247 struct spi_transfer *xfer);
255 u16 mode_bits; 248 u16 mode_bits;
249 u16 flags;
250 u16 fifo_size;
256}; 251};
257 252
258/* 253/*
@@ -266,7 +261,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
266 rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); 261 rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
267 262
268 /* Sets transfer bit rate */ 263 /* Sets transfer bit rate */
269 spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1; 264 spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
265 2 * rspi->max_speed_hz) - 1;
270 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); 266 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
271 267
272 /* Disable dummy transmission, set 16-bit word access, 1 frame */ 268 /* Disable dummy transmission, set 16-bit word access, 1 frame */
@@ -302,7 +298,8 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
302 rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); 298 rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
303 299
304 /* Sets transfer bit rate */ 300 /* Sets transfer bit rate */
305 spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1; 301 spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
302 2 * rspi->max_speed_hz) - 1;
306 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); 303 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
307 304
308 /* Disable dummy transmission, set byte access */ 305 /* Disable dummy transmission, set byte access */
@@ -335,7 +332,7 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
335 rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); 332 rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
336 333
337 /* Sets transfer bit rate */ 334 /* Sets transfer bit rate */
338 spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz); 335 spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
339 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); 336 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
340 337
341 /* Disable dummy transmission, set byte access */ 338 /* Disable dummy transmission, set byte access */
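
Using DIV_ROUND_UP here keeps the programmed bit rate at or below the requested maximum: rounding the divisor up can only slow the clock, whereas the old truncating division could pick a divisor that overshoots. A standalone, illustrative-only comparison, taking the driver's formula rate = clk / (2 * (SPBR + 1)) at face value for an assumed 48 MHz clock and a 10 MHz ceiling:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rate = 48000000;		/* assumed clk_get_rate() result */
	unsigned long max_hz = 10000000;	/* rspi->max_speed_hz */

	long spbr_old = rate / (2 * max_hz) - 1;		/* 2 - 1 = 1 */
	long spbr_new = DIV_ROUND_UP(rate, 2 * max_hz) - 1;	/* 3 - 1 = 2 */

	printf("old: SPBR=%ld -> %lu Hz\n", spbr_old, rate / (2 * (spbr_old + 1)));
	printf("new: SPBR=%ld -> %lu Hz\n", spbr_new, rate / (2 * (spbr_new + 1)));
	/* old: 12000000 Hz (too fast); new: 8000000 Hz (within the requested maximum) */
	return 0;
}
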
@@ -403,11 +400,22 @@ static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
403 return 0; 400 return 0;
404} 401}
405 402
403static inline int rspi_wait_for_tx_empty(struct rspi_data *rspi)
404{
405 return rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
406}
407
408static inline int rspi_wait_for_rx_full(struct rspi_data *rspi)
409{
410 return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE);
411}
412
406static int rspi_data_out(struct rspi_data *rspi, u8 data) 413static int rspi_data_out(struct rspi_data *rspi, u8 data)
407{ 414{
408 if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) { 415 int error = rspi_wait_for_tx_empty(rspi);
416 if (error < 0) {
409 dev_err(&rspi->master->dev, "transmit timeout\n"); 417 dev_err(&rspi->master->dev, "transmit timeout\n");
410 return -ETIMEDOUT; 418 return error;
411 } 419 }
412 rspi_write_data(rspi, data); 420 rspi_write_data(rspi, data);
413 return 0; 421 return 0;
@@ -415,25 +423,36 @@ static int rspi_data_out(struct rspi_data *rspi, u8 data)
415 423
416static int rspi_data_in(struct rspi_data *rspi) 424static int rspi_data_in(struct rspi_data *rspi)
417{ 425{
426 int error;
418 u8 data; 427 u8 data;
419 428
420 if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) { 429 error = rspi_wait_for_rx_full(rspi);
430 if (error < 0) {
421 dev_err(&rspi->master->dev, "receive timeout\n"); 431 dev_err(&rspi->master->dev, "receive timeout\n");
422 return -ETIMEDOUT; 432 return error;
423 } 433 }
424 data = rspi_read_data(rspi); 434 data = rspi_read_data(rspi);
425 return data; 435 return data;
426} 436}
427 437
428static int rspi_data_out_in(struct rspi_data *rspi, u8 data) 438static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx,
439 unsigned int n)
429{ 440{
430 int ret; 441 while (n-- > 0) {
431 442 if (tx) {
432 ret = rspi_data_out(rspi, data); 443 int ret = rspi_data_out(rspi, *tx++);
433 if (ret < 0) 444 if (ret < 0)
434 return ret; 445 return ret;
446 }
447 if (rx) {
448 int ret = rspi_data_in(rspi);
449 if (ret < 0)
450 return ret;
451 *rx++ = ret;
452 }
453 }
435 454
436 return rspi_data_in(rspi); 455 return 0;
437} 456}
438 457
439static void rspi_dma_complete(void *arg) 458static void rspi_dma_complete(void *arg)
@@ -444,97 +463,67 @@ static void rspi_dma_complete(void *arg)
444 wake_up_interruptible(&rspi->wait); 463 wake_up_interruptible(&rspi->wait);
445} 464}
446 465
447static int rspi_dma_map_sg(struct scatterlist *sg, const void *buf, 466static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
448 unsigned len, struct dma_chan *chan, 467 struct sg_table *rx)
449 enum dma_transfer_direction dir)
450{
451 sg_init_table(sg, 1);
452 sg_set_buf(sg, buf, len);
453 sg_dma_len(sg) = len;
454 return dma_map_sg(chan->device->dev, sg, 1, dir);
455}
456
457static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
458 enum dma_transfer_direction dir)
459{ 468{
460 dma_unmap_sg(chan->device->dev, sg, 1, dir); 469 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
461} 470 u8 irq_mask = 0;
462 471 unsigned int other_irq = 0;
463static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len) 472 dma_cookie_t cookie;
464{ 473 int ret;
465 u16 *dst = buf;
466 const u8 *src = data;
467
468 while (len) {
469 *dst++ = (u16)(*src++);
470 len--;
471 }
472}
473
474static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
475{
476 u8 *dst = buf;
477 const u16 *src = data;
478
479 while (len) {
480 *dst++ = (u8)*src++;
481 len--;
482 }
483}
484 474
485static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t) 475 if (tx) {
486{ 476 desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
487 struct scatterlist sg; 477 tx->sgl, tx->nents, DMA_TO_DEVICE,
488 const void *buf = NULL; 478 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
489 struct dma_async_tx_descriptor *desc; 479 if (!desc_tx)
490 unsigned int len; 480 return -EIO;
491 int ret = 0;
492
493 if (rspi->dma_width_16bit) {
494 void *tmp;
495 /*
496 * If DMAC bus width is 16-bit, the driver allocates a dummy
497 * buffer. And, the driver converts original data into the
498 * DMAC data as the following format:
499 * original data: 1st byte, 2nd byte ...
500 * DMAC data: 1st byte, dummy, 2nd byte, dummy ...
501 */
502 len = t->len * 2;
503 tmp = kmalloc(len, GFP_KERNEL);
504 if (!tmp)
505 return -ENOMEM;
506 rspi_memory_to_8bit(tmp, t->tx_buf, t->len);
507 buf = tmp;
508 } else {
509 len = t->len;
510 buf = t->tx_buf;
511 }
512 481
513 if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) { 482 irq_mask |= SPCR_SPTIE;
514 ret = -EFAULT;
515 goto end_nomap;
516 } 483 }
517 desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE, 484 if (rx) {
518 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 485 desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
519 if (!desc) { 486 rx->sgl, rx->nents, DMA_FROM_DEVICE,
520 ret = -EIO; 487 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
521 goto end; 488 if (!desc_rx)
489 return -EIO;
490
491 irq_mask |= SPCR_SPRIE;
522 } 492 }
523 493
524 /* 494 /*
525 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be 495 * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
526 * called. So, this driver disables the IRQ while DMA transfer. 496 * called. So, this driver disables the IRQ while DMA transfer.
527 */ 497 */
528 disable_irq(rspi->tx_irq); 498 if (tx)
499 disable_irq(other_irq = rspi->tx_irq);
500 if (rx && rspi->rx_irq != other_irq)
501 disable_irq(rspi->rx_irq);
529 502
530 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR); 503 rspi_enable_irq(rspi, irq_mask);
531 rspi_enable_irq(rspi, SPCR_SPTIE);
532 rspi->dma_callbacked = 0; 504 rspi->dma_callbacked = 0;
533 505
534 desc->callback = rspi_dma_complete; 506 if (rx) {
535 desc->callback_param = rspi; 507 desc_rx->callback = rspi_dma_complete;
536 dmaengine_submit(desc); 508 desc_rx->callback_param = rspi;
537 dma_async_issue_pending(rspi->chan_tx); 509 cookie = dmaengine_submit(desc_rx);
510 if (dma_submit_error(cookie))
511 return cookie;
512 dma_async_issue_pending(rspi->master->dma_rx);
513 }
514 if (tx) {
515 if (rx) {
516 /* No callback */
517 desc_tx->callback = NULL;
518 } else {
519 desc_tx->callback = rspi_dma_complete;
520 desc_tx->callback_param = rspi;
521 }
522 cookie = dmaengine_submit(desc_tx);
523 if (dma_submit_error(cookie))
524 return cookie;
525 dma_async_issue_pending(rspi->master->dma_tx);
526 }
538 527
539 ret = wait_event_interruptible_timeout(rspi->wait, 528 ret = wait_event_interruptible_timeout(rspi->wait,
540 rspi->dma_callbacked, HZ); 529 rspi->dma_callbacked, HZ);
@@ -542,15 +531,13 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
542 ret = 0; 531 ret = 0;
543 else if (!ret) 532 else if (!ret)
544 ret = -ETIMEDOUT; 533 ret = -ETIMEDOUT;
545 rspi_disable_irq(rspi, SPCR_SPTIE);
546 534
547 enable_irq(rspi->tx_irq); 535 rspi_disable_irq(rspi, irq_mask);
548 536
549end: 537 if (tx)
550 rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE); 538 enable_irq(rspi->tx_irq);
551end_nomap: 539 if (rx && rspi->rx_irq != other_irq)
552 if (rspi->dma_width_16bit) 540 enable_irq(rspi->rx_irq);
553 kfree(buf);
554 541
555 return ret; 542 return ret;
556} 543}
@@ -585,157 +572,37 @@ static void qspi_receive_init(const struct rspi_data *rspi)
585 rspi_write8(rspi, 0, QSPI_SPBFCR); 572 rspi_write8(rspi, 0, QSPI_SPBFCR);
586} 573}
587 574
588static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t) 575static bool __rspi_can_dma(const struct rspi_data *rspi,
576 const struct spi_transfer *xfer)
589{ 577{
590 struct scatterlist sg, sg_dummy; 578 return xfer->len > rspi->ops->fifo_size;
591 void *dummy = NULL, *rx_buf = NULL;
592 struct dma_async_tx_descriptor *desc, *desc_dummy;
593 unsigned int len;
594 int ret = 0;
595
596 if (rspi->dma_width_16bit) {
597 /*
598 * If DMAC bus width is 16-bit, the driver allocates a dummy
599 * buffer. And, finally the driver converts the DMAC data into
600 * actual data as the following format:
601 * DMAC data: 1st byte, dummy, 2nd byte, dummy ...
602 * actual data: 1st byte, 2nd byte ...
603 */
604 len = t->len * 2;
605 rx_buf = kmalloc(len, GFP_KERNEL);
606 if (!rx_buf)
607 return -ENOMEM;
608 } else {
609 len = t->len;
610 rx_buf = t->rx_buf;
611 }
612
613 /* prepare dummy transfer to generate SPI clocks */
614 dummy = kzalloc(len, GFP_KERNEL);
615 if (!dummy) {
616 ret = -ENOMEM;
617 goto end_nomap;
618 }
619 if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
620 DMA_TO_DEVICE)) {
621 ret = -EFAULT;
622 goto end_nomap;
623 }
624 desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
625 DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
626 if (!desc_dummy) {
627 ret = -EIO;
628 goto end_dummy_mapped;
629 }
630
631 /* prepare receive transfer */
632 if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
633 DMA_FROM_DEVICE)) {
634 ret = -EFAULT;
635 goto end_dummy_mapped;
636
637 }
638 desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
639 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
640 if (!desc) {
641 ret = -EIO;
642 goto end;
643 }
644
645 rspi_receive_init(rspi);
646
647 /*
648 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
649 * called. So, this driver disables the IRQ while DMA transfer.
650 */
651 disable_irq(rspi->tx_irq);
652 if (rspi->rx_irq != rspi->tx_irq)
653 disable_irq(rspi->rx_irq);
654
655 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
656 rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
657 rspi->dma_callbacked = 0;
658
659 desc->callback = rspi_dma_complete;
660 desc->callback_param = rspi;
661 dmaengine_submit(desc);
662 dma_async_issue_pending(rspi->chan_rx);
663
664 desc_dummy->callback = NULL; /* No callback */
665 dmaengine_submit(desc_dummy);
666 dma_async_issue_pending(rspi->chan_tx);
667
668 ret = wait_event_interruptible_timeout(rspi->wait,
669 rspi->dma_callbacked, HZ);
670 if (ret > 0 && rspi->dma_callbacked)
671 ret = 0;
672 else if (!ret)
673 ret = -ETIMEDOUT;
674 rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
675
676 enable_irq(rspi->tx_irq);
677 if (rspi->rx_irq != rspi->tx_irq)
678 enable_irq(rspi->rx_irq);
679
680end:
681 rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
682end_dummy_mapped:
683 rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
684end_nomap:
685 if (rspi->dma_width_16bit) {
686 if (!ret)
687 rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len);
688 kfree(rx_buf);
689 }
690 kfree(dummy);
691
692 return ret;
693} 579}
694 580
695static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t) 581static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
582 struct spi_transfer *xfer)
696{ 583{
697 if (t->tx_buf && rspi->chan_tx) 584 struct rspi_data *rspi = spi_master_get_devdata(master);
698 return 1;
699 /* If the module receives data by DMAC, it also needs TX DMAC */
700 if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
701 return 1;
702 585
703 return 0; 586 return __rspi_can_dma(rspi, xfer);
704} 587}
705 588
706static int rspi_transfer_out_in(struct rspi_data *rspi, 589static int rspi_common_transfer(struct rspi_data *rspi,
707 struct spi_transfer *xfer) 590 struct spi_transfer *xfer)
708{ 591{
709 int remain = xfer->len, ret; 592 int ret;
710 const u8 *tx_buf = xfer->tx_buf;
711 u8 *rx_buf = xfer->rx_buf;
712 u8 spcr, data;
713
714 rspi_receive_init(rspi);
715
716 spcr = rspi_read8(rspi, RSPI_SPCR);
717 if (rx_buf)
718 spcr &= ~SPCR_TXMD;
719 else
720 spcr |= SPCR_TXMD;
721 rspi_write8(rspi, spcr, RSPI_SPCR);
722 593
723 while (remain > 0) { 594 if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
724 data = tx_buf ? *tx_buf++ : DUMMY_DATA; 595 /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
725 ret = rspi_data_out(rspi, data); 596 return rspi_dma_transfer(rspi, &xfer->tx_sg,
726 if (ret < 0) 597 xfer->rx_buf ? &xfer->rx_sg : NULL);
727 return ret;
728 if (rx_buf) {
729 ret = rspi_data_in(rspi);
730 if (ret < 0)
731 return ret;
732 *rx_buf++ = ret;
733 }
734 remain--;
735 } 598 }
736 599
600 ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
601 if (ret < 0)
602 return ret;
603
737 /* Wait for the last transmission */ 604 /* Wait for the last transmission */
738 rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE); 605 rspi_wait_for_tx_empty(rspi);
739 606
740 return 0; 607 return 0;
741} 608}
@@ -744,46 +611,18 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
744 struct spi_transfer *xfer) 611 struct spi_transfer *xfer)
745{ 612{
746 struct rspi_data *rspi = spi_master_get_devdata(master); 613 struct rspi_data *rspi = spi_master_get_devdata(master);
747 int ret; 614 u8 spcr;
748 615
749 if (!rspi_is_dma(rspi, xfer)) 616 spcr = rspi_read8(rspi, RSPI_SPCR);
750 return rspi_transfer_out_in(rspi, xfer); 617 if (xfer->rx_buf) {
751 618 rspi_receive_init(rspi);
752 if (xfer->tx_buf) { 619 spcr &= ~SPCR_TXMD;
753 ret = rspi_send_dma(rspi, xfer); 620 } else {
754 if (ret < 0) 621 spcr |= SPCR_TXMD;
755 return ret;
756 }
757 if (xfer->rx_buf)
758 return rspi_receive_dma(rspi, xfer);
759
760 return 0;
761}
762
763static int rspi_rz_transfer_out_in(struct rspi_data *rspi,
764 struct spi_transfer *xfer)
765{
766 int remain = xfer->len, ret;
767 const u8 *tx_buf = xfer->tx_buf;
768 u8 *rx_buf = xfer->rx_buf;
769 u8 data;
770
771 rspi_rz_receive_init(rspi);
772
773 while (remain > 0) {
774 data = tx_buf ? *tx_buf++ : DUMMY_DATA;
775 ret = rspi_data_out_in(rspi, data);
776 if (ret < 0)
777 return ret;
778 if (rx_buf)
779 *rx_buf++ = ret;
780 remain--;
781 } 622 }
623 rspi_write8(rspi, spcr, RSPI_SPCR);
782 624
783 /* Wait for the last transmission */ 625 return rspi_common_transfer(rspi, xfer);
784 rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
785
786 return 0;
787} 626}
788 627
789static int rspi_rz_transfer_one(struct spi_master *master, 628static int rspi_rz_transfer_one(struct spi_master *master,
@@ -791,68 +630,44 @@ static int rspi_rz_transfer_one(struct spi_master *master,
791 struct spi_transfer *xfer) 630 struct spi_transfer *xfer)
792{ 631{
793 struct rspi_data *rspi = spi_master_get_devdata(master); 632 struct rspi_data *rspi = spi_master_get_devdata(master);
633 int ret;
794 634
795 return rspi_rz_transfer_out_in(rspi, xfer); 635 rspi_rz_receive_init(rspi);
636
637 return rspi_common_transfer(rspi, xfer);
796} 638}
797 639
798static int qspi_transfer_out_in(struct rspi_data *rspi, 640static int qspi_transfer_out_in(struct rspi_data *rspi,
799 struct spi_transfer *xfer) 641 struct spi_transfer *xfer)
800{ 642{
801 int remain = xfer->len, ret;
802 const u8 *tx_buf = xfer->tx_buf;
803 u8 *rx_buf = xfer->rx_buf;
804 u8 data;
805
806 qspi_receive_init(rspi); 643 qspi_receive_init(rspi);
807 644
808 while (remain > 0) { 645 return rspi_common_transfer(rspi, xfer);
809 data = tx_buf ? *tx_buf++ : DUMMY_DATA;
810 ret = rspi_data_out_in(rspi, data);
811 if (ret < 0)
812 return ret;
813 if (rx_buf)
814 *rx_buf++ = ret;
815 remain--;
816 }
817
818 /* Wait for the last transmission */
819 rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
820
821 return 0;
822} 646}
823 647
824static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) 648static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
825{ 649{
826 const u8 *buf = xfer->tx_buf;
827 unsigned int i;
828 int ret; 650 int ret;
829 651
830 for (i = 0; i < xfer->len; i++) { 652 if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer))
831 ret = rspi_data_out(rspi, *buf++); 653 return rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
832 if (ret < 0) 654
833 return ret; 655 ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len);
834 } 656 if (ret < 0)
657 return ret;
835 658
836 /* Wait for the last transmission */ 659 /* Wait for the last transmission */
837 rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE); 660 rspi_wait_for_tx_empty(rspi);
838 661
839 return 0; 662 return 0;
840} 663}
841 664
842static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) 665static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
843{ 666{
844 u8 *buf = xfer->rx_buf; 667 if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer))
845 unsigned int i; 668 return rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
846 int ret;
847
848 for (i = 0; i < xfer->len; i++) {
849 ret = rspi_data_in(rspi);
850 if (ret < 0)
851 return ret;
852 *buf++ = ret;
853 }
854 669
855 return 0; 670 return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len);
856} 671}
857 672
858static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi, 673static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
@@ -862,10 +677,10 @@ static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
862 677
863 if (spi->mode & SPI_LOOP) { 678 if (spi->mode & SPI_LOOP) {
864 return qspi_transfer_out_in(rspi, xfer); 679 return qspi_transfer_out_in(rspi, xfer);
865 } else if (xfer->tx_buf && xfer->tx_nbits > SPI_NBITS_SINGLE) { 680 } else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
866 /* Quad or Dual SPI Write */ 681 /* Quad or Dual SPI Write */
867 return qspi_transfer_out(rspi, xfer); 682 return qspi_transfer_out(rspi, xfer);
868 } else if (xfer->rx_buf && xfer->rx_nbits > SPI_NBITS_SINGLE) { 683 } else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
869 /* Quad or Dual SPI Read */ 684 /* Quad or Dual SPI Read */
870 return qspi_transfer_in(rspi, xfer); 685 return qspi_transfer_in(rspi, xfer);
871 } else { 686 } else {
@@ -1046,65 +861,78 @@ static irqreturn_t rspi_irq_tx(int irq, void *_sr)
1046 return 0; 861 return 0;
1047} 862}
1048 863
1049static int rspi_request_dma(struct rspi_data *rspi, 864static struct dma_chan *rspi_request_dma_chan(struct device *dev,
1050 struct platform_device *pdev) 865 enum dma_transfer_direction dir,
866 unsigned int id,
867 dma_addr_t port_addr)
1051{ 868{
1052 const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
1053 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1054 dma_cap_mask_t mask; 869 dma_cap_mask_t mask;
870 struct dma_chan *chan;
1055 struct dma_slave_config cfg; 871 struct dma_slave_config cfg;
1056 int ret; 872 int ret;
1057 873
1058 if (!res || !rspi_pd) 874 dma_cap_zero(mask);
1059 return 0; /* The driver assumes no error. */ 875 dma_cap_set(DMA_SLAVE, mask);
1060 876
1061 rspi->dma_width_16bit = rspi_pd->dma_width_16bit; 877 chan = dma_request_channel(mask, shdma_chan_filter,
1062 878 (void *)(unsigned long)id);
1063 /* If the module receives data by DMAC, it also needs TX DMAC */ 879 if (!chan) {
1064 if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) { 880 dev_warn(dev, "dma_request_channel failed\n");
1065 dma_cap_zero(mask); 881 return NULL;
1066 dma_cap_set(DMA_SLAVE, mask);
1067 rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
1068 (void *)rspi_pd->dma_rx_id);
1069 if (rspi->chan_rx) {
1070 cfg.slave_id = rspi_pd->dma_rx_id;
1071 cfg.direction = DMA_DEV_TO_MEM;
1072 cfg.dst_addr = 0;
1073 cfg.src_addr = res->start + RSPI_SPDR;
1074 ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
1075 if (!ret)
1076 dev_info(&pdev->dev, "Use DMA when rx.\n");
1077 else
1078 return ret;
1079 }
1080 } 882 }
1081 if (rspi_pd->dma_tx_id) { 883
1082 dma_cap_zero(mask); 884 memset(&cfg, 0, sizeof(cfg));
1083 dma_cap_set(DMA_SLAVE, mask); 885 cfg.slave_id = id;
1084 rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter, 886 cfg.direction = dir;
1085 (void *)rspi_pd->dma_tx_id); 887 if (dir == DMA_MEM_TO_DEV)
1086 if (rspi->chan_tx) { 888 cfg.dst_addr = port_addr;
1087 cfg.slave_id = rspi_pd->dma_tx_id; 889 else
1088 cfg.direction = DMA_MEM_TO_DEV; 890 cfg.src_addr = port_addr;
1089 cfg.dst_addr = res->start + RSPI_SPDR; 891
1090 cfg.src_addr = 0; 892 ret = dmaengine_slave_config(chan, &cfg);
1091 ret = dmaengine_slave_config(rspi->chan_tx, &cfg); 893 if (ret) {
1092 if (!ret) 894 dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
1093 dev_info(&pdev->dev, "Use DMA when tx\n"); 895 dma_release_channel(chan);
1094 else 896 return NULL;
1095 return ret;
1096 }
1097 } 897 }
1098 898
899 return chan;
900}
901
902static int rspi_request_dma(struct device *dev, struct spi_master *master,
903 const struct resource *res)
904{
905 const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
906
907 if (!rspi_pd || !rspi_pd->dma_rx_id || !rspi_pd->dma_tx_id)
908 return 0; /* The driver assumes no error. */
909
910 master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM,
911 rspi_pd->dma_rx_id,
912 res->start + RSPI_SPDR);
913 if (!master->dma_rx)
914 return -ENODEV;
915
916 master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV,
917 rspi_pd->dma_tx_id,
918 res->start + RSPI_SPDR);
919 if (!master->dma_tx) {
920 dma_release_channel(master->dma_rx);
921 master->dma_rx = NULL;
922 return -ENODEV;
923 }
924
925 master->can_dma = rspi_can_dma;
926 dev_info(dev, "DMA available");
1099 return 0; 927 return 0;
1100} 928}
1101 929
1102static void rspi_release_dma(struct rspi_data *rspi) 930static void rspi_release_dma(struct rspi_data *rspi)
1103{ 931{
1104 if (rspi->chan_tx) 932 if (rspi->master->dma_tx)
1105 dma_release_channel(rspi->chan_tx); 933 dma_release_channel(rspi->master->dma_tx);
1106 if (rspi->chan_rx) 934 if (rspi->master->dma_rx)
1107 dma_release_channel(rspi->chan_rx); 935 dma_release_channel(rspi->master->dma_rx);
1108} 936}
1109 937
1110static int rspi_remove(struct platform_device *pdev) 938static int rspi_remove(struct platform_device *pdev)
@@ -1118,23 +946,29 @@ static int rspi_remove(struct platform_device *pdev)
1118} 946}
1119 947
1120static const struct spi_ops rspi_ops = { 948static const struct spi_ops rspi_ops = {
1121 .set_config_register = rspi_set_config_register, 949 .set_config_register = rspi_set_config_register,
1122 .transfer_one = rspi_transfer_one, 950 .transfer_one = rspi_transfer_one,
1123 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, 951 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
952 .flags = SPI_MASTER_MUST_TX,
953 .fifo_size = 8,
1124}; 954};
1125 955
1126static const struct spi_ops rspi_rz_ops = { 956static const struct spi_ops rspi_rz_ops = {
1127 .set_config_register = rspi_rz_set_config_register, 957 .set_config_register = rspi_rz_set_config_register,
1128 .transfer_one = rspi_rz_transfer_one, 958 .transfer_one = rspi_rz_transfer_one,
1129 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, 959 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
960 .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
961 .fifo_size = 8, /* 8 for TX, 32 for RX */
1130}; 962};
1131 963
1132static const struct spi_ops qspi_ops = { 964static const struct spi_ops qspi_ops = {
1133 .set_config_register = qspi_set_config_register, 965 .set_config_register = qspi_set_config_register,
1134 .transfer_one = qspi_transfer_one, 966 .transfer_one = qspi_transfer_one,
1135 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP | 967 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
1136 SPI_TX_DUAL | SPI_TX_QUAD | 968 SPI_TX_DUAL | SPI_TX_QUAD |
1137 SPI_RX_DUAL | SPI_RX_QUAD, 969 SPI_RX_DUAL | SPI_RX_QUAD,
970 .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
971 .fifo_size = 32,
1138}; 972};
1139 973
1140#ifdef CONFIG_OF 974#ifdef CONFIG_OF
@@ -1254,6 +1088,7 @@ static int rspi_probe(struct platform_device *pdev)
1254 master->prepare_message = rspi_prepare_message; 1088 master->prepare_message = rspi_prepare_message;
1255 master->unprepare_message = rspi_unprepare_message; 1089 master->unprepare_message = rspi_unprepare_message;
1256 master->mode_bits = ops->mode_bits; 1090 master->mode_bits = ops->mode_bits;
1091 master->flags = ops->flags;
1257 master->dev.of_node = pdev->dev.of_node; 1092 master->dev.of_node = pdev->dev.of_node;
1258 1093
1259 ret = platform_get_irq_byname(pdev, "rx"); 1094 ret = platform_get_irq_byname(pdev, "rx");
@@ -1291,11 +1126,9 @@ static int rspi_probe(struct platform_device *pdev)
1291 goto error2; 1126 goto error2;
1292 } 1127 }
1293 1128
1294 ret = rspi_request_dma(rspi, pdev); 1129 ret = rspi_request_dma(&pdev->dev, master, res);
1295 if (ret < 0) { 1130 if (ret < 0)
1296 dev_err(&pdev->dev, "rspi_request_dma failed.\n"); 1131 dev_warn(&pdev->dev, "DMA not available, using PIO\n");
1297 goto error3;
1298 }
1299 1132
1300 ret = devm_spi_register_master(&pdev->dev, master); 1133 ret = devm_spi_register_master(&pdev->dev, master);
1301 if (ret < 0) { 1134 if (ret < 0) {
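
Note on the spi-rspi.c bit-rate change above: switching the SPBR calculation to DIV_ROUND_UP rounds the divisor up, so the programmed SPI clock ends up at or below the requested max_speed_hz instead of occasionally above it. A minimal stand-alone sketch of that arithmetic; the 48 MHz parent clock, the 7 MHz request and the f/(2*(SPBR+1)) rate relationship are assumptions for illustration, not taken from the patch:

/*
 * Illustration only: with plain integer division the resulting rate can
 * exceed max_speed_hz; rounding the divisor up keeps it at or below.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned long rate_for(unsigned long parent, unsigned long spbr)
{
	/* assumed rate relationship for this sketch */
	return parent / (2 * (spbr + 1));
}

int main(void)
{
	unsigned long parent = 48000000;	/* hypothetical parent clock */
	unsigned long max_speed = 7000000;	/* hypothetical max_speed_hz */

	unsigned long spbr_old = parent / (2 * max_speed) - 1;		/* truncates */
	unsigned long spbr_new = DIV_ROUND_UP(parent, 2 * max_speed) - 1;

	printf("truncated : SPBR=%lu -> %lu Hz (above the request)\n",
	       spbr_old, rate_for(parent, spbr_old));
	printf("rounded up: SPBR=%lu -> %lu Hz (within the request)\n",
	       spbr_new, rate_for(parent, spbr_new));
	return 0;
}
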
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index bed23384dfab..381d4af149fc 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -183,11 +183,11 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
183 183
184 /* allocate settings on the first call */ 184 /* allocate settings on the first call */
185 if (!cs) { 185 if (!cs) {
186 cs = kzalloc(sizeof(struct s3c24xx_spi_devstate), GFP_KERNEL); 186 cs = devm_kzalloc(&spi->dev,
187 if (!cs) { 187 sizeof(struct s3c24xx_spi_devstate),
188 dev_err(&spi->dev, "no memory for controller state\n"); 188 GFP_KERNEL);
189 if (!cs)
189 return -ENOMEM; 190 return -ENOMEM;
190 }
191 191
192 cs->spcon = SPCON_DEFAULT; 192 cs->spcon = SPCON_DEFAULT;
193 cs->hz = -1; 193 cs->hz = -1;
@@ -209,11 +209,6 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
209 return 0; 209 return 0;
210} 210}
211 211
212static void s3c24xx_spi_cleanup(struct spi_device *spi)
213{
214 kfree(spi->controller_state);
215}
216
217static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count) 212static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
218{ 213{
219 return hw->tx ? hw->tx[count] : 0; 214 return hw->tx ? hw->tx[count] : 0;
@@ -543,7 +538,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
543 hw->bitbang.txrx_bufs = s3c24xx_spi_txrx; 538 hw->bitbang.txrx_bufs = s3c24xx_spi_txrx;
544 539
545 hw->master->setup = s3c24xx_spi_setup; 540 hw->master->setup = s3c24xx_spi_setup;
546 hw->master->cleanup = s3c24xx_spi_cleanup;
547 541
548 dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang); 542 dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
549 543
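
The spi-s3c24xx.c change above replaces kzalloc() plus an explicit ->cleanup() callback with devm_kzalloc() tied to the SPI device, so the per-device state is released automatically by the driver core. A minimal sketch of that pattern, with hypothetical names (my_devstate, my_setup) standing in for the driver's own:

/*
 * Minimal sketch: state allocated with devm_kzalloc() against the
 * spi_device is freed when that device is removed or unbound, so no
 * ->cleanup() hook is needed.  Names here are placeholders.
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

struct my_devstate {
	int hz;
};

static int my_setup(struct spi_device *spi)
{
	struct my_devstate *cs = spi->controller_state;

	if (!cs) {
		cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		cs->hz = -1;		/* no rate configured yet */
		spi->controller_state = cs;
	}
	return 0;
}
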
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index f19cd97855e8..affcfd6fe76b 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -773,7 +773,6 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
773 773
774 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 774 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
775 if (!cs) { 775 if (!cs) {
776 dev_err(&spi->dev, "could not allocate memory for controller data\n");
777 of_node_put(data_np); 776 of_node_put(data_np);
778 return ERR_PTR(-ENOMEM); 777 return ERR_PTR(-ENOMEM);
779 } 778 }
@@ -987,10 +986,8 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
987 u32 temp; 986 u32 temp;
988 987
989 sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL); 988 sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
990 if (!sci) { 989 if (!sci)
991 dev_err(dev, "memory allocation for spi_info failed\n");
992 return ERR_PTR(-ENOMEM); 990 return ERR_PTR(-ENOMEM);
993 }
994 991
995 if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) { 992 if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
996 dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n"); 993 dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 9009456bdf4d..c8e795ef2e13 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -244,9 +244,9 @@ static int hspi_probe(struct platform_device *pdev)
244 return -ENOMEM; 244 return -ENOMEM;
245 } 245 }
246 246
247 clk = clk_get(NULL, "shyway_clk"); 247 clk = clk_get(&pdev->dev, NULL);
248 if (IS_ERR(clk)) { 248 if (IS_ERR(clk)) {
249 dev_err(&pdev->dev, "shyway_clk is required\n"); 249 dev_err(&pdev->dev, "couldn't get clock\n");
250 ret = -EINVAL; 250 ret = -EINVAL;
251 goto error0; 251 goto error0;
252 } 252 }
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index e850d03e7190..45b09142afe2 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -642,10 +642,8 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
642 u32 num_cs = 1; 642 u32 num_cs = 1;
643 643
644 info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL); 644 info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
645 if (!info) { 645 if (!info)
646 dev_err(dev, "failed to allocate setup data\n");
647 return NULL; 646 return NULL;
648 }
649 647
650 /* Parse the MSIOF properties */ 648 /* Parse the MSIOF properties */
651 of_property_read_u32(np, "num-cs", &num_cs); 649 of_property_read_u32(np, "num-cs", &num_cs);
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 1a77ad52812f..95ac276eaafe 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/clk.h> 12#include <linux/clk.h>
13#include <linux/completion.h>
13#include <linux/interrupt.h> 14#include <linux/interrupt.h>
14#include <linux/io.h> 15#include <linux/io.h>
15#include <linux/of.h> 16#include <linux/of.h>
@@ -85,6 +86,7 @@
85#define SIRFSOC_SPI_TX_DONE BIT(1) 86#define SIRFSOC_SPI_TX_DONE BIT(1)
86#define SIRFSOC_SPI_RX_OFLOW BIT(2) 87#define SIRFSOC_SPI_RX_OFLOW BIT(2)
87#define SIRFSOC_SPI_TX_UFLOW BIT(3) 88#define SIRFSOC_SPI_TX_UFLOW BIT(3)
89#define SIRFSOC_SPI_RX_IO_DMA BIT(4)
88#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6) 90#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6)
89#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7) 91#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7)
90#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8) 92#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8)
@@ -264,41 +266,34 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
264{ 266{
265 struct sirfsoc_spi *sspi = dev_id; 267 struct sirfsoc_spi *sspi = dev_id;
266 u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS); 268 u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);
267
268 writel(spi_stat, sspi->base + SIRFSOC_SPI_INT_STATUS);
269
270 if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) { 269 if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
271 complete(&sspi->tx_done); 270 complete(&sspi->tx_done);
272 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); 271 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
272 writel(SIRFSOC_SPI_INT_MASK_ALL,
273 sspi->base + SIRFSOC_SPI_INT_STATUS);
273 return IRQ_HANDLED; 274 return IRQ_HANDLED;
274 } 275 }
275 276
276 /* Error Conditions */ 277 /* Error Conditions */
277 if (spi_stat & SIRFSOC_SPI_RX_OFLOW || 278 if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
278 spi_stat & SIRFSOC_SPI_TX_UFLOW) { 279 spi_stat & SIRFSOC_SPI_TX_UFLOW) {
280 complete(&sspi->tx_done);
279 complete(&sspi->rx_done); 281 complete(&sspi->rx_done);
280 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); 282 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
283 writel(SIRFSOC_SPI_INT_MASK_ALL,
284 sspi->base + SIRFSOC_SPI_INT_STATUS);
285 return IRQ_HANDLED;
281 } 286 }
287 if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
288 complete(&sspi->tx_done);
289 while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) &
290 SIRFSOC_SPI_RX_IO_DMA))
291 cpu_relax();
292 complete(&sspi->rx_done);
293 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
294 writel(SIRFSOC_SPI_INT_MASK_ALL,
295 sspi->base + SIRFSOC_SPI_INT_STATUS);
282 296
283 if (spi_stat & (SIRFSOC_SPI_FRM_END
284 | SIRFSOC_SPI_RXFIFO_THD_REACH))
285 while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
286 & SIRFSOC_SPI_FIFO_EMPTY)) &&
287 sspi->left_rx_word)
288 sspi->rx_word(sspi);
289
290 if (spi_stat & (SIRFSOC_SPI_FIFO_EMPTY
291 | SIRFSOC_SPI_TXFIFO_THD_REACH))
292 while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
293 & SIRFSOC_SPI_FIFO_FULL)) &&
294 sspi->left_tx_word)
295 sspi->tx_word(sspi);
296
297 /* Received all words */
298 if ((sspi->left_rx_word == 0) && (sspi->left_tx_word == 0)) {
299 complete(&sspi->rx_done);
300 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
301 }
302 return IRQ_HANDLED; 297 return IRQ_HANDLED;
303} 298}
304 299
@@ -309,59 +304,51 @@ static void spi_sirfsoc_dma_fini_callback(void *data)
309 complete(dma_complete); 304 complete(dma_complete);
310} 305}
311 306
312static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) 307static int spi_sirfsoc_cmd_transfer(struct spi_device *spi,
308 struct spi_transfer *t)
313{ 309{
314 struct sirfsoc_spi *sspi; 310 struct sirfsoc_spi *sspi;
315 int timeout = t->len * 10; 311 int timeout = t->len * 10;
316 sspi = spi_master_get_devdata(spi->master); 312 u32 cmd;
317
318 sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
319 sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
320 sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
321 reinit_completion(&sspi->rx_done);
322 reinit_completion(&sspi->tx_done);
323 313
324 writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS); 314 sspi = spi_master_get_devdata(spi->master);
325 315 memcpy(&cmd, sspi->tx, t->len);
326 /* 316 if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
327 * fill tx_buf into command register and wait for its completion 317 cmd = cpu_to_be32(cmd) >>
328 */ 318 ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
329 if (sspi->tx_by_cmd) { 319 if (sspi->word_width == 2 && t->len == 4 &&
330 u32 cmd; 320 (!(spi->mode & SPI_LSB_FIRST)))
331 memcpy(&cmd, sspi->tx, t->len); 321 cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
332 322 writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
333 if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST)) 323 writel(SIRFSOC_SPI_FRM_END_INT_EN,
334 cmd = cpu_to_be32(cmd) >> 324 sspi->base + SIRFSOC_SPI_INT_EN);
335 ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8); 325 writel(SIRFSOC_SPI_CMD_TX_EN,
336 if (sspi->word_width == 2 && t->len == 4 && 326 sspi->base + SIRFSOC_SPI_TX_RX_EN);
337 (!(spi->mode & SPI_LSB_FIRST))) 327 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
338 cmd = ((cmd & 0xffff) << 16) | (cmd >> 16); 328 dev_err(&spi->dev, "cmd transfer timeout\n");
339 329 return 0;
340 writel(cmd, sspi->base + SIRFSOC_SPI_CMD); 330 }
341 writel(SIRFSOC_SPI_FRM_END_INT_EN,
342 sspi->base + SIRFSOC_SPI_INT_EN);
343 writel(SIRFSOC_SPI_CMD_TX_EN,
344 sspi->base + SIRFSOC_SPI_TX_RX_EN);
345 331
346 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { 332 return t->len;
347 dev_err(&spi->dev, "transfer timeout\n"); 333}
348 return 0;
349 }
350 334
351 return t->len; 335static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
352 } 336 struct spi_transfer *t)
337{
338 struct sirfsoc_spi *sspi;
339 struct dma_async_tx_descriptor *rx_desc, *tx_desc;
340 int timeout = t->len * 10;
353 341
354 if (sspi->left_tx_word == 1) { 342 sspi = spi_master_get_devdata(spi->master);
355 writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | 343 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
356 SIRFSOC_SPI_ENA_AUTO_CLR, 344 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
357 sspi->base + SIRFSOC_SPI_CTRL); 345 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
358 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); 346 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
359 writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); 347 writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
360 } else if ((sspi->left_tx_word > 1) && (sspi->left_tx_word < 348 writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
361 SIRFSOC_SPI_DAT_FRM_LEN_MAX)) { 349 if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) {
362 writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | 350 writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
363 SIRFSOC_SPI_MUL_DAT_MODE | 351 SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE,
364 SIRFSOC_SPI_ENA_AUTO_CLR,
365 sspi->base + SIRFSOC_SPI_CTRL); 352 sspi->base + SIRFSOC_SPI_CTRL);
366 writel(sspi->left_tx_word - 1, 353 writel(sspi->left_tx_word - 1,
367 sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); 354 sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
@@ -373,76 +360,122 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
373 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); 360 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
374 writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); 361 writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
375 } 362 }
376 363 sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
377 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 364 (t->tx_buf != t->rx_buf) ?
378 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 365 DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
379 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 366 rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
380 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 367 sspi->dst_start, t->len, DMA_DEV_TO_MEM,
381 368 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
382 if (IS_DMA_VALID(t)) { 369 rx_desc->callback = spi_sirfsoc_dma_fini_callback;
383 struct dma_async_tx_descriptor *rx_desc, *tx_desc; 370 rx_desc->callback_param = &sspi->rx_done;
384 371
385 sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, DMA_FROM_DEVICE); 372 sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
386 rx_desc = dmaengine_prep_slave_single(sspi->rx_chan, 373 (t->tx_buf != t->rx_buf) ?
387 sspi->dst_start, t->len, DMA_DEV_TO_MEM, 374 DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
388 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 375 tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
389 rx_desc->callback = spi_sirfsoc_dma_fini_callback; 376 sspi->src_start, t->len, DMA_MEM_TO_DEV,
390 rx_desc->callback_param = &sspi->rx_done; 377 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
391 378 tx_desc->callback = spi_sirfsoc_dma_fini_callback;
392 sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len, DMA_TO_DEVICE); 379 tx_desc->callback_param = &sspi->tx_done;
393 tx_desc = dmaengine_prep_slave_single(sspi->tx_chan, 380
394 sspi->src_start, t->len, DMA_MEM_TO_DEV, 381 dmaengine_submit(tx_desc);
395 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 382 dmaengine_submit(rx_desc);
396 tx_desc->callback = spi_sirfsoc_dma_fini_callback; 383 dma_async_issue_pending(sspi->tx_chan);
397 tx_desc->callback_param = &sspi->tx_done; 384 dma_async_issue_pending(sspi->rx_chan);
398 385 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
399 dmaengine_submit(tx_desc); 386 sspi->base + SIRFSOC_SPI_TX_RX_EN);
400 dmaengine_submit(rx_desc); 387 if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
401 dma_async_issue_pending(sspi->tx_chan);
402 dma_async_issue_pending(sspi->rx_chan);
403 } else {
404 /* Send the first word to trigger the whole tx/rx process */
405 sspi->tx_word(sspi);
406
407 writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN |
408 SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN |
409 SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
410 SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN);
411 }
412
413 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN);
414
415 if (!IS_DMA_VALID(t)) { /* for PIO */
416 if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0)
417 dev_err(&spi->dev, "transfer timeout\n");
418 } else if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
419 dev_err(&spi->dev, "transfer timeout\n"); 388 dev_err(&spi->dev, "transfer timeout\n");
420 dmaengine_terminate_all(sspi->rx_chan); 389 dmaengine_terminate_all(sspi->rx_chan);
421 } else 390 } else
422 sspi->left_rx_word = 0; 391 sspi->left_rx_word = 0;
423
424 /* 392 /*
425 * we only wait tx-done event if transferring by DMA. for PIO, 393 * we only wait tx-done event if transferring by DMA. for PIO,
426 * we get rx data by writing tx data, so if rx is done, tx has 394 * we get rx data by writing tx data, so if rx is done, tx has
427 * done earlier 395 * done earlier
428 */ 396 */
429 if (IS_DMA_VALID(t)) { 397 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
430 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { 398 dev_err(&spi->dev, "transfer timeout\n");
431 dev_err(&spi->dev, "transfer timeout\n"); 399 dmaengine_terminate_all(sspi->tx_chan);
432 dmaengine_terminate_all(sspi->tx_chan);
433 }
434 }
435
436 if (IS_DMA_VALID(t)) {
437 dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
438 dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
439 } 400 }
440 401 dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
402 dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
441 /* TX, RX FIFO stop */ 403 /* TX, RX FIFO stop */
442 writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 404 writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
443 writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 405 writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
444 writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN); 406 if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX)
445 writel(0, sspi->base + SIRFSOC_SPI_INT_EN); 407 writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
408}
409
410static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
411 struct spi_transfer *t)
412{
413 struct sirfsoc_spi *sspi;
414 int timeout = t->len * 10;
415
416 sspi = spi_master_get_devdata(spi->master);
417 do {
418 writel(SIRFSOC_SPI_FIFO_RESET,
419 sspi->base + SIRFSOC_SPI_RXFIFO_OP);
420 writel(SIRFSOC_SPI_FIFO_RESET,
421 sspi->base + SIRFSOC_SPI_TXFIFO_OP);
422 writel(SIRFSOC_SPI_FIFO_START,
423 sspi->base + SIRFSOC_SPI_RXFIFO_OP);
424 writel(SIRFSOC_SPI_FIFO_START,
425 sspi->base + SIRFSOC_SPI_TXFIFO_OP);
426 writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
427 writel(SIRFSOC_SPI_INT_MASK_ALL,
428 sspi->base + SIRFSOC_SPI_INT_STATUS);
429 writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
430 SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR,
431 sspi->base + SIRFSOC_SPI_CTRL);
432 writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width))
433 - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
434 writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width))
435 - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
436 while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
437 & SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word)
438 sspi->tx_word(sspi);
439 writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
440 SIRFSOC_SPI_TX_UFLOW_INT_EN |
441 SIRFSOC_SPI_RX_OFLOW_INT_EN,
442 sspi->base + SIRFSOC_SPI_INT_EN);
443 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
444 sspi->base + SIRFSOC_SPI_TX_RX_EN);
445 if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
446 !wait_for_completion_timeout(&sspi->rx_done, timeout)) {
447 dev_err(&spi->dev, "transfer timeout\n");
448 break;
449 }
450 while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
451 & SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word)
452 sspi->rx_word(sspi);
453 writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
454 writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
455 } while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
456}
457
458static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
459{
460 struct sirfsoc_spi *sspi;
461 sspi = spi_master_get_devdata(spi->master);
462
463 sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
464 sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
465 sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
466 reinit_completion(&sspi->rx_done);
467 reinit_completion(&sspi->tx_done);
468 /*
469 * in the transfer, if transfer data using command register with rx_buf
470 * null, just fill command data into command register and wait for its
471 * completion.
472 */
473 if (sspi->tx_by_cmd)
474 spi_sirfsoc_cmd_transfer(spi, t);
475 else if (IS_DMA_VALID(t))
476 spi_sirfsoc_dma_transfer(spi, t);
477 else
478 spi_sirfsoc_pio_transfer(spi, t);
446 479
447 return t->len - sspi->left_rx_word * sspi->word_width; 480 return t->len - sspi->left_rx_word * sspi->word_width;
448} 481}
@@ -470,7 +503,16 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
470 writel(regval, sspi->base + SIRFSOC_SPI_CTRL); 503 writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
471 } else { 504 } else {
472 int gpio = sspi->chipselect[spi->chip_select]; 505 int gpio = sspi->chipselect[spi->chip_select];
473 gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1); 506 switch (value) {
507 case BITBANG_CS_ACTIVE:
508 gpio_direction_output(gpio,
509 spi->mode & SPI_CS_HIGH ? 1 : 0);
510 break;
511 case BITBANG_CS_INACTIVE:
512 gpio_direction_output(gpio,
513 spi->mode & SPI_CS_HIGH ? 0 : 1);
514 break;
515 }
474 } 516 }
475} 517}
476 518
@@ -503,7 +545,8 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
503 break; 545 break;
504 case 12: 546 case 12:
505 case 16: 547 case 16:
506 regval |= (bits_per_word == 12) ? SIRFSOC_SPI_TRAN_DAT_FORMAT_12 : 548 regval |= (bits_per_word == 12) ?
549 SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
507 SIRFSOC_SPI_TRAN_DAT_FORMAT_16; 550 SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
508 sspi->rx_word = spi_sirfsoc_rx_word_u16; 551 sspi->rx_word = spi_sirfsoc_rx_word_u16;
509 sspi->tx_word = spi_sirfsoc_tx_word_u16; 552 sspi->tx_word = spi_sirfsoc_tx_word_u16;
@@ -531,8 +574,8 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
531 regval |= SIRFSOC_SPI_CLK_IDLE_STAT; 574 regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
532 575
533 /* 576 /*
534 * Data should be driven at least 1/2 cycle before the fetch edge to make 577 * Data should be driven at least 1/2 cycle before the fetch edge
535 * sure that data gets stable at the fetch edge. 578 * to make sure that data gets stable at the fetch edge.
536 */ 579 */
537 if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) || 580 if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
538 (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) 581 (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
@@ -559,16 +602,24 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
559 regval &= ~SIRFSOC_SPI_CMD_MODE; 602 regval &= ~SIRFSOC_SPI_CMD_MODE;
560 sspi->tx_by_cmd = false; 603 sspi->tx_by_cmd = false;
561 } 604 }
605 /*
606 * set spi controller in RISC chipselect mode, we are controlling CS by
607 * software BITBANG_CS_ACTIVE and BITBANG_CS_INACTIVE.
608 */
609 regval |= SIRFSOC_SPI_CS_IO_MODE;
562 writel(regval, sspi->base + SIRFSOC_SPI_CTRL); 610 writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
563 611
564 if (IS_DMA_VALID(t)) { 612 if (IS_DMA_VALID(t)) {
565 /* Enable DMA mode for RX, TX */ 613 /* Enable DMA mode for RX, TX */
566 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); 614 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
567 writel(SIRFSOC_SPI_RX_DMA_FLUSH, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); 615 writel(SIRFSOC_SPI_RX_DMA_FLUSH,
616 sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
568 } else { 617 } else {
569 /* Enable IO mode for RX, TX */ 618 /* Enable IO mode for RX, TX */
570 writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); 619 writel(SIRFSOC_SPI_IO_MODE_SEL,
571 writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); 620 sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
621 writel(SIRFSOC_SPI_IO_MODE_SEL,
622 sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
572 } 623 }
573 624
574 return 0; 625 return 0;
@@ -598,7 +649,8 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
598 goto err_cs; 649 goto err_cs;
599 } 650 }
600 651
601 master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs); 652 master = spi_alloc_master(&pdev->dev,
653 sizeof(*sspi) + sizeof(int) * num_cs);
602 if (!master) { 654 if (!master) {
603 dev_err(&pdev->dev, "Unable to allocate SPI master\n"); 655 dev_err(&pdev->dev, "Unable to allocate SPI master\n");
604 return -ENOMEM; 656 return -ENOMEM;
@@ -794,8 +846,7 @@ static struct platform_driver spi_sirfsoc_driver = {
794 .remove = spi_sirfsoc_remove, 846 .remove = spi_sirfsoc_remove,
795}; 847};
796module_platform_driver(spi_sirfsoc_driver); 848module_platform_driver(spi_sirfsoc_driver);
797
798MODULE_DESCRIPTION("SiRF SoC SPI master driver"); 849MODULE_DESCRIPTION("SiRF SoC SPI master driver");
799MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>, " 850MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
800 "Barry Song <Baohua.Song@csr.com>"); 851MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
801MODULE_LICENSE("GPL v2"); 852MODULE_LICENSE("GPL v2");
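
The spi-sirf.c chip-select rework above drives the CS GPIO from both the bitbang state and the SPI_CS_HIGH mode flag. A small stand-alone sketch of the resulting truth table, in plain C and for illustration only:

/*
 * Assert/release levels are inverted when the device uses an
 * active-high chip select (SPI_CS_HIGH).
 */
#include <stdbool.h>
#include <stdio.h>

enum cs_state { CS_INACTIVE, CS_ACTIVE };	/* mirrors BITBANG_CS_* */

static int cs_gpio_level(enum cs_state state, bool cs_active_high)
{
	if (state == CS_ACTIVE)
		return cs_active_high ? 1 : 0;	/* assert the line */
	return cs_active_high ? 0 : 1;		/* release the line */
}

int main(void)
{
	printf("active-low CS : assert=%d release=%d\n",
	       cs_gpio_level(CS_ACTIVE, false), cs_gpio_level(CS_INACTIVE, false));
	printf("active-high CS: assert=%d release=%d\n",
	       cs_gpio_level(CS_ACTIVE, true), cs_gpio_level(CS_INACTIVE, true));
	return 0;
}
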
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 400649595505..e4a85ada861d 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -1012,7 +1012,7 @@ static irqreturn_t tegra_spi_isr(int irq, void *context_data)
1012 return IRQ_WAKE_THREAD; 1012 return IRQ_WAKE_THREAD;
1013} 1013}
1014 1014
1015static struct of_device_id tegra_spi_of_match[] = { 1015static const struct of_device_id tegra_spi_of_match[] = {
1016 { .compatible = "nvidia,tegra114-spi", }, 1016 { .compatible = "nvidia,tegra114-spi", },
1017 {} 1017 {}
1018}; 1018};
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 47869ea636e1..3548ce25c08f 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -419,7 +419,7 @@ static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
419 return handle_cpu_based_xfer(tsd); 419 return handle_cpu_based_xfer(tsd);
420} 420}
421 421
422static struct of_device_id tegra_sflash_of_match[] = { 422static const struct of_device_id tegra_sflash_of_match[] = {
423 { .compatible = "nvidia,tegra20-sflash", }, 423 { .compatible = "nvidia,tegra20-sflash", },
424 {} 424 {}
425}; 425};
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index e3c1b93e45d1..0b9e32e9f493 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1001,7 +1001,7 @@ static const struct tegra_slink_chip_data tegra20_spi_cdata = {
1001 .cs_hold_time = false, 1001 .cs_hold_time = false,
1002}; 1002};
1003 1003
1004static struct of_device_id tegra_slink_of_match[] = { 1004static const struct of_device_id tegra_slink_of_match[] = {
1005 { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, }, 1005 { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, },
1006 { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, }, 1006 { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, },
1007 {} 1007 {}
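
The three Tegra changes above all constify the of_device_id match tables, which lets them live in read-only data. A minimal sketch of the resulting pattern; the compatible string and table name are placeholders, not from the patch:

/* Sketch of a constified OF match table. */
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_spi_of_match[] = {
	{ .compatible = "vendor,example-spi", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_spi_of_match);
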
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
index 2d4010d80824..daf5aa1c24c3 100644
--- a/drivers/spi/spi-tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -253,10 +253,8 @@ static int tle62x0_probe(struct spi_device *spi)
253 } 253 }
254 254
255 st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL); 255 st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL);
256 if (st == NULL) { 256 if (st == NULL)
257 dev_err(&spi->dev, "no memory for device state\n");
258 return -ENOMEM; 257 return -ENOMEM;
259 }
260 258
261 st->us = spi; 259 st->us = spi;
262 st->nr_gpio = pdata->gpio_count; 260 st->nr_gpio = pdata->gpio_count;
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index f406b30af961..f05abf89c067 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1578,14 +1578,11 @@ static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1578 struct pch_pd_dev_save *pd_dev_save; 1578 struct pch_pd_dev_save *pd_dev_save;
1579 1579
1580 pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL); 1580 pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
1581 if (!pd_dev_save) { 1581 if (!pd_dev_save)
1582 dev_err(&pdev->dev, "%s Can't allocate pd_dev_sav\n", __func__);
1583 return -ENOMEM; 1582 return -ENOMEM;
1584 }
1585 1583
1586 board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL); 1584 board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
1587 if (!board_dat) { 1585 if (!board_dat) {
1588 dev_err(&pdev->dev, "%s Can't allocate board_dat\n", __func__);
1589 retval = -ENOMEM; 1586 retval = -ENOMEM;
1590 goto err_no_mem; 1587 goto err_no_mem;
1591 } 1588 }
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4eb9bf02996c..d4f9670b51bc 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
580 spi->master->set_cs(spi, !enable); 580 spi->master->set_cs(spi, !enable);
581} 581}
582 582
583#ifdef CONFIG_HAS_DMA
583static int spi_map_buf(struct spi_master *master, struct device *dev, 584static int spi_map_buf(struct spi_master *master, struct device *dev,
584 struct sg_table *sgt, void *buf, size_t len, 585 struct sg_table *sgt, void *buf, size_t len,
585 enum dma_data_direction dir) 586 enum dma_data_direction dir)
@@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
637 } 638 }
638} 639}
639 640
640static int spi_map_msg(struct spi_master *master, struct spi_message *msg) 641static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
641{ 642{
642 struct device *tx_dev, *rx_dev; 643 struct device *tx_dev, *rx_dev;
643 struct spi_transfer *xfer; 644 struct spi_transfer *xfer;
644 void *tmp;
645 unsigned int max_tx, max_rx;
646 int ret; 645 int ret;
647 646
648 if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
649 max_tx = 0;
650 max_rx = 0;
651
652 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
653 if ((master->flags & SPI_MASTER_MUST_TX) &&
654 !xfer->tx_buf)
655 max_tx = max(xfer->len, max_tx);
656 if ((master->flags & SPI_MASTER_MUST_RX) &&
657 !xfer->rx_buf)
658 max_rx = max(xfer->len, max_rx);
659 }
660
661 if (max_tx) {
662 tmp = krealloc(master->dummy_tx, max_tx,
663 GFP_KERNEL | GFP_DMA);
664 if (!tmp)
665 return -ENOMEM;
666 master->dummy_tx = tmp;
667 memset(tmp, 0, max_tx);
668 }
669
670 if (max_rx) {
671 tmp = krealloc(master->dummy_rx, max_rx,
672 GFP_KERNEL | GFP_DMA);
673 if (!tmp)
674 return -ENOMEM;
675 master->dummy_rx = tmp;
676 }
677
678 if (max_tx || max_rx) {
679 list_for_each_entry(xfer, &msg->transfers,
680 transfer_list) {
681 if (!xfer->tx_buf)
682 xfer->tx_buf = master->dummy_tx;
683 if (!xfer->rx_buf)
684 xfer->rx_buf = master->dummy_rx;
685 }
686 }
687 }
688
689 if (!master->can_dma) 647 if (!master->can_dma)
690 return 0; 648 return 0;
691 649
@@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
742 700
743 return 0; 701 return 0;
744} 702}
703#else /* !CONFIG_HAS_DMA */
704static inline int __spi_map_msg(struct spi_master *master,
705 struct spi_message *msg)
706{
707 return 0;
708}
709
710static inline int spi_unmap_msg(struct spi_master *master,
711 struct spi_message *msg)
712{
713 return 0;
714}
715#endif /* !CONFIG_HAS_DMA */
716
717static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
718{
719 struct spi_transfer *xfer;
720 void *tmp;
721 unsigned int max_tx, max_rx;
722
723 if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
724 max_tx = 0;
725 max_rx = 0;
726
727 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
728 if ((master->flags & SPI_MASTER_MUST_TX) &&
729 !xfer->tx_buf)
730 max_tx = max(xfer->len, max_tx);
731 if ((master->flags & SPI_MASTER_MUST_RX) &&
732 !xfer->rx_buf)
733 max_rx = max(xfer->len, max_rx);
734 }
735
736 if (max_tx) {
737 tmp = krealloc(master->dummy_tx, max_tx,
738 GFP_KERNEL | GFP_DMA);
739 if (!tmp)
740 return -ENOMEM;
741 master->dummy_tx = tmp;
742 memset(tmp, 0, max_tx);
743 }
744
745 if (max_rx) {
746 tmp = krealloc(master->dummy_rx, max_rx,
747 GFP_KERNEL | GFP_DMA);
748 if (!tmp)
749 return -ENOMEM;
750 master->dummy_rx = tmp;
751 }
752
753 if (max_tx || max_rx) {
754 list_for_each_entry(xfer, &msg->transfers,
755 transfer_list) {
756 if (!xfer->tx_buf)
757 xfer->tx_buf = master->dummy_tx;
758 if (!xfer->rx_buf)
759 xfer->rx_buf = master->dummy_rx;
760 }
761 }
762 }
763
764 return __spi_map_msg(master, msg);
765}
745 766
746/* 767/*
747 * spi_transfer_one_message - Default implementation of transfer_one_message() 768 * spi_transfer_one_message - Default implementation of transfer_one_message()
@@ -775,7 +796,7 @@ static int spi_transfer_one_message(struct spi_master *master,
775 if (ret > 0) { 796 if (ret > 0) {
776 ret = 0; 797 ret = 0;
777 ms = xfer->len * 8 * 1000 / xfer->speed_hz; 798 ms = xfer->len * 8 * 1000 / xfer->speed_hz;
778 ms += 10; /* some tolerance */ 799 ms += ms + 100; /* some tolerance */
779 800
780 ms = wait_for_completion_timeout(&master->xfer_completion, 801 ms = wait_for_completion_timeout(&master->xfer_completion,
781 msecs_to_jiffies(ms)); 802 msecs_to_jiffies(ms));
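
For the timeout change in this hunk: the core still estimates the on-the-wire time from len and speed_hz, but now doubles that estimate and adds 100 ms of slack instead of a flat 10 ms. A small worked example with made-up numbers:

/*
 * Worked example of the relaxed completion timeout; the transfer
 * length and clock rate below are illustrative values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long len = 4096;		/* bytes */
	unsigned long speed_hz = 1000000;	/* 1 MHz */

	unsigned long ms = len * 8 * 1000 / speed_hz;	/* ~32 ms on the wire */
	unsigned long old_timeout = ms + 10;		/* previous: fixed 10 ms slack */
	unsigned long new_timeout = ms + ms + 100;	/* now: ms += ms + 100 */

	printf("estimated transfer time: %lu ms\n", ms);
	printf("old timeout: %lu ms, new timeout: %lu ms\n",
	       old_timeout, new_timeout);
	return 0;
}
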
@@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master)
1151 1172 {
1152 1173 int ret;
1153 1174
1154 master->queued = true;
1155 1175 master->transfer = spi_queued_transfer;
1156 1176 if (!master->transfer_one_message)
1157 1177 master->transfer_one_message = spi_transfer_one_message;
@@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master)
1162 1182 dev_err(&master->dev, "problem initializing queue\n");
1163 1183 goto err_init_queue;
1164 1184 }
1185 master->queued = true;
1165 1186 ret = spi_start_queue(master);
1166 1187 if (ret) {
1167 1188 dev_err(&master->dev, "problem starting queue\n");
@@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
1171 1192 return 0;
1172 1193
1173 1194 err_start_queue:
1174err_init_queue:
1175 1195 spi_destroy_queue(master);
1196err_init_queue:
1176 1197 return ret;
1177 1198 }
1178 1199
@@ -1234,6 +1255,8 @@ static void of_register_spi_devices(struct spi_master *master)
1234 1255 spi->mode |= SPI_CS_HIGH;
1235 1256 if (of_find_property(nc, "spi-3wire", NULL))
1236 1257 spi->mode |= SPI_3WIRE;
1258 if (of_find_property(nc, "spi-lsb-first", NULL))
1259 spi->mode |= SPI_LSB_FIRST;
1237 1260
1238 1261 /* Device DUAL/QUAD mode */
1239 1262 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
@@ -1247,11 +1270,10 @@ static void of_register_spi_devices(struct spi_master *master)
1247 1270 spi->mode |= SPI_TX_QUAD;
1248 1271 break;
1249 1272 default:
1250 dev_err(&master->dev,
1273 dev_warn(&master->dev,
1251 1274 "spi-tx-bus-width %d not supported\n",
1252 1275 value);
1253 spi_dev_put(spi);
1276 break;
1254 continue;
1255 1277 }
1256 1278 }
1257 1279
@@ -1266,11 +1288,10 @@ static void of_register_spi_devices(struct spi_master *master)
1266 1288 spi->mode |= SPI_RX_QUAD;
1267 1289 break;
1268 1290 default:
1269 dev_err(&master->dev,
1291 dev_warn(&master->dev,
1270 1292 "spi-rx-bus-width %d not supported\n",
1271 1293 value);
1272 spi_dev_put(spi);
1294 break;
1273 continue;
1274 1295 }
1275 1296 }
1276 1297
@@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
1756 1777  */
1757 1778 int spi_setup(struct spi_device *spi)
1758 1779 {
1759 unsigned bad_bits;
1780 unsigned bad_bits, ugly_bits;
1760 1781 int status = 0;
1761 1782
1762 1783 /* check mode to prevent that DUAL and QUAD set at the same time
@@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi)
1776 1797  * that aren't supported with their current master
1777 1798  */
1778 1799 bad_bits = spi->mode & ~spi->master->mode_bits;
1800 ugly_bits = bad_bits &
1801 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
1802 if (ugly_bits) {
1803 dev_warn(&spi->dev,
1804 "setup: ignoring unsupported mode bits %x\n",
1805 ugly_bits);
1806 spi->mode &= ~ugly_bits;
1807 bad_bits &= ~ugly_bits;
1808 }
1779 1809 if (bad_bits) {
1780 1810 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
1781 1811 bad_bits);
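
The spi_setup() hunk above starts treating unsupported dual/quad transfer bits as something to drop with a warning rather than a hard error, so a device that merely prefers wide I/O can still be set up in single-bit mode. Below is a minimal standalone C sketch of that filtering idea; the MODE_* masks and the example mode values are invented for illustration and are not the kernel's SPI_* definitions.

#include <stdio.h>

/* Illustrative stand-ins for a controller's supported-mode mask. */
#define MODE_CPHA     (1u << 0)
#define MODE_CPOL     (1u << 1)
#define MODE_TX_DUAL  (1u << 8)
#define MODE_TX_QUAD  (1u << 9)
#define MODE_RX_DUAL  (1u << 10)
#define MODE_RX_QUAD  (1u << 11)

static int setup_mode(unsigned int requested, unsigned int supported)
{
        unsigned int bad_bits = requested & ~supported;
        /* Dual/quad I/O is only an optimisation: drop it with a warning. */
        unsigned int ugly_bits = bad_bits &
                (MODE_TX_DUAL | MODE_TX_QUAD | MODE_RX_DUAL | MODE_RX_QUAD);

        if (ugly_bits) {
                printf("ignoring unsupported mode bits %x\n", ugly_bits);
                requested &= ~ugly_bits;
                bad_bits &= ~ugly_bits;
        }
        /* Anything else the controller cannot do is still a hard error. */
        if (bad_bits) {
                printf("unsupported mode bits %x\n", bad_bits);
                return -1;
        }
        printf("effective mode %x\n", requested);
        return 0;
}

int main(void)
{
        /* Device asks for CPOL plus quad TX on a controller without quad support. */
        return setup_mode(MODE_CPOL | MODE_TX_QUAD, MODE_CPHA | MODE_CPOL);
}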
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index ea5efb426f75..22365f140bec 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -40,8 +40,6 @@ source "drivers/staging/olpc_dcon/Kconfig"
40 40
41 41 source "drivers/staging/panel/Kconfig"
42 42
43source "drivers/staging/rtl8187se/Kconfig"
44
45 43 source "drivers/staging/rtl8192u/Kconfig"
46 44
47 45 source "drivers/staging/rtl8192e/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 86e020c2ad0d..fbe84ed2d048 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_PRISM2_USB) += wlan-ng/
12 12 obj-$(CONFIG_COMEDI) += comedi/
13 13 obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
14 14 obj-$(CONFIG_PANEL) += panel/
15obj-$(CONFIG_R8187SE) += rtl8187se/
16 15 obj-$(CONFIG_RTL8192U) += rtl8192u/
17 16 obj-$(CONFIG_RTL8192E) += rtl8192e/
18 17 obj-$(CONFIG_R8712U) += rtl8712/
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index 924fce977985..257595016161 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -61,6 +61,8 @@ static void __comedi_buf_free(struct comedi_device *dev,
61 61 struct comedi_subdevice *s)
62 62 {
63 63 struct comedi_async *async = s->async;
64 struct comedi_buf_map *bm;
65 unsigned long flags;
64 66
65 67 if (async->prealloc_buf) {
66 68 vunmap(async->prealloc_buf);
@@ -68,8 +70,11 @@ static void __comedi_buf_free(struct comedi_device *dev,
68 70 async->prealloc_bufsz = 0;
69 71 }
70 72
71 comedi_buf_map_put(async->buf_map);
73 spin_lock_irqsave(&s->spin_lock, flags);
74 bm = async->buf_map;
72 75 async->buf_map = NULL;
76 spin_unlock_irqrestore(&s->spin_lock, flags);
77 comedi_buf_map_put(bm);
73 78 }
74 79
75 80 static void __comedi_buf_alloc(struct comedi_device *dev,
@@ -80,6 +85,7 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
80 85 struct page **pages = NULL;
81 86 struct comedi_buf_map *bm;
82 87 struct comedi_buf_page *buf;
88 unsigned long flags;
83 89 unsigned i;
84 90
85 91 if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
@@ -92,8 +98,10 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
92 98 if (!bm)
93 99 return;
94 100
95 async->buf_map = bm;
96 101 kref_init(&bm->refcount);
102 spin_lock_irqsave(&s->spin_lock, flags);
103 async->buf_map = bm;
104 spin_unlock_irqrestore(&s->spin_lock, flags);
97 105 bm->dma_dir = s->async_dma_dir;
98 106 if (bm->dma_dir != DMA_NONE)
99 107 /* Need ref to hardware device to free buffer later. */
@@ -127,7 +135,9 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
127 135
128 136 pages[i] = virt_to_page(buf->virt_addr);
129 137 }
138 spin_lock_irqsave(&s->spin_lock, flags);
130 139 bm->n_pages = i;
140 spin_unlock_irqrestore(&s->spin_lock, flags);
131 141
132 142 /* vmap the prealloc_buf if all the pages were allocated */
133 143 if (i == n_pages)
@@ -150,6 +160,29 @@ int comedi_buf_map_put(struct comedi_buf_map *bm)
150 160 return 1;
151 161 }
152 162
163/* returns s->async->buf_map and increments its kref refcount */
164struct comedi_buf_map *
165comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
166{
167 struct comedi_async *async = s->async;
168 struct comedi_buf_map *bm = NULL;
169 unsigned long flags;
170
171 if (!async)
172 return NULL;
173
174 spin_lock_irqsave(&s->spin_lock, flags);
175 bm = async->buf_map;
176 /* only want it if buffer pages allocated */
177 if (bm && bm->n_pages)
178 comedi_buf_map_get(bm);
179 else
180 bm = NULL;
181 spin_unlock_irqrestore(&s->spin_lock, flags);
182
183 return bm;
184}
185
153 186 bool comedi_buf_is_mmapped(struct comedi_async *async)
154 187 {
155 188 struct comedi_buf_map *bm = async->buf_map;
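
The comedi_buf.c hunks above move publication and teardown of async->buf_map under s->spin_lock and add comedi_buf_map_from_subdev_get(), which only bumps the refcount while that lock is held, so a lookup can no longer race with a concurrent free. Here is a userspace sketch of the same look-up-and-get-under-a-lock pattern, using C11 atomics and a pthread mutex instead of kref and a kernel spinlock; the buf_map type and its fields are illustrative, not the comedi structures.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct buf_map {                /* stand-in for the refcounted map object */
        atomic_int refcount;
        int n_pages;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf_map *current_map;    /* protected by 'lock' */

static void buf_map_put(struct buf_map *bm)
{
        if (bm && atomic_fetch_sub(&bm->refcount, 1) == 1)
                free(bm);       /* last reference dropped */
}

/* Return the current map with an extra reference, or NULL. */
static struct buf_map *buf_map_get(void)
{
        struct buf_map *bm;

        pthread_mutex_lock(&lock);
        bm = current_map;
        if (bm && bm->n_pages)          /* only hand out fully set-up maps */
                atomic_fetch_add(&bm->refcount, 1);
        else
                bm = NULL;
        pthread_mutex_unlock(&lock);
        return bm;
}

int main(void)
{
        struct buf_map *bm = calloc(1, sizeof(*bm));

        atomic_init(&bm->refcount, 1);
        bm->n_pages = 4;
        current_map = bm;

        struct buf_map *ref = buf_map_get();
        printf("got map with %d pages\n", ref ? ref->n_pages : 0);
        buf_map_put(ref);       /* drop the lookup reference */
        buf_map_put(bm);        /* drop the owner's reference, frees the map */
        return 0;
}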
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index ea6dc36d753b..acc80197e35e 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1926,14 +1926,21 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
1926 1926 struct comedi_device *dev = file->private_data;
1927 1927 struct comedi_subdevice *s;
1928 1928 struct comedi_async *async;
1929 struct comedi_buf_map *bm;
1929 struct comedi_buf_map *bm = NULL;
1930 1930 unsigned long start = vma->vm_start;
1931 1931 unsigned long size;
1932 1932 int n_pages;
1933 1933 int i;
1934 1934 int retval;
1935 1935
1936 mutex_lock(&dev->mutex);
1936 /*
1937 * 'trylock' avoids circular dependency with current->mm->mmap_sem
1938 * and down-reading &dev->attach_lock should normally succeed without
1939 * contention unless the device is in the process of being attached
1940 * or detached.
1941 */
1942 if (!down_read_trylock(&dev->attach_lock))
1943 return -EAGAIN;
1937 1944
1938 1945 if (!dev->attached) {
1939 1946 dev_dbg(dev->class_dev, "no driver attached\n");
@@ -1973,7 +1980,9 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
1973 1980 }
1974 1981
1975 1982 n_pages = size >> PAGE_SHIFT;
1976 bm = async->buf_map;
1983
1984 /* get reference to current buf map (if any) */
1985 bm = comedi_buf_map_from_subdev_get(s);
1977 1986 if (!bm || n_pages > bm->n_pages) {
1978 1987 retval = -EINVAL;
1979 1988 goto done;
@@ -1997,7 +2006,8 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
1997 2006
1998 2007 retval = 0;
1999 2008 done:
2000 mutex_unlock(&dev->mutex);
2009 up_read(&dev->attach_lock);
2010 comedi_buf_map_put(bm); /* put reference to buf map - okay if NULL */
2001 2011 return retval;
2002 2012 }
2003 2013
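
The comedi_mmap() hunks replace mutex_lock(&dev->mutex) with down_read_trylock(&dev->attach_lock), bailing out with -EAGAIN when the lock is contended, so the mmap path cannot participate in a lock-order inversion against current->mm->mmap_sem. A small userspace analogue of that trylock-and-bail pattern using a pthread rwlock follows; the lock and the attached flag are stand-ins, not the comedi fields.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t attach_lock = PTHREAD_RWLOCK_INITIALIZER;
static int attached = 1;

/*
 * Blocking on attach_lock here could deadlock if another path ever takes
 * the two locks involved in the opposite order; trying and returning
 * -EAGAIN sidesteps the inversion at the cost of an occasional retry.
 */
static int do_mmap_like_op(void)
{
        int ret;

        if (pthread_rwlock_tryrdlock(&attach_lock) != 0)
                return -EAGAIN;

        if (!attached) {
                ret = -ENODEV;
                goto out;
        }
        ret = 0;        /* the real mapping work would happen here */
out:
        pthread_rwlock_unlock(&attach_lock);
        return ret;
}

int main(void)
{
        printf("op returned %d\n", do_mmap_like_op());
        return 0;
}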
diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h
index 9a746570f161..a492f2d2436e 100644
--- a/drivers/staging/comedi/comedi_internal.h
+++ b/drivers/staging/comedi/comedi_internal.h
@@ -19,6 +19,8 @@ void comedi_buf_reset(struct comedi_async *async);
19 19 bool comedi_buf_is_mmapped(struct comedi_async *async);
20 20 void comedi_buf_map_get(struct comedi_buf_map *bm);
21 21 int comedi_buf_map_put(struct comedi_buf_map *bm);
22 struct comedi_buf_map *comedi_buf_map_from_subdev_get(
23 struct comedi_subdevice *s);
22 24 unsigned int comedi_buf_write_n_allocated(struct comedi_async *async);
23 25 void comedi_device_cancel_all(struct comedi_device *dev);
24 26
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 71db683098d6..b59af0303581 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -493,7 +493,7 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
493 493 /* pointer to the DA */
494 494 *datap++ = val & 0xff;
495 495 *datap++ = (val >> 8) & 0xff;
496 *datap++ = chan;
496 *datap++ = chan << 6;
497 497 devpriv->ao_readback[chan] = val;
498 498
499 499 s->async->events |= COMEDI_CB_BLOCK;
@@ -1040,11 +1040,8 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
1040 1040 /* set current channel of the running acquisition to zero */
1041 1041 s->async->cur_chan = 0;
1042 1042
1043 for (i = 0; i < cmd->chanlist_len; ++i) {
1043 for (i = 0; i < cmd->chanlist_len; ++i)
1044 unsigned int chan = CR_CHAN(cmd->chanlist[i]);
1044 devpriv->ao_chanlist[i] = CR_CHAN(cmd->chanlist[i]);
1045
1046 devpriv->ao_chanlist[i] = chan << 6;
1047 }
1048 1045
1049 1046 /* we count in steps of 1ms (125us) */
1050 1047 /* 125us mode not used yet */
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c
index f96dcec740ae..7ac2602242f1 100644
--- a/drivers/staging/goldfish/goldfish_audio.c
+++ b/drivers/staging/goldfish/goldfish_audio.c
@@ -334,6 +334,7 @@ static int goldfish_audio_probe(struct platform_device *pdev)
334 334 return 0;
335 335
336 336 err_misc_register_failed:
337 free_irq(data->irq, data);
337 338 err_request_irq_failed:
338 339 dma_free_coherent(&pdev->dev, COMBINED_BUFFER_SIZE,
339 340 data->buffer_virt, data->buffer_phys);
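
The goldfish_audio hunk adds the free_irq() that was missing from the misc_register() failure path, so the error labels once again unwind every resource in reverse order of acquisition. Below is a small standalone sketch of that goto-unwind convention using plain libc resources; register_device() is a stand-in that fails on purpose, and the log path is arbitrary.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the final registration step; fails deliberately here. */
static int register_device(void)
{
        return -1;
}

static int probe(void)
{
        char *buf;
        FILE *log;

        buf = malloc(4096);
        if (!buf)
                goto err_alloc;

        log = fopen("/tmp/example.log", "w");
        if (!log)
                goto err_open;

        if (register_device() != 0)
                goto err_register;

        return 0;

        /* Unwind in reverse order of acquisition. */
err_register:
        fclose(log);
err_open:
        free(buf);
err_alloc:
        return -1;
}

int main(void)
{
        printf("probe returned %d\n", probe());
        return 0;
}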
diff --git a/drivers/staging/gs_fpgaboot/Makefile b/drivers/staging/gs_fpgaboot/Makefile
index 34cb606e0e3d..d2f0211ba540 100644
--- a/drivers/staging/gs_fpgaboot/Makefile
+++ b/drivers/staging/gs_fpgaboot/Makefile
@@ -1,4 +1,2 @@
1 1 gs_fpga-y += gs_fpgaboot.o io.o
2 2 obj-$(CONFIG_GS_FPGABOOT) += gs_fpga.o
3
4ccflags-$(CONFIG_GS_FPGA_DEBUG) := -DDEBUG
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index 89bc84d833e6..7506900c9b8d 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -373,7 +373,6 @@ static int __init gs_fpgaboot_init(void)
373 373 r = -1;
374 374
375 375 pr_info("FPGA DOWNLOAD --->\n");
376 pr_info("built at %s UTC\n", __TIMESTAMP__);
377 376
378 377 pr_info("FPGA image file name: %s\n", file);
379 378
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 11fb95201545..dae8d1a9038e 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -1526,7 +1526,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
1526 1526 struct resource *iores;
1527 1527 int ret = 0, touch_ret;
1528 1528 int i, s;
1529 unsigned int scale_uv;
1529 uint64_t scale_uv;
1530 1530
1531 1531 /* Allocate the IIO device. */
1532 1532 iio = devm_iio_device_alloc(dev, sizeof(*lradc));
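
The mxs-lradc hunk widens scale_uv from unsigned int to uint64_t, presumably so an intermediate microvolt product can no longer wrap at 32 bits. The hunk does not show the arithmetic itself, so the reference voltage and scale factor below are invented purely to demonstrate the wrap the wider type avoids.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Made-up microvolt value and factor, chosen so the 32-bit
         * product exceeds 2^32 and wraps around. */
        uint32_t vref_uv = 1850000;
        uint32_t factor = 4096;

        uint32_t narrow = vref_uv * factor;            /* wraps modulo 2^32 */
        uint64_t wide = (uint64_t)vref_uv * factor;    /* keeps the full value */

        printf("32-bit product: %" PRIu32 "\n", narrow);
        printf("64-bit product: %" PRIu64 "\n", wide);
        return 0;
}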
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index 36eedd8a0ea9..017d2f8379b7 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -70,6 +70,7 @@ static int ad2s1200_read_raw(struct iio_dev *indio_dev,
70 70 vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
71 71 vel = (vel << 4) >> 4;
72 72 *val = vel;
73 break;
73 74 default:
74 75 mutex_unlock(&st->lock);
75 76 return -EINVAL;
@@ -106,7 +107,7 @@ static int ad2s1200_probe(struct spi_device *spi)
106 107 int pn, ret = 0;
107 108 unsigned short *pins = spi->dev.platform_data;
108 109
109 for (pn = 0; pn < AD2S1200_PN; pn++)
110 for (pn = 0; pn < AD2S1200_PN; pn++) {
110 111 ret = devm_gpio_request_one(&spi->dev, pins[pn], GPIOF_DIR_OUT,
111 112 DRV_NAME);
112 113 if (ret) {
@@ -114,6 +115,7 @@ static int ad2s1200_probe(struct spi_device *spi)
114 115 pins[pn]);
115 116 return ret;
116 117 }
118 }
117 119 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
118 120 if (!indio_dev)
119 121 return -ENOMEM;
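
The first ad2s1200 hunk adds the break that was missing after the velocity case, which had let a successful read fall straight through into the default error path; the second hunk adds the braces the for loop needs so the error check runs on every pin. A minimal standalone reproduction of the fall-through hazard the added break fixes:

#include <stdio.h>

static int read_value(int type, int *val)
{
        switch (type) {
        case 0:
                *val = 42;
                /* Without this break, control falls into "default"
                 * and a successful read is reported as an error. */
                break;
        default:
                return -1;
        }
        return 0;
}

int main(void)
{
        int v = 0;

        printf("ret=%d val=%d\n", read_value(0, &v), v);
        return 0;
}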
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 4144a75e5f71..c270c9ae6d27 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -517,7 +517,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
517 517 of_node_put(port);
518 518 if (port == imx_crtc->port) {
519 519 ret = of_graph_parse_endpoint(ep, &endpoint);
520 return ret ? ret : endpoint.id;
520 return ret ? ret : endpoint.port;
521 521 }
522 522 } while (ep);
523 523
@@ -675,6 +675,11 @@ static int imx_drm_platform_probe(struct platform_device *pdev)
675 675 if (!remote || !of_device_is_available(remote)) {
676 676 of_node_put(remote);
677 677 continue;
678 } else if (!of_device_is_available(remote->parent)) {
679 dev_warn(&pdev->dev, "parent device of %s is not available\n",
680 remote->full_name);
681 of_node_put(remote);
682 continue;
678 683 }
679 684
680 685 ret = imx_drm_add_component(&pdev->dev, remote);
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 575533f4fd64..a23f4f773146 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -582,7 +582,7 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
582 582 tve->dev = dev;
583 583 spin_lock_init(&tve->lock);
584 584
585 ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0);
585 ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
586 586 if (ddc_node) {
587 587 tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
588 588 of_node_put(ddc_node);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 8c101cbbee97..acc8184c46cd 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -1247,9 +1247,18 @@ static int vpfe_stop_streaming(struct vb2_queue *vq)
1247 1247 struct vpfe_fh *fh = vb2_get_drv_priv(vq);
1248 1248 struct vpfe_video_device *video = fh->video;
1249 1249
1250 if (!vb2_is_streaming(vq))
1251 return 0;
1252 1250 /* release all active buffers */
1251 if (video->cur_frm == video->next_frm) {
1252 vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR);
1253 } else {
1254 if (video->cur_frm != NULL)
1255 vb2_buffer_done(&video->cur_frm->vb,
1256 VB2_BUF_STATE_ERROR);
1257 if (video->next_frm != NULL)
1258 vb2_buffer_done(&video->next_frm->vb,
1259 VB2_BUF_STATE_ERROR);
1260 }
1261
1253 1262 while (!list_empty(&video->dma_queue)) {
1254 1263 video->next_frm = list_entry(video->dma_queue.next,
1255 1264 struct vpfe_cap_buffer, list);
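
The vpfe_video stop-streaming hunk returns the in-flight cur_frm/next_frm buffers with an error status, taking care to complete them only once when both pointers refer to the same buffer. A small standalone sketch of that alias-aware release; the buffer type and the completion function are placeholders, not the vb2 API.

#include <stdio.h>

struct buffer {
        int id;
        int done;
};

static void buffer_done(struct buffer *b)
{
        if (!b)
                return;
        b->done++;
        printf("buffer %d completed\n", b->id);
}

/* Release the in-flight buffers exactly once, even if both
 * cursors point at the same buffer. */
static void release_in_flight(struct buffer *cur, struct buffer *next)
{
        if (cur == next) {
                buffer_done(cur);
        } else {
                buffer_done(cur);
                buffer_done(next);
        }
}

int main(void)
{
        struct buffer a = { .id = 1, .done = 0 };
        struct buffer b = { .id = 2, .done = 0 };
        struct buffer c = { .id = 3, .done = 0 };

        release_in_flight(&a, &a);      /* cur and next alias: one completion */
        release_in_flight(&b, &c);      /* distinct buffers: one completion each */
        return 0;
}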
diff --git a/drivers/staging/media/sn9c102/sn9c102_devtable.h b/drivers/staging/media/sn9c102/sn9c102_devtable.h
index b3d2cc729657..4ba569258498 100644
--- a/drivers/staging/media/sn9c102/sn9c102_devtable.h
+++ b/drivers/staging/media/sn9c102/sn9c102_devtable.h
@@ -48,10 +48,8 @@ static const struct usb_device_id sn9c102_id_table[] = {
48 48 { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
49 49 /* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
50 50 { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
51#endif
52 51 { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
53 52 { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
54#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
55 53 { SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), },
56 54 { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
57 55 { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
diff --git a/drivers/staging/rtl8187se/Kconfig b/drivers/staging/rtl8187se/Kconfig
deleted file mode 100644
index ff8d41ebca36..000000000000
--- a/drivers/staging/rtl8187se/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
1config R8187SE
2 tristate "RealTek RTL8187SE Wireless LAN NIC driver"
3 depends on PCI && WLAN
4 depends on m
5 select WIRELESS_EXT
6 select WEXT_PRIV
7 select EEPROM_93CX6
8 select CRYPTO
9 ---help---
10 If built as a module, it will be called r8187se.ko.
diff --git a/drivers/staging/rtl8187se/Makefile b/drivers/staging/rtl8187se/Makefile
deleted file mode 100644
index 91d1aa2830c9..000000000000
--- a/drivers/staging/rtl8187se/Makefile
+++ /dev/null
@@ -1,38 +0,0 @@
1
2#ccflags-y += -DCONFIG_IEEE80211_NOWEP=y
3#ccflags-y += -std=gnu89
4#ccflags-y += -O2
5#CC = gcc
6
7ccflags-y := -DSW_ANTE
8ccflags-y += -DTX_TRACK
9ccflags-y += -DHIGH_POWER
10ccflags-y += -DSW_DIG
11ccflags-y += -DRATE_ADAPT
12
13#enable it for legacy power save, disable it for leisure power save
14ccflags-y += -DENABLE_LPS
15
16
17#ccflags-y := -mhard-float -DCONFIG_FORCE_HARD_FLOAT=y
18
19r8187se-y := \
20 r8180_core.o \
21 r8180_wx.o \
22 r8180_rtl8225z2.o \
23 r8185b_init.o \
24 r8180_dm.o \
25 ieee80211/dot11d.o \
26 ieee80211/ieee80211_softmac.o \
27 ieee80211/ieee80211_rx.o \
28 ieee80211/ieee80211_tx.o \
29 ieee80211/ieee80211_wx.o \
30 ieee80211/ieee80211_module.o \
31 ieee80211/ieee80211_softmac_wx.o \
32 ieee80211/ieee80211_crypt.o \
33 ieee80211/ieee80211_crypt_tkip.o \
34 ieee80211/ieee80211_crypt_ccmp.o \
35 ieee80211/ieee80211_crypt_wep.o
36
37obj-$(CONFIG_R8187SE) += r8187se.o
38
diff --git a/drivers/staging/rtl8187se/Module.symvers b/drivers/staging/rtl8187se/Module.symvers
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/staging/rtl8187se/Module.symvers
+++ /dev/null
diff --git a/drivers/staging/rtl8187se/TODO b/drivers/staging/rtl8187se/TODO
deleted file mode 100644
index 704949a9da0d..000000000000
--- a/drivers/staging/rtl8187se/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
1TODO:
2- prepare private ieee80211 stack for merge with rtl8192su's version:
3 - add hwsec_active flag to struct ieee80211_device
4 - add bHwSec flag to cb_desc structure
5- switch to use shared "librtl" instead of private ieee80211 stack
6- switch to use LIB80211
7- switch to use MAC80211
8- use kernel coding style
9- checkpatch.pl fixes
10- sparse fixes
11- integrate with drivers/net/wireless/rtl818x
12
13Please send any patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/staging/rtl8187se/ieee80211/dot11d.c b/drivers/staging/rtl8187se/ieee80211/dot11d.c
deleted file mode 100644
index 4483c2c0307c..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/dot11d.c
+++ /dev/null
@@ -1,189 +0,0 @@
1#include "dot11d.h"
2
3void Dot11d_Init(struct ieee80211_device *ieee)
4{
5 PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
6
7 pDot11dInfo->bEnabled = 0;
8
9 pDot11dInfo->State = DOT11D_STATE_NONE;
10 pDot11dInfo->CountryIeLen = 0;
11 memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
12 memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
13 RESET_CIE_WATCHDOG(ieee);
14
15 netdev_info(ieee->dev, "Dot11d_Init()\n");
16}
17
18/* Reset to the state as we are just entering a regulatory domain. */
19void Dot11d_Reset(struct ieee80211_device *ieee)
20{
21 u32 i;
22 PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(ieee);
23
24 /* Clear old channel map */
25 memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
26 memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
27 /* Set new channel map */
28 for (i = 1; i <= 11; i++)
29 (pDot11dInfo->channel_map)[i] = 1;
30
31 for (i = 12; i <= 14; i++)
32 (pDot11dInfo->channel_map)[i] = 2;
33
34 pDot11dInfo->State = DOT11D_STATE_NONE;
35 pDot11dInfo->CountryIeLen = 0;
36 RESET_CIE_WATCHDOG(ieee);
37}
38
39/*
40 * Description:
41 * Update country IE from Beacon or Probe Response and configure PHY for
42 * operation in the regulatory domain.
43 *
44 * TODO:
45 * Configure Tx power.
46 *
47 * Assumption:
48 * 1. IS_DOT11D_ENABLE() is TRUE.
49 * 2. Input IE is an valid one.
50 */
51void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
52 u16 CoutryIeLen, u8 *pCoutryIe)
53{
54 PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
55 u8 i, j, NumTriples, MaxChnlNum;
56 u8 index, MaxTxPowerInDbm;
57 PCHNL_TXPOWER_TRIPLE pTriple;
58
59 if ((CoutryIeLen - 3)%3 != 0) {
60 netdev_info(dev->dev, "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
61 Dot11d_Reset(dev);
62 return;
63 }
64
65 memset(pDot11dInfo->channel_map, 0, MAX_CHANNEL_NUMBER+1);
66 memset(pDot11dInfo->MaxTxPwrDbmList, 0xFF, MAX_CHANNEL_NUMBER+1);
67 MaxChnlNum = 0;
68 NumTriples = (CoutryIeLen - 3) / 3; /* skip 3-byte country string. */
69 pTriple = (PCHNL_TXPOWER_TRIPLE)(pCoutryIe + 3);
70 for (i = 0; i < NumTriples; i++) {
71 if (MaxChnlNum >= pTriple->FirstChnl) {
72 /*
73 * It is not in a monotonically increasing order,
74 * so stop processing.
75 */
76 netdev_info(dev->dev,
77 "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........1\n");
78 Dot11d_Reset(dev);
79 return;
80 }
81 if (MAX_CHANNEL_NUMBER <
82 (pTriple->FirstChnl + pTriple->NumChnls)) {
83 /*
84 * It is not a valid set of channel id,
85 * so stop processing
86 */
87 netdev_info(dev->dev,
88 "Dot11d_UpdateCountryIe(): Invalid country IE, skip it........2\n");
89 Dot11d_Reset(dev);
90 return;
91 }
92
93 for (j = 0; j < pTriple->NumChnls; j++) {
94 index = pTriple->FirstChnl + j;
95 pDot11dInfo->channel_map[index] = 1;
96 MaxTxPowerInDbm = pTriple->MaxTxPowerInDbm;
97 pDot11dInfo->MaxTxPwrDbmList[index] = MaxTxPowerInDbm;
98 MaxChnlNum = pTriple->FirstChnl + j;
99 }
100
101 pTriple = (PCHNL_TXPOWER_TRIPLE)((u8 *)pTriple + 3);
102 }
103#if 1
104 netdev_info(dev->dev, "Channel List:");
105 for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
106 if (pDot11dInfo->channel_map[i] > 0)
107 netdev_info(dev->dev, " %d", i);
108 netdev_info(dev->dev, "\n");
109#endif
110
111 UPDATE_CIE_SRC(dev, pTaddr);
112
113 pDot11dInfo->CountryIeLen = CoutryIeLen;
114 memcpy(pDot11dInfo->CountryIeBuf, pCoutryIe, CoutryIeLen);
115 pDot11dInfo->State = DOT11D_STATE_LEARNED;
116}
117
118u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel)
119{
120 PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
121 u8 MaxTxPwrInDbm = 255;
122
123 if (MAX_CHANNEL_NUMBER < Channel) {
124 netdev_info(dev->dev, "DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
125 return MaxTxPwrInDbm;
126 }
127 if (pDot11dInfo->channel_map[Channel])
128 MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];
129
130 return MaxTxPwrInDbm;
131}
132
133
134void DOT11D_ScanComplete(struct ieee80211_device *dev)
135{
136 PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
137
138 switch (pDot11dInfo->State) {
139 case DOT11D_STATE_LEARNED:
140 pDot11dInfo->State = DOT11D_STATE_DONE;
141 break;
142
143 case DOT11D_STATE_DONE:
144 if (GET_CIE_WATCHDOG(dev) == 0) {
145 /* Reset country IE if previous one is gone. */
146 Dot11d_Reset(dev);
147 }
148 break;
149 case DOT11D_STATE_NONE:
150 break;
151 }
152}
153
154int IsLegalChannel(struct ieee80211_device *dev, u8 channel)
155{
156 PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
157
158 if (MAX_CHANNEL_NUMBER < channel) {
159 netdev_info(dev->dev, "IsLegalChannel(): Invalid Channel\n");
160 return 0;
161 }
162 if (pDot11dInfo->channel_map[channel] > 0)
163 return 1;
164 return 0;
165}
166
167int ToLegalChannel(struct ieee80211_device *dev, u8 channel)
168{
169 PRT_DOT11D_INFO pDot11dInfo = GET_DOT11D_INFO(dev);
170 u8 default_chn = 0;
171 u32 i = 0;
172
173 for (i = 1; i <= MAX_CHANNEL_NUMBER; i++) {
174 if (pDot11dInfo->channel_map[i] > 0) {
175 default_chn = i;
176 break;
177 }
178 }
179
180 if (MAX_CHANNEL_NUMBER < channel) {
181 netdev_info(dev->dev, "IsLegalChannel(): Invalid Channel\n");
182 return default_chn;
183 }
184
185 if (pDot11dInfo->channel_map[channel] > 0)
186 return channel;
187
188 return default_chn;
189}
diff --git a/drivers/staging/rtl8187se/ieee80211/dot11d.h b/drivers/staging/rtl8187se/ieee80211/dot11d.h
deleted file mode 100644
index f996691307bf..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/dot11d.h
+++ /dev/null
@@ -1,71 +0,0 @@
1#ifndef __INC_DOT11D_H
2#define __INC_DOT11D_H
3
4#include "ieee80211.h"
5
6/* #define ENABLE_DOT11D */
7
8/* #define DOT11D_MAX_CHNL_NUM 83 */
9
10typedef struct _CHNL_TXPOWER_TRIPLE {
11 u8 FirstChnl;
12 u8 NumChnls;
13 u8 MaxTxPowerInDbm;
14} CHNL_TXPOWER_TRIPLE, *PCHNL_TXPOWER_TRIPLE;
15
16typedef enum _DOT11D_STATE {
17 DOT11D_STATE_NONE = 0,
18 DOT11D_STATE_LEARNED,
19 DOT11D_STATE_DONE,
20} DOT11D_STATE;
21
22typedef struct _RT_DOT11D_INFO {
23 /* DECLARE_RT_OBJECT(RT_DOT12D_INFO); */
24
25 bool bEnabled; /* dot11MultiDomainCapabilityEnabled */
26
27 u16 CountryIeLen; /* > 0 if CountryIeBuf[] contains valid country information element. */
28 u8 CountryIeBuf[MAX_IE_LEN];
29 u8 CountryIeSrcAddr[6]; /* Source AP of the country IE. */
30 u8 CountryIeWatchdog;
31
32 u8 channel_map[MAX_CHANNEL_NUMBER+1]; /* !!!Value 0: Invalid, 1: Valid (active scan), 2: Valid (passive scan) */
33 /* u8 ChnlListLen; // #Bytes valid in ChnlList[]. */
34 /* u8 ChnlList[DOT11D_MAX_CHNL_NUM]; */
35 u8 MaxTxPwrDbmList[MAX_CHANNEL_NUMBER+1];
36
37 DOT11D_STATE State;
38} RT_DOT11D_INFO, *PRT_DOT11D_INFO;
39
40#define eqMacAddr(a, b) (((a)[0] == (b)[0] && (a)[1] == (b)[1] && (a)[2] == (b)[2] && (a)[3] == (b)[3] && (a)[4] == (b)[4] && (a)[5] == (b)[5]) ? 1:0)
41#define cpMacAddr(des, src) ((des)[0] = (src)[0], (des)[1] = (src)[1], (des)[2] = (src)[2], (des)[3] = (src)[3], (des)[4] = (src)[4], (des)[5] = (src)[5])
42#define GET_DOT11D_INFO(__pIeeeDev) ((PRT_DOT11D_INFO)((__pIeeeDev)->pDot11dInfo))
43
44#define IS_DOT11D_ENABLE(__pIeeeDev) GET_DOT11D_INFO(__pIeeeDev)->bEnabled
45#define IS_COUNTRY_IE_VALID(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen > 0)
46
47#define IS_EQUAL_CIE_SRC(__pIeeeDev, __pTa) eqMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
48#define UPDATE_CIE_SRC(__pIeeeDev, __pTa) cpMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
49
50#define IS_COUNTRY_IE_CHANGED(__pIeeeDev, __Ie) \
51 (((__Ie).Length == 0 || (__Ie).Length != GET_DOT11D_INFO(__pIeeeDev)->CountryIeLen) ? \
52 FALSE : \
53 (!memcmp(GET_DOT11D_INFO(__pIeeeDev)->CountryIeBuf, (__Ie).Octet, (__Ie).Length)))
54
55#define CIE_WATCHDOG_TH 1
56#define GET_CIE_WATCHDOG(__pIeeeDev) GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog
57#define RESET_CIE_WATCHDOG(__pIeeeDev) GET_CIE_WATCHDOG(__pIeeeDev) = 0
58#define UPDATE_CIE_WATCHDOG(__pIeeeDev) ++GET_CIE_WATCHDOG(__pIeeeDev)
59
60#define IS_DOT11D_STATE_DONE(__pIeeeDev) (GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
61
62void Dot11d_Init(struct ieee80211_device *dev);
63void Dot11d_Reset(struct ieee80211_device *dev);
64void Dot11d_UpdateCountryIe(struct ieee80211_device *dev, u8 *pTaddr,
65 u16 CoutryIeLen, u8 *pCoutryIe);
66u8 DOT11D_GetMaxTxPwrInDbm(struct ieee80211_device *dev, u8 Channel);
67void DOT11D_ScanComplete(struct ieee80211_device *dev);
68int IsLegalChannel(struct ieee80211_device *dev, u8 channel);
69int ToLegalChannel(struct ieee80211_device *dev, u8 channel);
70
71#endif /* #ifndef __INC_DOT11D_H */
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
deleted file mode 100644
index d1763b7b8f27..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ /dev/null
@@ -1,1496 +0,0 @@
1/*
2 * Merged with mainline ieee80211.h in Aug 2004. Original ieee802_11
3 * remains copyright by the original authors
4 *
5 * Portions of the merged code are based on Host AP (software wireless
6 * LAN access point) driver for Intersil Prism2/2.5/3.
7 *
8 * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
9 * <jkmaline@cc.hut.fi>
10 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
11 *
12 * Adaption to a generic IEEE 802.11 stack by James Ketrenos
13 * <jketreno@linux.intel.com>
14 * Copyright (c) 2004, Intel Corporation
15 *
16 * Modified for Realtek's wi-fi cards by Andrea Merello
17 * <andrea.merello@gmail.com>
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License version 2 as
21 * published by the Free Software Foundation. See README and COPYING for
22 * more details.
23 */
24#ifndef IEEE80211_H
25#define IEEE80211_H
26#include <linux/if_ether.h> /* ETH_ALEN */
27#include <linux/kernel.h> /* ARRAY_SIZE */
28#include <linux/jiffies.h>
29#include <linux/timer.h>
30#include <linux/sched.h>
31#include <linux/semaphore.h>
32#include <linux/wireless.h>
33#include <linux/ieee80211.h>
34#include <linux/interrupt.h>
35
36#define KEY_TYPE_NA 0x0
37#define KEY_TYPE_WEP40 0x1
38#define KEY_TYPE_TKIP 0x2
39#define KEY_TYPE_CCMP 0x4
40#define KEY_TYPE_WEP104 0x5
41
42#define aSifsTime 10
43
44#define MGMT_QUEUE_NUM 5
45
46
47#define IEEE_CMD_SET_WPA_PARAM 1
48#define IEEE_CMD_SET_WPA_IE 2
49#define IEEE_CMD_SET_ENCRYPTION 3
50#define IEEE_CMD_MLME 4
51
52#define IEEE_PARAM_WPA_ENABLED 1
53#define IEEE_PARAM_TKIP_COUNTERMEASURES 2
54#define IEEE_PARAM_DROP_UNENCRYPTED 3
55#define IEEE_PARAM_PRIVACY_INVOKED 4
56#define IEEE_PARAM_AUTH_ALGS 5
57#define IEEE_PARAM_IEEE_802_1X 6
58//It should consistent with the driver_XXX.c
59// David, 2006.9.26
60#define IEEE_PARAM_WPAX_SELECT 7
61//Added for notify the encryption type selection
62// David, 2006.9.26
63#define IEEE_PROTO_WPA 1
64#define IEEE_PROTO_RSN 2
65//Added for notify the encryption type selection
66// David, 2006.9.26
67#define IEEE_WPAX_USEGROUP 0
68#define IEEE_WPAX_WEP40 1
69#define IEEE_WPAX_TKIP 2
70#define IEEE_WPAX_WRAP 3
71#define IEEE_WPAX_CCMP 4
72#define IEEE_WPAX_WEP104 5
73
74#define IEEE_KEY_MGMT_IEEE8021X 1
75#define IEEE_KEY_MGMT_PSK 2
76
77
78
79#define IEEE_MLME_STA_DEAUTH 1
80#define IEEE_MLME_STA_DISASSOC 2
81
82
83#define IEEE_CRYPT_ERR_UNKNOWN_ALG 2
84#define IEEE_CRYPT_ERR_UNKNOWN_ADDR 3
85#define IEEE_CRYPT_ERR_CRYPT_INIT_FAILED 4
86#define IEEE_CRYPT_ERR_KEY_SET_FAILED 5
87#define IEEE_CRYPT_ERR_TX_KEY_SET_FAILED 6
88#define IEEE_CRYPT_ERR_CARD_CONF_FAILED 7
89
90
91#define IEEE_CRYPT_ALG_NAME_LEN 16
92
93extern int ieee80211_crypto_tkip_init(void);
94extern void ieee80211_crypto_tkip_exit(void);
95
96//by amy for ps
97typedef struct ieee_param {
98 u32 cmd;
99 u8 sta_addr[ETH_ALEN];
100 union {
101 struct {
102 u8 name;
103 u32 value;
104 } wpa_param;
105 struct {
106 u32 len;
107 u8 reserved[32];
108 u8 data[0];
109 } wpa_ie;
110 struct{
111 int command;
112 int reason_code;
113 } mlme;
114 struct {
115 u8 alg[IEEE_CRYPT_ALG_NAME_LEN];
116 u8 set_tx;
117 u32 err;
118 u8 idx;
119 u8 seq[8]; /* sequence counter (set: RX, get: TX) */
120 u16 key_len;
121 u8 key[0];
122 } crypt;
123
124 } u;
125}ieee_param;
126
127
128#define MSECS(t) msecs_to_jiffies(t)
129#define msleep_interruptible_rtl msleep_interruptible
130
131#define IEEE80211_DATA_LEN 2304
132/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
133 6.2.1.1.2.
134
135 The figure in section 7.1.2 suggests a body size of up to 2312
136 bytes is allowed, which is a bit confusing, I suspect this
137 represents the 2304 bytes of real data, plus a possible 8 bytes of
138 WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
139
140#define IEEE80211_3ADDR_LEN 24
141#define IEEE80211_4ADDR_LEN 30
142#define IEEE80211_FCS_LEN 4
143#define IEEE80211_HLEN IEEE80211_4ADDR_LEN
144#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
145#define IEEE80211_MGMT_HDR_LEN 24
146#define IEEE80211_DATA_HDR3_LEN 24
147#define IEEE80211_DATA_HDR4_LEN 30
148
149#define MIN_FRAG_THRESHOLD 256U
150#define MAX_FRAG_THRESHOLD 2346U
151
152/* Frame control field constants */
153#define IEEE80211_FCTL_DSTODS 0x0300 //added by david
154#define IEEE80211_FCTL_WEP 0x4000
155
156/* debug macros */
157
158#ifdef CONFIG_IEEE80211_DEBUG
159extern u32 ieee80211_debug_level;
160#define IEEE80211_DEBUG(level, fmt, args...) \
161do { if (ieee80211_debug_level & (level)) \
162 printk(KERN_DEBUG "ieee80211: %c %s " fmt, \
163 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
164#else
165#define IEEE80211_DEBUG(level, fmt, args...) do {} while (0)
166#endif /* CONFIG_IEEE80211_DEBUG */
167
168/*
169 * To use the debug system;
170 *
171 * If you are defining a new debug classification, simply add it to the #define
172 * list here in the form of:
173 *
174 * #define IEEE80211_DL_xxxx VALUE
175 *
176 * shifting value to the left one bit from the previous entry. xxxx should be
177 * the name of the classification (for example, WEP)
178 *
179 * You then need to either add a IEEE80211_xxxx_DEBUG() macro definition for your
180 * classification, or use IEEE80211_DEBUG(IEEE80211_DL_xxxx, ...) whenever you want
181 * to send output to that classification.
182 *
183 * To add your debug level to the list of levels seen when you perform
184 *
185 * % cat /proc/net/ipw/debug_level
186 *
187 * you simply need to add your entry to the ipw_debug_levels array.
188 *
189 * If you do not see debug_level in /proc/net/ipw then you do not have
190 * CONFIG_IEEE80211_DEBUG defined in your kernel configuration
191 *
192 */
193
194#define IEEE80211_DL_INFO (1<<0)
195#define IEEE80211_DL_WX (1<<1)
196#define IEEE80211_DL_SCAN (1<<2)
197#define IEEE80211_DL_STATE (1<<3)
198#define IEEE80211_DL_MGMT (1<<4)
199#define IEEE80211_DL_FRAG (1<<5)
200#define IEEE80211_DL_EAP (1<<6)
201#define IEEE80211_DL_DROP (1<<7)
202
203#define IEEE80211_DL_TX (1<<8)
204#define IEEE80211_DL_RX (1<<9)
205
206#define IEEE80211_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a)
207#define IEEE80211_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a)
208#define IEEE80211_DEBUG_INFO(f, a...) IEEE80211_DEBUG(IEEE80211_DL_INFO, f, ## a)
209
210#define IEEE80211_DEBUG_WX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_WX, f, ## a)
211#define IEEE80211_DEBUG_SCAN(f, a...) IEEE80211_DEBUG(IEEE80211_DL_SCAN, f, ## a)
212//#define IEEE_DEBUG_SCAN IEEE80211_WARNING
213#define IEEE80211_DEBUG_STATE(f, a...) IEEE80211_DEBUG(IEEE80211_DL_STATE, f, ## a)
214#define IEEE80211_DEBUG_MGMT(f, a...) IEEE80211_DEBUG(IEEE80211_DL_MGMT, f, ## a)
215#define IEEE80211_DEBUG_FRAG(f, a...) IEEE80211_DEBUG(IEEE80211_DL_FRAG, f, ## a)
216#define IEEE80211_DEBUG_EAP(f, a...) IEEE80211_DEBUG(IEEE80211_DL_EAP, f, ## a)
217#define IEEE80211_DEBUG_DROP(f, a...) IEEE80211_DEBUG(IEEE80211_DL_DROP, f, ## a)
218#define IEEE80211_DEBUG_TX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_TX, f, ## a)
219#define IEEE80211_DEBUG_RX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_RX, f, ## a)
220#include <linux/netdevice.h>
221#include <linux/if_arp.h> /* ARPHRD_ETHER */
222
223#ifndef WIRELESS_SPY
224#define WIRELESS_SPY // enable iwspy support
225#endif
226#include <net/iw_handler.h> // new driver API
227
228#ifndef ETH_P_PAE
229#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
230#endif /* ETH_P_PAE */
231
232#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
233
234#ifndef ETH_P_80211_RAW
235#define ETH_P_80211_RAW (ETH_P_ECONET + 1)
236#endif
237
238/* IEEE 802.11 defines */
239
240#define P80211_OUI_LEN 3
241
242struct ieee80211_snap_hdr {
243
244 u8 dsap; /* always 0xAA */
245 u8 ssap; /* always 0xAA */
246 u8 ctrl; /* always 0x03 */
247 u8 oui[P80211_OUI_LEN]; /* organizational universal id */
248
249} __attribute__ ((packed));
250
251#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
252
253#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
254#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
255
256#define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG)
257#define WLAN_GET_SEQ_SEQ(seq) ((seq) & IEEE80211_SCTL_SEQ)
258
259#define WLAN_CAPABILITY_BSS (1<<0)
260#define WLAN_CAPABILITY_SHORT_SLOT (1<<10)
261
262#define IEEE80211_STATMASK_SIGNAL (1<<0)
263#define IEEE80211_STATMASK_RSSI (1<<1)
264#define IEEE80211_STATMASK_NOISE (1<<2)
265#define IEEE80211_STATMASK_RATE (1<<3)
266#define IEEE80211_STATMASK_WEMASK 0x7
267
268
269#define IEEE80211_CCK_MODULATION (1<<0)
270#define IEEE80211_OFDM_MODULATION (1<<1)
271
272#define IEEE80211_24GHZ_BAND (1<<0)
273#define IEEE80211_52GHZ_BAND (1<<1)
274
275#define IEEE80211_CCK_RATE_LEN 4
276#define IEEE80211_CCK_RATE_1MB 0x02
277#define IEEE80211_CCK_RATE_2MB 0x04
278#define IEEE80211_CCK_RATE_5MB 0x0B
279#define IEEE80211_CCK_RATE_11MB 0x16
280#define IEEE80211_OFDM_RATE_LEN 8
281#define IEEE80211_OFDM_RATE_6MB 0x0C
282#define IEEE80211_OFDM_RATE_9MB 0x12
283#define IEEE80211_OFDM_RATE_12MB 0x18
284#define IEEE80211_OFDM_RATE_18MB 0x24
285#define IEEE80211_OFDM_RATE_24MB 0x30
286#define IEEE80211_OFDM_RATE_36MB 0x48
287#define IEEE80211_OFDM_RATE_48MB 0x60
288#define IEEE80211_OFDM_RATE_54MB 0x6C
289#define IEEE80211_BASIC_RATE_MASK 0x80
290
291#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
292#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
293#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
294#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
295#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
296#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
297#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
298#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
299#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
300#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
301#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
302#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
303
304#define IEEE80211_CCK_RATES_MASK 0x0000000F
305#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
306 IEEE80211_CCK_RATE_2MB_MASK)
307#define IEEE80211_CCK_DEFAULT_RATES_MASK (IEEE80211_CCK_BASIC_RATES_MASK | \
308 IEEE80211_CCK_RATE_5MB_MASK | \
309 IEEE80211_CCK_RATE_11MB_MASK)
310
311#define IEEE80211_OFDM_RATES_MASK 0x00000FF0
312#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \
313 IEEE80211_OFDM_RATE_12MB_MASK | \
314 IEEE80211_OFDM_RATE_24MB_MASK)
315#define IEEE80211_OFDM_DEFAULT_RATES_MASK (IEEE80211_OFDM_BASIC_RATES_MASK | \
316 IEEE80211_OFDM_RATE_9MB_MASK | \
317 IEEE80211_OFDM_RATE_18MB_MASK | \
318 IEEE80211_OFDM_RATE_36MB_MASK | \
319 IEEE80211_OFDM_RATE_48MB_MASK | \
320 IEEE80211_OFDM_RATE_54MB_MASK)
321#define IEEE80211_DEFAULT_RATES_MASK (IEEE80211_OFDM_DEFAULT_RATES_MASK | \
322 IEEE80211_CCK_DEFAULT_RATES_MASK)
323
324#define IEEE80211_NUM_OFDM_RATES 8
325#define IEEE80211_NUM_CCK_RATES 4
326#define IEEE80211_OFDM_SHIFT_MASK_A 4
327
328/* this is stolen and modified from the madwifi driver*/
329#define IEEE80211_FC0_TYPE_MASK 0x0c
330#define IEEE80211_FC0_TYPE_DATA 0x08
331#define IEEE80211_FC0_SUBTYPE_MASK 0xB0
332#define IEEE80211_FC0_SUBTYPE_QOS 0x80
333
334#define IEEE80211_QOS_HAS_SEQ(fc) \
335 (((fc) & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == \
336 (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))
337
338/* this is stolen from ipw2200 driver */
339#define IEEE_IBSS_MAC_HASH_SIZE 31
340struct ieee_ibss_seq {
341 u8 mac[ETH_ALEN];
342 u16 seq_num[17];
343 u16 frag_num[17];
344 unsigned long packet_time[17];
345 struct list_head list;
346};
347
348/* NOTE: This data is for statistical purposes; not all hardware provides this
349 * information for frames received. Not setting these will not cause
350 * any adverse affects. */
351struct ieee80211_rx_stats {
352 u32 mac_time[2];
353 u8 signalstrength;
354 s8 rssi;
355 u8 signal;
356 u8 noise;
357 u16 rate; /* in 100 kbps */
358 u8 received_channel;
359 u8 control;
360 u8 mask;
361 u8 freq;
362 u16 len;
363 u8 nic_type;
364};
365
366/* IEEE 802.11 requires that STA supports concurrent reception of at least
367 * three fragmented frames. This define can be increased to support more
368 * concurrent frames, but it should be noted that each entry can consume about
369 * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
370#define IEEE80211_FRAG_CACHE_LEN 4
371
372struct ieee80211_frag_entry {
373 unsigned long first_frag_time;
374 unsigned int seq;
375 unsigned int last_frag;
376 struct sk_buff *skb;
377 u8 src_addr[ETH_ALEN];
378 u8 dst_addr[ETH_ALEN];
379};
380
381struct ieee80211_stats {
382 unsigned int tx_unicast_frames;
383 unsigned int tx_multicast_frames;
384 unsigned int tx_fragments;
385 unsigned int tx_unicast_octets;
386 unsigned int tx_multicast_octets;
387 unsigned int tx_deferred_transmissions;
388 unsigned int tx_single_retry_frames;
389 unsigned int tx_multiple_retry_frames;
390 unsigned int tx_retry_limit_exceeded;
391 unsigned int tx_discards;
392 unsigned int rx_unicast_frames;
393 unsigned int rx_multicast_frames;
394 unsigned int rx_fragments;
395 unsigned int rx_unicast_octets;
396 unsigned int rx_multicast_octets;
397 unsigned int rx_fcs_errors;
398 unsigned int rx_discards_no_buffer;
399 unsigned int tx_discards_wrong_sa;
400 unsigned int rx_discards_undecryptable;
401 unsigned int rx_message_in_msg_fragments;
402 unsigned int rx_message_in_bad_msg_fragments;
403};
404
405struct ieee80211_device;
406
407#include "ieee80211_crypt.h"
408
409#define SEC_KEY_1 (1<<0)
410#define SEC_KEY_2 (1<<1)
411#define SEC_KEY_3 (1<<2)
412#define SEC_KEY_4 (1<<3)
413#define SEC_ACTIVE_KEY (1<<4)
414#define SEC_AUTH_MODE (1<<5)
415#define SEC_UNICAST_GROUP (1<<6)
416#define SEC_LEVEL (1<<7)
417#define SEC_ENABLED (1<<8)
418
419#define SEC_LEVEL_0 0 /* None */
420#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
421#define SEC_LEVEL_2 2 /* Level 1 + TKIP */
422#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */
423#define SEC_LEVEL_3 4 /* Level 2 + CCMP */
424
425#define WEP_KEYS 4
426#define WEP_KEY_LEN 13
427
428#define WEP_KEY_LEN_MODIF 32
429
430struct ieee80211_security {
431 u16 active_key:2,
432 enabled:1,
433 auth_mode:2,
434 auth_algo:4,
435 unicast_uses_group:1;
436 u8 key_sizes[WEP_KEYS];
437 u8 keys[WEP_KEYS][WEP_KEY_LEN_MODIF];
438 u8 level;
439 u16 flags;
440} __attribute__ ((packed));
441
442
443/*
444
445 802.11 data frame from AP
446
447 ,-------------------------------------------------------------------.
448Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
449 |------|------|---------|---------|---------|------|---------|------|
450Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs |
451 | | tion | (BSSID) | | | ence | data | |
452 `-------------------------------------------------------------------'
453
454Total: 28-2340 bytes
455
456*/
457
458/* Management Frame Information Element Types */
459enum {
460 MFIE_TYPE_SSID = 0,
461 MFIE_TYPE_RATES = 1,
462 MFIE_TYPE_FH_SET = 2,
463 MFIE_TYPE_DS_SET = 3,
464 MFIE_TYPE_CF_SET = 4,
465 MFIE_TYPE_TIM = 5,
466 MFIE_TYPE_IBSS_SET = 6,
467 MFIE_TYPE_COUNTRY = 7,
468 MFIE_TYPE_CHALLENGE = 16,
469 MFIE_TYPE_ERP = 42,
470 MFIE_TYPE_RSN = 48,
471 MFIE_TYPE_RATES_EX = 50,
472 MFIE_TYPE_GENERIC = 221,
473};
474
475struct ieee80211_header_data {
476 __le16 frame_ctl;
477 u16 duration_id;
478 u8 addr1[6];
479 u8 addr2[6];
480 u8 addr3[6];
481 u16 seq_ctrl;
482};
483
484struct ieee80211_hdr_4addr {
485 __le16 frame_ctl;
486 u16 duration_id;
487 u8 addr1[ETH_ALEN];
488 u8 addr2[ETH_ALEN];
489 u8 addr3[ETH_ALEN];
490 u16 seq_ctl;
491 u8 addr4[ETH_ALEN];
492} __attribute__ ((packed));
493
494struct ieee80211_hdr_3addrqos {
495 u16 frame_ctl;
496 u16 duration_id;
497 u8 addr1[ETH_ALEN];
498 u8 addr2[ETH_ALEN];
499 u8 addr3[ETH_ALEN];
500 u16 seq_ctl;
501 u16 qos_ctl;
502} __attribute__ ((packed));
503
504struct ieee80211_hdr_4addrqos {
505 u16 frame_ctl;
506 u16 duration_id;
507 u8 addr1[ETH_ALEN];
508 u8 addr2[ETH_ALEN];
509 u8 addr3[ETH_ALEN];
510 u16 seq_ctl;
511 u8 addr4[ETH_ALEN];
512 u16 qos_ctl;
513} __attribute__ ((packed));
514
515struct ieee80211_info_element_hdr {
516 u8 id;
517 u8 len;
518} __attribute__ ((packed));
519
520struct ieee80211_info_element {
521 u8 id;
522 u8 len;
523 u8 data[0];
524} __attribute__ ((packed));
525
526struct ieee80211_authentication {
527 struct ieee80211_header_data header;
528 u16 algorithm;
529 u16 transaction;
530 u16 status;
531 //struct ieee80211_info_element_hdr info_element;
532} __attribute__ ((packed));
533
534struct ieee80211_disassoc_frame {
535 struct ieee80211_hdr_3addr header;
536 u16 reasoncode;
537} __attribute__ ((packed));
538
539struct ieee80211_probe_request {
540 struct ieee80211_header_data header;
541 /* struct ieee80211_info_element info_element; */
542} __attribute__ ((packed));
543
544struct ieee80211_probe_response {
545 struct ieee80211_header_data header;
546 u32 time_stamp[2];
547 u16 beacon_interval;
548 u16 capability;
549 struct ieee80211_info_element info_element;
550} __attribute__ ((packed));
551
552struct ieee80211_assoc_request_frame {
553 struct ieee80211_hdr_3addr header;
554 u16 capability;
555 u16 listen_interval;
556 //u8 current_ap[ETH_ALEN];
557 struct ieee80211_info_element_hdr info_element;
558} __attribute__ ((packed));
559
560struct ieee80211_assoc_response_frame {
561 struct ieee80211_hdr_3addr header;
562 u16 capability;
563 u16 status;
564 u16 aid;
565 struct ieee80211_info_element info_element; /* supported rates */
566} __attribute__ ((packed));
567
568struct ieee80211_txb {
569 u8 nr_frags;
570 u8 encrypted;
571 u16 reserved;
572 u16 frag_size;
573 u16 payload_size;
574 struct sk_buff *fragments[0];
575};
576
577/* SWEEP TABLE ENTRIES NUMBER */
578#define MAX_SWEEP_TAB_ENTRIES 42
579#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7
580
581/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
582 * only use 8, and then use extended rates for the remaining supported
583 * rates. Other APs, however, stick all of their supported rates on the
584 * main rates information element... */
585#define MAX_RATES_LENGTH ((u8)12)
586#define MAX_RATES_EX_LENGTH ((u8)16)
587
588#define MAX_NETWORK_COUNT 128
589
590#define MAX_CHANNEL_NUMBER 165
591
592#define IEEE80211_SOFTMAC_SCAN_TIME 100 /* (HZ / 2) */
593#define IEEE80211_SOFTMAC_ASSOC_RETRY_TIME (HZ * 2)
594
595#define CRC_LENGTH 4U
596
597#define MAX_WPA_IE_LEN 64
598
599#define NETWORK_EMPTY_ESSID (1 << 0)
600#define NETWORK_HAS_OFDM (1 << 1)
601#define NETWORK_HAS_CCK (1 << 2)
602
603struct ieee80211_wmm_ac_param {
604 u8 ac_aci_acm_aifsn;
605 u8 ac_ecwmin_ecwmax;
606 u16 ac_txop_limit;
607};
608
609struct ieee80211_wmm_ts_info {
610 u8 ac_dir_tid;
611 u8 ac_up_psb;
612 u8 reserved;
613} __attribute__ ((packed));
614
615struct ieee80211_wmm_tspec_elem {
616 struct ieee80211_wmm_ts_info ts_info;
617 u16 norm_msdu_size;
618 u16 max_msdu_size;
619 u32 min_serv_inter;
620 u32 max_serv_inter;
621 u32 inact_inter;
622 u32 suspen_inter;
623 u32 serv_start_time;
624 u32 min_data_rate;
625 u32 mean_data_rate;
626 u32 peak_data_rate;
627 u32 max_burst_size;
628 u32 delay_bound;
629 u32 min_phy_rate;
630 u16 surp_band_allow;
631 u16 medium_time;
632}__attribute__((packed));
633
634enum eap_type {
635 EAP_PACKET = 0,
636 EAPOL_START,
637 EAPOL_LOGOFF,
638 EAPOL_KEY,
639 EAPOL_ENCAP_ASF_ALERT
640};
641
642static const char *eap_types[] = {
643 [EAP_PACKET] = "EAP-Packet",
644 [EAPOL_START] = "EAPOL-Start",
645 [EAPOL_LOGOFF] = "EAPOL-Logoff",
646 [EAPOL_KEY] = "EAPOL-Key",
647 [EAPOL_ENCAP_ASF_ALERT] = "EAPOL-Encap-ASF-Alert"
648};
649
650static inline const char *eap_get_type(int type)
651{
652 return (type >= ARRAY_SIZE(eap_types)) ? "Unknown" : eap_types[type];
653}
654
655struct eapol {
656 u8 snap[6];
657 u16 ethertype;
658 u8 version;
659 u8 type;
660 u16 length;
661} __attribute__ ((packed));
662
663struct ieee80211_softmac_stats {
664 unsigned int rx_ass_ok;
665 unsigned int rx_ass_err;
666 unsigned int rx_probe_rq;
667 unsigned int tx_probe_rs;
668 unsigned int tx_beacons;
669 unsigned int rx_auth_rq;
670 unsigned int rx_auth_rs_ok;
671 unsigned int rx_auth_rs_err;
672 unsigned int tx_auth_rq;
673 unsigned int no_auth_rs;
674 unsigned int no_ass_rs;
675 unsigned int tx_ass_rq;
676 unsigned int rx_ass_rq;
677 unsigned int tx_probe_rq;
678 unsigned int reassoc;
679 unsigned int swtxstop;
680 unsigned int swtxawake;
681};
682
683#define BEACON_PROBE_SSID_ID_POSITION 12
684
685/*
686 * These are the data types that can make up management packets
687 *
688 u16 auth_algorithm;
689 u16 auth_sequence;
690 u16 beacon_interval;
691 u16 capability;
692 u8 current_ap[ETH_ALEN];
693 u16 listen_interval;
694 struct {
695 u16 association_id:14, reserved:2;
696 } __attribute__ ((packed));
697 u32 time_stamp[2];
698 u16 reason;
699 u16 status;
700*/
701
702#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
703#define IEEE80211_DEFAULT_BASIC_RATE 10
704
705enum {WMM_all_frame, WMM_two_frame, WMM_four_frame, WMM_six_frame};
706#define MAX_SP_Len (WMM_all_frame << 4)
707#define IEEE80211_QOS_TID 0x0f
708#define QOS_CTL_NOTCONTAIN_ACK (0x01 << 5)
709
710#define MAX_IE_LEN 0xFF //+YJ,080625
711
712struct rtl8187se_channel_list {
713 u8 channel[MAX_CHANNEL_NUMBER + 1];
714 u8 len;
715};
716
717//by amy for ps
718#define IEEE80211_WATCH_DOG_TIME 2000
719//by amy for ps
720//by amy for antenna
721#define ANTENNA_DIVERSITY_TIMER_PERIOD 1000 // 1000 m
722//by amy for antenna
723
724#define IEEE80211_DTIM_MBCAST 4
725#define IEEE80211_DTIM_UCAST 2
726#define IEEE80211_DTIM_VALID 1
727#define IEEE80211_DTIM_INVALID 0
728
729#define IEEE80211_PS_DISABLED 0
730#define IEEE80211_PS_UNICAST IEEE80211_DTIM_UCAST
731#define IEEE80211_PS_MBCAST IEEE80211_DTIM_MBCAST
732#define IEEE80211_PS_ENABLE IEEE80211_DTIM_VALID
733//added by David for QoS 2006/6/30
734//#define WMM_Hang_8187
735#ifdef WMM_Hang_8187
736#undef WMM_Hang_8187
737#endif
738
739#define WME_AC_BE 0x00
740#define WME_AC_BK 0x01
741#define WME_AC_VI 0x02
742#define WME_AC_VO 0x03
743#define WME_ACI_MASK 0x03
744#define WME_AIFSN_MASK 0x03
745#define WME_AC_PRAM_LEN 16
746
747//UP Mapping to AC, using in MgntQuery_SequenceNumber() and maybe for DSCP
748//#define UP2AC(up) ((up<3) ? ((up==0)?1:0) : (up>>1))
749#define UP2AC(up) ( \
750 ((up) < 1) ? WME_AC_BE : \
751 ((up) < 3) ? WME_AC_BK : \
752 ((up) < 4) ? WME_AC_BE : \
753 ((up) < 6) ? WME_AC_VI : \
754 WME_AC_VO)
755//AC Mapping to UP, using in Tx part for selecting the corresponding TX queue
756#define AC2UP(_ac) ( \
757 ((_ac) == WME_AC_VO) ? 6 : \
758 ((_ac) == WME_AC_VI) ? 5 : \
759 ((_ac) == WME_AC_BK) ? 1 : \
760 0)
761
762#define ETHER_ADDR_LEN 6 /* length of an Ethernet address */
763struct ether_header {
764 u8 ether_dhost[ETHER_ADDR_LEN];
765 u8 ether_shost[ETHER_ADDR_LEN];
766 u16 ether_type;
767} __attribute__((packed));
768
769#ifndef ETHERTYPE_PAE
770#define ETHERTYPE_PAE 0x888e /* EAPOL PAE/802.1x */
771#endif
772#ifndef ETHERTYPE_IP
773#define ETHERTYPE_IP 0x0800 /* IP protocol */
774#endif
775
776struct ieee80211_network {
777 /* These entries are used to identify a unique network */
778 u8 bssid[ETH_ALEN];
779 u8 channel;
780 /* Ensure null-terminated for any debug msgs */
781 u8 ssid[IW_ESSID_MAX_SIZE + 1];
782 u8 ssid_len;
783
784 /* These are network statistics */
785 struct ieee80211_rx_stats stats;
786 u16 capability;
787 u8 rates[MAX_RATES_LENGTH];
788 u8 rates_len;
789 u8 rates_ex[MAX_RATES_EX_LENGTH];
790 u8 rates_ex_len;
791 unsigned long last_scanned;
792 u8 mode;
793 u8 flags;
794 u32 last_associate;
795 u32 time_stamp[2];
796 u16 beacon_interval;
797 u16 listen_interval;
798 u16 atim_window;
799 u8 wpa_ie[MAX_WPA_IE_LEN];
800 size_t wpa_ie_len;
801 u8 rsn_ie[MAX_WPA_IE_LEN];
802 size_t rsn_ie_len;
803 u8 dtim_period;
804 u8 dtim_data;
805 u32 last_dtim_sta_time[2];
806 struct list_head list;
807	//appended for QoS
808 u8 wmm_info;
809 struct ieee80211_wmm_ac_param wmm_param[4];
810 u8 QoS_Enable;
811 u8 SignalStrength;
812//by amy 080312
813 u8 HighestOperaRate;
814//by amy 080312
815 u8 Turbo_Enable;//enable turbo mode, added by thomas
816 u16 CountryIeLen;
817 u8 CountryIeBuf[MAX_IE_LEN];
818};
819
820enum ieee80211_state {
821
822 /* the card is not linked at all */
823 IEEE80211_NOLINK = 0,
824
825	/* The IEEE80211_ASSOCIATING* states are for BSS client mode;
826	 * the driver shall not perform RX filtering unless
827	 * the state is LINKED.
828	 * The driver shall just check for the state LINKED and
829	 * default to NOLINK for ALL the other states (including
830	 * LINKED_SCANNING).
831	 */
832
833 /* the association procedure will start (wq scheduling)*/
834 IEEE80211_ASSOCIATING,
835 IEEE80211_ASSOCIATING_RETRY,
836
837 /* the association procedure is sending AUTH request*/
838 IEEE80211_ASSOCIATING_AUTHENTICATING,
839
840 /* the association procedure has successfully authenticated
841 * and is sending association request
842 */
843 IEEE80211_ASSOCIATING_AUTHENTICATED,
844
845	/* the link is OK: the card is associated to a BSS, linked
846	 * to an IBSS cell, or acting as an AP and creating the BSS
847	 */
848 IEEE80211_LINKED,
849
850	/* same as LINKED, but the driver shall apply the RX filter
851	 * rules as if we were in NOLINK mode. The card is still
852	 * logically linked, but it is doing a synchronous site survey;
853	 * once the survey completes it goes back to the LINKED state.
854	 */
855 IEEE80211_LINKED_SCANNING,
856
857};
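/*
 * Illustrative sketch (hypothetical driver check, not from this header):
 * per the comments above, only LINKED counts as "link up" for RX
 * filtering; LINKED_SCANNING and the ASSOCIATING_* states are treated
 * like NOLINK.
 */
static inline int example_rx_filter_as_linked(enum ieee80211_state state)
{
	return state == IEEE80211_LINKED;
}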
858
859#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
860#define DEFAULT_FTS 2346
861
862#define CFG_IEEE80211_RESERVE_FCS (1<<0)
863#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
864
865typedef struct tx_pending_t{
866 int frag;
867 struct ieee80211_txb *txb;
868}tx_pending_t;
869
870enum {
871 COUNTRY_CODE_FCC = 0,
872 COUNTRY_CODE_IC = 1,
873 COUNTRY_CODE_ETSI = 2,
874 COUNTRY_CODE_SPAIN = 3,
875 COUNTRY_CODE_FRANCE = 4,
876 COUNTRY_CODE_MKK = 5,
877 COUNTRY_CODE_MKK1 = 6,
878 COUNTRY_CODE_ISRAEL = 7,
879 COUNTRY_CODE_TELEC = 8,
880 COUNTRY_CODE_GLOBAL_DOMAIN = 9,
881 COUNTRY_CODE_WORLD_WIDE_13_INDEX = 10
882};
883
884struct ieee80211_device {
885 struct net_device *dev;
886
887 /* Bookkeeping structures */
888 struct net_device_stats stats;
889 struct ieee80211_stats ieee_stats;
890 struct ieee80211_softmac_stats softmac_stats;
891
892 /* Probe / Beacon management */
893 struct list_head network_free_list;
894 struct list_head network_list;
895 struct ieee80211_network *networks;
896 int scans;
897 int scan_age;
898
899 int iw_mode; /* operating mode (IW_MODE_*) */
900
901 spinlock_t lock;
902 spinlock_t wpax_suitlist_lock;
903
904 int tx_headroom; /* Set to size of any additional room needed at front
905 * of allocated Tx SKBs */
906 u32 config;
907
908 /* WEP and other encryption related settings at the device level */
909 int open_wep; /* Set to 1 to allow unencrypted frames */
910
911 int reset_on_keychange; /* Set to 1 if the HW needs to be reset on
912 * WEP key changes */
913
914 /* If the host performs {en,de}cryption, then set to 1 */
915 int host_encrypt;
916 int host_decrypt;
917 int ieee802_1x; /* is IEEE 802.1X used */
918
919 /* WPA data */
920 int wpa_enabled;
921 int drop_unencrypted;
922 int tkip_countermeasures;
923 int privacy_invoked;
924 size_t wpa_ie_len;
925 u8 *wpa_ie;
926
927 u8 ap_mac_addr[6];
928 u16 pairwise_key_type;
929 u16 broadcast_key_type;
930
931 struct list_head crypt_deinit_list;
932 struct ieee80211_crypt_data *crypt[WEP_KEYS];
933 int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
934 struct timer_list crypt_deinit_timer;
935
936 int bcrx_sta_key; /* use individual keys to override default keys even
937 * with RX of broad/multicast frames */
938
939 /* Fragmentation structures */
940 /* each stream contains an entry */
941 struct ieee80211_frag_entry frag_cache[17][IEEE80211_FRAG_CACHE_LEN];
942 unsigned int frag_next_idx[17];
943 u16 fts; /* Fragmentation Threshold */
944
945	/* This stores info for the current network:
946	 * either the network we are associated with in INFRASTRUCTURE mode
947	 * or the network that we are creating in MASTER mode.
948	 * Ad-hoc is a mixture ;-).
949	 * Note that in infrastructure mode, even when not associated,
950	 * the bssid and essid fields may be valid (if wap_set and ssid_set
951	 * are true) as they carry the values set by the user via iwconfig
952	 */
953 struct ieee80211_network current_network;
954
955
956 enum ieee80211_state state;
957
958 int short_slot;
959 int mode; /* A, B, G */
960 int modulation; /* CCK, OFDM */
961 int freq_band; /* 2.4Ghz, 5.2Ghz, Mixed */
962 int abg_true; /* ABG flag */
963
964	/* used for forcing the ibss workqueue to terminate
965	 * without waiting for the synchronous scan to terminate
966	 */
967 short sync_scan_hurryup;
968
969 void * pDot11dInfo;
970 bool bGlobalDomain;
971
972 // For Liteon Ch12~13 passive scan
973 u8 MinPassiveChnlNum;
974 u8 IbssStartChnl;
975
976 int rate; /* current rate */
977 int basic_rate;
978 //FIXME: please callback, see if redundant with softmac_features
979 short active_scan;
980
981 /* this contains flags for selectively enable softmac support */
982 u16 softmac_features;
983
984 /* if the sequence control field is not filled by HW */
985 u16 seq_ctrl[5];
986
987 /* association procedure transaction sequence number */
988 u16 associate_seq;
989
990 /* AID for RTXed association responses */
991 u16 assoc_id;
992
993 /* power save mode related*/
994 short ps;
995 short sta_sleep;
996 int ps_timeout;
997 struct tasklet_struct ps_task;
998 u32 ps_th;
999 u32 ps_tl;
1000
1001 short raw_tx;
1002 /* used if IEEE_SOFTMAC_TX_QUEUE is set */
1003 short queue_stop;
1004 short scanning;
1005 short proto_started;
1006
1007 struct semaphore wx_sem;
1008 struct semaphore scan_sem;
1009
1010 spinlock_t mgmt_tx_lock;
1011 spinlock_t beacon_lock;
1012
1013 short beacon_txing;
1014
1015 short wap_set;
1016 short ssid_set;
1017
1018 u8 wpax_type_set; //{added by David, 2006.9.28}
1019 u32 wpax_type_notify; //{added by David, 2006.9.26}
1020
1021 /* QoS related flag */
1022 char init_wmmparam_flag;
1023
1024 /* for discarding duplicated packets in IBSS */
1025 struct list_head ibss_mac_hash[IEEE_IBSS_MAC_HASH_SIZE];
1026
1027 /* for discarding duplicated packets in BSS */
1028 u16 last_rxseq_num[17]; /* rx seq previous per-tid */
1029	u16 last_rxfrag_num[17];/* rx frag previous per-tid */
1030 unsigned long last_packet_time[17];
1031
1032 /* for PS mode */
1033 unsigned long last_rx_ps_time;
1034
1035 /* used if IEEE_SOFTMAC_SINGLE_QUEUE is set */
1036 struct sk_buff *mgmt_queue_ring[MGMT_QUEUE_NUM];
1037 int mgmt_queue_head;
1038 int mgmt_queue_tail;
1039
1040
1041 /* used if IEEE_SOFTMAC_TX_QUEUE is set */
1042 struct tx_pending_t tx_pending;
1043
1044 /* used if IEEE_SOFTMAC_ASSOCIATE is set */
1045 struct timer_list associate_timer;
1046
1047 /* used if IEEE_SOFTMAC_BEACONS is set */
1048 struct timer_list beacon_timer;
1049
1050 struct work_struct associate_complete_wq;
1051// struct work_struct associate_retry_wq;
1052 struct work_struct associate_procedure_wq;
1053// struct work_struct softmac_scan_wq;
1054 struct work_struct wx_sync_scan_wq;
1055 struct work_struct wmm_param_update_wq;
1056 struct work_struct ps_request_tx_ack_wq;//for ps
1057// struct work_struct hw_wakeup_wq;
1058// struct work_struct hw_sleep_wq;
1059// struct work_struct watch_dog_wq;
1060 bool bInactivePs;
1061 bool actscanning;
1062 bool beinretry;
1063 u16 ListenInterval;
1064 unsigned long NumRxDataInPeriod; //YJ,add,080828
1065 unsigned long NumRxBcnInPeriod; //YJ,add,080828
1066 unsigned long NumRxOkTotal;
1067 unsigned long NumRxUnicast;//YJ,add,080828,for keep alive
1068 bool bHwRadioOff;
1069 struct delayed_work softmac_scan_wq;
1070 struct delayed_work associate_retry_wq;
1071 struct delayed_work hw_wakeup_wq;
1072 struct delayed_work hw_sleep_wq;//+by amy 080324
1073 struct delayed_work watch_dog_wq;
1074 struct delayed_work sw_antenna_wq;
1075 struct delayed_work start_ibss_wq;
1076//by amy for rate adaptive 080312
1077 struct delayed_work rate_adapter_wq;
1078//by amy for rate adaptive
1079 struct delayed_work hw_dig_wq;
1080 struct delayed_work tx_pw_wq;
1081
1082//Added for RF power on power off by lizhaoming 080512
1083 struct delayed_work GPIOChangeRFWorkItem;
1084
1085 struct workqueue_struct *wq;
1086
1087 /* Callback functions */
1088 void (*set_security)(struct net_device *dev,
1089 struct ieee80211_security *sec);
1090
1091	/* Used to TX data frames by using txb structs.
1092	 * This is not used if the IEEE_SOFTMAC_TX_QUEUE
1093	 * flag is set in softmac_features.
1094	 */
1095 int (*hard_start_xmit)(struct ieee80211_txb *txb,
1096 struct net_device *dev);
1097
1098 int (*reset_port)(struct net_device *dev);
1099
1100 /* Softmac-generated frames (management) are TXed via this
1101 * callback if the flag IEEE_SOFTMAC_SINGLE_QUEUE is
1102 * not set. As some cards may have different HW queues that
1103 * one might want to use for data and management frames
1104 * the option to have two callbacks might be useful.
1105 * This function can't sleep.
1106 */
1107 int (*softmac_hard_start_xmit)(struct sk_buff *skb,
1108 struct net_device *dev);
1109
1110 /* used instead of hard_start_xmit (not softmac_hard_start_xmit)
1111 * if the IEEE_SOFTMAC_TX_QUEUE feature is used to TX data
1112	 * frames. If the option IEEE_SOFTMAC_SINGLE_QUEUE is also set,
1113	 * then management frames are also sent via this callback.
1114 * This function can't sleep.
1115 */
1116 void (*softmac_data_hard_start_xmit)(struct sk_buff *skb,
1117 struct net_device *dev,int rate);
1118
1119	/* stops the HW queue for DATA frames. Useful to avoid
1120	 * wasting time TXing data frames while we are reassociating.
1121	 * This function can sleep.
1122	 */
1123 void (*data_hard_stop)(struct net_device *dev);
1124
1125	/* This is the complement of data_hard_stop above. */
1126 void (*data_hard_resume)(struct net_device *dev);
1127
1128	/* ask the driver to retune the radio.
1129	 * This function can sleep; the driver should ensure
1130	 * the radio has been switched before returning.
1131	 */
1132 void (*set_chan)(struct net_device *dev,short ch);
1133
1134	/* These are not used if the ieee stack takes care of
1135	 * scanning (IEEE_SOFTMAC_SCAN feature set).
1136	 * In this case only set_chan is used.
1137	 *
1138	 * The synchronous version is similar to start_scan but
1139	 * does not return until all channels have been scanned.
1140	 * It is called in user context and should sleep; it is
1141	 * invoked from a work_queue when switching to ad-hoc mode
1142	 * or on behalf of an iwlist scan when the card is associated
1143	 * and the root user asks for a scan.
1144	 * The function stop_scan should stop both the synchronous and
1145	 * the background scanning and can sleep.
1146	 * The function start_scan should initiate the background
1147	 * scanning and can't sleep.
1148	 */
1149 void (*scan_syncro)(struct net_device *dev);
1150 void (*start_scan)(struct net_device *dev);
1151 void (*stop_scan)(struct net_device *dev);
1152
1153	/* indicates to the driver that the link state has changed,
1154	 * for example that the card is associated now.
1155	 * The driver might be interested in this to apply RX filter
1156	 * rules or simply light the LINK LED.
1157	 */
1158 void (*link_change)(struct net_device *dev);
1159
1160	/* these two functions indicate to the HW when to start
1161	 * and stop sending beacons. This is used when
1162	 * IEEE_SOFTMAC_BEACONS is not set. For now,
1163	 * stop_send_beacons is NOT guaranteed to be called only
1164	 * after start_send_beacons.
1165	 */
1166 void (*start_send_beacons) (struct net_device *dev);
1167 void (*stop_send_beacons) (struct net_device *dev);
1168
1169 /* power save mode related */
1170 void (*sta_wake_up) (struct net_device *dev);
1171 void (*ps_request_tx_ack) (struct net_device *dev);
1172 void (*enter_sleep_state) (struct net_device *dev, u32 th, u32 tl);
1173 short (*ps_is_queue_empty) (struct net_device *dev);
1174
1175 /* QoS related */
1176 //void (*wmm_param_update) (struct net_device *dev, u8 *ac_param);
1177 //void (*wmm_param_update) (struct ieee80211_device *ieee);
1178
1179 /* This must be the last item so that it points to the data
1180 * allocated beyond this structure by alloc_ieee80211 */
1181 u8 priv[0];
1182};
1183
1184#define IEEE_A (1<<0)
1185#define IEEE_B (1<<1)
1186#define IEEE_G (1<<2)
1187#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
1188
1189/* Generate an 802.11 header */
1190
1191/* Uses the channel change callback directly
1192 * instead of [start/stop] scan callbacks
1193 */
1194#define IEEE_SOFTMAC_SCAN (1<<2)
1195
1196/* Perform authentication and association handshake */
1197#define IEEE_SOFTMAC_ASSOCIATE (1<<3)
1198
1199/* Generate probe requests */
1200#define IEEE_SOFTMAC_PROBERQ (1<<4)
1201
1202/* Generate response to probe requests */
1203#define IEEE_SOFTMAC_PROBERS (1<<5)
1204
1205/* The ieee802.11 stack will manage the netif queue
1206 * wake/stop for the driver, taking care of 802.11
1207 * fragmentation. See softmac.c for details. */
1208#define IEEE_SOFTMAC_TX_QUEUE (1<<7)
1209
1210/* Uses only the softmac_data_hard_start_xmit
1211 * even for TX management frames.
1212 */
1213#define IEEE_SOFTMAC_SINGLE_QUEUE (1<<8)
1214
1215/* Generate beacons. The stack will enqueue beacons
1216 * to the card
1217 */
1218#define IEEE_SOFTMAC_BEACONS (1<<6)
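/*
 * Illustrative sketch (hypothetical driver init, not from this header):
 * a card whose firmware does none of the MLME work could enable the
 * whole software MAC by OR-ing the feature bits above into
 * softmac_features.  The exact combination is device specific.
 */
static inline void example_enable_full_softmac(struct ieee80211_device *ieee)
{
	ieee->softmac_features = IEEE_SOFTMAC_SCAN | IEEE_SOFTMAC_ASSOCIATE |
				 IEEE_SOFTMAC_PROBERQ | IEEE_SOFTMAC_PROBERS |
				 IEEE_SOFTMAC_TX_QUEUE |
				 IEEE_SOFTMAC_SINGLE_QUEUE |
				 IEEE_SOFTMAC_BEACONS;
}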
1219
1220
1221
1222static inline void *ieee80211_priv(struct net_device *dev)
1223{
1224 return ((struct ieee80211_device *)netdev_priv(dev))->priv;
1225}
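/*
 * Illustrative sketch (hypothetical driver code, not from this header):
 * priv[0] is a zero-length trailing member, so alloc_ieee80211()
 * (declared further below) places the driver's private data right
 * behind struct ieee80211_device and ieee80211_priv() returns it.
 * "example_drv_priv" is a made-up private structure.
 */
struct example_drv_priv {
	int irq;
};

static inline struct example_drv_priv *example_drv_priv(struct net_device *dev)
{
	return ieee80211_priv(dev);	/* data just past ieee80211_device */
}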
1226
1227static inline int ieee80211_is_empty_essid(const char *essid, int essid_len)
1228{
1229 /* Single white space is for Linksys APs */
1230 if (essid_len == 1 && essid[0] == ' ')
1231 return 1;
1232
1233 /* Otherwise, if the entire essid is 0, we assume it is hidden */
1234 while (essid_len) {
1235 essid_len--;
1236 if (essid[essid_len] != '\0')
1237 return 0;
1238 }
1239
1240 return 1;
1241}
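/*
 * Illustrative usage (hypothetical, not from the original header): a scan
 * result whose SSID is all zeroes or a single space is treated as hidden
 * before being reported to user space.
 */
static inline int example_network_is_hidden(const struct ieee80211_network *net)
{
	return ieee80211_is_empty_essid((const char *)net->ssid, net->ssid_len);
}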
1242
1243static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee,
1244 int mode)
1245{
1246	/*
1247	 * It is possible for both access points and our device to support
1248	 * combinations of modes, so as long as there is one valid combination
1249	 * of AP/device supported modes, return success.
1250	 *
1251	 */
1252 if ((mode & IEEE_A) &&
1253 (ieee->modulation & IEEE80211_OFDM_MODULATION) &&
1254 (ieee->freq_band & IEEE80211_52GHZ_BAND))
1255 return 1;
1256
1257 if ((mode & IEEE_G) &&
1258 (ieee->modulation & IEEE80211_OFDM_MODULATION) &&
1259 (ieee->freq_band & IEEE80211_24GHZ_BAND))
1260 return 1;
1261
1262 if ((mode & IEEE_B) &&
1263 (ieee->modulation & IEEE80211_CCK_MODULATION) &&
1264 (ieee->freq_band & IEEE80211_24GHZ_BAND))
1265 return 1;
1266
1267 return 0;
1268}
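/*
 * Illustrative usage (hypothetical, not from the original header): a
 * 2.4 GHz CCK+OFDM device accepts IEEE_B and IEEE_G here but rejects
 * IEEE_A, since it lacks the 5.2 GHz band.
 */
static inline int example_can_use_mode(struct ieee80211_device *ieee, int mode)
{
	return ieee80211_is_valid_mode(ieee, mode & IEEE_MODE_MASK);
}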
1269
1270static inline int ieee80211_get_hdrlen(u16 fc)
1271{
1272 int hdrlen = 24;
1273
1274 switch (WLAN_FC_GET_TYPE(fc)) {
1275 case IEEE80211_FTYPE_DATA:
1276 if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
1277 hdrlen = 30; /* Addr4 */
1278 if(IEEE80211_QOS_HAS_SEQ(fc))
1279 hdrlen += 2; /* QOS ctrl*/
1280 break;
1281 case IEEE80211_FTYPE_CTL:
1282 switch (WLAN_FC_GET_STYPE(fc)) {
1283 case IEEE80211_STYPE_CTS:
1284 case IEEE80211_STYPE_ACK:
1285 hdrlen = 10;
1286 break;
1287 default:
1288 hdrlen = 16;
1289 break;
1290 }
1291 break;
1292 }
1293
1294 return hdrlen;
1295}
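/*
 * Worked examples (not part of the original header) of what the helper
 * above returns:
 *   plain data frame                    -> 24 bytes
 *   QoS data frame                      -> 26 bytes (24 + 2 QoS control)
 *   WDS (ToDS|FromDS) QoS data frame    -> 32 bytes (30 + 2 QoS control)
 *   CTS or ACK control frame            -> 10 bytes
 *   other control frames (e.g. RTS)     -> 16 bytes
 *   management frames                   -> 24 bytes (default case)
 */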
1296
1297
1298
1299/* ieee80211.c */
1300extern void free_ieee80211(struct net_device *dev);
1301extern struct net_device *alloc_ieee80211(int sizeof_priv);
1302
1303extern int ieee80211_set_encryption(struct ieee80211_device *ieee);
1304
1305/* ieee80211_tx.c */
1306
1307extern int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
1308 struct sk_buff *frag, int hdr_len);
1309
1310extern int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev);
1311extern void ieee80211_txb_free(struct ieee80211_txb *);
1312
1313
1314/* ieee80211_rx.c */
1315extern int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
1316 struct ieee80211_rx_stats *rx_stats);
1317extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1318 struct ieee80211_hdr_4addr *header,
1319 struct ieee80211_rx_stats *stats);
1320
1321/* ieee80211_wx.c */
1322extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
1323 struct iw_request_info *info,
1324 union iwreq_data *wrqu, char *key);
1325extern int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
1326 struct iw_request_info *info,
1327 union iwreq_data *wrqu, char *key);
1328extern int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
1329 struct iw_request_info *info,
1330 union iwreq_data *wrqu, char *key);
1331extern int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
1332 struct iw_request_info *info,
1333 union iwreq_data *wrqu, char *extra);
1334int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
1335 struct iw_request_info *info,
1336 struct iw_param *data, char *extra);
1337int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
1338 struct iw_request_info *info,
1339 union iwreq_data *wrqu, char *extra);
1340
1341int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len);
1342/* ieee80211_softmac.c */
1343extern short ieee80211_is_54g(const struct ieee80211_network *net);
1344extern short ieee80211_is_shortslot(const struct ieee80211_network *net);
1345extern int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
1346 struct sk_buff *skb,
1347 struct ieee80211_rx_stats *rx_stats,
1348 u16 type, u16 stype);
1349extern void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
1350 struct ieee80211_network *net);
1351
1352extern void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
1353 struct ieee80211_device *ieee);
1354extern void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee);
1355extern void ieee80211_start_bss(struct ieee80211_device *ieee);
1356extern void ieee80211_start_master_bss(struct ieee80211_device *ieee);
1357extern void ieee80211_start_ibss(struct ieee80211_device *ieee);
1358extern void ieee80211_softmac_init(struct ieee80211_device *ieee);
1359extern void ieee80211_softmac_free(struct ieee80211_device *ieee);
1360extern void ieee80211_associate_abort(struct ieee80211_device *ieee);
1361extern void ieee80211_disassociate(struct ieee80211_device *ieee);
1362extern void ieee80211_stop_scan(struct ieee80211_device *ieee);
1363extern void ieee80211_start_scan_syncro(struct ieee80211_device *ieee);
1364extern void ieee80211_check_all_nets(struct ieee80211_device *ieee);
1365extern void ieee80211_start_protocol(struct ieee80211_device *ieee);
1366extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
1367extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
1368extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
1369extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
1370extern void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee);
1371extern void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee);
1372extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
1373extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
1374extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
1375extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
1376 struct iw_point *p);
1377extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
1378extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
1379extern void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta,
1380 u8 asRsn);
1381extern void ieee80211_rtl_start_scan(struct ieee80211_device *ieee);
1382
1383//Add for RF power on power off by lizhaoming 080512
1384extern void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta,
1385 u8 asRsn);
1386
1387/* ieee80211_crypt_ccmp&tkip&wep.c */
1388extern void ieee80211_tkip_null(void);
1389extern void ieee80211_wep_null(void);
1390extern void ieee80211_ccmp_null(void);
1391/* ieee80211_softmac_wx.c */
1392
1393extern int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
1394 struct iw_request_info *info,
1395 union iwreq_data *wrqu, char *ext);
1396
1397extern int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
1398 struct iw_request_info *info,
1399 union iwreq_data *awrq,
1400 char *extra);
1401
1402extern int ieee80211_wx_get_essid(struct ieee80211_device *ieee,
1403 struct iw_request_info *a,
1404 union iwreq_data *wrqu, char *b);
1405
1406extern int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
1407 struct iw_request_info *info,
1408 union iwreq_data *wrqu, char *extra);
1409
1410extern int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
1411 struct iw_request_info *info,
1412 union iwreq_data *wrqu, char *extra);
1413
1414extern int ieee80211_wx_set_mode(struct ieee80211_device *ieee,
1415 struct iw_request_info *a,
1416 union iwreq_data *wrqu, char *b);
1417
1418extern int ieee80211_wx_set_scan(struct ieee80211_device *ieee,
1419 struct iw_request_info *a,
1420 union iwreq_data *wrqu, char *b);
1421
1422extern int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
1423 struct iw_request_info *a,
1424 union iwreq_data *wrqu, char *extra);
1425
1426extern int ieee80211_wx_get_mode(struct ieee80211_device *ieee,
1427 struct iw_request_info *a,
1428 union iwreq_data *wrqu, char *b);
1429
1430extern int ieee80211_wx_set_freq(struct ieee80211_device *ieee,
1431 struct iw_request_info *a,
1432 union iwreq_data *wrqu, char *b);
1433
1434extern int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
1435 struct iw_request_info *a,
1436 union iwreq_data *wrqu, char *b);
1437
1438extern void ieee80211_wx_sync_scan_wq(struct work_struct *work);
1439
1440extern int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
1441 struct iw_request_info *info,
1442 union iwreq_data *wrqu, char *extra);
1443
1444extern int ieee80211_wx_get_name(struct ieee80211_device *ieee,
1445 struct iw_request_info *info,
1446 union iwreq_data *wrqu, char *extra);
1447
1448extern int ieee80211_wx_set_power(struct ieee80211_device *ieee,
1449 struct iw_request_info *info,
1450 union iwreq_data *wrqu, char *extra);
1451
1452extern int ieee80211_wx_get_power(struct ieee80211_device *ieee,
1453 struct iw_request_info *info,
1454 union iwreq_data *wrqu, char *extra);
1455
1456extern void ieee80211_softmac_ips_scan_syncro(struct ieee80211_device *ieee);
1457
1458extern void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee,
1459 short pwr);
1460
1461extern const long ieee80211_wlan_frequencies[];
1462
1463extern inline void ieee80211_increment_scans(struct ieee80211_device *ieee)
1464{
1465 ieee->scans++;
1466}
1467
1468extern inline int ieee80211_get_scans(struct ieee80211_device *ieee)
1469{
1470 return ieee->scans;
1471}
1472
1473static inline const char *escape_essid(const char *essid, u8 essid_len) {
1474 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
1475 const char *s = essid;
1476 char *d = escaped;
1477
1478 if (ieee80211_is_empty_essid(essid, essid_len)) {
1479 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
1480 return escaped;
1481 }
1482
1483 essid_len = min(essid_len, (u8)IW_ESSID_MAX_SIZE);
1484 while (essid_len--) {
1485 if (*s == '\0') {
1486 *d++ = '\\';
1487 *d++ = '0';
1488 s++;
1489 } else {
1490 *d++ = *s++;
1491 }
1492 }
1493 *d = '\0';
1494 return escaped;
1495}
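/*
 * Illustrative usage (hypothetical, not from the original header).  Note
 * that escape_essid() returns a single static buffer, so the result is
 * only meant for immediate consumption, e.g. inside one printk().
 */
static inline void example_log_essid(const struct ieee80211_network *net)
{
	printk(KERN_INFO "ieee80211: ESSID '%s'\n",
	       escape_essid((const char *)net->ssid, net->ssid_len));
}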
1496#endif /* IEEE80211_H */
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
deleted file mode 100644
index 101f0c0cdb0a..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c
+++ /dev/null
@@ -1,240 +0,0 @@
1/*
2 * Host AP crypto routines
3 *
4 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Portions Copyright (C) 2004, Intel Corporation <jketreno@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. See README and COPYING for
10 * more details.
11 *
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16//#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21
22#include "ieee80211.h"
23
24MODULE_AUTHOR("Jouni Malinen");
25MODULE_DESCRIPTION("HostAP crypto");
26MODULE_LICENSE("GPL");
27
28struct ieee80211_crypto_alg {
29 struct list_head list;
30 struct ieee80211_crypto_ops *ops;
31};
32
33
34struct ieee80211_crypto {
35 struct list_head algs;
36 spinlock_t lock;
37};
38
39static struct ieee80211_crypto *hcrypt;
40
41void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force)
42{
43 struct list_head *ptr, *n;
44 struct ieee80211_crypt_data *entry;
45
46 for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
47 ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
48 entry = list_entry(ptr, struct ieee80211_crypt_data, list);
49
50 if (atomic_read(&entry->refcnt) != 0 && !force)
51 continue;
52
53 list_del(ptr);
54
55 if (entry->ops)
56 entry->ops->deinit(entry->priv);
57 kfree(entry);
58 }
59}
60
61void ieee80211_crypt_deinit_handler(unsigned long data)
62{
63 struct ieee80211_device *ieee = (struct ieee80211_device *)data;
64 unsigned long flags;
65
66 spin_lock_irqsave(&ieee->lock, flags);
67 ieee80211_crypt_deinit_entries(ieee, 0);
68 if (!list_empty(&ieee->crypt_deinit_list)) {
69 pr_debug("entries remaining in delayed crypt deletion list\n");
70 ieee->crypt_deinit_timer.expires = jiffies + HZ;
71 add_timer(&ieee->crypt_deinit_timer);
72 }
73 spin_unlock_irqrestore(&ieee->lock, flags);
74
75}
76
77void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
78 struct ieee80211_crypt_data **crypt)
79{
80 struct ieee80211_crypt_data *tmp;
81 unsigned long flags;
82
83 if (*crypt == NULL)
84 return;
85
86 tmp = *crypt;
87 *crypt = NULL;
88
89 /* must not run ops->deinit() while there may be pending encrypt or
90 * decrypt operations. Use a list of delayed deinits to avoid needing
91 * locking. */
92
93 spin_lock_irqsave(&ieee->lock, flags);
94 list_add(&tmp->list, &ieee->crypt_deinit_list);
95 if (!timer_pending(&ieee->crypt_deinit_timer)) {
96 ieee->crypt_deinit_timer.expires = jiffies + HZ;
97 add_timer(&ieee->crypt_deinit_timer);
98 }
99 spin_unlock_irqrestore(&ieee->lock, flags);
100}
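/*
 * Illustrative sketch (hypothetical caller, not from this file): the
 * intended use when a key slot is replaced.  The old context is queued
 * for delayed deinit instead of being freed immediately, because an
 * encrypt or decrypt operation may still hold a reference to it.
 */
static void example_replace_key(struct ieee80211_device *ieee, int idx,
				struct ieee80211_crypt_data *new_crypt)
{
	ieee80211_crypt_delayed_deinit(ieee, &ieee->crypt[idx]);
	ieee->crypt[idx] = new_crypt;
}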
101
102int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
103{
104 unsigned long flags;
105 struct ieee80211_crypto_alg *alg;
106
107 if (hcrypt == NULL)
108 return -1;
109
110 alg = kzalloc(sizeof(*alg), GFP_KERNEL);
111 if (alg == NULL)
112 return -ENOMEM;
113
114 alg->ops = ops;
115
116 spin_lock_irqsave(&hcrypt->lock, flags);
117 list_add(&alg->list, &hcrypt->algs);
118 spin_unlock_irqrestore(&hcrypt->lock, flags);
119
120 pr_debug("registered algorithm '%s'\n", ops->name);
121
122 return 0;
123}
124
125int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops)
126{
127 unsigned long flags;
128 struct list_head *ptr;
129 struct ieee80211_crypto_alg *del_alg = NULL;
130
131 if (hcrypt == NULL)
132 return -1;
133
134 spin_lock_irqsave(&hcrypt->lock, flags);
135 for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
136 struct ieee80211_crypto_alg *alg =
137 (struct ieee80211_crypto_alg *) ptr;
138 if (alg->ops == ops) {
139 list_del(&alg->list);
140 del_alg = alg;
141 break;
142 }
143 }
144 spin_unlock_irqrestore(&hcrypt->lock, flags);
145
146 if (del_alg) {
147 pr_debug("unregistered algorithm '%s'\n", ops->name);
148 kfree(del_alg);
149 }
150
151 return del_alg ? 0 : -1;
152}
153
154
155struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name)
156{
157 unsigned long flags;
158 struct list_head *ptr;
159 struct ieee80211_crypto_alg *found_alg = NULL;
160
161 if (hcrypt == NULL)
162 return NULL;
163
164 spin_lock_irqsave(&hcrypt->lock, flags);
165 for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
166 struct ieee80211_crypto_alg *alg =
167 (struct ieee80211_crypto_alg *) ptr;
168 if (strcmp(alg->ops->name, name) == 0) {
169 found_alg = alg;
170 break;
171 }
172 }
173 spin_unlock_irqrestore(&hcrypt->lock, flags);
174
175 if (found_alg)
176 return found_alg->ops;
177 else
178 return NULL;
179}
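/*
 * Illustrative sketch (hypothetical caller, not from this file): how a
 * crypt[] slot is typically populated.  The ops are looked up by name
 * ("WEP", "TKIP", "CCMP", or the "NULL" fallback registered below), a
 * per-key context is created with ops->init(), and the key itself is
 * later programmed through ops->set_key().
 */
static struct ieee80211_crypt_data *example_new_crypt(const char *alg,
						      int keyidx)
{
	struct ieee80211_crypto_ops *ops = ieee80211_get_crypto_ops(alg);
	struct ieee80211_crypt_data *crypt;

	if (!ops)
		return NULL;

	crypt = kzalloc(sizeof(*crypt), GFP_KERNEL);
	if (!crypt)
		return NULL;

	crypt->ops = ops;
	crypt->priv = ops->init(keyidx);
	if (!crypt->priv) {
		kfree(crypt);
		return NULL;
	}
	return crypt;
}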
180
181
182static void *ieee80211_crypt_null_init(int keyidx) { return (void *) 1; }
183static void ieee80211_crypt_null_deinit(void *priv) {}
184
185static struct ieee80211_crypto_ops ieee80211_crypt_null = {
186 .name = "NULL",
187 .init = ieee80211_crypt_null_init,
188 .deinit = ieee80211_crypt_null_deinit,
189 .encrypt_mpdu = NULL,
190 .decrypt_mpdu = NULL,
191 .encrypt_msdu = NULL,
192 .decrypt_msdu = NULL,
193 .set_key = NULL,
194 .get_key = NULL,
195 .extra_prefix_len = 0,
196 .extra_postfix_len = 0,
197 .owner = THIS_MODULE,
198};
199
200
201int ieee80211_crypto_init(void)
202{
203 int ret = -ENOMEM;
204
205 hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
206 if (!hcrypt)
207 goto out;
208
209 INIT_LIST_HEAD(&hcrypt->algs);
210 spin_lock_init(&hcrypt->lock);
211
212 ret = ieee80211_register_crypto_ops(&ieee80211_crypt_null);
213 if (ret < 0) {
214 kfree(hcrypt);
215 hcrypt = NULL;
216 }
217out:
218 return ret;
219}
220
221
222void ieee80211_crypto_deinit(void)
223{
224 struct list_head *ptr, *n;
225 struct ieee80211_crypto_alg *alg = NULL;
226
227 if (hcrypt == NULL)
228 return;
229
230 list_for_each_safe(ptr, n, &hcrypt->algs) {
231 alg = list_entry(ptr, struct ieee80211_crypto_alg, list);
232 if (alg) {
233 list_del(ptr);
234 pr_debug("unregistered algorithm '%s' (deinit)\n",
235 alg->ops->name);
236 kfree(alg);
237 }
238 }
239 kfree(hcrypt);
240}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.h b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.h
deleted file mode 100644
index 0b4ea431982d..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.h
+++ /dev/null
@@ -1,86 +0,0 @@
1/*
2 * Original code based on Host AP (software wireless LAN access point) driver
3 * for Intersil Prism2/2.5/3.
4 *
5 * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
6 * <jkmaline@cc.hut.fi>
7 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
8 *
9 * Adaption to a generic IEEE 802.11 stack by James Ketrenos
10 * <jketreno@linux.intel.com>
11 *
12 * Copyright (c) 2004, Intel Corporation
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation. See README and COPYING for
17 * more details.
18 */
19
20/*
21 * This file defines the interface to the ieee80211 crypto module.
22 */
23#ifndef IEEE80211_CRYPT_H
24#define IEEE80211_CRYPT_H
25
26#include <linux/skbuff.h>
27
28struct ieee80211_crypto_ops {
29 const char *name;
30
31 /* init new crypto context (e.g., allocate private data space,
32 * select IV, etc.); returns NULL on failure or pointer to allocated
33 * private data on success */
34 void * (*init)(int keyidx);
35
36 /* deinitialize crypto context and free allocated private data */
37 void (*deinit)(void *priv);
38
39 /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
40 * value from decrypt_mpdu is passed as the keyidx value for
41 * decrypt_msdu. skb must have enough head and tail room for the
42 * encryption; if not, error will be returned; these functions are
43 * called for all MPDUs (i.e., fragments).
44 */
45 int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
46 int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
47
48 /* These functions are called for full MSDUs, i.e. full frames.
49 * These can be NULL if full MSDU operations are not needed. */
50 int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
51 int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
52 void *priv);
53
54 int (*set_key)(void *key, int len, u8 *seq, void *priv);
55 int (*get_key)(void *key, int len, u8 *seq, void *priv);
56
57 /* procfs handler for printing out key information and possible
58 * statistics */
59 char * (*print_stats)(char *p, void *priv);
60
61 /* maximum number of bytes added by encryption; encrypt buf is
62 * allocated with extra_prefix_len bytes, copy of in_buf, and
63 * extra_postfix_len; encrypt need not use all this space, but
64 * the result must start at the beginning of the buffer and correct
65 * length must be returned */
66 int extra_prefix_len, extra_postfix_len;
67
68 struct module *owner;
69};
70
71struct ieee80211_crypt_data {
72 struct list_head list; /* delayed deletion list */
73 struct ieee80211_crypto_ops *ops;
74 void *priv;
75 atomic_t refcnt;
76};
77
78int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
79int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
80struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name);
81void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int);
82void ieee80211_crypt_deinit_handler(unsigned long);
83void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
84 struct ieee80211_crypt_data **crypt);
85
86#endif
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
deleted file mode 100644
index 4fe253818630..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_ccmp.c
+++ /dev/null
@@ -1,455 +0,0 @@
1/*
2 * Host AP crypt: host-based CCMP encryption implementation for Host AP driver
3 *
4 * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. See README and COPYING for
9 * more details.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/random.h>
17#include <linux/skbuff.h>
18#include <linux/netdevice.h>
19#include <linux/if_ether.h>
20#include <linux/if_arp.h>
21#include <linux/string.h>
22#include <linux/wireless.h>
23
24#include "ieee80211.h"
25
26#include <linux/crypto.h>
27#include <linux/scatterlist.h>
28
29MODULE_AUTHOR("Jouni Malinen");
30MODULE_DESCRIPTION("Host AP crypt: CCMP");
31MODULE_LICENSE("GPL");
32
33
34#define AES_BLOCK_LEN 16
35#define CCMP_HDR_LEN 8
36#define CCMP_MIC_LEN 8
37#define CCMP_TK_LEN 16
38#define CCMP_PN_LEN 6
39
40struct ieee80211_ccmp_data {
41 u8 key[CCMP_TK_LEN];
42 int key_set;
43
44 u8 tx_pn[CCMP_PN_LEN];
45 u8 rx_pn[CCMP_PN_LEN];
46
47 u32 dot11RSNAStatsCCMPFormatErrors;
48 u32 dot11RSNAStatsCCMPReplays;
49 u32 dot11RSNAStatsCCMPDecryptErrors;
50
51 int key_idx;
52
53 struct crypto_tfm *tfm;
54
55 /* scratch buffers for virt_to_page() (crypto API) */
56 u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
57 tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN];
58 u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
59};
60
61static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
62 const u8 pt[16], u8 ct[16])
63{
64 crypto_cipher_encrypt_one((void *)tfm, ct, pt);
65}
66
67static void *ieee80211_ccmp_init(int key_idx)
68{
69 struct ieee80211_ccmp_data *priv;
70
71 priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
72 if (priv == NULL)
73 goto fail;
74 priv->key_idx = key_idx;
75
76 priv->tfm = (void *)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
77 if (IS_ERR(priv->tfm)) {
78 pr_debug("could not allocate crypto API aes\n");
79 priv->tfm = NULL;
80 goto fail;
81 }
82
83 return priv;
84
85fail:
86 if (priv) {
87 if (priv->tfm)
88 crypto_free_cipher((void *)priv->tfm);
89 kfree(priv);
90 }
91
92 return NULL;
93}
94
95
96static void ieee80211_ccmp_deinit(void *priv)
97{
98 struct ieee80211_ccmp_data *_priv = priv;
99
100 if (_priv && _priv->tfm)
101 crypto_free_cipher((void *)_priv->tfm);
102 kfree(priv);
103}
104
105
106static inline void xor_block(u8 *b, u8 *a, size_t len)
107{
108 int i;
109 for (i = 0; i < len; i++)
110 b[i] ^= a[i];
111}
112
113static void ccmp_init_blocks(struct crypto_tfm *tfm,
114 struct ieee80211_hdr_4addr *hdr,
115 u8 *pn, size_t dlen, u8 *b0, u8 *auth,
116 u8 *s0)
117{
118 u8 *pos, qc = 0;
119 size_t aad_len;
120 u16 fc;
121 int a4_included, qc_included;
122 u8 aad[2 * AES_BLOCK_LEN];
123
124 fc = le16_to_cpu(hdr->frame_ctl);
125 a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
126 (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS));
127 /*
128 qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
129 (WLAN_FC_GET_STYPE(fc) & 0x08));
130 */
131 qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
132 (WLAN_FC_GET_STYPE(fc) & 0x80));
133 aad_len = 22;
134 if (a4_included)
135 aad_len += 6;
136 if (qc_included) {
137 pos = (u8 *) &hdr->addr4;
138 if (a4_included)
139 pos += 6;
140 qc = *pos & 0x0f;
141 aad_len += 2;
142 }
143 /* CCM Initial Block:
144 * Flag (Include authentication header, M=3 (8-octet MIC),
145 * L=1 (2-octet Dlen))
146 * Nonce: 0x00 | A2 | PN
147 * Dlen */
148 b0[0] = 0x59;
149 b0[1] = qc;
150 memcpy(b0 + 2, hdr->addr2, ETH_ALEN);
151 memcpy(b0 + 8, pn, CCMP_PN_LEN);
152 b0[14] = (dlen >> 8) & 0xff;
153 b0[15] = dlen & 0xff;
154
155 /* AAD:
156 * FC with bits 4..6 and 11..13 masked to zero; 14 is always one
157 * A1 | A2 | A3
158 * SC with bits 4..15 (seq#) masked to zero
159 * A4 (if present)
160 * QC (if present)
161 */
162 pos = (u8 *) hdr;
163 aad[0] = 0; /* aad_len >> 8 */
164 aad[1] = aad_len & 0xff;
165 aad[2] = pos[0] & 0x8f;
166 aad[3] = pos[1] & 0xc7;
167 memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN);
168 pos = (u8 *) &hdr->seq_ctl;
169 aad[22] = pos[0] & 0x0f;
170 aad[23] = 0; /* all bits masked */
171 memset(aad + 24, 0, 8);
172 if (a4_included)
173 memcpy(aad + 24, hdr->addr4, ETH_ALEN);
174 if (qc_included) {
175 aad[a4_included ? 30 : 24] = qc;
176 /* rest of QC masked */
177 }
178
179 /* Start with the first block and AAD */
180 ieee80211_ccmp_aes_encrypt(tfm, b0, auth);
181 xor_block(auth, aad, AES_BLOCK_LEN);
182 ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
183 xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
184 ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
185 b0[0] &= 0x07;
186 b0[14] = b0[15] = 0;
187 ieee80211_ccmp_aes_encrypt(tfm, b0, s0);
188}
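/*
 * Illustrative sketch (not part of the original file): after
 * ccmp_init_blocks(), `auth` holds the running CBC-MAC over B0 and the
 * AAD, and `s0` is E_K(A0), the counter-0 keystream block.  The callers
 * below finish CCM by CBC-MACing the payload, CTR-encrypting it with
 * counters 1..n, and masking the MIC as shown here.
 */
static inline void example_ccmp_finish_mic(u8 *mic, const u8 *cbc_mac,
					   const u8 *s0)
{
	int i;

	for (i = 0; i < CCMP_MIC_LEN; i++)
		mic[i] = cbc_mac[i] ^ s0[i];	/* MIC = CBC-MAC xor S0 */
}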
189
190static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
191{
192 struct ieee80211_ccmp_data *key = priv;
193 int data_len, i;
194 u8 *pos;
195 struct ieee80211_hdr_4addr *hdr;
196 int blocks, last, len;
197 u8 *mic;
198 u8 *b0 = key->tx_b0;
199 u8 *b = key->tx_b;
200 u8 *e = key->tx_e;
201 u8 *s0 = key->tx_s0;
202
203 if (skb_headroom(skb) < CCMP_HDR_LEN ||
204 skb_tailroom(skb) < CCMP_MIC_LEN ||
205 skb->len < hdr_len)
206 return -1;
207
208 data_len = skb->len - hdr_len;
209 pos = skb_push(skb, CCMP_HDR_LEN);
210 memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
211 pos += hdr_len;
212
213 i = CCMP_PN_LEN - 1;
214 while (i >= 0) {
215 key->tx_pn[i]++;
216 if (key->tx_pn[i] != 0)
217 break;
218 i--;
219 }
220
221 *pos++ = key->tx_pn[5];
222 *pos++ = key->tx_pn[4];
223 *pos++ = 0;
224 *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */;
225 *pos++ = key->tx_pn[3];
226 *pos++ = key->tx_pn[2];
227 *pos++ = key->tx_pn[1];
228 *pos++ = key->tx_pn[0];
229
230 hdr = (struct ieee80211_hdr_4addr *)skb->data;
231 mic = skb_put(skb, CCMP_MIC_LEN);
232
233 ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
234
235 blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
236 last = data_len % AES_BLOCK_LEN;
237
238 for (i = 1; i <= blocks; i++) {
239 len = (i == blocks && last) ? last : AES_BLOCK_LEN;
240 /* Authentication */
241 xor_block(b, pos, len);
242 ieee80211_ccmp_aes_encrypt(key->tfm, b, b);
243 /* Encryption, with counter */
244 b0[14] = (i >> 8) & 0xff;
245 b0[15] = i & 0xff;
246 ieee80211_ccmp_aes_encrypt(key->tfm, b0, e);
247 xor_block(pos, e, len);
248 pos += len;
249 }
250
251 for (i = 0; i < CCMP_MIC_LEN; i++)
252 mic[i] = b[i] ^ s0[i];
253
254 return 0;
255}
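/*
 * Illustrative sketch (not part of the original file): the 8-byte CCMP
 * header that ieee80211_ccmp_encrypt() writes by hand above, shown as a
 * helper.  key->tx_pn[] stores the packet number big-endian, so pn[5] is
 * the least significant byte.
 */
static inline void example_ccmp_build_hdr(u8 *hdr, const u8 *pn, int key_idx)
{
	hdr[0] = pn[5];				/* PN0 */
	hdr[1] = pn[4];				/* PN1 */
	hdr[2] = 0;				/* reserved */
	hdr[3] = (key_idx << 6) | (1 << 5);	/* key id | Ext IV */
	hdr[4] = pn[3];				/* PN2 */
	hdr[5] = pn[2];				/* PN3 */
	hdr[6] = pn[1];				/* PN4 */
	hdr[7] = pn[0];				/* PN5 */
}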
256
257
258static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
259{
260 struct ieee80211_ccmp_data *key = priv;
261 u8 keyidx, *pos;
262 struct ieee80211_hdr_4addr *hdr;
263 u8 pn[6];
264 size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN;
265 u8 *mic = skb->data + skb->len - CCMP_MIC_LEN;
266 u8 *b0 = key->rx_b0;
267 u8 *b = key->rx_b;
268 u8 *a = key->rx_a;
269 int i, blocks, last, len;
270
271 if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
272 key->dot11RSNAStatsCCMPFormatErrors++;
273 return -1;
274 }
275
276 hdr = (struct ieee80211_hdr_4addr *)skb->data;
277 pos = skb->data + hdr_len;
278 keyidx = pos[3];
279 if (!(keyidx & (1 << 5))) {
280 if (net_ratelimit()) {
281 pr_debug("received packet without ExtIV flag from %pM\n",
282 hdr->addr2);
283 }
284 key->dot11RSNAStatsCCMPFormatErrors++;
285 return -2;
286 }
287 keyidx >>= 6;
288 if (key->key_idx != keyidx) {
289 pr_debug("RX tkey->key_idx=%d frame keyidx=%d priv=%p\n",
290 key->key_idx, keyidx, priv);
291 return -6;
292 }
293 if (!key->key_set) {
294 if (net_ratelimit()) {
295 pr_debug("received packet from %pM with keyid=%d that does not have a configured key\n",
296 hdr->addr2, keyidx);
297 }
298 return -3;
299 }
300
301 pn[0] = pos[7];
302 pn[1] = pos[6];
303 pn[2] = pos[5];
304 pn[3] = pos[4];
305 pn[4] = pos[1];
306 pn[5] = pos[0];
307 pos += 8;
308
309 if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) {
310 if (net_ratelimit()) {
311 pr_debug("replay detected: STA=%pM previous PN %pm received PN %pm\n",
312 hdr->addr2, key->rx_pn, pn);
313 }
314 key->dot11RSNAStatsCCMPReplays++;
315 return -4;
316 }
317
318 ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
319 xor_block(mic, b, CCMP_MIC_LEN);
320
321 blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
322 last = data_len % AES_BLOCK_LEN;
323
324 for (i = 1; i <= blocks; i++) {
325 len = (i == blocks && last) ? last : AES_BLOCK_LEN;
326 /* Decrypt, with counter */
327 b0[14] = (i >> 8) & 0xff;
328 b0[15] = i & 0xff;
329 ieee80211_ccmp_aes_encrypt(key->tfm, b0, b);
330 xor_block(pos, b, len);
331 /* Authentication */
332 xor_block(a, pos, len);
333 ieee80211_ccmp_aes_encrypt(key->tfm, a, a);
334 pos += len;
335 }
336
337 if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
338 if (net_ratelimit())
339 pr_debug("decrypt failed: STA=%pM\n", hdr->addr2);
340
341 key->dot11RSNAStatsCCMPDecryptErrors++;
342 return -5;
343 }
344
345 memcpy(key->rx_pn, pn, CCMP_PN_LEN);
346
347 /* Remove hdr and MIC */
348 memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
349 skb_pull(skb, CCMP_HDR_LEN);
350 skb_trim(skb, skb->len - CCMP_MIC_LEN);
351
352 return keyidx;
353}
354
355
356static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
357{
358 struct ieee80211_ccmp_data *data = priv;
359 int keyidx;
360 struct crypto_tfm *tfm = data->tfm;
361
362 keyidx = data->key_idx;
363 memset(data, 0, sizeof(*data));
364 data->key_idx = keyidx;
365 data->tfm = tfm;
366 if (len == CCMP_TK_LEN) {
367 memcpy(data->key, key, CCMP_TK_LEN);
368 data->key_set = 1;
369 if (seq) {
370 data->rx_pn[0] = seq[5];
371 data->rx_pn[1] = seq[4];
372 data->rx_pn[2] = seq[3];
373 data->rx_pn[3] = seq[2];
374 data->rx_pn[4] = seq[1];
375 data->rx_pn[5] = seq[0];
376 }
377 crypto_cipher_setkey((void *)data->tfm, data->key, CCMP_TK_LEN);
378 } else if (len == 0)
379 data->key_set = 0;
380 else
381 return -1;
382
383 return 0;
384}
385
386
387static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
388{
389 struct ieee80211_ccmp_data *data = priv;
390
391 if (len < CCMP_TK_LEN)
392 return -1;
393
394 if (!data->key_set)
395 return 0;
396 memcpy(key, data->key, CCMP_TK_LEN);
397
398 if (seq) {
399 seq[0] = data->tx_pn[5];
400 seq[1] = data->tx_pn[4];
401 seq[2] = data->tx_pn[3];
402 seq[3] = data->tx_pn[2];
403 seq[4] = data->tx_pn[1];
404 seq[5] = data->tx_pn[0];
405 }
406
407 return CCMP_TK_LEN;
408}
409
410
411static char *ieee80211_ccmp_print_stats(char *p, void *priv)
412{
413 struct ieee80211_ccmp_data *ccmp = priv;
414 p += sprintf(p,
415 "key[%d] alg=CCMP key_set=%d tx_pn=%pm rx_pn=%pm format_errors=%d replays=%d decrypt_errors=%d\n",
416 ccmp->key_idx, ccmp->key_set,
417 ccmp->tx_pn, ccmp->rx_pn,
418 ccmp->dot11RSNAStatsCCMPFormatErrors,
419 ccmp->dot11RSNAStatsCCMPReplays,
420 ccmp->dot11RSNAStatsCCMPDecryptErrors);
421
422 return p;
423}
424
425void ieee80211_ccmp_null(void)
426{
427 return;
428}
429static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
430 .name = "CCMP",
431 .init = ieee80211_ccmp_init,
432 .deinit = ieee80211_ccmp_deinit,
433 .encrypt_mpdu = ieee80211_ccmp_encrypt,
434 .decrypt_mpdu = ieee80211_ccmp_decrypt,
435 .encrypt_msdu = NULL,
436 .decrypt_msdu = NULL,
437 .set_key = ieee80211_ccmp_set_key,
438 .get_key = ieee80211_ccmp_get_key,
439 .print_stats = ieee80211_ccmp_print_stats,
440 .extra_prefix_len = CCMP_HDR_LEN,
441 .extra_postfix_len = CCMP_MIC_LEN,
442 .owner = THIS_MODULE,
443};
444
445
446int ieee80211_crypto_ccmp_init(void)
447{
448 return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp);
449}
450
451
452void ieee80211_crypto_ccmp_exit(void)
453{
454 ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp);
455}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
deleted file mode 100644
index 6c1acc5dfba7..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_tkip.c
+++ /dev/null
@@ -1,740 +0,0 @@
1/*
2 * Host AP crypt: host-based TKIP encryption implementation for Host AP driver
3 *
4 * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. See README and COPYING for
9 * more details.
10 */
11
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/random.h>
15#include <linux/skbuff.h>
16#include <linux/netdevice.h>
17#include <linux/if_ether.h>
18#include <linux/if_arp.h>
19#include <asm/string.h>
20
21#include "ieee80211.h"
22
23#include <linux/crypto.h>
24#include <linux/scatterlist.h>
25#include <linux/crc32.h>
26
27MODULE_AUTHOR("Jouni Malinen");
28MODULE_DESCRIPTION("Host AP crypt: TKIP");
29MODULE_LICENSE("GPL");
30
31
32struct ieee80211_tkip_data {
33#define TKIP_KEY_LEN 32
34 u8 key[TKIP_KEY_LEN];
35 int key_set;
36
37 u32 tx_iv32;
38 u16 tx_iv16;
39 u16 tx_ttak[5];
40 int tx_phase1_done;
41
42 u32 rx_iv32;
43 u16 rx_iv16;
44 u16 rx_ttak[5];
45 int rx_phase1_done;
46 u32 rx_iv32_new;
47 u16 rx_iv16_new;
48
49 u32 dot11RSNAStatsTKIPReplays;
50 u32 dot11RSNAStatsTKIPICVErrors;
51 u32 dot11RSNAStatsTKIPLocalMICFailures;
52
53 int key_idx;
54
55 struct crypto_blkcipher *rx_tfm_arc4;
56 struct crypto_hash *rx_tfm_michael;
57 struct crypto_blkcipher *tx_tfm_arc4;
58 struct crypto_hash *tx_tfm_michael;
59 struct crypto_tfm *tfm_arc4;
60 struct crypto_tfm *tfm_michael;
61
62 /* scratch buffers for virt_to_page() (crypto API) */
63 u8 rx_hdr[16], tx_hdr[16];
64};
65
66static void *ieee80211_tkip_init(int key_idx)
67{
68 struct ieee80211_tkip_data *priv;
69
70 priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
71 if (priv == NULL)
72 goto fail;
73 priv->key_idx = key_idx;
74
75 priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
76 CRYPTO_ALG_ASYNC);
77 if (IS_ERR(priv->tx_tfm_arc4)) {
78 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
79 "crypto API arc4\n");
80 priv->tx_tfm_arc4 = NULL;
81 goto fail;
82 }
83
84 priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
85 CRYPTO_ALG_ASYNC);
86 if (IS_ERR(priv->tx_tfm_michael)) {
87 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
88 "crypto API michael_mic\n");
89 priv->tx_tfm_michael = NULL;
90 goto fail;
91 }
92
93 priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
94 CRYPTO_ALG_ASYNC);
95 if (IS_ERR(priv->rx_tfm_arc4)) {
96 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
97 "crypto API arc4\n");
98 priv->rx_tfm_arc4 = NULL;
99 goto fail;
100 }
101
102 priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
103 CRYPTO_ALG_ASYNC);
104 if (IS_ERR(priv->rx_tfm_michael)) {
105 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
106 "crypto API michael_mic\n");
107 priv->rx_tfm_michael = NULL;
108 goto fail;
109 }
110
111 return priv;
112
113fail:
114 if (priv) {
115 if (priv->tx_tfm_michael)
116 crypto_free_hash(priv->tx_tfm_michael);
117 if (priv->tx_tfm_arc4)
118 crypto_free_blkcipher(priv->tx_tfm_arc4);
119 if (priv->rx_tfm_michael)
120 crypto_free_hash(priv->rx_tfm_michael);
121 if (priv->rx_tfm_arc4)
122 crypto_free_blkcipher(priv->rx_tfm_arc4);
123 kfree(priv);
124 }
125
126 return NULL;
127}
128
129
130static void ieee80211_tkip_deinit(void *priv)
131{
132 struct ieee80211_tkip_data *_priv = priv;
133
134 if (_priv) {
135 if (_priv->tx_tfm_michael)
136 crypto_free_hash(_priv->tx_tfm_michael);
137 if (_priv->tx_tfm_arc4)
138 crypto_free_blkcipher(_priv->tx_tfm_arc4);
139 if (_priv->rx_tfm_michael)
140 crypto_free_hash(_priv->rx_tfm_michael);
141 if (_priv->rx_tfm_arc4)
142 crypto_free_blkcipher(_priv->rx_tfm_arc4);
143 }
144 kfree(priv);
145}
146
147
148static inline u16 RotR1(u16 val)
149{
150 return (val >> 1) | (val << 15);
151}
152
153
154static inline u8 Lo8(u16 val)
155{
156 return val & 0xff;
157}
158
159
160static inline u8 Hi8(u16 val)
161{
162 return val >> 8;
163}
164
165
166static inline u16 Lo16(u32 val)
167{
168 return val & 0xffff;
169}
170
171
172static inline u16 Hi16(u32 val)
173{
174 return val >> 16;
175}
176
177
178static inline u16 Mk16(u8 hi, u8 lo)
179{
180 return lo | (((u16) hi) << 8);
181}
182
183
184static inline u16 Mk16_le(u16 *v)
185{
186 return le16_to_cpu(*v);
187}
188
189
190static const u16 Sbox[256] = {
191 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
192 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
193 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
194 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
195 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
196 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
197 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
198 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
199 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
200 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
201 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
202 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
203 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
204 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
205 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
206 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
207 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
208 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
209 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
210 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
211 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
212 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
213 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
214 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
215 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
216 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
217 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
218 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
219 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
220 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
221 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
222 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
223};
224
225
226static inline u16 _S_(u16 v)
227{
228 u16 t = Sbox[Hi8(v)];
229 return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
230}
231
232#define PHASE1_LOOP_COUNT 8
233
234static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32)
235{
236 int i, j;
237
238 /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */
239 TTAK[0] = Lo16(IV32);
240 TTAK[1] = Hi16(IV32);
241 TTAK[2] = Mk16(TA[1], TA[0]);
242 TTAK[3] = Mk16(TA[3], TA[2]);
243 TTAK[4] = Mk16(TA[5], TA[4]);
244
245 for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
246 j = 2 * (i & 1);
247 TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j]));
248 TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j]));
249 TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j]));
250 TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j]));
251 TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i;
252 }
253}
254
255
256static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
257 u16 IV16)
258{
259 /* Make temporary area overlap WEP seed so that the final copy can be
260 * avoided on little endian hosts. */
261 u16 *PPK = (u16 *) &WEPSeed[4];
262
263 /* Step 1 - make copy of TTAK and bring in TSC */
264 PPK[0] = TTAK[0];
265 PPK[1] = TTAK[1];
266 PPK[2] = TTAK[2];
267 PPK[3] = TTAK[3];
268 PPK[4] = TTAK[4];
269 PPK[5] = TTAK[4] + IV16;
270
271 /* Step 2 - 96-bit bijective mixing using S-box */
272 PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0]));
273 PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2]));
274 PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4]));
275 PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6]));
276 PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8]));
277 PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10]));
278
279 PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12]));
280 PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14]));
281 PPK[2] += RotR1(PPK[1]);
282 PPK[3] += RotR1(PPK[2]);
283 PPK[4] += RotR1(PPK[3]);
284 PPK[5] += RotR1(PPK[4]);
285
286 /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value
287 * WEPSeed[0..2] is transmitted as WEP IV */
288 WEPSeed[0] = Hi8(IV16);
289 WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
290 WEPSeed[2] = Lo8(IV16);
291 WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1);
292
293#ifdef __BIG_ENDIAN
294 {
295 int i;
296 for (i = 0; i < 6; i++)
297 PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
298 }
299#endif
300}
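/*
 * Illustrative sketch (hypothetical helper, not from the original file):
 * how the two mixing phases above combine into the 16-byte per-packet
 * RC4 key.  `tk` is the 128-bit temporal key (the first 16 bytes of the
 * 32-byte TKIP key), `ta` the transmitter address.  Phase 1 depends only
 * on TA and IV32, so the driver caches its result (tx_ttak/rx_ttak) and
 * reruns it only when IV32 changes; phase 2 runs for every frame.
 */
static void example_tkip_derive_rc4key(u8 rc4key[16], const u8 *tk,
				       const u8 *ta, u32 iv32, u16 iv16)
{
	u16 ttak[5];

	tkip_mixing_phase1(ttak, tk, ta, iv32);
	tkip_mixing_phase2(rc4key, tk, ttak, iv16);
}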
301
302static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
303{
304 struct ieee80211_tkip_data *tkey = priv;
305 struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
306 int len;
307 u8 *pos;
308 struct ieee80211_hdr_4addr *hdr;
309 u8 rc4key[16], *icv;
310 u32 crc;
311 struct scatterlist sg;
312 int ret;
313
314 ret = 0;
315 if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
316 skb->len < hdr_len)
317 return -1;
318
319 hdr = (struct ieee80211_hdr_4addr *)skb->data;
320
321 if (!tkey->tx_phase1_done) {
322 tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
323 tkey->tx_iv32);
324 tkey->tx_phase1_done = 1;
325 }
326 tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
327
328 len = skb->len - hdr_len;
329 pos = skb_push(skb, 8);
330 memmove(pos, pos + 8, hdr_len);
331 pos += hdr_len;
332
333 *pos++ = rc4key[0];
334 *pos++ = rc4key[1];
335 *pos++ = rc4key[2];
336 *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */;
337 *pos++ = tkey->tx_iv32 & 0xff;
338 *pos++ = (tkey->tx_iv32 >> 8) & 0xff;
339 *pos++ = (tkey->tx_iv32 >> 16) & 0xff;
340 *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
341
342 icv = skb_put(skb, 4);
343 crc = ~crc32_le(~0, pos, len);
344 icv[0] = crc;
345 icv[1] = crc >> 8;
346 icv[2] = crc >> 16;
347 icv[3] = crc >> 24;
348 crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
349 sg_init_one(&sg, pos, len + 4);
350 ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
351
352 tkey->tx_iv16++;
353 if (tkey->tx_iv16 == 0) {
354 tkey->tx_phase1_done = 0;
355 tkey->tx_iv32++;
356 }
357 return ret;
358}
359
360static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
361{
362 struct ieee80211_tkip_data *tkey = priv;
363 struct blkcipher_desc desc = { .tfm = tkey->rx_tfm_arc4 };
364 u8 keyidx, *pos;
365 u32 iv32;
366 u16 iv16;
367 struct ieee80211_hdr_4addr *hdr;
368 u8 icv[4];
369 u32 crc;
370 struct scatterlist sg;
371 u8 rc4key[16];
372 int plen;
373
374 if (skb->len < hdr_len + 8 + 4)
375 return -1;
376
377 hdr = (struct ieee80211_hdr_4addr *)skb->data;
378 pos = skb->data + hdr_len;
379 keyidx = pos[3];
380 if (!(keyidx & (1 << 5))) {
381 if (net_ratelimit()) {
382 printk(KERN_DEBUG "TKIP: received packet without ExtIV"
383 " flag from %pM\n", hdr->addr2);
384 }
385 return -2;
386 }
387 keyidx >>= 6;
388 if (tkey->key_idx != keyidx) {
389 printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame "
390 "keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv);
391 return -6;
392 }
393 if (!tkey->key_set) {
394 if (net_ratelimit()) {
395 printk(KERN_DEBUG "TKIP: received packet from %pM"
396 " with keyid=%d that does not have a configured"
397 " key\n", hdr->addr2, keyidx);
398 }
399 return -3;
400 }
401 iv16 = (pos[0] << 8) | pos[2];
402 iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
403 pos += 8;
404
405 if (iv32 < tkey->rx_iv32 ||
406 (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
407 if (net_ratelimit()) {
408 printk(KERN_DEBUG "TKIP: replay detected: STA=%pM"
409 " previous TSC %08x%04x received TSC "
410 "%08x%04x\n", hdr->addr2,
411 tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
412 }
413 tkey->dot11RSNAStatsTKIPReplays++;
414 return -4;
415 }
416
417 if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) {
418 tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32);
419 tkey->rx_phase1_done = 1;
420 }
421 tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16);
422
423 plen = skb->len - hdr_len - 12;
424 crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
425 sg_init_one(&sg, pos, plen + 4);
426 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
427 if (net_ratelimit()) {
428			printk(KERN_DEBUG "TKIP: failed to decrypt "
429 "received packet from %pM\n",
430 hdr->addr2);
431 }
432 return -7;
433 }
434
435 crc = ~crc32_le(~0, pos, plen);
436 icv[0] = crc;
437 icv[1] = crc >> 8;
438 icv[2] = crc >> 16;
439 icv[3] = crc >> 24;
440 if (memcmp(icv, pos + plen, 4) != 0) {
441 if (iv32 != tkey->rx_iv32) {
442 /* Previously cached Phase1 result was already lost, so
443 * it needs to be recalculated for the next packet. */
444 tkey->rx_phase1_done = 0;
445 }
446 if (net_ratelimit()) {
447 printk(KERN_DEBUG "TKIP: ICV error detected: STA="
448 "%pM\n", hdr->addr2);
449 }
450 tkey->dot11RSNAStatsTKIPICVErrors++;
451 return -5;
452 }
453
454 /* Update real counters only after Michael MIC verification has
455 * completed */
456 tkey->rx_iv32_new = iv32;
457 tkey->rx_iv16_new = iv16;
458
459 /* Remove IV and ICV */
460 memmove(skb->data + 8, skb->data, hdr_len);
461 skb_pull(skb, 8);
462 skb_trim(skb, skb->len - 4);
463
464 return keyidx;
465}
466
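The replay test above treats the 48-bit TSC (IV32:IV16) as a single counter: any value not strictly greater than the last accepted one is dropped. A minimal sketch of the same predicate, with a hypothetical helper name:

#include <stdint.h>

static int tkip_is_replay(uint32_t iv32, uint16_t iv16,
			  uint32_t last_iv32, uint16_t last_iv16)
{
	return iv32 < last_iv32 ||
	       (iv32 == last_iv32 && iv16 <= last_iv16);
}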
467static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
468 u8 *data, size_t data_len, u8 *mic)
469{
470 struct hash_desc desc;
471 struct scatterlist sg[2];
472
473 if (tfm_michael == NULL) {
474 printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
475 return -1;
476 }
477
478 sg_init_table(sg, 2);
479 sg_set_buf(&sg[0], hdr, 16);
480 sg_set_buf(&sg[1], data, data_len);
481
482 if (crypto_hash_setkey(tfm_michael, key, 8))
483 return -1;
484
485 desc.tfm = tfm_michael;
486 desc.flags = 0;
487 return crypto_hash_digest(&desc, sg, data_len + 16, mic);
488}
489
490static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
491{
492 struct ieee80211_hdr_4addr *hdr11;
493
494 hdr11 = (struct ieee80211_hdr_4addr *)skb->data;
495 switch (le16_to_cpu(hdr11->frame_ctl) &
496 (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
497 case IEEE80211_FCTL_TODS:
498 memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
499 memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
500 break;
501 case IEEE80211_FCTL_FROMDS:
502 memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
503 memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */
504 break;
505 case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
506 memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
507 memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
508 break;
509 case 0:
510 memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
511 memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
512 break;
513 }
514
515 hdr[12] = 0; /* priority */
516
517 hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
518}
519
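For reference, the 16-byte Michael MIC pseudo-header built above has the shape sketched below; the struct is only an illustration, since the driver keeps it as a raw u8[16] buffer.

#include <stdint.h>

struct michael_mic_hdr {
	uint8_t da[6];		/* destination address */
	uint8_t sa[6];		/* source address */
	uint8_t priority;	/* QoS TID, 0 for non-QoS frames */
	uint8_t reserved[3];	/* always zero */
};	/* sizeof == 16, matching the buffer fed to michael_mic() */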
520
521static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len,
522 void *priv)
523{
524 struct ieee80211_tkip_data *tkey = priv;
525 u8 *pos;
526 struct ieee80211_hdr_4addr *hdr;
527
528 hdr = (struct ieee80211_hdr_4addr *)skb->data;
529
530 if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
531 printk(KERN_DEBUG "Invalid packet for Michael MIC add "
532 "(tailroom=%d hdr_len=%d skb->len=%d)\n",
533 skb_tailroom(skb), hdr_len, skb->len);
534 return -1;
535 }
536
537 michael_mic_hdr(skb, tkey->tx_hdr);
538
539 if (IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
540 tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
541
542 pos = skb_put(skb, 8);
543
544 if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
545 skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
546 return -1;
547
548 return 0;
549}
550
551static void ieee80211_michael_mic_failure(struct net_device *dev,
552 struct ieee80211_hdr_4addr *hdr,
553 int keyidx)
554{
555 union iwreq_data wrqu;
556 struct iw_michaelmicfailure ev;
557
558 /* TODO: needed parameters: count, keyid, key type, TSC */
559 memset(&ev, 0, sizeof(ev));
560 ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
561 if (hdr->addr1[0] & 0x01)
562 ev.flags |= IW_MICFAILURE_GROUP;
563 else
564 ev.flags |= IW_MICFAILURE_PAIRWISE;
565 ev.src_addr.sa_family = ARPHRD_ETHER;
566 memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN);
567 memset(&wrqu, 0, sizeof(wrqu));
568 wrqu.data.length = sizeof(ev);
569 wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
570}
571
572static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
573 int hdr_len, void *priv)
574{
575 struct ieee80211_tkip_data *tkey = priv;
576 u8 mic[8];
577 struct ieee80211_hdr_4addr *hdr;
578
579 hdr = (struct ieee80211_hdr_4addr *)skb->data;
580
581 if (!tkey->key_set)
582 return -1;
583
584 michael_mic_hdr(skb, tkey->rx_hdr);
585 if (IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl)))
586 tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
587
588 if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
589 skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
590 return -1;
591
592 if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
593 struct ieee80211_hdr_4addr *hdr;
594 hdr = (struct ieee80211_hdr_4addr *)skb->data;
595 printk(KERN_DEBUG "%s: Michael MIC verification failed for "
596 "MSDU from %pM keyidx=%d\n",
597 skb->dev ? skb->dev->name : "N/A", hdr->addr2,
598 keyidx);
599 if (skb->dev)
600 ieee80211_michael_mic_failure(skb->dev, hdr, keyidx);
601 tkey->dot11RSNAStatsTKIPLocalMICFailures++;
602 return -1;
603 }
604
605 /* Update TSC counters for RX now that the packet verification has
606 * completed. */
607 tkey->rx_iv32 = tkey->rx_iv32_new;
608 tkey->rx_iv16 = tkey->rx_iv16_new;
609
610 skb_trim(skb, skb->len - 8);
611
612 return 0;
613}
614
615
616static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
617{
618 struct ieee80211_tkip_data *tkey = priv;
619 int keyidx;
620 struct crypto_hash *tfm = tkey->tx_tfm_michael;
621 struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
622 struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
623 struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
624
625 keyidx = tkey->key_idx;
626 memset(tkey, 0, sizeof(*tkey));
627 tkey->key_idx = keyidx;
628
629 tkey->tx_tfm_michael = tfm;
630 tkey->tx_tfm_arc4 = tfm2;
631 tkey->rx_tfm_michael = tfm3;
632 tkey->rx_tfm_arc4 = tfm4;
633
634 if (len == TKIP_KEY_LEN) {
635 memcpy(tkey->key, key, TKIP_KEY_LEN);
636 tkey->key_set = 1;
637 tkey->tx_iv16 = 1; /* TSC is initialized to 1 */
638 if (seq) {
639 tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) |
640 (seq[3] << 8) | seq[2];
641 tkey->rx_iv16 = (seq[1] << 8) | seq[0];
642 }
643 } else if (len == 0)
644 tkey->key_set = 0;
645 else
646 return -1;
647
648 return 0;
649}
650
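The 32-byte key blob accepted here is assumed to be laid out as sketched below (TKIP_KEY_LEN taken to be 32); this matches the &tkey->key[16] and &tkey->key[24] offsets used by the Michael MIC paths, and the struct itself is purely illustrative.

#include <stdint.h>

struct tkip_key_blob {
	uint8_t tk[16];		/* temporal key fed to the mixing phases */
	uint8_t tx_mic_key[8];	/* Michael key for transmitted MSDUs (key[16..23]) */
	uint8_t rx_mic_key[8];	/* Michael key for received MSDUs (key[24..31]) */
};	/* sizeof == 32 */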
651
652static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
653{
654 struct ieee80211_tkip_data *tkey = priv;
655
656 if (len < TKIP_KEY_LEN)
657 return -1;
658
659 if (!tkey->key_set)
660 return 0;
661 memcpy(key, tkey->key, TKIP_KEY_LEN);
662
663 if (seq) {
664 /* Return the sequence number of the last transmitted frame. */
665 u16 iv16 = tkey->tx_iv16;
666 u32 iv32 = tkey->tx_iv32;
667 if (iv16 == 0)
668 iv32--;
669 iv16--;
670		seq[0] = iv16;
671		seq[1] = iv16 >> 8;
672		seq[2] = iv32;
673		seq[3] = iv32 >> 8;
674		seq[4] = iv32 >> 16;
675		seq[5] = iv32 >> 24;
676 }
677
678 return TKIP_KEY_LEN;
679}
680
681
682static char *ieee80211_tkip_print_stats(char *p, void *priv)
683{
684 struct ieee80211_tkip_data *tkip = priv;
685 p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
686 "tx_pn=%02x%02x%02x%02x%02x%02x "
687 "rx_pn=%02x%02x%02x%02x%02x%02x "
688 "replays=%d icv_errors=%d local_mic_failures=%d\n",
689 tkip->key_idx, tkip->key_set,
690 (tkip->tx_iv32 >> 24) & 0xff,
691 (tkip->tx_iv32 >> 16) & 0xff,
692 (tkip->tx_iv32 >> 8) & 0xff,
693 tkip->tx_iv32 & 0xff,
694 (tkip->tx_iv16 >> 8) & 0xff,
695 tkip->tx_iv16 & 0xff,
696 (tkip->rx_iv32 >> 24) & 0xff,
697 (tkip->rx_iv32 >> 16) & 0xff,
698 (tkip->rx_iv32 >> 8) & 0xff,
699 tkip->rx_iv32 & 0xff,
700 (tkip->rx_iv16 >> 8) & 0xff,
701 tkip->rx_iv16 & 0xff,
702 tkip->dot11RSNAStatsTKIPReplays,
703 tkip->dot11RSNAStatsTKIPICVErrors,
704 tkip->dot11RSNAStatsTKIPLocalMICFailures);
705 return p;
706}
707
708
709static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
710 .name = "TKIP",
711 .init = ieee80211_tkip_init,
712 .deinit = ieee80211_tkip_deinit,
713 .encrypt_mpdu = ieee80211_tkip_encrypt,
714 .decrypt_mpdu = ieee80211_tkip_decrypt,
715 .encrypt_msdu = ieee80211_michael_mic_add,
716 .decrypt_msdu = ieee80211_michael_mic_verify,
717 .set_key = ieee80211_tkip_set_key,
718 .get_key = ieee80211_tkip_get_key,
719 .print_stats = ieee80211_tkip_print_stats,
720 .extra_prefix_len = 4 + 4, /* IV + ExtIV */
721 .extra_postfix_len = 8 + 4, /* MIC + ICV */
722 .owner = THIS_MODULE,
723};
724
725
726int ieee80211_crypto_tkip_init(void)
727{
728 return ieee80211_register_crypto_ops(&ieee80211_crypt_tkip);
729}
730
731
732void ieee80211_crypto_tkip_exit(void)
733{
734 ieee80211_unregister_crypto_ops(&ieee80211_crypt_tkip);
735}
736
737
738void ieee80211_tkip_null(void)
739{
740}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
deleted file mode 100644
index f25367224941..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt_wep.c
+++ /dev/null
@@ -1,277 +0,0 @@
1/*
2 * Host AP crypt: host-based WEP encryption implementation for Host AP driver
3 *
4 * Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. See README and COPYING for
9 * more details.
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/random.h>
17#include <linux/skbuff.h>
18#include <linux/string.h>
19
20#include "ieee80211.h"
21
22#include <linux/crypto.h>
23#include <linux/scatterlist.h>
24#include <linux/crc32.h>
25
26MODULE_AUTHOR("Jouni Malinen");
27MODULE_DESCRIPTION("Host AP crypt: WEP");
28MODULE_LICENSE("GPL");
29
30struct prism2_wep_data {
31 u32 iv;
32#define WEP_KEY_LEN 13
33 u8 key[WEP_KEY_LEN + 1];
34 u8 key_len;
35 u8 key_idx;
36 struct crypto_blkcipher *tx_tfm;
37 struct crypto_blkcipher *rx_tfm;
38};
39
40static void *prism2_wep_init(int keyidx)
41{
42 struct prism2_wep_data *priv;
43
44 priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
45 if (priv == NULL)
46 goto fail;
47 priv->key_idx = keyidx;
48 priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
49 if (IS_ERR(priv->tx_tfm)) {
50 pr_debug("could not allocate crypto API arc4\n");
51 priv->tx_tfm = NULL;
52 goto fail;
53 }
54 priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
55 if (IS_ERR(priv->rx_tfm)) {
56 pr_debug("could not allocate crypto API arc4\n");
57 priv->rx_tfm = NULL;
58 goto fail;
59 }
60
61 /* start WEP IV from a random value */
62 get_random_bytes(&priv->iv, 4);
63
64 return priv;
65
66fail:
67 if (priv) {
68 if (priv->tx_tfm)
69 crypto_free_blkcipher(priv->tx_tfm);
70 if (priv->rx_tfm)
71 crypto_free_blkcipher(priv->rx_tfm);
72 kfree(priv);
73 }
74
75 return NULL;
76}
77
78static void prism2_wep_deinit(void *priv)
79{
80 struct prism2_wep_data *_priv = priv;
81
82 if (_priv) {
83 if (_priv->tx_tfm)
84 crypto_free_blkcipher(_priv->tx_tfm);
85 if (_priv->rx_tfm)
86 crypto_free_blkcipher(_priv->rx_tfm);
87 }
88
89 kfree(priv);
90}
91
92/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
93 * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
 94 * so the payload length increases by 8 bytes.
95 *
96 * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
97 */
98static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
99{
100 struct prism2_wep_data *wep = priv;
101 struct blkcipher_desc desc = { .tfm = wep->tx_tfm };
102 u32 klen, len;
103 u8 key[WEP_KEY_LEN + 3];
104 u8 *pos;
105 u32 crc;
106 u8 *icv;
107 struct scatterlist sg;
108
109 if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
110 skb->len < hdr_len)
111 return -1;
112
113 len = skb->len - hdr_len;
114 pos = skb_push(skb, 4);
115 memmove(pos, pos + 4, hdr_len);
116 pos += hdr_len;
117
118 klen = 3 + wep->key_len;
119
120 wep->iv++;
121
122 /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
123 * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
124	 * can be used to speed up attacks, so avoid using them. */
125 if ((wep->iv & 0xff00) == 0xff00) {
126 u8 B = (wep->iv >> 16) & 0xff;
127 if (B >= 3 && B < klen)
128 wep->iv += 0x0100;
129 }
130
131 /* Prepend 24-bit IV to RC4 key and TX frame */
132 *pos++ = key[0] = (wep->iv >> 16) & 0xff;
133 *pos++ = key[1] = (wep->iv >> 8) & 0xff;
134 *pos++ = key[2] = wep->iv & 0xff;
135 *pos++ = wep->key_idx << 6;
136
137 /* Copy rest of the WEP key (the secret part) */
138 memcpy(key + 3, wep->key, wep->key_len);
139
140 /* Append little-endian CRC32 and encrypt it to produce ICV */
141 crc = ~crc32_le(~0, pos, len);
142 icv = skb_put(skb, 4);
143 icv[0] = crc;
144 icv[1] = crc >> 8;
145 icv[2] = crc >> 16;
146 icv[3] = crc >> 24;
147
148 crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
149 sg_init_one(&sg, pos, len + 4);
150
151 return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
152}
153
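The weak-IV test in the middle of the function above skips the Fluhrer-Mantin-Shamir IV classes (B + 3, 0xff, N) for 3 <= B < klen. A standalone sketch of the same predicate, with a hypothetical helper name and userspace types:

#include <stdint.h>

static int wep_iv_is_weak(uint32_t iv, uint32_t klen)
{
	uint8_t first = (iv >> 16) & 0xff;	/* transmitted as key[0] */
	uint8_t second = (iv >> 8) & 0xff;	/* transmitted as key[1] */

	return second == 0xff && first >= 3 && first < klen;
}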
154/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
155 * the frame: IV (4 bytes), encrypted payload (including SNAP header),
156 * ICV (4 bytes). len includes both IV and ICV.
157 *
158 * Returns 0 if the frame was decrypted successfully and the ICV was correct,
159 * or a negative value on failure. If the frame is OK, IV and ICV are removed.
160 */
161static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
162{
163 struct prism2_wep_data *wep = priv;
164 struct blkcipher_desc desc = { .tfm = wep->rx_tfm };
165 u32 klen, plen;
166 u8 key[WEP_KEY_LEN + 3];
167 u8 keyidx, *pos;
168 u32 crc;
169 u8 icv[4];
170 struct scatterlist sg;
171
172 if (skb->len < hdr_len + 8)
173 return -1;
174
175 pos = skb->data + hdr_len;
176 key[0] = *pos++;
177 key[1] = *pos++;
178 key[2] = *pos++;
179 keyidx = *pos++ >> 6;
180 if (keyidx != wep->key_idx)
181 return -1;
182
183 klen = 3 + wep->key_len;
184
185 /* Copy rest of the WEP key (the secret part) */
186 memcpy(key + 3, wep->key, wep->key_len);
187
188 /* Apply RC4 to data and compute CRC32 over decrypted data */
189 plen = skb->len - hdr_len - 8;
190
191 crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
192 sg_init_one(&sg, pos, plen + 4);
193
194 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
195 return -7;
196
197 crc = ~crc32_le(~0, pos, plen);
198 icv[0] = crc;
199 icv[1] = crc >> 8;
200 icv[2] = crc >> 16;
201 icv[3] = crc >> 24;
202
203 if (memcmp(icv, pos + plen, 4) != 0) {
204 /* ICV mismatch - drop frame */
205 return -2;
206 }
207
208 /* Remove IV and ICV */
209 memmove(skb->data + 4, skb->data, hdr_len);
210 skb_pull(skb, 4);
211 skb_trim(skb, skb->len - 4);
212 return 0;
213}
214
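Both the WEP and TKIP paths use the same ICV construction: a standard CRC-32 over the plaintext, stored little-endian and encrypted together with the data. The kernel's ~crc32_le(~0, ...) form is the ordinary CRC-32, so a userspace cross-check can use zlib (link with -lz). The helper name below is hypothetical and the snippet is only a verification sketch.

#include <stdint.h>
#include <stdio.h>
#include <zlib.h>

static void wep_icv(const uint8_t *data, size_t len, uint8_t icv[4])
{
	uint32_t crc = (uint32_t)crc32(0L, data, (uInt)len);

	icv[0] = crc;
	icv[1] = crc >> 8;
	icv[2] = crc >> 16;
	icv[3] = crc >> 24;
}

int main(void)
{
	uint8_t icv[4];

	wep_icv((const uint8_t *)"123456789", 9, icv);
	/* The well-known CRC-32 check value for "123456789" is 0xcbf43926. */
	printf("%02x%02x%02x%02x\n", icv[3], icv[2], icv[1], icv[0]);
	return 0;
}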
215static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
216{
217 struct prism2_wep_data *wep = priv;
218
219 if (len < 0 || len > WEP_KEY_LEN)
220 return -1;
221
222 memcpy(wep->key, key, len);
223 wep->key_len = len;
224
225 return 0;
226}
227
228static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
229{
230 struct prism2_wep_data *wep = priv;
231
232 if (len < wep->key_len)
233 return -1;
234
235 memcpy(key, wep->key, wep->key_len);
236
237 return wep->key_len;
238}
239
240static char *prism2_wep_print_stats(char *p, void *priv)
241{
242 struct prism2_wep_data *wep = priv;
243 p += sprintf(p, "key[%d] alg=WEP len=%d\n",
244 wep->key_idx, wep->key_len);
245 return p;
246}
247
248static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
249 .name = "WEP",
250 .init = prism2_wep_init,
251 .deinit = prism2_wep_deinit,
252 .encrypt_mpdu = prism2_wep_encrypt,
253 .decrypt_mpdu = prism2_wep_decrypt,
254 .encrypt_msdu = NULL,
255 .decrypt_msdu = NULL,
256 .set_key = prism2_wep_set_key,
257 .get_key = prism2_wep_get_key,
258 .print_stats = prism2_wep_print_stats,
259 .extra_prefix_len = 4, /* IV */
260 .extra_postfix_len = 4, /* ICV */
261 .owner = THIS_MODULE,
262};
263
264int ieee80211_crypto_wep_init(void)
265{
266 return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
267}
268
269void ieee80211_crypto_wep_exit(void)
270{
271 ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
272}
273
274void ieee80211_wep_null(void)
275{
276 return;
277}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
deleted file mode 100644
index 07a1fbb6678e..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c
+++ /dev/null
@@ -1,203 +0,0 @@
1/*******************************************************************************
2
3 Copyright(c) 2004 Intel Corporation. All rights reserved.
4
5 Portions of this file are based on the WEP enablement code provided by the
6 Host AP project hostap-drivers v0.1.3
7 Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
8 <jkmaline@cc.hut.fi>
9 Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31*******************************************************************************/
32
33#include <linux/compiler.h>
34//#include <linux/config.h>
35#include <linux/errno.h>
36#include <linux/if_arp.h>
37#include <linux/in6.h>
38#include <linux/in.h>
39#include <linux/ip.h>
40#include <linux/kernel.h>
41#include <linux/module.h>
42#include <linux/netdevice.h>
43#include <linux/pci.h>
44#include <linux/proc_fs.h>
45#include <linux/skbuff.h>
46#include <linux/slab.h>
47#include <linux/tcp.h>
48#include <linux/types.h>
49#include <linux/wireless.h>
50#include <linux/etherdevice.h>
51#include <linux/uaccess.h>
52#include <net/arp.h>
53#include <net/net_namespace.h>
54
55#include "ieee80211.h"
56
57MODULE_DESCRIPTION("802.11 data/management/control stack");
58MODULE_AUTHOR("Copyright (C) 2004 Intel Corporation <jketreno@linux.intel.com>");
59MODULE_LICENSE("GPL");
60
61#define DRV_NAME "ieee80211"
62
63static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
64{
65 if (ieee->networks)
66 return 0;
67
68 ieee->networks = kcalloc(
69 MAX_NETWORK_COUNT, sizeof(struct ieee80211_network),
70 GFP_KERNEL);
71 if (!ieee->networks)
72 return -ENOMEM;
73
74 return 0;
75}
76
77static inline void ieee80211_networks_free(struct ieee80211_device *ieee)
78{
79 if (!ieee->networks)
80 return;
81 kfree(ieee->networks);
82 ieee->networks = NULL;
83}
84
85static inline void ieee80211_networks_initialize(struct ieee80211_device *ieee)
86{
87 int i;
88
89 INIT_LIST_HEAD(&ieee->network_free_list);
90 INIT_LIST_HEAD(&ieee->network_list);
91 for (i = 0; i < MAX_NETWORK_COUNT; i++)
92 list_add_tail(&ieee->networks[i].list, &ieee->network_free_list);
93}
94
95
96struct net_device *alloc_ieee80211(int sizeof_priv)
97{
98 struct ieee80211_device *ieee;
99 struct net_device *dev;
100 int i, err;
101
102 IEEE80211_DEBUG_INFO("Initializing...\n");
103
104 dev = alloc_etherdev(sizeof(struct ieee80211_device) + sizeof_priv);
105 if (!dev) {
106		IEEE80211_ERROR("Unable to allocate network device.\n");
107 goto failed;
108 }
109 ieee = netdev_priv(dev);
110
111 ieee->dev = dev;
112
113 err = ieee80211_networks_allocate(ieee);
114 if (err) {
115 IEEE80211_ERROR("Unable to allocate beacon storage: %d\n",
116 err);
117 goto failed;
118 }
119 ieee80211_networks_initialize(ieee);
120
121 /* Default fragmentation threshold is maximum payload size */
122 ieee->fts = DEFAULT_FTS;
123 ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
124 ieee->open_wep = 1;
125
126 /* Default to enabling full open WEP with host based encrypt/decrypt */
127 ieee->host_encrypt = 1;
128 ieee->host_decrypt = 1;
129 ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
130
131 INIT_LIST_HEAD(&ieee->crypt_deinit_list);
132 init_timer(&ieee->crypt_deinit_timer);
133 ieee->crypt_deinit_timer.data = (unsigned long)ieee;
134 ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler;
135
136 spin_lock_init(&ieee->lock);
137 spin_lock_init(&ieee->wpax_suitlist_lock);
138
139 ieee->wpax_type_set = 0;
140 ieee->wpa_enabled = 0;
141 ieee->tkip_countermeasures = 0;
142 ieee->drop_unencrypted = 0;
143 ieee->privacy_invoked = 0;
144 ieee->ieee802_1x = 1;
145 ieee->raw_tx = 0;
146
147 ieee80211_softmac_init(ieee);
148
149 for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
150 INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);
151
152 for (i = 0; i < 17; i++) {
153 ieee->last_rxseq_num[i] = -1;
154 ieee->last_rxfrag_num[i] = -1;
155 ieee->last_packet_time[i] = 0;
156 }
157//These functions were added so that the crypto modules are loaded automatically
158 ieee80211_tkip_null();
159 ieee80211_wep_null();
160 ieee80211_ccmp_null();
161 return dev;
162
163 failed:
164 if (dev)
165 free_netdev(dev);
166 return NULL;
167}
168
169
170void free_ieee80211(struct net_device *dev)
171{
172 struct ieee80211_device *ieee = netdev_priv(dev);
173
174 int i;
175 struct list_head *p, *q;
176
177
178 ieee80211_softmac_free(ieee);
179 del_timer_sync(&ieee->crypt_deinit_timer);
180 ieee80211_crypt_deinit_entries(ieee, 1);
181
182 for (i = 0; i < WEP_KEYS; i++) {
183 struct ieee80211_crypt_data *crypt = ieee->crypt[i];
184 if (crypt) {
185 if (crypt->ops)
186 crypt->ops->deinit(crypt->priv);
187 kfree(crypt);
188 ieee->crypt[i] = NULL;
189 }
190 }
191
192 ieee80211_networks_free(ieee);
193
194 for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++) {
195 list_for_each_safe(p, q, &ieee->ibss_mac_hash[i]) {
196 kfree(list_entry(p, struct ieee_ibss_seq, list));
197 list_del(p);
198 }
199 }
200
201
202 free_netdev(dev);
203}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
deleted file mode 100644
index b522b57a2691..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
+++ /dev/null
@@ -1,1486 +0,0 @@
1/*
2 * Original code based Host AP (software wireless LAN access point) driver
3 * for Intersil Prism2/2.5/3 - hostap.o module, common routines
4 *
5 * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
6 * <jkmaline@cc.hut.fi>
7 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
8 * Copyright (c) 2004, Intel Corporation
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation. See README and COPYING for
13 * more details.
14 ******************************************************************************
15
16 Few modifications for Realtek's Wi-Fi drivers by
17 Andrea Merello <andrea.merello@gmail.com>
18
19 A special thanks goes to Realtek for their support !
20
21******************************************************************************/
22
23
24#include <linux/compiler.h>
25//#include <linux/config.h>
26#include <linux/errno.h>
27#include <linux/if_arp.h>
28#include <linux/in6.h>
29#include <linux/in.h>
30#include <linux/ip.h>
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/netdevice.h>
34#include <linux/pci.h>
35#include <linux/proc_fs.h>
36#include <linux/skbuff.h>
37#include <linux/slab.h>
38#include <linux/tcp.h>
39#include <linux/types.h>
40#include <linux/wireless.h>
41#include <linux/etherdevice.h>
42#include <linux/uaccess.h>
43#include <linux/ctype.h>
44
45#include "ieee80211.h"
46#include "dot11d.h"
47static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee,
48 struct sk_buff *skb,
49 struct ieee80211_rx_stats *rx_stats)
50{
51 struct ieee80211_hdr_4addr *hdr =
52 (struct ieee80211_hdr_4addr *)skb->data;
53 u16 fc = le16_to_cpu(hdr->frame_ctl);
54
55 skb->dev = ieee->dev;
56 skb_reset_mac_header(skb);
57 skb_pull(skb, ieee80211_get_hdrlen(fc));
58 skb->pkt_type = PACKET_OTHERHOST;
59 skb->protocol = __constant_htons(ETH_P_80211_RAW);
60 memset(skb->cb, 0, sizeof(skb->cb));
61 netif_rx(skb);
62}
63
64
65/* Called only as a tasklet (software IRQ) */
66static struct ieee80211_frag_entry *
67ieee80211_frag_cache_find(struct ieee80211_device *ieee, unsigned int seq,
68 unsigned int frag, u8 tid, u8 *src, u8 *dst)
69{
70 struct ieee80211_frag_entry *entry;
71 int i;
72
73 for (i = 0; i < IEEE80211_FRAG_CACHE_LEN; i++) {
74 entry = &ieee->frag_cache[tid][i];
75 if (entry->skb != NULL &&
76 time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
77 IEEE80211_DEBUG_FRAG(
78 "expiring fragment cache entry "
79 "seq=%u last_frag=%u\n",
80 entry->seq, entry->last_frag);
81 dev_kfree_skb_any(entry->skb);
82 entry->skb = NULL;
83 }
84
85 if (entry->skb != NULL && entry->seq == seq &&
86 (entry->last_frag + 1 == frag || frag == -1) &&
87 memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
88 memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
89 return entry;
90 }
91
92 return NULL;
93}
94
95/* Called only as a tasklet (software IRQ) */
96static struct sk_buff *
97ieee80211_frag_cache_get(struct ieee80211_device *ieee,
98 struct ieee80211_hdr_4addr *hdr)
99{
100 struct sk_buff *skb = NULL;
101 u16 fc = le16_to_cpu(hdr->frame_ctl);
102 u16 sc = le16_to_cpu(hdr->seq_ctl);
103 unsigned int frag = WLAN_GET_SEQ_FRAG(sc);
104 unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
105 struct ieee80211_frag_entry *entry;
106 struct ieee80211_hdr_3addrqos *hdr_3addrqos;
107 struct ieee80211_hdr_4addrqos *hdr_4addrqos;
108 u8 tid;
109
110 if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
111 hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)hdr;
112 tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QOS_TID;
113 tid = UP2AC(tid);
114 tid++;
115 } else if (IEEE80211_QOS_HAS_SEQ(fc)) {
116 hdr_3addrqos = (struct ieee80211_hdr_3addrqos *)hdr;
117 tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QOS_TID;
118 tid = UP2AC(tid);
119 tid++;
120 } else {
121 tid = 0;
122 }
123
124 if (frag == 0) {
125 /* Reserve enough space to fit maximum frame length */
126 skb = dev_alloc_skb(ieee->dev->mtu +
127 sizeof(struct ieee80211_hdr_4addr) +
128 8 /* LLC */ +
129 2 /* alignment */ +
130 8 /* WEP */ +
131 ETH_ALEN /* WDS */ +
132 (IEEE80211_QOS_HAS_SEQ(fc) ? 2 : 0) /* QOS Control */);
133 if (skb == NULL)
134 return NULL;
135
136 entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]];
137 ieee->frag_next_idx[tid]++;
138 if (ieee->frag_next_idx[tid] >= IEEE80211_FRAG_CACHE_LEN)
139 ieee->frag_next_idx[tid] = 0;
140
141 if (entry->skb != NULL)
142 dev_kfree_skb_any(entry->skb);
143
144 entry->first_frag_time = jiffies;
145 entry->seq = seq;
146 entry->last_frag = frag;
147 entry->skb = skb;
148 memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
149 memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
150 } else {
151 /* received a fragment of a frame for which the head fragment
152 * should have already been received */
153 entry = ieee80211_frag_cache_find(ieee, seq, frag, tid, hdr->addr2,
154 hdr->addr1);
155 if (entry != NULL) {
156 entry->last_frag = frag;
157 skb = entry->skb;
158 }
159 }
160
161 return skb;
162}
163
164
165/* Called only as a tasklet (software IRQ) */
166static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
167 struct ieee80211_hdr_4addr *hdr)
168{
169 u16 fc = le16_to_cpu(hdr->frame_ctl);
170 u16 sc = le16_to_cpu(hdr->seq_ctl);
171 unsigned int seq = WLAN_GET_SEQ_SEQ(sc);
172 struct ieee80211_frag_entry *entry;
173 struct ieee80211_hdr_3addrqos *hdr_3addrqos;
174 struct ieee80211_hdr_4addrqos *hdr_4addrqos;
175 u8 tid;
176
177 if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
178 hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)hdr;
179 tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QOS_TID;
180 tid = UP2AC(tid);
181 tid++;
182 } else if (IEEE80211_QOS_HAS_SEQ(fc)) {
183 hdr_3addrqos = (struct ieee80211_hdr_3addrqos *)hdr;
184 tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QOS_TID;
185 tid = UP2AC(tid);
186 tid++;
187 } else {
188 tid = 0;
189 }
190
191 entry = ieee80211_frag_cache_find(ieee, seq, -1, tid, hdr->addr2,
192 hdr->addr1);
193
194 if (entry == NULL) {
195 IEEE80211_DEBUG_FRAG(
196 "could not invalidate fragment cache "
197 "entry (seq=%u)\n", seq);
198 return -1;
199 }
200
201 entry->skb = NULL;
202 return 0;
203}
204
205
206
207/* ieee80211_rx_frame_mgmt
208 *
209 * Responsible for handling management frames
210 *
211 * Called by ieee80211_rx */
212static inline int
213ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
214 struct ieee80211_rx_stats *rx_stats, u16 type,
215 u16 stype)
216{
217 struct ieee80211_hdr_4addr *hdr;
218
219	// cast to the generic 4-address header type
220 hdr = (struct ieee80211_hdr_4addr *)skb->data;
221
222	/* The struct stats definition says this field is not
223	 * mandatory, but the probe response parser appears to
224	 * rely on it.
225	 */
226 rx_stats->len = skb->len;
227 ieee80211_rx_mgt(ieee, (struct ieee80211_hdr_4addr *)skb->data,
228 rx_stats);
229
230 if ((ieee->state == IEEE80211_LINKED) && (memcmp(hdr->addr3, ieee->current_network.bssid, ETH_ALEN))) {
231 dev_kfree_skb_any(skb);
232 return 0;
233 }
234
235 ieee80211_rx_frame_softmac(ieee, skb, rx_stats, type, stype);
236
237 dev_kfree_skb_any(skb);
238
239 return 0;
240
241}
242
243
244
245/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
246/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
247static unsigned char rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
248/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
249static unsigned char bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
250/* No encapsulation header if EtherType < 0x600 (=length) */
251
252/* Called by ieee80211_rx_frame_decrypt */
253static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
254 struct sk_buff *skb, size_t hdrlen)
255{
256 struct net_device *dev = ieee->dev;
257 u16 fc, ethertype;
258 struct ieee80211_hdr_4addr *hdr;
259 u8 *pos;
260
261 if (skb->len < 24)
262 return 0;
263
264 hdr = (struct ieee80211_hdr_4addr *)skb->data;
265 fc = le16_to_cpu(hdr->frame_ctl);
266
267 /* check that the frame is unicast frame to us */
268 if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
269 IEEE80211_FCTL_TODS &&
270 memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
271 memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
272 /* ToDS frame with own addr BSSID and DA */
273 } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
274 IEEE80211_FCTL_FROMDS &&
275 memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
276 /* FromDS frame with own addr as DA */
277 } else
278 return 0;
279
280 if (skb->len < 24 + 8)
281 return 0;
282
283 /* check for port access entity Ethernet type */
284// pos = skb->data + 24;
285 pos = skb->data + hdrlen;
286 ethertype = (pos[6] << 8) | pos[7];
287 if (ethertype == ETH_P_PAE)
288 return 1;
289
290 return 0;
291}
292
293/* Called only as a tasklet (software IRQ), by ieee80211_rx */
294static inline int
295ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
296 struct ieee80211_crypt_data *crypt)
297{
298 struct ieee80211_hdr_4addr *hdr;
299 int res, hdrlen;
300
301 if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
302 return 0;
303
304 hdr = (struct ieee80211_hdr_4addr *)skb->data;
305 hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
306
307#ifdef CONFIG_IEEE80211_CRYPT_TKIP
308 if (ieee->tkip_countermeasures &&
309 strcmp(crypt->ops->name, "TKIP") == 0) {
310 if (net_ratelimit()) {
311 netdev_dbg(ieee->dev,
312 "TKIP countermeasures: dropped received packet from %pM\n",
313				   hdr->addr2);
314 }
315 return -1;
316 }
317#endif
318
319 atomic_inc(&crypt->refcnt);
320 res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
321 atomic_dec(&crypt->refcnt);
322 if (res < 0) {
323 IEEE80211_DEBUG_DROP(
324 "decryption failed (SA=%pM"
325 ") res=%d\n", hdr->addr2, res);
326 if (res == -2)
327 IEEE80211_DEBUG_DROP("Decryption failed ICV "
328 "mismatch (key %d)\n",
329 skb->data[hdrlen + 3] >> 6);
330 ieee->ieee_stats.rx_discards_undecryptable++;
331 return -1;
332 }
333
334 return res;
335}
336
337
338/* Called only as a tasklet (software IRQ), by ieee80211_rx */
339static inline int
340ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee,
341 struct sk_buff *skb, int keyidx,
342 struct ieee80211_crypt_data *crypt)
343{
344 struct ieee80211_hdr_4addr *hdr;
345 int res, hdrlen;
346
347 if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
348 return 0;
349
350 hdr = (struct ieee80211_hdr_4addr *)skb->data;
351 hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
352
353 atomic_inc(&crypt->refcnt);
354 res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
355 atomic_dec(&crypt->refcnt);
356 if (res < 0) {
357 netdev_dbg(ieee->dev,
358 "MSDU decryption/MIC verification failed (SA=%pM keyidx=%d)\n",
359 hdr->addr2, keyidx);
360 return -1;
361 }
362
363 return 0;
364}
365
366
367/* this function is stolen from ipw2200 driver*/
368#define IEEE_PACKET_RETRY_TIME (5*HZ)
369static int is_duplicate_packet(struct ieee80211_device *ieee,
370 struct ieee80211_hdr_4addr *header)
371{
372 u16 fc = le16_to_cpu(header->frame_ctl);
373 u16 sc = le16_to_cpu(header->seq_ctl);
374 u16 seq = WLAN_GET_SEQ_SEQ(sc);
375 u16 frag = WLAN_GET_SEQ_FRAG(sc);
376 u16 *last_seq, *last_frag;
377 unsigned long *last_time;
378 struct ieee80211_hdr_3addrqos *hdr_3addrqos;
379 struct ieee80211_hdr_4addrqos *hdr_4addrqos;
380 u8 tid;
381
382	// ToDS+FromDS (4-address) and QoS
383 if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS) && IEEE80211_QOS_HAS_SEQ(fc)) {
384 hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)header;
385 tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QOS_TID;
386 tid = UP2AC(tid);
387 tid++;
388 } else if (IEEE80211_QOS_HAS_SEQ(fc)) { //QoS
389 hdr_3addrqos = (struct ieee80211_hdr_3addrqos *)header;
390 tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QOS_TID;
391 tid = UP2AC(tid);
392 tid++;
393 } else { // no QoS
394 tid = 0;
395 }
396 switch (ieee->iw_mode) {
397 case IW_MODE_ADHOC:
398 {
399 struct list_head *p;
400 struct ieee_ibss_seq *entry = NULL;
401 u8 *mac = header->addr2;
402 int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE;
403
404 list_for_each(p, &ieee->ibss_mac_hash[index]) {
405 entry = list_entry(p, struct ieee_ibss_seq, list);
406 if (!memcmp(entry->mac, mac, ETH_ALEN))
407 break;
408 }
409 // if (memcmp(entry->mac, mac, ETH_ALEN)){
410 if (p == &ieee->ibss_mac_hash[index]) {
411 entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC);
412 if (!entry)
413 return 0;
414
415 memcpy(entry->mac, mac, ETH_ALEN);
416 entry->seq_num[tid] = seq;
417 entry->frag_num[tid] = frag;
418 entry->packet_time[tid] = jiffies;
419 list_add(&entry->list, &ieee->ibss_mac_hash[index]);
420 return 0;
421 }
422 last_seq = &entry->seq_num[tid];
423 last_frag = &entry->frag_num[tid];
424 last_time = &entry->packet_time[tid];
425 break;
426 }
427
428 case IW_MODE_INFRA:
429 last_seq = &ieee->last_rxseq_num[tid];
430 last_frag = &ieee->last_rxfrag_num[tid];
431 last_time = &ieee->last_packet_time[tid];
432
433 break;
434 default:
435 return 0;
436 }
437
438// if(tid != 0) {
439// printk(KERN_WARNING ":)))))))))))%x %x %x, fc(%x)\n", tid, *last_seq, seq, header->frame_ctl);
440// }
441 if ((*last_seq == seq) &&
442 time_after(*last_time + IEEE_PACKET_RETRY_TIME, jiffies)) {
443 if (*last_frag == frag) {
444 //printk(KERN_WARNING "[1] go drop!\n");
445 goto drop;
446
447 }
448 if (*last_frag + 1 != frag)
449 /* out-of-order fragment */
450 //printk(KERN_WARNING "[2] go drop!\n");
451 goto drop;
452 } else
453 *last_seq = seq;
454
455 *last_frag = frag;
456 *last_time = jiffies;
457 return 0;
458
459drop:
460// BUG_ON(!(fc & IEEE80211_FCTL_RETRY));
461// printk("DUP\n");
462
463 return 1;
464}
465
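Stripped of the per-mode cache lookup, the duplicate test above reduces to: within the retry window, a frame that repeats the last sequence number is dropped unless it is the next in-order fragment. A minimal sketch, with a hypothetical helper name:

#include <stdint.h>

static int frame_is_duplicate(uint16_t seq, uint16_t frag,
			      uint16_t last_seq, uint16_t last_frag,
			      int within_retry_window)
{
	if (seq != last_seq || !within_retry_window)
		return 0;
	return frag == last_frag || frag != (uint16_t)(last_frag + 1);
}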
466
467/* All received frames are sent to this function. @skb contains the frame in
468 * IEEE 802.11 format, i.e., in the format it was sent over the air.
469 * This function is called only as a tasklet (software IRQ). */
470int ieee80211_rtl_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
471 struct ieee80211_rx_stats *rx_stats)
472{
473 struct net_device *dev = ieee->dev;
474 //struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
475 struct ieee80211_hdr_4addr *hdr;
476
477 size_t hdrlen;
478 u16 fc, type, stype, sc;
479 struct net_device_stats *stats;
480 unsigned int frag;
481 u8 *payload;
482 u16 ethertype;
483 u8 dst[ETH_ALEN];
484 u8 src[ETH_ALEN];
485 u8 bssid[ETH_ALEN];
486 struct ieee80211_crypt_data *crypt = NULL;
487 int keyidx = 0;
488
489	// cast to the generic 4-address header type
490 hdr = (struct ieee80211_hdr_4addr *)skb->data;
491 stats = &ieee->stats;
492
493 if (skb->len < 10) {
494 netdev_info(ieee->dev, "SKB length < 10\n");
495 goto rx_dropped;
496 }
497
498 fc = le16_to_cpu(hdr->frame_ctl);
499 type = WLAN_FC_GET_TYPE(fc);
500 stype = WLAN_FC_GET_STYPE(fc);
501 sc = le16_to_cpu(hdr->seq_ctl);
502
503 frag = WLAN_GET_SEQ_FRAG(sc);
504
505//YJ,add,080828,for keep alive
506 if ((fc & IEEE80211_FCTL_TODS) != IEEE80211_FCTL_TODS) {
507 if (!memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN))
508 ieee->NumRxUnicast++;
509 } else {
510 if (!memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN))
511 ieee->NumRxUnicast++;
512 }
513//YJ,add,080828,for keep alive,end
514
515 hdrlen = ieee80211_get_hdrlen(fc);
516
517
518 if (ieee->iw_mode == IW_MODE_MONITOR) {
519 ieee80211_monitor_rx(ieee, skb, rx_stats);
520 stats->rx_packets++;
521 stats->rx_bytes += skb->len;
522 return 1;
523 }
524
525 if (ieee->host_decrypt) {
526 int idx = 0;
527 if (skb->len >= hdrlen + 3)
528 idx = skb->data[hdrlen + 3] >> 6;
529 crypt = ieee->crypt[idx];
530
531		/* allow NULL decrypt to indicate a station-specific override
532 * for default encryption */
533 if (crypt && (crypt->ops == NULL ||
534 crypt->ops->decrypt_mpdu == NULL))
535 crypt = NULL;
536
537 if (!crypt && (fc & IEEE80211_FCTL_WEP)) {
538 /* This seems to be triggered by some (multicast?)
539			 * frames from a BSS other than the current one, so just drop
540			 * the frames silently instead of filling the system log with
541 * these reports. */
542 IEEE80211_DEBUG_DROP("Decryption failed (not set)"
543 " (SA=%pM)\n",
544 hdr->addr2);
545 ieee->ieee_stats.rx_discards_undecryptable++;
546 goto rx_dropped;
547 }
548 }
549
550 if (skb->len < IEEE80211_DATA_HDR3_LEN)
551 goto rx_dropped;
552
553	// if QoS is enabled, the sequence should be checked for each AC
554 if (is_duplicate_packet(ieee, hdr))
555 goto rx_dropped;
556
557
558 if (type == IEEE80211_FTYPE_MGMT) {
559 if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
560 goto rx_dropped;
561 else
562 goto rx_exit;
563 }
564
565 /* Data frame - extract src/dst addresses */
566 switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
567 case IEEE80211_FCTL_FROMDS:
568 memcpy(dst, hdr->addr1, ETH_ALEN);
569 memcpy(src, hdr->addr3, ETH_ALEN);
570 memcpy(bssid, hdr->addr2, ETH_ALEN);
571 break;
572 case IEEE80211_FCTL_TODS:
573 memcpy(dst, hdr->addr3, ETH_ALEN);
574 memcpy(src, hdr->addr2, ETH_ALEN);
575 memcpy(bssid, hdr->addr1, ETH_ALEN);
576 break;
577 case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
578 if (skb->len < IEEE80211_DATA_HDR4_LEN)
579 goto rx_dropped;
580 memcpy(dst, hdr->addr3, ETH_ALEN);
581 memcpy(src, hdr->addr4, ETH_ALEN);
582 memcpy(bssid, ieee->current_network.bssid, ETH_ALEN);
583 break;
584 case 0:
585 memcpy(dst, hdr->addr1, ETH_ALEN);
586 memcpy(src, hdr->addr2, ETH_ALEN);
587 memcpy(bssid, hdr->addr3, ETH_ALEN);
588 break;
589 }
590
591
592 dev->last_rx = jiffies;
593
594
595 /* Nullfunc frames may have PS-bit set, so they must be passed to
596 * hostap_handle_sta_rx() before being dropped here. */
597 if (stype != IEEE80211_STYPE_DATA &&
598 stype != IEEE80211_STYPE_DATA_CFACK &&
599 stype != IEEE80211_STYPE_DATA_CFPOLL &&
600 stype != IEEE80211_STYPE_DATA_CFACKPOLL &&
601 stype != IEEE80211_STYPE_QOS_DATA//add by David,2006.8.4
602 ) {
603 if (stype != IEEE80211_STYPE_NULLFUNC)
604 IEEE80211_DEBUG_DROP(
605 "RX: dropped data frame "
606 "with no data (type=0x%02x, "
607 "subtype=0x%02x, len=%d)\n",
608 type, stype, skb->len);
609 goto rx_dropped;
610 }
611 if (memcmp(bssid, ieee->current_network.bssid, ETH_ALEN))
612 goto rx_dropped;
613
614 ieee->NumRxDataInPeriod++;
615 ieee->NumRxOkTotal++;
616 /* skb: hdr + (possibly fragmented, possibly encrypted) payload */
617
618 if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
619 (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
620 goto rx_dropped;
621
622 hdr = (struct ieee80211_hdr_4addr *)skb->data;
623
624 /* skb: hdr + (possibly fragmented) plaintext payload */
625 // PR: FIXME: hostap has additional conditions in the "if" below:
626 // ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
627 if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
628 int flen;
629 struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr);
630 IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);
631
632 if (!frag_skb) {
633 IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG,
634 "Rx cannot get skb from fragment "
635 "cache (morefrag=%d seq=%u frag=%u)\n",
636 (fc & IEEE80211_FCTL_MOREFRAGS) != 0,
637 WLAN_GET_SEQ_SEQ(sc), frag);
638 goto rx_dropped;
639 }
640 flen = skb->len;
641 if (frag != 0)
642 flen -= hdrlen;
643
644 if (frag_skb->tail + flen > frag_skb->end) {
645 netdev_warn(ieee->dev,
646 "host decrypted and reassembled frame did not fit skb\n");
647 ieee80211_frag_cache_invalidate(ieee, hdr);
648 goto rx_dropped;
649 }
650
651 if (frag == 0) {
652 /* copy first fragment (including full headers) into
653 * beginning of the fragment cache skb */
654 memcpy(skb_put(frag_skb, flen), skb->data, flen);
655 } else {
656 /* append frame payload to the end of the fragment
657 * cache skb */
658 memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
659 flen);
660 }
661 dev_kfree_skb_any(skb);
662 skb = NULL;
663
664 if (fc & IEEE80211_FCTL_MOREFRAGS) {
665 /* more fragments expected - leave the skb in fragment
666 * cache for now; it will be delivered to upper layers
667 * after all fragments have been received */
668 goto rx_exit;
669 }
670
671 /* this was the last fragment and the frame will be
672 * delivered, so remove skb from fragment cache */
673 skb = frag_skb;
674 hdr = (struct ieee80211_hdr_4addr *)skb->data;
675 ieee80211_frag_cache_invalidate(ieee, hdr);
676 }
677
678	/* skb: hdr + (possibly reassembled) full MSDU payload; possibly still
679 * encrypted/authenticated */
680 if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
681 ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
682 goto rx_dropped;
683
684 hdr = (struct ieee80211_hdr_4addr *)skb->data;
685 if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep) {
686 if (/*ieee->ieee802_1x &&*/
687 ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
688
689#ifdef CONFIG_IEEE80211_DEBUG
690 /* pass unencrypted EAPOL frames even if encryption is
691 * configured */
692 struct eapol *eap = (struct eapol *)(skb->data +
693 24);
694 IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
695 eap_get_type(eap->type));
696#endif
697 } else {
698 IEEE80211_DEBUG_DROP(
699 "encryption configured, but RX "
700 "frame not encrypted (SA=%pM)\n",
701 hdr->addr2);
702 goto rx_dropped;
703 }
704 }
705
706#ifdef CONFIG_IEEE80211_DEBUG
707 if (crypt && !(fc & IEEE80211_FCTL_WEP) &&
708 ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
709 struct eapol *eap = (struct eapol *)(skb->data +
710 24);
711 IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
712 eap_get_type(eap->type));
713 }
714#endif
715
716 if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep &&
717 !ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
718 IEEE80211_DEBUG_DROP(
719 "dropped unencrypted RX data "
720 "frame from %pM"
721 " (drop_unencrypted=1)\n",
722 hdr->addr2);
723 goto rx_dropped;
724 }
725/*
726 if(ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
727		printk(KERN_WARNING "RX: IEEE 802.1X EAPOL frame!\n");
728 }
729*/
730	/* skb: hdr + (possibly reassembled) full plaintext payload */
731 payload = skb->data + hdrlen;
732 ethertype = (payload[6] << 8) | payload[7];
733
734
735 /* convert hdr + possible LLC headers into Ethernet header */
736 if (skb->len - hdrlen >= 8 &&
737 ((memcmp(payload, rfc1042_header, SNAP_SIZE) == 0 &&
738 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
739 memcmp(payload, bridge_tunnel_header, SNAP_SIZE) == 0)) {
740 /* remove RFC1042 or Bridge-Tunnel encapsulation and
741 * replace EtherType */
742 skb_pull(skb, hdrlen + SNAP_SIZE);
743 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
744 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
745 } else {
746 u16 len;
747 /* Leave Ethernet header part of hdr and full payload */
748 skb_pull(skb, hdrlen);
749 len = htons(skb->len);
750 memcpy(skb_push(skb, 2), &len, 2);
751 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
752 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
753 }
754
755
756 stats->rx_packets++;
757 stats->rx_bytes += skb->len;
758
759 if (skb) {
760 skb->protocol = eth_type_trans(skb, dev);
761 memset(skb->cb, 0, sizeof(skb->cb));
762 skb->dev = dev;
763 skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
764 ieee->last_rx_ps_time = jiffies;
765 netif_rx(skb);
766 }
767
768 rx_exit:
769 return 1;
770
771 rx_dropped:
772 stats->rx_dropped++;
773
774	/* Returning 0 indicates to the caller that we have not handled the SKB --
775 * so it is still allocated and can be used again by underlying
776 * hardware as a DMA target */
777 return 0;
778}
779
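The 802.11-to-Ethernet conversion at the end of ieee80211_rtl_rx() keeps the EtherType only for RFC 1042 and bridge-tunnel SNAP payloads; everything else keeps an 802.3 length field instead. A hedged userspace sketch of that decision, with SNAP_SIZE assumed to be 6 and the EtherType constants written out:

#include <stdint.h>
#include <string.h>

static const uint8_t rfc1042_hdr[6]       = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const uint8_t bridge_tunnel_hdr[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

static int payload_keeps_ethertype(const uint8_t *payload, size_t plen)
{
	uint16_t ethertype;

	if (plen < 8)
		return 0;
	ethertype = (payload[6] << 8) | payload[7];
	if (memcmp(payload, rfc1042_hdr, 6) == 0)
		return ethertype != 0x80f3 /* AARP */ && ethertype != 0x8137 /* IPX */;
	return memcmp(payload, bridge_tunnel_hdr, 6) == 0;
}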
780#define MGMT_FRAME_FIXED_PART_LENGTH 0x24
781
782static inline int ieee80211_is_ofdm_rate(u8 rate)
783{
784 switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
785 case IEEE80211_OFDM_RATE_6MB:
786 case IEEE80211_OFDM_RATE_9MB:
787 case IEEE80211_OFDM_RATE_12MB:
788 case IEEE80211_OFDM_RATE_18MB:
789 case IEEE80211_OFDM_RATE_24MB:
790 case IEEE80211_OFDM_RATE_36MB:
791 case IEEE80211_OFDM_RATE_48MB:
792 case IEEE80211_OFDM_RATE_54MB:
793 return 1;
794 }
795 return 0;
796}
797
798static inline int ieee80211_SignalStrengthTranslate(int CurrSS)
799{
800 int RetSS;
801
802 // Step 1. Scale mapping.
803 if (CurrSS >= 71 && CurrSS <= 100)
804 RetSS = 90 + ((CurrSS - 70) / 3);
805 else if (CurrSS >= 41 && CurrSS <= 70)
806 RetSS = 78 + ((CurrSS - 40) / 3);
807 else if (CurrSS >= 31 && CurrSS <= 40)
808 RetSS = 66 + (CurrSS - 30);
809 else if (CurrSS >= 21 && CurrSS <= 30)
810 RetSS = 54 + (CurrSS - 20);
811 else if (CurrSS >= 5 && CurrSS <= 20)
812 RetSS = 42 + (((CurrSS - 5) * 2) / 3);
813 else if (CurrSS == 4)
814 RetSS = 36;
815 else if (CurrSS == 3)
816 RetSS = 27;
817 else if (CurrSS == 2)
818 RetSS = 18;
819 else if (CurrSS == 1)
820 RetSS = 9;
821 else
822 RetSS = CurrSS;
823
824 //RT_TRACE(COMP_DBG, DBG_LOUD, ("##### After Mapping: LastSS: %d, CurrSS: %d, RetSS: %d\n", LastSS, CurrSS, RetSS));
825
826 // Step 2. Smoothing.
827
828 //RT_TRACE(COMP_DBG, DBG_LOUD, ("$$$$$ After Smoothing: LastSS: %d, CurrSS: %d, RetSS: %d\n", LastSS, CurrSS, RetSS));
829
830 return RetSS;
831}
832
833static inline void
834ieee80211_extract_country_ie(struct ieee80211_device *ieee,
835 struct ieee80211_info_element *info_element,
836 struct ieee80211_network *network, u8 *addr2)
837{
838 if (IS_DOT11D_ENABLE(ieee)) {
839 if (info_element->len != 0) {
840 memcpy(network->CountryIeBuf, info_element->data, info_element->len);
841 network->CountryIeLen = info_element->len;
842
843 if (!IS_COUNTRY_IE_VALID(ieee))
844 Dot11d_UpdateCountryIe(ieee, addr2, info_element->len, info_element->data);
845 }
846
847 //
848		// 070305, rcnjko: update the country IE watchdog here because
849		// some APs (e.g. Cisco 1242) don't include the country IE in their
850		// probe response frames.
851 //
852 if (IS_EQUAL_CIE_SRC(ieee, addr2))
853 UPDATE_CIE_WATCHDOG(ieee);
854 }
855
856}
857
858/* SignalStrengthIndex is 0-100 */
859static int ieee80211_TranslateToDbm(unsigned char SignalStrengthIndex)
860{
861	int SignalPower; // in dBm
862
863	// Translate to dBm (x = 0.7y - 95).
864 SignalPower = (int)SignalStrengthIndex * 7 / 10;
865 SignalPower -= 95;
866
867 return SignalPower;
868}
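Worked example of the mapping used here: an index of 100 gives 100 * 7 / 10 - 95 = -25 dBm, an index of 50 gives -60 dBm, and an index of 0 gives -95 dBm.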
869inline int ieee80211_network_init(
870 struct ieee80211_device *ieee,
871 struct ieee80211_probe_response *beacon,
872 struct ieee80211_network *network,
873 struct ieee80211_rx_stats *stats)
874{
875#ifdef CONFIG_IEEE80211_DEBUG
876 char rates_str[64];
877 char *p;
878#endif
879 struct ieee80211_info_element *info_element;
880 u16 left;
881 u8 i;
882 short offset;
883 u8 curRate = 0, hOpRate = 0, curRate_ex = 0;
884
885 /* Pull out fixed field data */
886 memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
887 network->capability = beacon->capability;
888 network->last_scanned = jiffies;
889 network->time_stamp[0] = beacon->time_stamp[0];
890 network->time_stamp[1] = beacon->time_stamp[1];
891 network->beacon_interval = beacon->beacon_interval;
892 /* Where to pull this? beacon->listen_interval;*/
893 network->listen_interval = 0x0A;
894 network->rates_len = network->rates_ex_len = 0;
895 network->last_associate = 0;
896 network->ssid_len = 0;
897 network->flags = 0;
898 network->atim_window = 0;
899 network->QoS_Enable = 0;
900//by amy 080312
901 network->HighestOperaRate = 0;
902//by amy 080312
903 network->Turbo_Enable = 0;
904 network->CountryIeLen = 0;
905 memset(network->CountryIeBuf, 0, MAX_IE_LEN);
906
907 if (stats->freq == IEEE80211_52GHZ_BAND) {
908 /* for A band (No DS info) */
909 network->channel = stats->received_channel;
910 } else
911 network->flags |= NETWORK_HAS_CCK;
912
913 network->wpa_ie_len = 0;
914 network->rsn_ie_len = 0;
915
916 info_element = &beacon->info_element;
917 left = stats->len - ((void *)info_element - (void *)beacon);
918 while (left >= sizeof(struct ieee80211_info_element_hdr)) {
919 if (sizeof(struct ieee80211_info_element_hdr) + info_element->len > left) {
920 IEEE80211_DEBUG_SCAN("SCAN: parse failed: info_element->len + 2 > left : info_element->len+2=%d left=%d.\n",
921 info_element->len + sizeof(struct ieee80211_info_element),
922 left);
923 return 1;
924 }
925
926 switch (info_element->id) {
927 case MFIE_TYPE_SSID:
928 if (ieee80211_is_empty_essid(info_element->data,
929 info_element->len)) {
930 network->flags |= NETWORK_EMPTY_ESSID;
931 break;
932 }
933
934 network->ssid_len = min(info_element->len,
935 (u8)IW_ESSID_MAX_SIZE);
936 memcpy(network->ssid, info_element->data, network->ssid_len);
937 if (network->ssid_len < IW_ESSID_MAX_SIZE)
938 memset(network->ssid + network->ssid_len, 0,
939 IW_ESSID_MAX_SIZE - network->ssid_len);
940
941 IEEE80211_DEBUG_SCAN("MFIE_TYPE_SSID: '%s' len=%d.\n",
942 network->ssid, network->ssid_len);
943 break;
944
945 case MFIE_TYPE_RATES:
946#ifdef CONFIG_IEEE80211_DEBUG
947 p = rates_str;
948#endif
949 network->rates_len = min(info_element->len, MAX_RATES_LENGTH);
950 for (i = 0; i < network->rates_len; i++) {
951 network->rates[i] = info_element->data[i];
952 curRate = network->rates[i] & 0x7f;
953 if (hOpRate < curRate)
954 hOpRate = curRate;
955#ifdef CONFIG_IEEE80211_DEBUG
956 p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]);
957#endif
958 if (ieee80211_is_ofdm_rate(info_element->data[i])) {
959 network->flags |= NETWORK_HAS_OFDM;
960 if (info_element->data[i] &
961 IEEE80211_BASIC_RATE_MASK)
962 network->flags &=
963 ~NETWORK_HAS_CCK;
964 }
965 }
966
967 IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES: '%s' (%d)\n",
968 rates_str, network->rates_len);
969 break;
970
971 case MFIE_TYPE_RATES_EX:
972#ifdef CONFIG_IEEE80211_DEBUG
973 p = rates_str;
974#endif
975 network->rates_ex_len = min(info_element->len, MAX_RATES_EX_LENGTH);
976 for (i = 0; i < network->rates_ex_len; i++) {
977 network->rates_ex[i] = info_element->data[i];
978 curRate_ex = network->rates_ex[i] & 0x7f;
979 if (hOpRate < curRate_ex)
980 hOpRate = curRate_ex;
981#ifdef CONFIG_IEEE80211_DEBUG
982				p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates_ex[i]);
983#endif
984 if (ieee80211_is_ofdm_rate(info_element->data[i])) {
985 network->flags |= NETWORK_HAS_OFDM;
986 if (info_element->data[i] &
987 IEEE80211_BASIC_RATE_MASK)
988 network->flags &=
989 ~NETWORK_HAS_CCK;
990 }
991 }
992
993 IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES_EX: '%s' (%d)\n",
994 rates_str, network->rates_ex_len);
995 break;
996
997 case MFIE_TYPE_DS_SET:
998 IEEE80211_DEBUG_SCAN("MFIE_TYPE_DS_SET: %d\n",
999 info_element->data[0]);
1000 if (stats->freq == IEEE80211_24GHZ_BAND)
1001 network->channel = info_element->data[0];
1002 break;
1003
1004 case MFIE_TYPE_FH_SET:
1005 IEEE80211_DEBUG_SCAN("MFIE_TYPE_FH_SET: ignored\n");
1006 break;
1007
1008 case MFIE_TYPE_CF_SET:
1009 IEEE80211_DEBUG_SCAN("MFIE_TYPE_CF_SET: ignored\n");
1010 break;
1011
1012 case MFIE_TYPE_TIM:
1013
1014 if (info_element->len < 4)
1015 break;
1016
1017 network->dtim_period = info_element->data[1];
1018
1019 if (ieee->state != IEEE80211_LINKED)
1020 break;
1021
1022 network->last_dtim_sta_time[0] = jiffies;
1023 network->last_dtim_sta_time[1] = stats->mac_time[1];
1024
1025 network->dtim_data = IEEE80211_DTIM_VALID;
1026
1027 if (info_element->data[0] != 0)
1028 break;
1029
1030 if (info_element->data[2] & 1)
1031 network->dtim_data |= IEEE80211_DTIM_MBCAST;
1032
1033 offset = (info_element->data[2] >> 1)*2;
1034
1035 //printk("offset1:%x aid:%x\n",offset, ieee->assoc_id);
1036
1037 /* add and modified for ps 2008.1.22 */
1038 if (ieee->assoc_id < 8*offset ||
1039 ieee->assoc_id > 8*(offset + info_element->len - 3)) {
1040 break;
1041 }
1042
1043 offset = (ieee->assoc_id/8) - offset;// + ((aid % 8)? 0 : 1) ;
1044
1045 // printk("offset:%x data:%x, ucast:%d\n", offset,
1046 // info_element->data[3+offset] ,
1047 // info_element->data[3+offset] & (1<<(ieee->assoc_id%8)));
1048
1049 if (info_element->data[3+offset] & (1<<(ieee->assoc_id%8)))
1050 network->dtim_data |= IEEE80211_DTIM_UCAST;
1051
1052 break;
1053
1054 case MFIE_TYPE_IBSS_SET:
1055 IEEE80211_DEBUG_SCAN("MFIE_TYPE_IBSS_SET: ignored\n");
1056 break;
1057
1058 case MFIE_TYPE_CHALLENGE:
1059 IEEE80211_DEBUG_SCAN("MFIE_TYPE_CHALLENGE: ignored\n");
1060 break;
1061
1062 case MFIE_TYPE_GENERIC:
1063 //nic is 87B
1064 IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n",
1065 info_element->len);
1066 if (info_element->len >= 4 &&
1067 info_element->data[0] == 0x00 &&
1068 info_element->data[1] == 0x50 &&
1069 info_element->data[2] == 0xf2 &&
1070 info_element->data[3] == 0x01) {
1071 network->wpa_ie_len = min(info_element->len + 2,
1072 MAX_WPA_IE_LEN);
1073 memcpy(network->wpa_ie, info_element,
1074 network->wpa_ie_len);
1075 }
1076
1077 if (info_element->len == 7 &&
1078 info_element->data[0] == 0x00 &&
1079 info_element->data[1] == 0xe0 &&
1080 info_element->data[2] == 0x4c &&
1081 info_element->data[3] == 0x01 &&
1082 info_element->data[4] == 0x02) {
1083 network->Turbo_Enable = 1;
1084 }
1085 if (1 == stats->nic_type) //nic 87
1086 break;
1087
1088 if (info_element->len >= 5 &&
1089 info_element->data[0] == 0x00 &&
1090 info_element->data[1] == 0x50 &&
1091 info_element->data[2] == 0xf2 &&
1092 info_element->data[3] == 0x02 &&
1093 info_element->data[4] == 0x00) {
1094 //printk(KERN_WARNING "wmm info updated: %x\n", info_element->data[6]);
1095 //WMM Information Element
1096 network->wmm_info = info_element->data[6];
1097 network->QoS_Enable = 1;
1098 }
1099
1100 if (info_element->len >= 8 &&
1101 info_element->data[0] == 0x00 &&
1102 info_element->data[1] == 0x50 &&
1103 info_element->data[2] == 0xf2 &&
1104 info_element->data[3] == 0x02 &&
1105 info_element->data[4] == 0x01) {
1106 // Don't care about the version at present.
1107 //WMM Information Element
1108 //printk(KERN_WARNING "wmm info&param updated: %x\n", info_element->data[6]);
1109 network->wmm_info = info_element->data[6];
1110 //WMM Parameter Element
1111 memcpy(network->wmm_param, (u8 *)(info_element->data + 8), (info_element->len - 8));
1112 network->QoS_Enable = 1;
1113 }
1114 break;
1115
1116 case MFIE_TYPE_RSN:
1117 IEEE80211_DEBUG_SCAN("MFIE_TYPE_RSN: %d bytes\n",
1118 info_element->len);
1119 network->rsn_ie_len = min(info_element->len + 2,
1120 MAX_WPA_IE_LEN);
1121 memcpy(network->rsn_ie, info_element,
1122 network->rsn_ie_len);
1123 break;
1124 case MFIE_TYPE_COUNTRY:
1125 IEEE80211_DEBUG_SCAN("MFIE_TYPE_COUNTRY: %d bytes\n",
1126 info_element->len);
1127// printk("=====>Receive <%s> Country IE\n",network->ssid);
1128 ieee80211_extract_country_ie(ieee, info_element, network, beacon->header.addr2);
1129 break;
1130 default:
1131 IEEE80211_DEBUG_SCAN("unsupported IE %d\n",
1132 info_element->id);
1133 break;
1134 }
1135
1136 left -= sizeof(struct ieee80211_info_element_hdr) +
1137 info_element->len;
1138 info_element = (struct ieee80211_info_element *)
1139 &info_element->data[info_element->len];
1140 }
1141//by amy 080312
1142 network->HighestOperaRate = hOpRate;
1143//by amy 080312
1144 network->mode = 0;
1145 if (stats->freq == IEEE80211_52GHZ_BAND)
1146 network->mode = IEEE_A;
1147 else {
1148 if (network->flags & NETWORK_HAS_OFDM)
1149 network->mode |= IEEE_G;
1150 if (network->flags & NETWORK_HAS_CCK)
1151 network->mode |= IEEE_B;
1152 }
1153
1154 if (network->mode == 0) {
1155 IEEE80211_DEBUG_SCAN("Filtered out '%s (%pM)' "
1156 "network.\n",
1157 escape_essid(network->ssid,
1158 network->ssid_len),
1159 network->bssid);
1160 return 1;
1161 }
1162
1163 if (ieee80211_is_empty_essid(network->ssid, network->ssid_len))
1164 network->flags |= NETWORK_EMPTY_ESSID;
1165
1166 stats->signal = ieee80211_TranslateToDbm(stats->signalstrength);
1167 //stats->noise = stats->signal - stats->noise;
1168 stats->noise = ieee80211_TranslateToDbm(100 - stats->signalstrength) - 25;
1169 memcpy(&network->stats, stats, sizeof(network->stats));
1170
1171 return 0;
1172}
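The MFIE_TYPE_TIM case above decides whether unicast traffic is buffered for this station from the TIM's partial virtual bitmap: data[0] is the DTIM count, data[1] the DTIM period, data[2] carries the multicast bit plus the bitmap offset, and data[3..] the bitmap itself. A minimal standalone sketch of that lookup follows; the helper name and the sample buffer are illustrative, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the TIM partial-virtual-bitmap lookup used in the
 * MFIE_TYPE_TIM case above. 'data'/'len' mirror info_element->data
 * and info_element->len; aid is the association ID. */
static int tim_has_unicast(const uint8_t *data, int len, unsigned int aid)
{
	unsigned int offset;

	if (len < 4 || data[0] != 0)	/* only look when the DTIM count is 0 */
		return 0;

	/* bitmap control: bit 0 = multicast, bits 1-7 = offset in pairs of octets */
	offset = (data[2] >> 1) * 2;

	/* the AID must fall inside the transmitted part of the bitmap */
	if (aid < 8 * offset || aid > 8 * (offset + len - 3))
		return 0;

	/* octet holding this AID, relative to the partial bitmap */
	offset = aid / 8 - offset;

	return (data[3 + offset] >> (aid % 8)) & 1;
}

int main(void)
{
	/* DTIM count 0, period 1, bitmap offset 0, AID 5 set (bit 5 of first octet) */
	uint8_t tim[] = { 0x00, 0x01, 0x00, 0x20 };

	printf("unicast buffered for AID 5: %d\n",
	       tim_has_unicast(tim, sizeof(tim), 5));
	return 0;
}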
1173
1174static inline int is_same_network(struct ieee80211_network *src,
1175 struct ieee80211_network *dst,
1176 struct ieee80211_device *ieee)
1177{
1178 /* A network is only a duplicate if the channel, BSSID, ESSID
1179 * and the capability field (in particular IBSS and BSS) all match.
1180 * We treat all networks with <hidden> ESSIDs but the same BSSID
1181 * and channel as one network */
1182 return (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && //YJ,mod,080819,for hidden ap
1183 //((src->ssid_len == dst->ssid_len) &&
1184 (src->channel == dst->channel) &&
1185 !memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
1186 (!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && //YJ,mod,080819,for hidden ap
1187 //!memcmp(src->ssid, dst->ssid, src->ssid_len) &&
1188 ((src->capability & WLAN_CAPABILITY_IBSS) ==
1189 (dst->capability & WLAN_CAPABILITY_IBSS)) &&
1190 ((src->capability & WLAN_CAPABILITY_BSS) ==
1191 (dst->capability & WLAN_CAPABILITY_BSS)));
1192}
1193
1194inline void update_network(struct ieee80211_network *dst,
1195 struct ieee80211_network *src)
1196{
1197 unsigned char quality = src->stats.signalstrength;
1198 unsigned char signal = 0;
1199 unsigned char noise = 0;
1200 if (dst->stats.signalstrength > 0)
1201 quality = (dst->stats.signalstrength * 5 + src->stats.signalstrength + 5)/6;
1202 signal = ieee80211_TranslateToDbm(quality);
1203 //noise = signal - src->stats.noise;
1204 if (dst->stats.noise > 0)
1205 noise = (dst->stats.noise * 5 + src->stats.noise)/6;
1206 //if(strcmp(dst->ssid, "linksys_lzm000") == 0)
1207// printk("ssid:%s, quality:%d, signal:%d\n", dst->ssid, quality, signal);
1208 memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats));
1209 dst->stats.signalstrength = quality;
1210 dst->stats.signal = signal;
1211// printk("==================>stats.signal is %d\n",dst->stats.signal);
1212 dst->stats.noise = noise;
1213
1214
1215 dst->capability = src->capability;
1216 memcpy(dst->rates, src->rates, src->rates_len);
1217 dst->rates_len = src->rates_len;
1218 memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
1219 dst->rates_ex_len = src->rates_ex_len;
1220 dst->HighestOperaRate = src->HighestOperaRate;
1221 //printk("==========>in %s: src->ssid is %s,chan is %d\n",__func__,src->ssid,src->channel);
1222
1223 //YJ,add,080819,for hidden ap
1224 if (src->ssid_len > 0) {
1225 //if(src->ssid_len == 13)
1226 // printk("=====================>>>>>>>> Dst ssid: %s Src ssid: %s\n", dst->ssid, src->ssid);
1227 memset(dst->ssid, 0, dst->ssid_len);
1228 dst->ssid_len = src->ssid_len;
1229 memcpy(dst->ssid, src->ssid, src->ssid_len);
1230 }
1231 //YJ,add,080819,for hidden ap,end
1232
1233 dst->channel = src->channel;
1234 dst->mode = src->mode;
1235 dst->flags = src->flags;
1236 dst->time_stamp[0] = src->time_stamp[0];
1237 dst->time_stamp[1] = src->time_stamp[1];
1238
1239 dst->beacon_interval = src->beacon_interval;
1240 dst->listen_interval = src->listen_interval;
1241 dst->atim_window = src->atim_window;
1242 dst->dtim_period = src->dtim_period;
1243 dst->dtim_data = src->dtim_data;
1244 dst->last_dtim_sta_time[0] = src->last_dtim_sta_time[0];
1245 dst->last_dtim_sta_time[1] = src->last_dtim_sta_time[1];
1246// printk("update:%s, dtim_period:%x, dtim_data:%x\n", src->ssid, src->dtim_period, src->dtim_data);
1247 memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
1248 dst->wpa_ie_len = src->wpa_ie_len;
1249 memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len);
1250 dst->rsn_ie_len = src->rsn_ie_len;
1251
1252 dst->last_scanned = jiffies;
1253 /* dst->last_associate is not overwritten */
1254// disable QoS process now, added by David 2006/7/25
1255#if 1
1256 dst->wmm_info = src->wmm_info; //guaranteed to exist in the beacon or probe response frame.
1257/*
1258 if((dst->wmm_info^src->wmm_info)&0x0f) {//Param Set Count change, update Parameter
1259 memcpy(dst->wmm_param, src->wmm_param, IEEE80211_AC_PRAM_LEN);
1260 }
1261*/
1262 if (src->wmm_param[0].ac_aci_acm_aifsn || \
1263 src->wmm_param[1].ac_aci_acm_aifsn || \
1264 src->wmm_param[2].ac_aci_acm_aifsn || \
1265 src->wmm_param[3].ac_aci_acm_aifsn) {
1266 memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);
1267 }
1268 dst->QoS_Enable = src->QoS_Enable;
1269#else
1270 dst->QoS_Enable = 1;//for Rtl8187 simulation
1271#endif
1272 dst->SignalStrength = src->SignalStrength;
1273 dst->Turbo_Enable = src->Turbo_Enable;
1274 dst->CountryIeLen = src->CountryIeLen;
1275 memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen);
1276}
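update_network() above smooths the reported signal with a five-to-one weighted average, quality = (old*5 + new + 5)/6, where the +5 compensates for the truncation of the integer division. A small standalone check of that arithmetic; the sample values are illustrative.

#include <stdio.h>

/* 5/6 old + 1/6 new, rounded, as in update_network() above */
static unsigned char smooth(unsigned char old, unsigned char cur)
{
	if (old == 0)		/* no history yet: take the new sample as-is */
		return cur;
	return (old * 5 + cur + 5) / 6;
}

int main(void)
{
	unsigned char q = 0;
	const unsigned char samples[] = { 60, 60, 90, 90, 90 };

	for (unsigned i = 0; i < sizeof(samples); i++) {
		q = smooth(q, samples[i]);
		printf("sample %u -> smoothed %u\n", samples[i], q);
	}
	return 0;
}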
1277
1278
1279inline void
1280ieee80211_process_probe_response(struct ieee80211_device *ieee,
1281 struct ieee80211_probe_response *beacon,
1282 struct ieee80211_rx_stats *stats)
1283{
1284 struct ieee80211_network network;
1285 struct ieee80211_network *target;
1286 struct ieee80211_network *oldest = NULL;
1287#ifdef CONFIG_IEEE80211_DEBUG
1288 struct ieee80211_info_element *info_element = &beacon->info_element;
1289#endif
1290 unsigned long flags;
1291 short renew;
1292 u8 wmm_info;
1293 u8 is_beacon = (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_BEACON) ? 1 : 0; //YJ,add,080819,for hidden ap
1294
1295 memset(&network, 0, sizeof(struct ieee80211_network));
1296
1297 IEEE80211_DEBUG_SCAN(
1298 "'%s' (%pM): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
1299 escape_essid(info_element->data, info_element->len),
1300 beacon->header.addr3,
1301 (beacon->capability & (1<<0xf)) ? '1' : '0',
1302 (beacon->capability & (1<<0xe)) ? '1' : '0',
1303 (beacon->capability & (1<<0xd)) ? '1' : '0',
1304 (beacon->capability & (1<<0xc)) ? '1' : '0',
1305 (beacon->capability & (1<<0xb)) ? '1' : '0',
1306 (beacon->capability & (1<<0xa)) ? '1' : '0',
1307 (beacon->capability & (1<<0x9)) ? '1' : '0',
1308 (beacon->capability & (1<<0x8)) ? '1' : '0',
1309 (beacon->capability & (1<<0x7)) ? '1' : '0',
1310 (beacon->capability & (1<<0x6)) ? '1' : '0',
1311 (beacon->capability & (1<<0x5)) ? '1' : '0',
1312 (beacon->capability & (1<<0x4)) ? '1' : '0',
1313 (beacon->capability & (1<<0x3)) ? '1' : '0',
1314 (beacon->capability & (1<<0x2)) ? '1' : '0',
1315 (beacon->capability & (1<<0x1)) ? '1' : '0',
1316 (beacon->capability & (1<<0x0)) ? '1' : '0');
1317
1318 if (ieee80211_network_init(ieee, beacon, &network, stats)) {
1319 IEEE80211_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n",
1320 escape_essid(info_element->data,
1321 info_element->len),
1322 beacon->header.addr3,
1323 WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
1324 IEEE80211_STYPE_PROBE_RESP ?
1325 "PROBE RESPONSE" : "BEACON");
1326 return;
1327 }
1328
1329 // For the Asus EeePC request:
1330 // (1) if the wireless adapter receives any 802.11d country code in the AP beacon,
1331 // it should follow that country code.
1332 // (2) If there is no country code in the beacon,
1333 // then the wireless adapter should do an active scan on ch1~11 and
1334 // a passive scan on ch12~14.
1335 if (ieee->bGlobalDomain) {
1336 if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP) {
1337 // Case 1: Country code
1338 if (IS_COUNTRY_IE_VALID(ieee)) {
1339 if (!IsLegalChannel(ieee, network.channel)) {
1340 printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network.channel);
1341 return;
1342 }
1343 }
1344 // Case 2: No country code.
1345 else {
1346 // Filter over channel ch12~14
1347 if (network.channel > 11) {
1348 printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network.channel);
1349 return;
1350 }
1351 }
1352 } else {
1353 // Case 1: Country code
1354 if (IS_COUNTRY_IE_VALID(ieee)) {
1355 if (!IsLegalChannel(ieee, network.channel)) {
1356 printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n", network.channel);
1357 return;
1358 }
1359 }
1360 // Case 2: No country code.
1361 else {
1362 // Filter over channel ch12~14
1363 if (network.channel > 14) {
1364 printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n", network.channel);
1365 return;
1366 }
1367 }
1368 }
1369 }
1370 /* The network parsed correctly -- so now we scan our known networks
1371 * to see if we can find it in our list.
1372 *
1373 * NOTE: This search is definitely not optimized. Once it's doing
1374 * the "right thing" we'll optimize it for efficiency if
1375 * necessary */
1376
1377 /* Search for this entry in the list and update it if it is
1378 * already there. */
1379
1380 spin_lock_irqsave(&ieee->lock, flags);
1381
1382 if (is_same_network(&ieee->current_network, &network, ieee)) {
1383 wmm_info = ieee->current_network.wmm_info;
1384 //YJ,add,080819,for hidden ap
1385 if (is_beacon == 0)
1386 network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & ieee->current_network.flags);
1387 else if (ieee->state == IEEE80211_LINKED)
1388 ieee->NumRxBcnInPeriod++;
1389 //YJ,add,080819,for hidden ap,end
1390 //printk("====>network.ssid=%s cur_ssid=%s\n", network.ssid, ieee->current_network.ssid);
1391 update_network(&ieee->current_network, &network);
1392 }
1393
1394 list_for_each_entry(target, &ieee->network_list, list) {
1395 if (is_same_network(target, &network, ieee))
1396 break;
1397 if ((oldest == NULL) ||
1398 (target->last_scanned < oldest->last_scanned))
1399 oldest = target;
1400 }
1401
1402 /* If we didn't find a match, then get a new network slot to initialize
1403 * with this beacon's information */
1404 if (&target->list == &ieee->network_list) {
1405 if (list_empty(&ieee->network_free_list)) {
1406 /* If there are no more slots, expire the oldest */
1407 list_del(&oldest->list);
1408 target = oldest;
1409 IEEE80211_DEBUG_SCAN("Expired '%s' (%pM) from "
1410 "network list.\n",
1411 escape_essid(target->ssid,
1412 target->ssid_len),
1413 target->bssid);
1414 } else {
1415 /* Otherwise just pull from the free list */
1416 target = list_entry(ieee->network_free_list.next,
1417 struct ieee80211_network, list);
1418 list_del(ieee->network_free_list.next);
1419 }
1420
1421
1422#ifdef CONFIG_IEEE80211_DEBUG
1423 IEEE80211_DEBUG_SCAN("Adding '%s' (%pM) via %s.\n",
1424 escape_essid(network.ssid,
1425 network.ssid_len),
1426 network.bssid,
1427 WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
1428 IEEE80211_STYPE_PROBE_RESP ?
1429 "PROBE RESPONSE" : "BEACON");
1430#endif
1431
1432 memcpy(target, &network, sizeof(*target));
1433 list_add_tail(&target->list, &ieee->network_list);
1434 } else {
1435 IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n",
1436 escape_essid(target->ssid,
1437 target->ssid_len),
1438 target->bssid,
1439 WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
1440 IEEE80211_STYPE_PROBE_RESP ?
1441 "PROBE RESPONSE" : "BEACON");
1442
1443 /* we have an entry and we are going to update it. But this entry may
1444 * already be expired. In this case we do the same as if we had
1445 * found a new net and call the new_net handler
1446 */
1447 renew = !time_after(target->last_scanned + ieee->scan_age, jiffies);
1448 //YJ,add,080819,for hidden ap
1449 if (is_beacon == 0)
1450 network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & target->flags);
1451 //if(strncmp(network.ssid, "linksys-c",9) == 0)
1452 // printk("====>2 network.ssid=%s FLAG=%d target.ssid=%s FLAG=%d\n", network.ssid, network.flags, target->ssid, target->flags);
1453 if (((network.flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \
1454 && (((network.ssid_len > 0) && (strncmp(target->ssid, network.ssid, network.ssid_len)))\
1455 || ((ieee->current_network.ssid_len == network.ssid_len) && (strncmp(ieee->current_network.ssid, network.ssid, network.ssid_len) == 0) && (ieee->state == IEEE80211_NOLINK))))
1456 renew = 1;
1457 //YJ,add,080819,for hidden ap,end
1458 update_network(target, &network);
1459 }
1460
1461 spin_unlock_irqrestore(&ieee->lock, flags);
1462}
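ieee80211_process_probe_response() above manages a fixed pool of network slots: it updates a matching entry in place, otherwise takes a slot from the free list, and only when that is empty expires the entry with the oldest last_scanned stamp. A compact standalone sketch of that selection policy over plain arrays; the names and pool size are illustrative, and the kernel lists and locking are left out.

#include <stdio.h>
#include <string.h>

#define SLOTS 4

struct net_slot {
	char bssid[18];			/* textual BSSID, for brevity */
	unsigned long last_scanned;	/* stands in for jiffies */
	int used;
};

/* Return the slot to (re)use for 'bssid': an existing match, a free
 * slot, or the least recently scanned entry. */
static int pick_slot(struct net_slot *tab, const char *bssid)
{
	int i, oldest = -1;

	for (i = 0; i < SLOTS; i++)
		if (tab[i].used && !strcmp(tab[i].bssid, bssid))
			return i;			/* update in place */

	for (i = 0; i < SLOTS; i++)
		if (!tab[i].used)
			return i;			/* free slot */

	for (i = 0; i < SLOTS; i++)
		if (oldest < 0 || tab[i].last_scanned < tab[oldest].last_scanned)
			oldest = i;			/* expire the oldest */
	return oldest;
}

int main(void)
{
	struct net_slot tab[SLOTS] = { 0 };
	const char *seen[] = { "aa", "bb", "cc", "dd", "bb", "ee" };
	unsigned long now = 0;

	for (unsigned i = 0; i < sizeof(seen) / sizeof(seen[0]); i++) {
		int s = pick_slot(tab, seen[i]);

		snprintf(tab[s].bssid, sizeof(tab[s].bssid), "%s", seen[i]);
		tab[s].last_scanned = ++now;
		tab[s].used = 1;
		printf("%s -> slot %d\n", seen[i], s);
	}
	return 0;
}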
1463
1464void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1465 struct ieee80211_hdr_4addr *header,
1466 struct ieee80211_rx_stats *stats)
1467{
1468 switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
1469
1470 case IEEE80211_STYPE_BEACON:
1471 IEEE80211_DEBUG_MGMT("received BEACON (%d)\n",
1472 WLAN_FC_GET_STYPE(header->frame_ctl));
1473 IEEE80211_DEBUG_SCAN("Beacon\n");
1474 ieee80211_process_probe_response(
1475 ieee, (struct ieee80211_probe_response *)header, stats);
1476 break;
1477
1478 case IEEE80211_STYPE_PROBE_RESP:
1479 IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
1480 WLAN_FC_GET_STYPE(header->frame_ctl));
1481 IEEE80211_DEBUG_SCAN("Probe response\n");
1482 ieee80211_process_probe_response(
1483 ieee, (struct ieee80211_probe_response *)header, stats);
1484 break;
1485 }
1486}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
deleted file mode 100644
index 03eb164798cd..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ /dev/null
@@ -1,2711 +0,0 @@
1/* IEEE 802.11 SoftMAC layer
2 * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
3 *
4 * Mostly extracted from the rtl8180-sa2400 driver for the
5 * in-kernel generic ieee802.11 stack.
6 *
7 * Few lines might be stolen from other part of the ieee80211
8 * stack. Copyright who own it's copyright
9 *
10 * WPA code stolen from the ipw2200 driver.
11 * Copyright who own it's copyright.
12 *
13 * released under the GPL
14 */
15
16#include "ieee80211.h"
17
18#include <linux/random.h>
19#include <linux/delay.h>
20#include <linux/slab.h>
21#include <linux/interrupt.h>
22#include <linux/uaccess.h>
23#include <linux/etherdevice.h>
24
25#include "dot11d.h"
26
27short ieee80211_is_54g(const struct ieee80211_network *net)
28{
29 return (net->rates_ex_len > 0) || (net->rates_len > 4);
30}
31
32short ieee80211_is_shortslot(const struct ieee80211_network *net)
33{
34 return net->capability & WLAN_CAPABILITY_SHORT_SLOT;
35}
36
37/* returns the total length needed for placing the RATE MFIE
38 * tag and the EXTENDED RATE MFIE tag if needed.
39 * It includes two bytes per tag for the tag itself and its len
40 */
41static unsigned int ieee80211_MFIE_rate_len(struct ieee80211_device *ieee)
42{
43 unsigned int rate_len = 0;
44
45 if (ieee->modulation & IEEE80211_CCK_MODULATION)
46 rate_len = IEEE80211_CCK_RATE_LEN + 2;
47
48 if (ieee->modulation & IEEE80211_OFDM_MODULATION)
49
50 rate_len += IEEE80211_OFDM_RATE_LEN + 2;
51
52 return rate_len;
53}
54
55/* place the MFIE rate tag in the memory the (double) pointer points to.
56 * Then update the pointer so that it points after the newly added MFIE tag.
57 */
58static void ieee80211_MFIE_Brate(struct ieee80211_device *ieee, u8 **tag_p)
59{
60 u8 *tag = *tag_p;
61
62 if (ieee->modulation & IEEE80211_CCK_MODULATION) {
63 *tag++ = MFIE_TYPE_RATES;
64 *tag++ = 4;
65 *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
66 *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
67 *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
68 *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
69 }
70
71 /* We may add an option for custom rates that specific HW might support */
72 *tag_p = tag;
73}
74
75static void ieee80211_MFIE_Grate(struct ieee80211_device *ieee, u8 **tag_p)
76{
77 u8 *tag = *tag_p;
78
79 if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
80 *tag++ = MFIE_TYPE_RATES_EX;
81 *tag++ = 8;
82 *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
83 *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
84 *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
85 *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
86 *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
87 *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
88 *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
89 *tag++ = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
90
91 }
92 /* We may add an option for custom rates that specific HW might support */
93 *tag_p = tag;
94}
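In the supported-rates tags built above, each octet encodes a rate in 0.5 Mbit/s units, with bit 7 (IEEE80211_BASIC_RATE_MASK) marking a basic rate; masking with 0x7f recovers the rate value, which is how the RX path earlier derives the highest operational rate. A small sketch of that encoding; the sample rate set is illustrative.

#include <stdio.h>
#include <stdint.h>

#define BASIC_RATE_MASK 0x80	/* stands in for IEEE80211_BASIC_RATE_MASK */

/* Decode one supported-rates octet: the value is in 0.5 Mbit/s units,
 * bit 7 flags a basic (mandatory) rate. */
static void decode_rate(uint8_t r)
{
	unsigned int half_mbps = r & 0x7f;

	printf("0x%02x -> %u.%u Mbit/s%s\n", r,
	       half_mbps / 2, (half_mbps % 2) ? 5 : 0,
	       (r & BASIC_RATE_MASK) ? " (basic)" : "");
}

int main(void)
{
	/* 1, 2, 5.5, 11 Mbit/s as basic CCK rates; 54 Mbit/s OFDM as optional */
	uint8_t rates[] = { 0x82, 0x84, 0x8b, 0x96, 0x6c };

	for (unsigned i = 0; i < sizeof(rates); i++)
		decode_rate(rates[i]);
	return 0;
}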
95
96static void ieee80211_WMM_Info(struct ieee80211_device *ieee, u8 **tag_p)
97{
98 u8 *tag = *tag_p;
99
100 *tag++ = MFIE_TYPE_GENERIC; /* 0 */
101 *tag++ = 7;
102 *tag++ = 0x00;
103 *tag++ = 0x50;
104 *tag++ = 0xf2;
105 *tag++ = 0x02; /* 5 */
106 *tag++ = 0x00;
107 *tag++ = 0x01;
108#ifdef SUPPORT_USPD
109 if (ieee->current_network.wmm_info & 0x80)
110 *tag++ = 0x0f|MAX_SP_Len;
111 else
112 *tag++ = MAX_SP_Len;
113#else
114 *tag++ = MAX_SP_Len;
115#endif
116 *tag_p = tag;
117}
118
119static void ieee80211_TURBO_Info(struct ieee80211_device *ieee, u8 **tag_p)
120{
121 u8 *tag = *tag_p;
122 *tag++ = MFIE_TYPE_GENERIC; /* 0 */
123 *tag++ = 7;
124 *tag++ = 0x00;
125 *tag++ = 0xe0;
126 *tag++ = 0x4c;
127 *tag++ = 0x01; /* 5 */
128 *tag++ = 0x02;
129 *tag++ = 0x11;
130 *tag++ = 0x00;
131 *tag_p = tag;
132 printk(KERN_ALERT "This is enable turbo mode IE process\n");
133}
134
135static void enqueue_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb)
136{
137 int nh;
138 nh = (ieee->mgmt_queue_head + 1) % MGMT_QUEUE_NUM;
139
140 ieee->mgmt_queue_head = nh;
141 ieee->mgmt_queue_ring[nh] = skb;
142}
143
144static struct sk_buff *dequeue_mgmt(struct ieee80211_device *ieee)
145{
146 struct sk_buff *ret;
147
148 if (ieee->mgmt_queue_tail == ieee->mgmt_queue_head)
149 return NULL;
150
151 ret = ieee->mgmt_queue_ring[ieee->mgmt_queue_tail];
152
153 ieee->mgmt_queue_tail =
154 (ieee->mgmt_queue_tail + 1) % MGMT_QUEUE_NUM;
155
156 return ret;
157}
158
159static void init_mgmt_queue(struct ieee80211_device *ieee)
160{
161 ieee->mgmt_queue_tail = ieee->mgmt_queue_head = 0;
162}
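The management queue above is a small ring buffer indexed modulo MGMT_QUEUE_NUM, with head == tail meaning empty. A standalone sketch of the same index arithmetic with ints instead of skbs; the queue size is illustrative, an explicit full check is added for the demo, and the dequeue side is written advance-then-read so the sketch is self-consistent.

#include <stdio.h>

#define QUEUE_NUM 5	/* illustrative size, stands in for MGMT_QUEUE_NUM */

struct ring {
	int slot[QUEUE_NUM];
	int head, tail;		/* head == tail means empty */
};

/* Advance head and store, as enqueue_mgmt() above does; the "full"
 * check is an addition for this standalone demo. */
static int enqueue(struct ring *r, int v)
{
	int nh = (r->head + 1) % QUEUE_NUM;

	if (nh == r->tail)
		return -1;	/* full */
	r->head = nh;
	r->slot[nh] = v;
	return 0;
}

/* Return the next element or -1 when the ring is empty. */
static int dequeue(struct ring *r)
{
	if (r->tail == r->head)
		return -1;	/* empty */
	r->tail = (r->tail + 1) % QUEUE_NUM;
	return r->slot[r->tail];
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0 };

	for (int i = 1; i <= 3; i++)
		enqueue(&r, i);
	for (int i = 0; i < 4; i++)
		printf("dequeue -> %d\n", dequeue(&r));
	return 0;
}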
163
164void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl);
165
166inline void softmac_mgmt_xmit(struct sk_buff *skb,
167 struct ieee80211_device *ieee)
168{
169 unsigned long flags;
170 short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
171 struct ieee80211_hdr_3addr *header =
172 (struct ieee80211_hdr_3addr *) skb->data;
173
174 spin_lock_irqsave(&ieee->lock, flags);
175
176 /* called with 2nd param 0, no mgmt lock required */
177 ieee80211_sta_wakeup(ieee, 0);
178
179 if (single) {
180 if (ieee->queue_stop) {
181 enqueue_mgmt(ieee, skb);
182 } else {
183 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
184
185 if (ieee->seq_ctrl[0] == 0xFFF)
186 ieee->seq_ctrl[0] = 0;
187 else
188 ieee->seq_ctrl[0]++;
189
190 /* avoid watchdog triggers */
191 ieee->dev->trans_start = jiffies;
192 ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
193 }
194
195 spin_unlock_irqrestore(&ieee->lock, flags);
196 } else {
197 spin_unlock_irqrestore(&ieee->lock, flags);
198 spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);
199
200 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
201
202 if (ieee->seq_ctrl[0] == 0xFFF)
203 ieee->seq_ctrl[0] = 0;
204 else
205 ieee->seq_ctrl[0]++;
206
207 /* avoid watchdog triggers */
208 ieee->dev->trans_start = jiffies;
209 ieee->softmac_hard_start_xmit(skb, ieee->dev);
210
211 spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags);
212 }
213}
214
215inline void softmac_ps_mgmt_xmit(struct sk_buff *skb,
216 struct ieee80211_device *ieee)
217{
218 short single = ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE;
219 struct ieee80211_hdr_3addr *header =
220 (struct ieee80211_hdr_3addr *) skb->data;
221
222 if (single) {
223 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
224
225 if (ieee->seq_ctrl[0] == 0xFFF)
226 ieee->seq_ctrl[0] = 0;
227 else
228 ieee->seq_ctrl[0]++;
229
230 /* avoid watchdog triggers */
231 ieee->dev->trans_start = jiffies;
232 ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
233 } else {
234 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
235
236 if (ieee->seq_ctrl[0] == 0xFFF)
237 ieee->seq_ctrl[0] = 0;
238 else
239 ieee->seq_ctrl[0]++;
240
241 /* avoid watchdog triggers */
242 ieee->dev->trans_start = jiffies;
243 ieee->softmac_hard_start_xmit(skb, ieee->dev);
244 }
245}
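Both transmit paths above build the 16-bit sequence-control field by shifting the 12-bit sequence number left by 4 (the low nibble being the fragment number) and wrapping after 0xFFF. A minimal sketch of that packing; the helper names are illustrative.

#include <stdio.h>
#include <stdint.h>

/* Pack a 12-bit sequence number and 4-bit fragment number into the
 * 802.11 sequence-control field, as the xmit paths above do with
 * seq_ctrl = seq << 4 (fragment number 0). */
static uint16_t pack_seq_ctrl(uint16_t seq, uint8_t frag)
{
	return (uint16_t)(((seq & 0x0fff) << 4) | (frag & 0x0f));
}

/* Advance the sequence number with the same wrap at 0xFFF. */
static uint16_t next_seq(uint16_t seq)
{
	return (seq == 0x0fff) ? 0 : seq + 1;
}

int main(void)
{
	uint16_t seq = 0x0ffe;

	for (int i = 0; i < 4; i++) {
		printf("seq 0x%03x -> seq_ctrl 0x%04x\n", seq, pack_seq_ctrl(seq, 0));
		seq = next_seq(seq);
	}
	return 0;
}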
246
247inline struct sk_buff *
248ieee80211_disassociate_skb(struct ieee80211_network *beacon,
249 struct ieee80211_device *ieee, u8 asRsn)
250{
251 struct sk_buff *skb;
252 struct ieee80211_disassoc_frame *disass;
253
254 skb = dev_alloc_skb(sizeof(struct ieee80211_disassoc_frame));
255 if (!skb)
256 return NULL;
257
258 disass = (struct ieee80211_disassoc_frame *) skb_put(skb, sizeof(struct ieee80211_disassoc_frame));
259 disass->header.frame_control = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
260 disass->header.duration_id = 0;
261
262 memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN);
263 memcpy(disass->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
264 memcpy(disass->header.addr3, beacon->bssid, ETH_ALEN);
265
266 disass->reasoncode = asRsn;
267 return skb;
268}
269
270void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta, u8 asRsn)
271{
272 struct ieee80211_network *beacon = &ieee->current_network;
273 struct sk_buff *skb;
274 skb = ieee80211_disassociate_skb(beacon, ieee, asRsn);
275 if (skb)
276 softmac_mgmt_xmit(skb, ieee);
277}
278
279inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
280{
281 unsigned int len, rate_len;
282 u8 *tag;
283 struct sk_buff *skb;
284 struct ieee80211_probe_request *req;
285
286 len = ieee->current_network.ssid_len;
287
288 rate_len = ieee80211_MFIE_rate_len(ieee);
289
290 skb = dev_alloc_skb(sizeof(struct ieee80211_probe_request) +
291 2 + len + rate_len);
292 if (!skb)
293 return NULL;
294
295 req = (struct ieee80211_probe_request *) skb_put(skb, sizeof(struct ieee80211_probe_request));
296 req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
297 req->header.duration_id = 0; /* FIXME: is this OK ? */
298
299 memset(req->header.addr1, 0xff, ETH_ALEN);
300 memcpy(req->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
301 memset(req->header.addr3, 0xff, ETH_ALEN);
302
303 tag = (u8 *) skb_put(skb, len + 2 + rate_len);
304
305 *tag++ = MFIE_TYPE_SSID;
306 *tag++ = len;
307 memcpy(tag, ieee->current_network.ssid, len);
308 tag += len;
309 ieee80211_MFIE_Brate(ieee, &tag);
310 ieee80211_MFIE_Grate(ieee, &tag);
311
312 return skb;
313}
314
315struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee);
316
317static void ieee80211_send_beacon(struct ieee80211_device *ieee)
318{
319 struct sk_buff *skb;
320
321 skb = ieee80211_get_beacon_(ieee);
322
323 if (skb) {
324 softmac_mgmt_xmit(skb, ieee);
325 ieee->softmac_stats.tx_beacons++;
326 dev_kfree_skb_any(skb);
327 }
328
329 ieee->beacon_timer.expires = jiffies +
330 (MSECS(ieee->current_network.beacon_interval - 5));
331
332 if (ieee->beacon_txing)
333 add_timer(&ieee->beacon_timer);
334}
335
336
337static void ieee80211_send_beacon_cb(unsigned long _ieee)
338{
339 struct ieee80211_device *ieee =
340 (struct ieee80211_device *) _ieee;
341 unsigned long flags;
342
343 spin_lock_irqsave(&ieee->beacon_lock, flags);
344 ieee80211_send_beacon(ieee);
345 spin_unlock_irqrestore(&ieee->beacon_lock, flags);
346}
347
348static void ieee80211_send_probe(struct ieee80211_device *ieee)
349{
350 struct sk_buff *skb;
351
352 skb = ieee80211_probe_req(ieee);
353 if (skb) {
354 softmac_mgmt_xmit(skb, ieee);
355 ieee->softmac_stats.tx_probe_rq++;
356 }
357}
358
359static void ieee80211_send_probe_requests(struct ieee80211_device *ieee)
360{
361 if (ieee->active_scan && (ieee->softmac_features & IEEE_SOFTMAC_PROBERQ)) {
362 ieee80211_send_probe(ieee);
363 ieee80211_send_probe(ieee);
364 }
365}
366
367/* this performs syncro scan blocking the caller until all channels
368 * in the allowed channel map have been checked.
369 */
370static void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
371{
372 short ch = 0;
373 u8 channel_map[MAX_CHANNEL_NUMBER+1];
374 memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
375 down(&ieee->scan_sem);
376
377 while (1) {
378 do {
379 ch++;
380 if (ch > MAX_CHANNEL_NUMBER)
381 goto out; /* scan completed */
382
383 } while (!channel_map[ch]);
384 /* this function can be called in two situations
385 * 1- We have switched to ad-hoc mode and we are
386 * performing a complete syncro scan before concluding
387 * there are no interesting cells and creating a
388 * new one. In this case the link state is
389 * IEEE80211_NOLINK until we find an interesting cell.
390 * If so, ieee80211_softmac_new_net, called from the RX path,
391 * will set the state to IEEE80211_LINKED, so we stop
392 * scanning.
393 * 2- We are linked and root runs iwlist scan.
394 * So we switch to IEEE80211_LINKED_SCANNING to remember
395 * that we are still logically linked (not interested in
396 * new network events, except for updating the net list),
397 * but we are temporarily 'unlinked' as the driver shall
398 * not filter RX frames and the channel is changing.
399 * So the only situation in which we are interested is to check
400 * whether the state becomes LINKED because of the #1 situation.
401 */
402
403 if (ieee->state == IEEE80211_LINKED)
404 goto out;
405
406 ieee->set_chan(ieee->dev, ch);
407 if (channel_map[ch] == 1)
408 ieee80211_send_probe_requests(ieee);
409
410 /* this prevents excessive waiting when we
411 * need a syncro scan to end quickly.
412 */
413 if (ieee->sync_scan_hurryup)
414 goto out;
415
416 msleep_interruptible_rtl(IEEE80211_SOFTMAC_SCAN_TIME);
417 }
418out:
419 ieee->sync_scan_hurryup = 0;
420 up(&ieee->scan_sem);
421 if (IS_DOT11D_ENABLE(ieee))
422 DOT11D_ScanComplete(ieee);
423}
424
425void ieee80211_softmac_ips_scan_syncro(struct ieee80211_device *ieee)
426{
427 int ch;
428 unsigned int watch_dog = 0;
429 u8 channel_map[MAX_CHANNEL_NUMBER+1];
430 memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
431 down(&ieee->scan_sem);
432 ch = ieee->current_network.channel;
433
434 while (1) {
435 /* this function can be called in two situations
436 * 1- We have switched to ad-hoc mode and we are
437 * performing a complete syncro scan before concluding
438 * there are no interesting cells and creating a
439 * new one. In this case the link state is
440 * IEEE80211_NOLINK until we find an interesting cell.
441 * If so, ieee80211_softmac_new_net, called from the RX path,
442 * will set the state to IEEE80211_LINKED, so we stop
443 * scanning.
444 * 2- We are linked and root runs iwlist scan.
445 * So we switch to IEEE80211_LINKED_SCANNING to remember
446 * that we are still logically linked (not interested in
447 * new network events, except for updating the net list),
448 * but we are temporarily 'unlinked' as the driver shall
449 * not filter RX frames and the channel is changing.
450 * So the only situation in which we are interested is to check
451 * whether the state becomes LINKED because of the #1 situation.
452 */
453 if (ieee->state == IEEE80211_LINKED)
454 goto out;
455
456 if (channel_map[ieee->current_network.channel] > 0)
457 ieee->set_chan(ieee->dev, ieee->current_network.channel);
458
459 if (channel_map[ieee->current_network.channel] == 1)
460 ieee80211_send_probe_requests(ieee);
461
462 msleep_interruptible_rtl(IEEE80211_SOFTMAC_SCAN_TIME);
463
464 do {
465 if (watch_dog++ >= MAX_CHANNEL_NUMBER)
466 goto out; /* scan completed */
467
468 ieee->current_network.channel = (ieee->current_network.channel + 1)%MAX_CHANNEL_NUMBER;
469 } while (!channel_map[ieee->current_network.channel]);
470 }
471out:
472 ieee->actscanning = false;
473 up(&ieee->scan_sem);
474 if (IS_DOT11D_ENABLE(ieee))
475 DOT11D_ScanComplete(ieee);
476}
477
478static void ieee80211_softmac_scan_wq(struct work_struct *work)
479{
480 struct delayed_work *dwork = to_delayed_work(work);
481 struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, softmac_scan_wq);
482 static short watchdog;
483 u8 channel_map[MAX_CHANNEL_NUMBER+1];
484 memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
485 down(&ieee->scan_sem);
486
487 do {
488 ieee->current_network.channel =
489 (ieee->current_network.channel + 1) % MAX_CHANNEL_NUMBER;
490 if (watchdog++ > MAX_CHANNEL_NUMBER)
491 goto out; /* no good chans */
492 } while (!channel_map[ieee->current_network.channel]);
493
494 if (ieee->scanning == 0) {
495 printk("error out, scanning = 0\n");
496 goto out;
497 }
498 ieee->set_chan(ieee->dev, ieee->current_network.channel);
499 if (channel_map[ieee->current_network.channel] == 1)
500 ieee80211_send_probe_requests(ieee);
501
502 queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
503 up(&ieee->scan_sem);
504 return;
505out:
506 ieee->actscanning = false;
507 watchdog = 0;
508 ieee->scanning = 0;
509 up(&ieee->scan_sem);
510
511 if (IS_DOT11D_ENABLE(ieee))
512 DOT11D_ScanComplete(ieee);
513 return;
514}
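All three scan paths above walk a channel_map[] copied from the dot11d info: 0 means the channel is unused, 1 means active scan (send probe requests), and other non-zero values mean passive scan (listen only). A standalone sketch of that walk; the map contents and channel count are illustrative.

#include <stdio.h>

#define MAX_CHANNEL 14	/* illustrative, stands in for MAX_CHANNEL_NUMBER */

int main(void)
{
	/* 0 = don't use, 1 = active scan, 2 = passive scan; channels 1-11
	 * active and 12-14 passive, as in the "global domain" case earlier. */
	int channel_map[MAX_CHANNEL + 1] = { 0 };
	int ch;

	for (ch = 1; ch <= 11; ch++)
		channel_map[ch] = 1;
	for (ch = 12; ch <= MAX_CHANNEL; ch++)
		channel_map[ch] = 2;

	ch = 0;
	while (1) {
		/* advance to the next enabled channel, stop when past the map */
		do {
			ch++;
			if (ch > MAX_CHANNEL)
				return 0;	/* scan completed */
		} while (!channel_map[ch]);

		/* switch channel here; probe only where active scan is allowed */
		printf("channel %2d: %s scan\n", ch,
		       channel_map[ch] == 1 ? "active" : "passive");
	}
}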
515
516static void ieee80211_beacons_start(struct ieee80211_device *ieee)
517{
518 unsigned long flags;
519
520 spin_lock_irqsave(&ieee->beacon_lock, flags);
521
522 ieee->beacon_txing = 1;
523 ieee80211_send_beacon(ieee);
524
525 spin_unlock_irqrestore(&ieee->beacon_lock, flags);
526}
527
528static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
529{
530 unsigned long flags;
531
532 spin_lock_irqsave(&ieee->beacon_lock, flags);
533
534 ieee->beacon_txing = 0;
535 del_timer_sync(&ieee->beacon_timer);
536
537 spin_unlock_irqrestore(&ieee->beacon_lock, flags);
538}
539
540void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
541{
542 if (ieee->stop_send_beacons)
543 ieee->stop_send_beacons(ieee->dev);
544 if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
545 ieee80211_beacons_stop(ieee);
546}
547
548void ieee80211_start_send_beacons(struct ieee80211_device *ieee)
549{
550 if (ieee->start_send_beacons)
551 ieee->start_send_beacons(ieee->dev);
552 if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
553 ieee80211_beacons_start(ieee);
554}
555
556static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
557{
558 down(&ieee->scan_sem);
559
560 if (ieee->scanning == 1) {
561 ieee->scanning = 0;
562 cancel_delayed_work(&ieee->softmac_scan_wq);
563 }
564
565 up(&ieee->scan_sem);
566}
567
568void ieee80211_stop_scan(struct ieee80211_device *ieee)
569{
570 if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
571 ieee80211_softmac_stop_scan(ieee);
572 else
573 ieee->stop_scan(ieee->dev);
574}
575
576/* called with ieee->lock held */
577void ieee80211_rtl_start_scan(struct ieee80211_device *ieee)
578{
579 if (IS_DOT11D_ENABLE(ieee)) {
580 if (IS_COUNTRY_IE_VALID(ieee))
581 RESET_CIE_WATCHDOG(ieee);
582 }
583
584 if (ieee->softmac_features & IEEE_SOFTMAC_SCAN) {
585 if (ieee->scanning == 0) {
586 ieee->scanning = 1;
587#if 1
588 queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, 0);
589#endif
590 }
591 } else
592 ieee->start_scan(ieee->dev);
593}
594
595/* called with wx_sem held */
596void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
597{
598 if (IS_DOT11D_ENABLE(ieee)) {
599 if (IS_COUNTRY_IE_VALID(ieee))
600 RESET_CIE_WATCHDOG(ieee);
601 }
602 ieee->sync_scan_hurryup = 0;
603
604 if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
605 ieee80211_softmac_scan_syncro(ieee);
606 else
607 ieee->scan_syncro(ieee->dev);
608}
609
610inline struct sk_buff *
611ieee80211_authentication_req(struct ieee80211_network *beacon,
612 struct ieee80211_device *ieee, int challengelen)
613{
614 struct sk_buff *skb;
615 struct ieee80211_authentication *auth;
616
617 skb = dev_alloc_skb(sizeof(struct ieee80211_authentication) + challengelen);
618
619 if (!skb)
620 return NULL;
621
622 auth = (struct ieee80211_authentication *)
623 skb_put(skb, sizeof(struct ieee80211_authentication));
624
625 auth->header.frame_ctl = IEEE80211_STYPE_AUTH;
626 if (challengelen)
627 auth->header.frame_ctl |= IEEE80211_FCTL_WEP;
628
629 auth->header.duration_id = 0x013a; /* FIXME */
630
631 memcpy(auth->header.addr1, beacon->bssid, ETH_ALEN);
632 memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
633 memcpy(auth->header.addr3, beacon->bssid, ETH_ALEN);
634
635 auth->algorithm = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
636
637 auth->transaction = cpu_to_le16(ieee->associate_seq);
638 ieee->associate_seq++;
639
640 auth->status = cpu_to_le16(WLAN_STATUS_SUCCESS);
641
642 return skb;
643}
644
645static struct sk_buff *ieee80211_probe_resp(struct ieee80211_device *ieee,
646 u8 *dest)
647{
648 u8 *tag;
649 int beacon_size;
650 struct ieee80211_probe_response *beacon_buf;
651 struct sk_buff *skb;
652 int encrypt;
653 int atim_len, erp_len;
654 struct ieee80211_crypt_data *crypt;
655
656 char *ssid = ieee->current_network.ssid;
657 int ssid_len = ieee->current_network.ssid_len;
658 int rate_len = ieee->current_network.rates_len+2;
659 int rate_ex_len = ieee->current_network.rates_ex_len;
660 int wpa_ie_len = ieee->wpa_ie_len;
661 if (rate_ex_len > 0)
662 rate_ex_len += 2;
663
664 if (ieee->current_network.capability & WLAN_CAPABILITY_IBSS)
665 atim_len = 4;
666 else
667 atim_len = 0;
668
669 if (ieee80211_is_54g(&ieee->current_network))
670 erp_len = 3;
671 else
672 erp_len = 0;
673
674 beacon_size = sizeof(struct ieee80211_probe_response)+
675 ssid_len
676 +3 /* channel */
677 +rate_len
678 +rate_ex_len
679 +atim_len
680 +wpa_ie_len
681 +erp_len;
682
683 skb = dev_alloc_skb(beacon_size);
684
685 if (!skb)
686 return NULL;
687
688 beacon_buf = (struct ieee80211_probe_response *) skb_put(skb, beacon_size);
689
690 memcpy(beacon_buf->header.addr1, dest, ETH_ALEN);
691 memcpy(beacon_buf->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
692 memcpy(beacon_buf->header.addr3, ieee->current_network.bssid, ETH_ALEN);
693
694 beacon_buf->header.duration_id = 0; /* FIXME */
695 beacon_buf->beacon_interval =
696 cpu_to_le16(ieee->current_network.beacon_interval);
697 beacon_buf->capability =
698 cpu_to_le16(ieee->current_network.capability & WLAN_CAPABILITY_IBSS);
699
700 if (ieee->short_slot && (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT))
701 beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
702
703 crypt = ieee->crypt[ieee->tx_keyidx];
704
705 encrypt = ieee->host_encrypt && crypt && crypt->ops &&
706 ((0 == strcmp(crypt->ops->name, "WEP")) || wpa_ie_len);
707
708 if (encrypt)
709 beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
710
711
712 beacon_buf->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_RESP);
713
714 beacon_buf->info_element.id = MFIE_TYPE_SSID;
715 beacon_buf->info_element.len = ssid_len;
716
717 tag = (u8 *) beacon_buf->info_element.data;
718
719 memcpy(tag, ssid, ssid_len);
720
721 tag += ssid_len;
722
723 *(tag++) = MFIE_TYPE_RATES;
724 *(tag++) = rate_len - 2;
725 memcpy(tag, ieee->current_network.rates, rate_len-2);
726 tag += rate_len - 2;
727
728 *(tag++) = MFIE_TYPE_DS_SET;
729 *(tag++) = 1;
730 *(tag++) = ieee->current_network.channel;
731
732 if (atim_len) {
733 *(tag++) = MFIE_TYPE_IBSS_SET;
734 *(tag++) = 2;
735 *((u16 *)(tag)) = cpu_to_le16(ieee->current_network.atim_window);
736 tag += 2;
737 }
738
739 if (erp_len) {
740 *(tag++) = MFIE_TYPE_ERP;
741 *(tag++) = 1;
742 *(tag++) = 0;
743 }
744
745 if (rate_ex_len) {
746 *(tag++) = MFIE_TYPE_RATES_EX;
747 *(tag++) = rate_ex_len-2;
748 memcpy(tag, ieee->current_network.rates_ex, rate_ex_len-2);
749 tag += rate_ex_len - 2;
750 }
751
752 if (wpa_ie_len) {
753 if (ieee->iw_mode == IW_MODE_ADHOC) {
754 /* as Windows will set the pairwise key the same as the group
755 * key, which is not allowed in Linux, set this to work
756 * around the IOT (interoperability) issue.
757 */
758 memcpy(&ieee->wpa_ie[14], &ieee->wpa_ie[8], 4);
759 }
760
761 memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
762 }
763 skb->dev = ieee->dev;
764 return skb;
765}
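ieee80211_probe_resp() above sizes the skb by summing the fixed response structure (which already contains the 2-byte SSID tag header) with each tagged element it appends; every further IE costs 2 bytes of ID+length plus its payload. A short worked computation of that layout; the structure size and the element lengths are illustrative.

#include <stdio.h>

/* Worked example of the beacon/probe-response size arithmetic above. */
int main(void)
{
	int fixed_hdr    = 38;		/* stands in for sizeof(struct ieee80211_probe_response),
					 * which already includes the 2-byte SSID tag header */
	int ssid_len     = 7;		/* e.g. a 7-character ESSID */
	int rates_len    = 4 + 2;	/* 4 CCK rates + 2-byte tag header */
	int rates_ex_len = 8 + 2;	/* 8 OFDM rates + 2-byte tag header */
	int ds_len       = 3;		/* DS parameter set: tag header + channel byte */
	int atim_len     = 4;		/* IBSS parameter set, only in ad-hoc mode */
	int erp_len      = 3;		/* ERP information, only for 54g networks */
	int wpa_ie_len   = 0;		/* no WPA IE in this example */

	int beacon_size = fixed_hdr + ssid_len + ds_len + rates_len +
			  rates_ex_len + atim_len + erp_len + wpa_ie_len;

	printf("probe response size: %d bytes\n", beacon_size);
	return 0;
}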
766
767static struct sk_buff *ieee80211_assoc_resp(struct ieee80211_device *ieee,
768 u8 *dest)
769{
770 struct sk_buff *skb;
771 u8 *tag;
772
773 struct ieee80211_crypt_data *crypt;
774 struct ieee80211_assoc_response_frame *assoc;
775 short encrypt;
776
777 unsigned int rate_len = ieee80211_MFIE_rate_len(ieee);
778 int len = sizeof(struct ieee80211_assoc_response_frame) + rate_len;
779
780 skb = dev_alloc_skb(len);
781
782 if (!skb)
783 return NULL;
784
785 assoc = (struct ieee80211_assoc_response_frame *)
786 skb_put(skb, sizeof(struct ieee80211_assoc_response_frame));
787
788 assoc->header.frame_control = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
789 memcpy(assoc->header.addr1, dest, ETH_ALEN);
790 memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
791 memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
792 assoc->capability = cpu_to_le16(ieee->iw_mode == IW_MODE_MASTER ?
793 WLAN_CAPABILITY_BSS : WLAN_CAPABILITY_IBSS);
794
795 if (ieee->short_slot)
796 assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
797
798 if (ieee->host_encrypt)
799 crypt = ieee->crypt[ieee->tx_keyidx];
800 else
801 crypt = NULL;
802
803 encrypt = (crypt && crypt->ops);
804
805 if (encrypt)
806 assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
807
808 assoc->status = 0;
809 assoc->aid = cpu_to_le16(ieee->assoc_id);
810 if (ieee->assoc_id == 0x2007)
811 ieee->assoc_id = 0;
812 else
813 ieee->assoc_id++;
814
815 tag = (u8 *) skb_put(skb, rate_len);
816
817 ieee80211_MFIE_Brate(ieee, &tag);
818 ieee80211_MFIE_Grate(ieee, &tag);
819
820 return skb;
821}
822
823static struct sk_buff *ieee80211_auth_resp(struct ieee80211_device *ieee,
824 int status, u8 *dest)
825{
826 struct sk_buff *skb;
827 struct ieee80211_authentication *auth;
828
829 skb = dev_alloc_skb(sizeof(struct ieee80211_authentication)+1);
830
831 if (!skb)
832 return NULL;
833
834 skb->len = sizeof(struct ieee80211_authentication);
835
836 auth = (struct ieee80211_authentication *)skb->data;
837
838 auth->status = cpu_to_le16(status);
839 auth->transaction = cpu_to_le16(2);
840 auth->algorithm = cpu_to_le16(WLAN_AUTH_OPEN);
841
842 memcpy(auth->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
843 memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
844 memcpy(auth->header.addr1, dest, ETH_ALEN);
845 auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH);
846 return skb;
847}
848
849static struct sk_buff *ieee80211_null_func(struct ieee80211_device *ieee, short pwr)
850{
851 struct sk_buff *skb;
852 struct ieee80211_hdr_3addr *hdr;
853
854 skb = dev_alloc_skb(sizeof(struct ieee80211_hdr_3addr));
855
856 if (!skb)
857 return NULL;
858
859 hdr = (struct ieee80211_hdr_3addr *)skb_put(skb, sizeof(struct ieee80211_hdr_3addr));
860
861 memcpy(hdr->addr1, ieee->current_network.bssid, ETH_ALEN);
862 memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
863 memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN);
864
865 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
866 IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS |
867 (pwr ? IEEE80211_FCTL_PM:0));
868
869 return skb;
870}
871
872static void ieee80211_resp_to_assoc_rq(struct ieee80211_device *ieee, u8 *dest)
873{
874 struct sk_buff *buf = ieee80211_assoc_resp(ieee, dest);
875
876 if (buf) {
877 softmac_mgmt_xmit(buf, ieee);
878 dev_kfree_skb_any(buf);
879 }
880}
881
882static void ieee80211_resp_to_auth(struct ieee80211_device *ieee, int s, u8 *dest)
883{
884 struct sk_buff *buf = ieee80211_auth_resp(ieee, s, dest);
885
886 if (buf) {
887 softmac_mgmt_xmit(buf, ieee);
888 dev_kfree_skb_any(buf);
889 }
890}
891
892static void ieee80211_resp_to_probe(struct ieee80211_device *ieee, u8 *dest)
893{
894 struct sk_buff *buf = ieee80211_probe_resp(ieee, dest);
895
896 if (buf) {
897 softmac_mgmt_xmit(buf, ieee);
898 dev_kfree_skb_any(buf);
899 }
900}
901
902inline struct sk_buff *
903ieee80211_association_req(struct ieee80211_network *beacon,
904 struct ieee80211_device *ieee)
905{
906 struct sk_buff *skb;
907
908 struct ieee80211_assoc_request_frame *hdr;
909 u8 *tag;
910 unsigned int wpa_len = beacon->wpa_ie_len;
911#if 1
912 /* for testing purposes */
913 unsigned int rsn_len = beacon->rsn_ie_len;
914#endif
915 unsigned int rate_len = ieee80211_MFIE_rate_len(ieee);
916 unsigned int wmm_info_len = beacon->QoS_Enable?9:0;
917 unsigned int turbo_info_len = beacon->Turbo_Enable?9:0;
918
919 u8 encry_proto = ieee->wpax_type_notify & 0xff;
920
921 int len = 0;
922
923 /* [0] Notify type of encryption: WPA/WPA2
924 * [1] pairwise type
925 * [2] authentication type
926 */
927 if (ieee->wpax_type_set) {
928 if (IEEE_PROTO_WPA == encry_proto) {
929 rsn_len = 0;
930 } else if (IEEE_PROTO_RSN == encry_proto) {
931 wpa_len = 0;
932 }
933 }
934 len = sizeof(struct ieee80211_assoc_request_frame)
935 + beacon->ssid_len /* essid tagged val */
936 + rate_len /* rates tagged val */
937 + wpa_len
938 + rsn_len
939 + wmm_info_len
940 + turbo_info_len;
941
942 skb = dev_alloc_skb(len);
943
944 if (!skb)
945 return NULL;
946
947 hdr = (struct ieee80211_assoc_request_frame *)
948 skb_put(skb, sizeof(struct ieee80211_assoc_request_frame));
949
950 hdr->header.frame_control = IEEE80211_STYPE_ASSOC_REQ;
951 hdr->header.duration_id = 37; /* FIXME */
952 memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
953 memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
954 memcpy(hdr->header.addr3, beacon->bssid, ETH_ALEN);
955 memcpy(ieee->ap_mac_addr, beacon->bssid, ETH_ALEN); /* for HW security */
956
957 hdr->capability = cpu_to_le16(WLAN_CAPABILITY_BSS);
958 if (beacon->capability & WLAN_CAPABILITY_PRIVACY)
959 hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
960 if (beacon->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
961 hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
962
963 if (ieee->short_slot)
964 hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT);
965
966 hdr->listen_interval = 0xa; /* FIXME */
967
968 hdr->info_element.id = MFIE_TYPE_SSID;
969
970 hdr->info_element.len = beacon->ssid_len;
971 tag = skb_put(skb, beacon->ssid_len);
972 memcpy(tag, beacon->ssid, beacon->ssid_len);
973
974 tag = skb_put(skb, rate_len);
975
976 ieee80211_MFIE_Brate(ieee, &tag);
977 ieee80211_MFIE_Grate(ieee, &tag);
978
979 /* add the rsn==0 condition for the AP's mixed security mode (WPA+WPA2);
980 * choose AES encryption as the default algorithm while using mixed mode.
981 */
982
983 tag = skb_put(skb, ieee->wpa_ie_len);
984 memcpy(tag, ieee->wpa_ie, ieee->wpa_ie_len);
985
986 tag = skb_put(skb, wmm_info_len);
987 if (wmm_info_len)
988 ieee80211_WMM_Info(ieee, &tag);
989
990 tag = skb_put(skb, turbo_info_len);
991 if (turbo_info_len)
992 ieee80211_TURBO_Info(ieee, &tag);
993
994 return skb;
995}
996
997void ieee80211_associate_abort(struct ieee80211_device *ieee)
998{
999 unsigned long flags;
1000 spin_lock_irqsave(&ieee->lock, flags);
1001
1002 ieee->associate_seq++;
1003
1004 /* don't scan, and avoid having the RX path possibly
1005 * try to associate again. Do not even react to AUTH or
1006 * ASSOC responses. Just wait for the retry wq to be scheduled.
1007 * There we will check whether there are good nets to associate
1008 * with, so we either retry or just get back to NO_LINK and scanning.
1009 */
1010 if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING) {
1011 IEEE80211_DEBUG_MGMT("Authentication failed\n");
1012 ieee->softmac_stats.no_auth_rs++;
1013 } else {
1014 IEEE80211_DEBUG_MGMT("Association failed\n");
1015 ieee->softmac_stats.no_ass_rs++;
1016 }
1017
1018 ieee->state = IEEE80211_ASSOCIATING_RETRY;
1019
1020 queue_delayed_work(ieee->wq, &ieee->associate_retry_wq, IEEE80211_SOFTMAC_ASSOC_RETRY_TIME);
1021
1022 spin_unlock_irqrestore(&ieee->lock, flags);
1023}
1024
1025static void ieee80211_associate_abort_cb(unsigned long dev)
1026{
1027 ieee80211_associate_abort((struct ieee80211_device *) dev);
1028}
1029
1030static void ieee80211_associate_step1(struct ieee80211_device *ieee)
1031{
1032 struct ieee80211_network *beacon = &ieee->current_network;
1033 struct sk_buff *skb;
1034
1035 IEEE80211_DEBUG_MGMT("Stopping scan\n");
1036 ieee->softmac_stats.tx_auth_rq++;
1037 skb = ieee80211_authentication_req(beacon, ieee, 0);
1038 if (!skb) {
1039 ieee80211_associate_abort(ieee);
1040 } else {
1041 ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATING;
1042 IEEE80211_DEBUG_MGMT("Sending authentication request\n");
1043 softmac_mgmt_xmit(skb, ieee);
1044 /* BUG_ON triggers when you add_timer twice; using mod_timer may
1045 * be better.
1046 */
1047 if (!timer_pending(&ieee->associate_timer)) {
1048 ieee->associate_timer.expires = jiffies + (HZ / 2);
1049 add_timer(&ieee->associate_timer);
1050 }
1051 /* If dev_kfree_skb_any is called here, a warning will occur:
1052 * KERNEL: assertion (!atomic_read(&skb->users)) failed at
1053 * net/core/dev.c (1708)
1054 */
1055 }
1056}
1057
1058static void ieee80211_rtl_auth_challenge(struct ieee80211_device *ieee, u8 *challenge,
1059 int chlen)
1060{
1061 u8 *c;
1062 struct sk_buff *skb;
1063 struct ieee80211_network *beacon = &ieee->current_network;
1064 del_timer_sync(&ieee->associate_timer);
1065 ieee->associate_seq++;
1066 ieee->softmac_stats.tx_auth_rq++;
1067
1068 skb = ieee80211_authentication_req(beacon, ieee, chlen+2);
1069 if (!skb)
1070 ieee80211_associate_abort(ieee);
1071 else {
1072 c = skb_put(skb, chlen+2);
1073 *(c++) = MFIE_TYPE_CHALLENGE;
1074 *(c++) = chlen;
1075 memcpy(c, challenge, chlen);
1076
1077 IEEE80211_DEBUG_MGMT("Sending authentication challenge response\n");
1078
1079 ieee80211_encrypt_fragment(ieee, skb, sizeof(struct ieee80211_hdr_3addr));
1080
1081 softmac_mgmt_xmit(skb, ieee);
1082 if (!timer_pending(&ieee->associate_timer)) {
1083 ieee->associate_timer.expires = jiffies + (HZ / 2);
1084 add_timer(&ieee->associate_timer);
1085 }
1086 dev_kfree_skb_any(skb);
1087 }
1088 kfree(challenge);
1089}
1090
1091static void ieee80211_associate_step2(struct ieee80211_device *ieee)
1092{
1093 struct sk_buff *skb;
1094 struct ieee80211_network *beacon = &ieee->current_network;
1095
1096 del_timer_sync(&ieee->associate_timer);
1097
1098 IEEE80211_DEBUG_MGMT("Sending association request\n");
1099 ieee->softmac_stats.tx_ass_rq++;
1100 skb = ieee80211_association_req(beacon, ieee);
1101 if (!skb)
1102 ieee80211_associate_abort(ieee);
1103 else {
1104 softmac_mgmt_xmit(skb, ieee);
1105 if (!timer_pending(&ieee->associate_timer)) {
1106 ieee->associate_timer.expires = jiffies + (HZ / 2);
1107 add_timer(&ieee->associate_timer);
1108 }
1109 }
1110}
1111
1112static void ieee80211_associate_complete_wq(struct work_struct *work)
1113{
1114 struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_complete_wq);
1115
1116 printk(KERN_INFO "Associated successfully\n");
1117 if (ieee80211_is_54g(&ieee->current_network) &&
1118 (ieee->modulation & IEEE80211_OFDM_MODULATION)) {
1119 ieee->rate = 540;
1120 printk(KERN_INFO"Using G rates\n");
1121 } else {
1122 ieee->rate = 110;
1123 printk(KERN_INFO"Using B rates\n");
1124 }
1125 ieee->link_change(ieee->dev);
1126 notify_wx_assoc_event(ieee);
1127 if (ieee->data_hard_resume)
1128 ieee->data_hard_resume(ieee->dev);
1129 netif_carrier_on(ieee->dev);
1130}
1131
1132static void ieee80211_associate_complete(struct ieee80211_device *ieee)
1133{
1134 del_timer_sync(&ieee->associate_timer);
1135
1136 ieee->state = IEEE80211_LINKED;
1137 IEEE80211_DEBUG_MGMT("Successfully associated\n");
1138
1139 queue_work(ieee->wq, &ieee->associate_complete_wq);
1140}
1141
1142static void ieee80211_associate_procedure_wq(struct work_struct *work)
1143{
1144 struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);
1145
1146 ieee->sync_scan_hurryup = 1;
1147 down(&ieee->wx_sem);
1148
1149 if (ieee->data_hard_stop)
1150 ieee->data_hard_stop(ieee->dev);
1151
1152 ieee80211_stop_scan(ieee);
1153 ieee->set_chan(ieee->dev, ieee->current_network.channel);
1154
1155 ieee->associate_seq = 1;
1156 ieee80211_associate_step1(ieee);
1157
1158 up(&ieee->wx_sem);
1159}
1160
1161inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
1162 struct ieee80211_network *net)
1163{
1164 u8 tmp_ssid[IW_ESSID_MAX_SIZE+1];
1165 int tmp_ssid_len = 0;
1166
1167 short apset, ssidset, ssidbroad, apmatch, ssidmatch;
1168
1169 /* we are interested in a new net only if we are not associated
1170 * and we are not associating / authenticating
1171 */
1172 if (ieee->state != IEEE80211_NOLINK)
1173 return;
1174
1175 if ((ieee->iw_mode == IW_MODE_INFRA) && !(net->capability & WLAN_CAPABILITY_BSS))
1176 return;
1177
1178 if ((ieee->iw_mode == IW_MODE_ADHOC) && !(net->capability & WLAN_CAPABILITY_IBSS))
1179 return;
1180
1181 if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
1182 /* if the user specified the AP MAC, we also need the essid.
1183 * This can be obtained from beacons or, if the network does not
1184 * broadcast it, it can be set manually.
1185 */
1186 apset = ieee->wap_set;
1187 ssidset = ieee->ssid_set;
1188 ssidbroad = !(net->ssid_len == 0 || net->ssid[0] == '\0');
1189 apmatch = (memcmp(ieee->current_network.bssid, net->bssid, ETH_ALEN) == 0);
1190
1191 if (ieee->current_network.ssid_len != net->ssid_len)
1192 ssidmatch = 0;
1193 else
1194 ssidmatch = (0 == strncmp(ieee->current_network.ssid, net->ssid, net->ssid_len));
1195
1196 /* if the user set the AP, check whether it matches:
1197 * if the network does not broadcast its essid, we accept the
1198 * user-supplied essid;
1199 * if the network does broadcast and the user did not set an essid,
1200 * it is OK;
1201 * if the network does broadcast and the user did set an essid,
1202 * check whether the essids match
1203 * (apset && apmatch && ((ssidset && ssidbroad && ssidmatch) ||
1204 * (ssidbroad && !ssidset) || (!ssidbroad && ssidset))) ||
1205 * if the AP is not set, check that the user set the essid, the
1206 * network does broadcast it, and the two essids match
1207 * (!apset && ssidset && ssidbroad && ssidmatch)
1208 */
1209 if ((apset && apmatch && ((ssidset && ssidbroad && ssidmatch) ||
1210 (ssidbroad && !ssidset) || (!ssidbroad && ssidset))) ||
1211 (!apset && ssidset && ssidbroad && ssidmatch)) {
1212 /* if the essid is hidden replace it with the
1213 * essid provided by the user.
1214 */
1215 if (!ssidbroad) {
1216 strncpy(tmp_ssid, ieee->current_network.ssid, IW_ESSID_MAX_SIZE);
1217 tmp_ssid_len = ieee->current_network.ssid_len;
1218 }
1219 memcpy(&ieee->current_network, net, sizeof(struct ieee80211_network));
1220
1221 if (!ssidbroad) {
1222 strncpy(ieee->current_network.ssid, tmp_ssid, IW_ESSID_MAX_SIZE);
1223 ieee->current_network.ssid_len = tmp_ssid_len;
1224 }
1225 printk(KERN_INFO"Linking with %s: channel is %d\n", ieee->current_network.ssid, ieee->current_network.channel);
1226
1227 if (ieee->iw_mode == IW_MODE_INFRA) {
1228 ieee->state = IEEE80211_ASSOCIATING;
1229 ieee->beinretry = false;
1230 queue_work(ieee->wq, &ieee->associate_procedure_wq);
1231 } else {
1232 if (ieee80211_is_54g(&ieee->current_network) &&
1233 (ieee->modulation & IEEE80211_OFDM_MODULATION)) {
1234 ieee->rate = 540;
1235 printk(KERN_INFO"Using G rates\n");
1236 } else {
1237 ieee->rate = 110;
1238 printk(KERN_INFO"Using B rates\n");
1239 }
1240 ieee->state = IEEE80211_LINKED;
1241 ieee->beinretry = false;
1242 }
1243 }
1244 }
1245}
1246
1247void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee)
1248{
1249 unsigned long flags;
1250 struct ieee80211_network *target;
1251
1252 spin_lock_irqsave(&ieee->lock, flags);
1253 list_for_each_entry(target, &ieee->network_list, list) {
1254 /* if the state becomes different from NOLINK it means
1255 * we have found what we were searching for
1256 */
1257 if (ieee->state != IEEE80211_NOLINK)
1258 break;
1259
1260 if (ieee->scan_age == 0 || time_after(target->last_scanned + ieee->scan_age, jiffies))
1261 ieee80211_softmac_new_net(ieee, target);
1262 }
1263 spin_unlock_irqrestore(&ieee->lock, flags);
1264}
1265
1266static inline u16 auth_parse(struct sk_buff *skb, u8 **challenge, int *chlen)
1267{
1268 struct ieee80211_authentication *a;
1269 u8 *t;
1270 if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
1271 IEEE80211_DEBUG_MGMT("invalid len in auth resp: %d\n", skb->len);
1272 return 0xcafe;
1273 }
1274 *challenge = NULL;
1275 a = (struct ieee80211_authentication *) skb->data;
1276 if (skb->len > (sizeof(struct ieee80211_authentication) + 3)) {
1277 t = skb->data + sizeof(struct ieee80211_authentication);
1278
1279 if (*(t++) == MFIE_TYPE_CHALLENGE) {
1280 *chlen = *(t++);
1281 *challenge = kmemdup(t, *chlen, GFP_ATOMIC);
1282 if (!*challenge)
1283 return -ENOMEM;
1284 }
1285 }
1286 return le16_to_cpu(a->status);
1287}
1288
1289static int auth_rq_parse(struct sk_buff *skb, u8 *dest)
1290{
1291 struct ieee80211_authentication *a;
1292
1293 if (skb->len < (sizeof(struct ieee80211_authentication) - sizeof(struct ieee80211_info_element))) {
1294 IEEE80211_DEBUG_MGMT("invalid len in auth request: %d\n", skb->len);
1295 return -1;
1296 }
1297 a = (struct ieee80211_authentication *) skb->data;
1298
1299 memcpy(dest, a->header.addr2, ETH_ALEN);
1300
1301 if (le16_to_cpu(a->algorithm) != WLAN_AUTH_OPEN)
1302 return WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
1303
1304 return WLAN_STATUS_SUCCESS;
1305}
1306
1307static short probe_rq_parse(struct ieee80211_device *ieee, struct sk_buff *skb,
1308 u8 *src)
1309{
1310 u8 *tag;
1311 u8 *skbend;
1312 u8 *ssid = NULL;
1313 u8 ssidlen = 0;
1314
1315 struct ieee80211_hdr_3addr *header =
1316 (struct ieee80211_hdr_3addr *) skb->data;
1317
1318 if (skb->len < sizeof(struct ieee80211_hdr_3addr))
1319 return -1; /* corrupted */
1320
1321 memcpy(src, header->addr2, ETH_ALEN);
1322
1323 skbend = (u8 *)skb->data + skb->len;
1324
1325 tag = skb->data + sizeof(struct ieee80211_hdr_3addr);
1326
1327 while (tag+1 < skbend) {
1328 if (*tag == 0) {
1329 ssid = tag+2;
1330 ssidlen = *(tag+1);
1331 break;
1332 }
1333 tag++; /* point to the len field */
1334 tag = tag + *(tag); /* point to the last data byte of the tag */
1335 tag++; /* point to the next tag */
1336 }
1337
1338 if (ssidlen == 0)
1339 return 1;
1340
1341 if (!ssid)
1342 return 1; /* ssid not found in tagged param */
1343
1344 return (!strncmp(ssid, ieee->current_network.ssid, ssidlen));
1345
1346}
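probe_rq_parse() above walks the tagged parameters of the probe request looking for the SSID element (tag 0): each element is one ID byte, one length byte, and that many payload bytes, so the cursor advances by 2 + length. A standalone sketch of such a walk with explicit bounds checks; the buffer contents and helper name are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Find a tagged element by ID in an 802.11 IE buffer laid out as
 * [id][len][len bytes of data]... Returns a pointer to the payload
 * or NULL when the element is absent or truncated. */
static const uint8_t *find_ie(const uint8_t *buf, size_t buflen,
			      uint8_t id, uint8_t *out_len)
{
	const uint8_t *p = buf;
	const uint8_t *end = buf + buflen;

	while (p + 2 <= end) {
		uint8_t len = p[1];

		if (p + 2 + len > end)
			break;			/* truncated element */
		if (p[0] == id) {
			*out_len = len;
			return p + 2;
		}
		p += 2 + len;			/* next tag */
	}
	return NULL;
}

int main(void)
{
	/* SSID "net7" followed by a supported-rates element */
	const uint8_t ies[] = { 0x00, 0x04, 'n', 'e', 't', '7',
				0x01, 0x04, 0x82, 0x84, 0x8b, 0x96 };
	uint8_t len = 0;
	const uint8_t *ssid = find_ie(ies, sizeof(ies), 0x00, &len);

	if (ssid)
		printf("ssid: %.*s (len %u)\n", len, (const char *)ssid, (unsigned)len);
	return 0;
}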
1347
1348static int assoc_rq_parse(struct sk_buff *skb, u8 *dest)
1349{
1350 struct ieee80211_assoc_request_frame *a;
1351
1352 if (skb->len < (sizeof(struct ieee80211_assoc_request_frame) -
1353 sizeof(struct ieee80211_info_element))) {
1354
1355 IEEE80211_DEBUG_MGMT("invalid len in assoc request: %d\n", skb->len);
1356 return -1;
1357 }
1358
1359 a = (struct ieee80211_assoc_request_frame *) skb->data;
1360
1361 memcpy(dest, a->header.addr2, ETH_ALEN);
1362
1363 return 0;
1364}
1365
1366static inline u16 assoc_parse(struct sk_buff *skb, int *aid)
1367{
1368 struct ieee80211_assoc_response_frame *a;
1369 if (skb->len < sizeof(struct ieee80211_assoc_response_frame)) {
1370 IEEE80211_DEBUG_MGMT("invalid len in assoc resp: %d\n", skb->len);
1371 return 0xcafe;
1372 }
1373
1374 a = (struct ieee80211_assoc_response_frame *) skb->data;
1375 *aid = le16_to_cpu(a->aid) & 0x3fff;
1376 return le16_to_cpu(a->status);
1377}
1378
1379static inline void ieee80211_rx_probe_rq(struct ieee80211_device *ieee,
1380 struct sk_buff *skb)
1381{
1382 u8 dest[ETH_ALEN];
1383
1384 ieee->softmac_stats.rx_probe_rq++;
1385 if (probe_rq_parse(ieee, skb, dest)) {
1386 ieee->softmac_stats.tx_probe_rs++;
1387 ieee80211_resp_to_probe(ieee, dest);
1388 }
1389}
1390
1391inline void ieee80211_rx_auth_rq(struct ieee80211_device *ieee,
1392 struct sk_buff *skb)
1393{
1394 u8 dest[ETH_ALEN];
1395 int status;
1396 ieee->softmac_stats.rx_auth_rq++;
1397
1398 status = auth_rq_parse(skb, dest);
1399 if (status != -1)
1400 ieee80211_resp_to_auth(ieee, status, dest);
1401}
1402
1403inline void
1404ieee80211_rx_assoc_rq(struct ieee80211_device *ieee, struct sk_buff *skb)
1405{
1406
1407 u8 dest[ETH_ALEN];
1408
1409 ieee->softmac_stats.rx_ass_rq++;
1410 if (assoc_rq_parse(skb, dest) != -1) {
1411 ieee80211_resp_to_assoc_rq(ieee, dest);
1412 
1413 printk(KERN_INFO "New client associated: %pM\n", dest);
1414 }
1415}
1416
1417void ieee80211_sta_ps_send_null_frame(struct ieee80211_device *ieee, short pwr)
1418{
1419 struct sk_buff *buf = ieee80211_null_func(ieee, pwr);
1420
1421 if (buf)
1422 softmac_ps_mgmt_xmit(buf, ieee);
1423}
1424
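/* Decide whether the station may doze. Return values:
 * 0 - do nothing (no valid DTIM yet, traffic too recent, or mgmt
 *     frames still queued)
 * 1 - enter sleep; the wake-up TSF target is returned via
 *     *time_h / *time_l
 * 2 - wake up: the last DTIM announced buffered unicast/multicast
 *     traffic matching our power-save mode
 */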
1425static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
1426 u32 *time_l)
1427{
1428 int timeout = 0;
1429
1430 u8 dtim;
1431 dtim = ieee->current_network.dtim_data;
1432
1433 if (!(dtim & IEEE80211_DTIM_VALID))
1434 return 0;
1435 else
1436 timeout = ieee->current_network.beacon_interval;
1437
1438 ieee->current_network.dtim_data = IEEE80211_DTIM_INVALID;
1439
1440 if (dtim & ((IEEE80211_DTIM_UCAST | IEEE80211_DTIM_MBCAST) & ieee->ps))
1441 return 2;
1442
1443 if (!time_after(jiffies, ieee->dev->trans_start + MSECS(timeout)))
1444 return 0;
1445
1446 if (!time_after(jiffies, ieee->last_rx_ps_time + MSECS(timeout)))
1447 return 0;
1448
1449 if ((ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) &&
1450 (ieee->mgmt_queue_tail != ieee->mgmt_queue_head))
1451 return 0;
1452
1453 if (time_l) {
1454 *time_l = ieee->current_network.last_dtim_sta_time[0]
1455 + MSECS((ieee->current_network.beacon_interval));
1456 }
1457
1458 if (time_h) {
1459 *time_h = ieee->current_network.last_dtim_sta_time[1];
1460 if (time_l && *time_l < ieee->current_network.last_dtim_sta_time[0])
1461 *time_h += 1;
1462 }
1463
1464 return 1;
1465}
1466
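/* sta_sleep states used below: 0 = awake, 2 = PS null frame queued and
 * waiting for its TX ack (see ieee80211_ps_tx_ack()), 1 = hardware has
 * been told to enter sleep via enter_sleep_state()
 */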
1467static inline void ieee80211_sta_ps(struct ieee80211_device *ieee)
1468{
1469
1470 u32 th, tl;
1471 short sleep;
1472
1473 unsigned long flags, flags2;
1474
1475 spin_lock_irqsave(&ieee->lock, flags);
1476
1477 if ((ieee->ps == IEEE80211_PS_DISABLED ||
1478 ieee->iw_mode != IW_MODE_INFRA ||
1479 ieee->state != IEEE80211_LINKED)) {
1480
1481 /* #warning CHECK_LOCK_HERE */
1482 spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
1483
1484 ieee80211_sta_wakeup(ieee, 1);
1485
1486 spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
1487 }
1488
1489 sleep = ieee80211_sta_ps_sleep(ieee, &th, &tl);
1490 /* 2 wake, 1 sleep, 0 do nothing */
1491 if (sleep == 0)
1492 goto out;
1493
1494 if (sleep == 1) {
1495 if (ieee->sta_sleep == 1)
1496 ieee->enter_sleep_state(ieee->dev, th, tl);
1497
1498 else if (ieee->sta_sleep == 0) {
1499 spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
1500 if (ieee->ps_is_queue_empty(ieee->dev)) {
1501 ieee->sta_sleep = 2;
1502
1503 ieee->ps_request_tx_ack(ieee->dev);
1504
1505 ieee80211_sta_ps_send_null_frame(ieee, 1);
1506
1507 ieee->ps_th = th;
1508 ieee->ps_tl = tl;
1509 }
1510 spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
1511 }
1512 } else if (sleep == 2) {
1513 /* #warning CHECK_LOCK_HERE */
1514 spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
1515
1516 ieee80211_sta_wakeup(ieee, 1);
1517
1518 spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
1519 }
1520out:
1521 spin_unlock_irqrestore(&ieee->lock, flags);
1522}
1523
1524void ieee80211_sta_wakeup(struct ieee80211_device *ieee, short nl)
1525{
1526 if (ieee->sta_sleep == 0) {
1527 if (nl) {
1528 ieee->ps_request_tx_ack(ieee->dev);
1529 ieee80211_sta_ps_send_null_frame(ieee, 0);
1530 }
1531 return;
1532 }
1533
1534 if (ieee->sta_sleep == 1)
1535 ieee->sta_wake_up(ieee->dev);
1536
1537 ieee->sta_sleep = 0;
1538
1539 if (nl) {
1540 ieee->ps_request_tx_ack(ieee->dev);
1541 ieee80211_sta_ps_send_null_frame(ieee, 0);
1542 }
1543}
1544
1545void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success)
1546{
1547 unsigned long flags, flags2;
1548
1549 spin_lock_irqsave(&ieee->lock, flags);
1550 if (ieee->sta_sleep == 2) {
1551 /* Null frame with PS bit set */
1552 if (success) {
1553 ieee->sta_sleep = 1;
1554 ieee->enter_sleep_state(ieee->dev, ieee->ps_th, ieee->ps_tl);
1555 }
1556 /* if the card reports no success we can't be sure the AP
1557 * has not RXed it, so we can't assume the AP believes us awake
1558 */
1559 } else {
1560 if ((ieee->sta_sleep == 0) && !success) {
1561 spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
1562 ieee80211_sta_ps_send_null_frame(ieee, 0);
1563 spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
1564 }
1565 }
1566 spin_unlock_irqrestore(&ieee->lock, flags);
1567}
1568
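/* Management-frame dispatcher for the softmac: depending on iw_mode and
 * the current association state it handles (re)association responses
 * and requests, authentication, probe requests and deauth/disassoc,
 * and kicks the power-save tasklet when needed.
 */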
1569inline int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
1570 struct sk_buff *skb,
1571 struct ieee80211_rx_stats *rx_stats,
1572 u16 type, u16 stype)
1573{
1574 struct ieee80211_hdr_3addr *header = (struct ieee80211_hdr_3addr *) skb->data;
1575 u16 errcode;
1576 u8 *challenge = NULL;
1577 int chlen = 0;
1578 int aid = 0;
1579 struct ieee80211_assoc_response_frame *assoc_resp;
1580 struct ieee80211_info_element *info_element;
1581
1582 if (!ieee->proto_started)
1583 return 0;
1584
1585 if (ieee->sta_sleep || (ieee->ps != IEEE80211_PS_DISABLED &&
1586 ieee->iw_mode == IW_MODE_INFRA &&
1587 ieee->state == IEEE80211_LINKED))
1588
1589 tasklet_schedule(&ieee->ps_task);
1590
1591 if (WLAN_FC_GET_STYPE(header->frame_control) != IEEE80211_STYPE_PROBE_RESP &&
1592 WLAN_FC_GET_STYPE(header->frame_control) != IEEE80211_STYPE_BEACON)
1593 ieee->last_rx_ps_time = jiffies;
1594
1595 switch (WLAN_FC_GET_STYPE(header->frame_control)) {
1596 case IEEE80211_STYPE_ASSOC_RESP:
1597 case IEEE80211_STYPE_REASSOC_RESP:
1598 IEEE80211_DEBUG_MGMT("received [RE]ASSOCIATION RESPONSE (%d)\n",
1599 WLAN_FC_GET_STYPE(header->frame_control));
1600 if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
1601 ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
1602 ieee->iw_mode == IW_MODE_INFRA) {
1603 errcode = assoc_parse(skb, &aid);
1604 if (0 == errcode) {
1605 u16 left;
1606
1607 ieee->state = IEEE80211_LINKED;
1608 ieee->assoc_id = aid;
1609 ieee->softmac_stats.rx_ass_ok++;
1610 /* card type is 8187 */
1611 if (1 == rx_stats->nic_type)
1612 goto associate_complete;
1613
1614 assoc_resp = (struct ieee80211_assoc_response_frame *)skb->data;
1615 info_element = &assoc_resp->info_element;
1616 left = skb->len - ((void *)info_element - (void *)assoc_resp);
1617
1618 while (left >= sizeof(struct ieee80211_info_element_hdr)) {
1619 if (sizeof(struct ieee80211_info_element_hdr) + info_element->len > left) {
1620 printk(KERN_WARNING "[re]associate response error!\n");
1621 return 1;
1622 }
1623 switch (info_element->id) {
1624 case MFIE_TYPE_GENERIC:
1625 IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len);
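/* vendor-specific IE carrying the WMM Parameter Element:
 * OUI 00:50:f2 (Microsoft), OUI type 2 (WMM), subtype 1
 */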
1626 if (info_element->len >= 8 &&
1627 info_element->data[0] == 0x00 &&
1628 info_element->data[1] == 0x50 &&
1629 info_element->data[2] == 0xf2 &&
1630 info_element->data[3] == 0x02 &&
1631 info_element->data[4] == 0x01) {
1632 /* WMM Parameter Element; we do not care
1633 * about the version at present.
1634 */
1635 memcpy(ieee->current_network.wmm_param,
1636 (u8 *)(info_element->data + 8), (info_element->len - 8));
1637
1638 if (((ieee->current_network.wmm_info ^ info_element->data[6]) &
1639 0x0f) || (!ieee->init_wmmparam_flag)) {
1640 /* refresh the parameter element for the current network
1641 * and update the hardware registers.
1642 */
1643 ieee->init_wmmparam_flag = 1;
1644 queue_work(ieee->wq, &ieee->wmm_param_update_wq);
1645 }
1646 /* update info_element for current network */
1647 ieee->current_network.wmm_info = info_element->data[6];
1648 }
1649 break;
1650 default:
1651 /* nothing to do at present!!! */
1652 break;
1653 }
1654
1655 left -= sizeof(struct ieee80211_info_element_hdr) +
1656 info_element->len;
1657 info_element = (struct ieee80211_info_element *)
1658 &info_element->data[info_element->len];
1659 }
1660 /* legacy AP, reset the AC_xx_param register */
1661 if (!ieee->init_wmmparam_flag) {
1662 queue_work(ieee->wq, &ieee->wmm_param_update_wq);
1663 ieee->init_wmmparam_flag = 1; /* indicate AC_xx_param updated since last associate */
1664 }
1665associate_complete:
1666 ieee80211_associate_complete(ieee);
1667 } else {
1668 ieee->softmac_stats.rx_ass_err++;
1669 IEEE80211_DEBUG_MGMT(
1670 "Association response status code 0x%x\n",
1671 errcode);
1672 ieee80211_associate_abort(ieee);
1673 }
1674 }
1675 break;
1676 case IEEE80211_STYPE_ASSOC_REQ:
1677 case IEEE80211_STYPE_REASSOC_REQ:
1678 if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
1679 ieee->iw_mode == IW_MODE_MASTER)
1680
1681 ieee80211_rx_assoc_rq(ieee, skb);
1682 break;
1683 case IEEE80211_STYPE_AUTH:
1684 if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) {
1685 if (ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATING &&
1686 ieee->iw_mode == IW_MODE_INFRA){
1687 IEEE80211_DEBUG_MGMT("Received authentication response");
1688
1689 errcode = auth_parse(skb, &challenge, &chlen);
1690 if (0 == errcode) {
1691 if (ieee->open_wep || !challenge) {
1692 ieee->state = IEEE80211_ASSOCIATING_AUTHENTICATED;
1693 ieee->softmac_stats.rx_auth_rs_ok++;
1694
1695 ieee80211_associate_step2(ieee);
1696 } else {
1697 ieee80211_rtl_auth_challenge(ieee, challenge, chlen);
1698 }
1699 } else {
1700 ieee->softmac_stats.rx_auth_rs_err++;
1701 IEEE80211_DEBUG_MGMT("Authentication response status code 0x%x", errcode);
1702 ieee80211_associate_abort(ieee);
1703 }
1704
1705 } else if (ieee->iw_mode == IW_MODE_MASTER) {
1706 ieee80211_rx_auth_rq(ieee, skb);
1707 }
1708 }
1709 break;
1710 case IEEE80211_STYPE_PROBE_REQ:
1711 if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
1712 ((ieee->iw_mode == IW_MODE_ADHOC ||
1713 ieee->iw_mode == IW_MODE_MASTER) &&
1714 ieee->state == IEEE80211_LINKED))
1715
1716 ieee80211_rx_probe_rq(ieee, skb);
1717 break;
1718 case IEEE80211_STYPE_DISASSOC:
1719 case IEEE80211_STYPE_DEAUTH:
1720 /* FIXME for now repeat all the association procedure
1721 * both for disassociation and deauthentication
1722 */
1723 if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
1724 (ieee->state == IEEE80211_LINKED) &&
1725 (ieee->iw_mode == IW_MODE_INFRA) &&
1726 (!memcmp(header->addr2, ieee->current_network.bssid, ETH_ALEN))) {
1727 ieee->state = IEEE80211_ASSOCIATING;
1728 ieee->softmac_stats.reassoc++;
1729
1730 queue_work(ieee->wq, &ieee->associate_procedure_wq);
1731 }
1732 break;
1733 default:
1734 return -1;
1735 break;
1736 }
1737 return 0;
1738}
1739
1740/* The following functions provide a simpler TX queue management.
1741 * Instead of using netif_[stop/wake]_queue the driver
1742 * uses these two functions (plus a reset one), which
1743 * internally use the kernel netif_* calls and take
1744 * care of the ieee802.11 fragmentation.
1745 * The driver thus receives one fragment at a time and may
1746 * call the stop function whenever it wants, without having
1747 * to keep enough room to TX an entire packet.
1748 * This is useful if each fragment needs its own
1749 * descriptor: just keeping total free memory greater than
1750 * the max fragmentation threshold is not enough. If the
1751 * ieee802.11 stack passed a whole TXB struct you would need
1752 * to keep N free descriptors, where
1753 * N = MAX_PACKET_SIZE / MIN_FRAG_TRESHOLD
1754 * This way you need just one, and the 802.11 stack
1755 * takes care of buffering the remaining fragments and passing
1756 * them to the driver later, when it wakes the queue.
1757 */
1758
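/* Illustrative sketch (not part of the original driver): one way a
 * low-level driver could cooperate with the queue helpers below.  The
 * example_priv layout and the descriptor-ring accounting are made up
 * for the example; only ieee80211_rtl_stop_queue() and
 * ieee80211_rtl_wake_queue() come from this file.
 */
struct example_priv {
	struct ieee80211_device *ieee;
	int tx_free_desc;		/* free TX descriptors left */
};

/* called from the driver's hard_start_xmit callback for each fragment */
static void example_tx_fragment(struct example_priv *priv,
				struct sk_buff *frag)
{
	/* ... program one TX descriptor with frag here ... */
	if (--priv->tx_free_desc == 0)
		ieee80211_rtl_stop_queue(priv->ieee);
}

/* called from the TX-complete interrupt once descriptors are reclaimed */
static void example_tx_done(struct example_priv *priv, int reclaimed)
{
	priv->tx_free_desc += reclaimed;
	ieee80211_rtl_wake_queue(priv->ieee);
}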
1759void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
1760 struct ieee80211_device *ieee)
1761{
1762 unsigned long flags;
1763 int i;
1764
1765 spin_lock_irqsave(&ieee->lock, flags);
1766
1767 /* called with 2nd parm 0, no tx mgmt lock required */
1768 ieee80211_sta_wakeup(ieee, 0);
1769
1770 for (i = 0; i < txb->nr_frags; i++) {
1771 if (ieee->queue_stop) {
1772 ieee->tx_pending.txb = txb;
1773 ieee->tx_pending.frag = i;
1774 goto exit;
1775 } else {
1776 ieee->softmac_data_hard_start_xmit(
1777 txb->fragments[i],
1778 ieee->dev, ieee->rate);
1779 ieee->stats.tx_packets++;
1780 ieee->stats.tx_bytes += txb->fragments[i]->len;
1781 ieee->dev->trans_start = jiffies;
1782 }
1783 }
1784
1785 ieee80211_txb_free(txb);
1786
1787 exit:
1788 spin_unlock_irqrestore(&ieee->lock, flags);
1789}
1790
1791/* called with ieee->lock acquired */
1792static void ieee80211_resume_tx(struct ieee80211_device *ieee)
1793{
1794 int i;
1795 for (i = ieee->tx_pending.frag; i < ieee->tx_pending.txb->nr_frags; i++) {
1796
1797 if (ieee->queue_stop) {
1798 ieee->tx_pending.frag = i;
1799 return;
1800 } else {
1801 ieee->softmac_data_hard_start_xmit(
1802 ieee->tx_pending.txb->fragments[i],
1803 ieee->dev, ieee->rate);
1804 ieee->stats.tx_packets++;
1805 ieee->dev->trans_start = jiffies;
1806 }
1807 }
1808
1809 ieee80211_txb_free(ieee->tx_pending.txb);
1810 ieee->tx_pending.txb = NULL;
1811}
1812
1813void ieee80211_reset_queue(struct ieee80211_device *ieee)
1814{
1815 unsigned long flags;
1816
1817 spin_lock_irqsave(&ieee->lock, flags);
1818 init_mgmt_queue(ieee);
1819 if (ieee->tx_pending.txb) {
1820 ieee80211_txb_free(ieee->tx_pending.txb);
1821 ieee->tx_pending.txb = NULL;
1822 }
1823 ieee->queue_stop = 0;
1824 spin_unlock_irqrestore(&ieee->lock, flags);
1825}
1826
1827void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
1828{
1829 unsigned long flags;
1830 struct sk_buff *skb;
1831 struct ieee80211_hdr_3addr *header;
1832
1833 spin_lock_irqsave(&ieee->lock, flags);
1834 if (!ieee->queue_stop)
1835 goto exit;
1836
1837 ieee->queue_stop = 0;
1838
1839 if (ieee->softmac_features & IEEE_SOFTMAC_SINGLE_QUEUE) {
1840 while (!ieee->queue_stop && (skb = dequeue_mgmt(ieee))) {
1841 header = (struct ieee80211_hdr_3addr *) skb->data;
1842
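/* sequence control field: fragment number in bits 0-3, 12-bit
 * sequence number in bits 4-15, hence the << 4 and the 0xFFF wrap
 */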
1843 header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
1844
1845 if (ieee->seq_ctrl[0] == 0xFFF)
1846 ieee->seq_ctrl[0] = 0;
1847 else
1848 ieee->seq_ctrl[0]++;
1849
1850 ieee->softmac_data_hard_start_xmit(skb, ieee->dev, ieee->basic_rate);
1851 dev_kfree_skb_any(skb);
1852 }
1853 }
1854 if (!ieee->queue_stop && ieee->tx_pending.txb)
1855 ieee80211_resume_tx(ieee);
1856
1857 if (!ieee->queue_stop && netif_queue_stopped(ieee->dev)) {
1858 ieee->softmac_stats.swtxawake++;
1859 netif_wake_queue(ieee->dev);
1860 }
1861exit:
1862 spin_unlock_irqrestore(&ieee->lock, flags);
1863}
1864
1865void ieee80211_rtl_stop_queue(struct ieee80211_device *ieee)
1866{
1867 if (!netif_queue_stopped(ieee->dev)) {
1868 netif_stop_queue(ieee->dev);
1869 ieee->softmac_stats.swtxstop++;
1870 }
1871 ieee->queue_stop = 1;
1872}
1873
1874inline void ieee80211_randomize_cell(struct ieee80211_device *ieee)
1875{
1876 random_ether_addr(ieee->current_network.bssid);
1877}
1878
1879/* called in user context only */
1880void ieee80211_start_master_bss(struct ieee80211_device *ieee)
1881{
1882 ieee->assoc_id = 1;
1883
1884 if (ieee->current_network.ssid_len == 0) {
1885 strncpy(ieee->current_network.ssid,
1886 IEEE80211_DEFAULT_TX_ESSID,
1887 IW_ESSID_MAX_SIZE);
1888
1889 ieee->current_network.ssid_len = strlen(IEEE80211_DEFAULT_TX_ESSID);
1890 ieee->ssid_set = 1;
1891 }
1892
1893 memcpy(ieee->current_network.bssid, ieee->dev->dev_addr, ETH_ALEN);
1894
1895 ieee->set_chan(ieee->dev, ieee->current_network.channel);
1896 ieee->state = IEEE80211_LINKED;
1897 ieee->link_change(ieee->dev);
1898 notify_wx_assoc_event(ieee);
1899
1900 if (ieee->data_hard_resume)
1901 ieee->data_hard_resume(ieee->dev);
1902
1903 netif_carrier_on(ieee->dev);
1904}
1905
1906static void ieee80211_start_monitor_mode(struct ieee80211_device *ieee)
1907{
1908 if (ieee->raw_tx) {
1909
1910 if (ieee->data_hard_resume)
1911 ieee->data_hard_resume(ieee->dev);
1912
1913 netif_carrier_on(ieee->dev);
1914 }
1915}
1916
1917static void ieee80211_start_ibss_wq(struct work_struct *work)
1918{
1919 struct delayed_work *dwork = to_delayed_work(work);
1920 struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
1921
1922 /* "iwconfig mode ad-hoc" will schedule this work and return;
1923 * on the other hand this blocks further iwconfig SET
1924 * operations because of the wx_sem it holds.
1925 * Anyway, most set operations set a flag to speed up
1926 * (abort) this wq (when syncro scanning) before sleeping
1927 * on the semaphore.
1928 */
1929
1930 down(&ieee->wx_sem);
1931
1932 if (ieee->current_network.ssid_len == 0) {
1933 strcpy(ieee->current_network.ssid, IEEE80211_DEFAULT_TX_ESSID);
1934 ieee->current_network.ssid_len = strlen(IEEE80211_DEFAULT_TX_ESSID);
1935 ieee->ssid_set = 1;
1936 }
1937
1938 /* check if we have this cell in our network list */
1939 ieee80211_softmac_check_all_nets(ieee);
1940
1941 if (ieee->state == IEEE80211_NOLINK)
1942 ieee->current_network.channel = 10;
1943 /* if not, then the state is not linked. Maybe the user switched to
1944 * ad-hoc mode just after being in monitor mode, or just after
1945 * spending very little time in managed mode (so the card has had no
1946 * time to scan all the channels), or we have just brought up the iface
1947 * after setting ad-hoc mode. So we have to give it another try.
1948 * Here, in ibss mode, it should be safe to do this without extra care
1949 * (in bss mode we had to make sure no-one tried to associate when
1950 * we had just checked ieee->state and were about to start the
1951 * scan) because in ibss mode the ieee80211_new_net function, when it
1952 * finds a good net, just sets ieee->state to IEEE80211_LINKED,
1953 * so, at worst, we waste a bit of time initiating an unneeded syncro
1954 * scan that will stop at the first round because it sees the state
1955 * is associated.
1956 */
1957 if (ieee->state == IEEE80211_NOLINK)
1958 ieee80211_start_scan_syncro(ieee);
1959
1960 /* the network definitively is not here.. create a new cell */
1961 if (ieee->state == IEEE80211_NOLINK) {
1962 printk("creating new IBSS cell\n");
1963 if (!ieee->wap_set)
1964 ieee80211_randomize_cell(ieee);
1965
1966 if (ieee->modulation & IEEE80211_CCK_MODULATION) {
1967 ieee->current_network.rates_len = 4;
1968
1969 ieee->current_network.rates[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
1970 ieee->current_network.rates[1] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
1971 ieee->current_network.rates[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
1972 ieee->current_network.rates[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
1973
1974 } else
1975 ieee->current_network.rates_len = 0;
1976
1977 if (ieee->modulation & IEEE80211_OFDM_MODULATION) {
1978 ieee->current_network.rates_ex_len = 8;
1979
1980 ieee->current_network.rates_ex[0] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
1981 ieee->current_network.rates_ex[1] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
1982 ieee->current_network.rates_ex[2] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
1983 ieee->current_network.rates_ex[3] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
1984 ieee->current_network.rates_ex[4] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
1985 ieee->current_network.rates_ex[5] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
1986 ieee->current_network.rates_ex[6] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
1987 ieee->current_network.rates_ex[7] = IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
1988
1989 ieee->rate = 540;
1990 } else {
1991 ieee->current_network.rates_ex_len = 0;
1992 ieee->rate = 110;
1993 }
1994
1995 /* By default, WMM function will be disabled in IBSS mode */
1996 ieee->current_network.QoS_Enable = 0;
1997
1998 ieee->current_network.atim_window = 0;
1999 ieee->current_network.capability = WLAN_CAPABILITY_IBSS;
2000 if (ieee->short_slot)
2001 ieee->current_network.capability |= WLAN_CAPABILITY_SHORT_SLOT;
2002 }
2003
2004 ieee->state = IEEE80211_LINKED;
2005 ieee->set_chan(ieee->dev, ieee->current_network.channel);
2006 ieee->link_change(ieee->dev);
2007
2008 notify_wx_assoc_event(ieee);
2009
2010 ieee80211_start_send_beacons(ieee);
2011 printk(KERN_WARNING "after sending beacon packet!\n");
2012
2013 if (ieee->data_hard_resume)
2014 ieee->data_hard_resume(ieee->dev);
2015
2016 netif_carrier_on(ieee->dev);
2017
2018 up(&ieee->wx_sem);
2019}
2020
2021inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
2022{
2023 queue_delayed_work(ieee->wq, &ieee->start_ibss_wq, 100);
2024}
2025
2026/* this is called only in user context, with wx_sem held */
2027void ieee80211_start_bss(struct ieee80211_device *ieee)
2028{
2029 unsigned long flags;
2030 /* Ref: 802.11d 11.1.3.3
2031 * A STA shall not start a BSS unless it can send a properly formed
2032 * Beacon frame including a Country IE.
2033 */
2034 if (IS_DOT11D_ENABLE(ieee) && !IS_COUNTRY_IE_VALID(ieee)) {
2035 if (!ieee->bGlobalDomain)
2036 return;
2037 }
2038 /* check if we have already found the net we are interested in (if any).
2039 * if not (we are disassociated and we are not
2040 * in associating / authenticating phase) start the background scanning.
2041 */
2042 ieee80211_softmac_check_all_nets(ieee);
2043
2044 /* ensure no-one starts an association process (thus setting
2045 * ieee->state to IEEE80211_ASSOCIATING) while we
2046 * have just checked it and are about to enable the scan.
2047 * The ieee80211_new_net function is always called with the
2048 * lock held (from both ieee80211_softmac_check_all_nets and
2049 * the rx path), so we cannot be in the middle of that function.
2050 */
2051 spin_lock_irqsave(&ieee->lock, flags);
2052
2053 if (ieee->state == IEEE80211_NOLINK) {
2054 ieee->actscanning = true;
2055 ieee80211_rtl_start_scan(ieee);
2056 }
2057 spin_unlock_irqrestore(&ieee->lock, flags);
2058}
2059
2060/* called only in userspace context */
2061void ieee80211_disassociate(struct ieee80211_device *ieee)
2062{
2063 netif_carrier_off(ieee->dev);
2064
2065 if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)
2066 ieee80211_reset_queue(ieee);
2067
2068 if (ieee->data_hard_stop)
2069 ieee->data_hard_stop(ieee->dev);
2070
2071 if (IS_DOT11D_ENABLE(ieee))
2072 Dot11d_Reset(ieee);
2073
2074 ieee->link_change(ieee->dev);
2075 if (ieee->state == IEEE80211_LINKED)
2076 notify_wx_assoc_event(ieee);
2077 ieee->state = IEEE80211_NOLINK;
2078
2079}
2080static void ieee80211_associate_retry_wq(struct work_struct *work)
2081{
2082 struct delayed_work *dwork = to_delayed_work(work);
2083 struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
2084 unsigned long flags;
2085 down(&ieee->wx_sem);
2086 if (!ieee->proto_started)
2087 goto exit;
2088 if (ieee->state != IEEE80211_ASSOCIATING_RETRY)
2089 goto exit;
2090 /* until we set the state to IEEE80211_NOLINK
2091 * there is no possibility of someone else trying
2092 * to start an association procedure (we get here with
2093 * ieee->state = IEEE80211_ASSOCIATING).
2094 * When we set the state to IEEE80211_NOLINK it is possible
2095 * that the RX path runs an attempt to associate, but
2096 * both ieee80211_softmac_check_all_nets and the
2097 * RX path work with ieee->lock held, so there are no
2098 * problems. If we are still disassociated then start a scan.
2099 * The lock here is necessary to ensure no one tries to start
2100 * an association procedure while we have just checked the
2101 * state and are going to start the scan.
2102 */
2103 ieee->state = IEEE80211_NOLINK;
2104 ieee->beinretry = true;
2105 ieee80211_softmac_check_all_nets(ieee);
2106
2107 spin_lock_irqsave(&ieee->lock, flags);
2108
2109 if (ieee->state == IEEE80211_NOLINK) {
2110 ieee->beinretry = false;
2111 ieee->actscanning = true;
2112 ieee80211_rtl_start_scan(ieee);
2113 }
2114 if (ieee->state == IEEE80211_NOLINK)
2115 notify_wx_assoc_event(ieee);
2116 spin_unlock_irqrestore(&ieee->lock, flags);
2117
2118exit:
2119 up(&ieee->wx_sem);
2120}
2121
2122struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee)
2123{
2124 u8 broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2125
2126 struct sk_buff *skb = NULL;
2127 struct ieee80211_probe_response *b;
2128
2129 skb = ieee80211_probe_resp(ieee, broadcast_addr);
2130 if (!skb)
2131 return NULL;
2132
2133 b = (struct ieee80211_probe_response *) skb->data;
2134 b->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_BEACON);
2135
2136 return skb;
2137}
2138
2139struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee)
2140{
2141 struct sk_buff *skb;
2142 struct ieee80211_probe_response *b;
2143
2144 skb = ieee80211_get_beacon_(ieee);
2145 if (!skb)
2146 return NULL;
2147
2148 b = (struct ieee80211_probe_response *) skb->data;
2149 b->header.seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
2150
2151 if (ieee->seq_ctrl[0] == 0xFFF)
2152 ieee->seq_ctrl[0] = 0;
2153 else
2154 ieee->seq_ctrl[0]++;
2155
2156 return skb;
2157}
2158
2159void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee)
2160{
2161 ieee->sync_scan_hurryup = 1;
2162 down(&ieee->wx_sem);
2163 ieee80211_stop_protocol(ieee);
2164 up(&ieee->wx_sem);
2165}
2166
2167void ieee80211_stop_protocol(struct ieee80211_device *ieee)
2168{
2169 if (!ieee->proto_started)
2170 return;
2171
2172 ieee->proto_started = 0;
2173
2174 ieee80211_stop_send_beacons(ieee);
2175 if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state == IEEE80211_LINKED))
2176 SendDisassociation(ieee, NULL, WLAN_REASON_DISASSOC_STA_HAS_LEFT);
2177
2178 del_timer_sync(&ieee->associate_timer);
2179 cancel_delayed_work(&ieee->associate_retry_wq);
2180 cancel_delayed_work(&ieee->start_ibss_wq);
2181 ieee80211_stop_scan(ieee);
2182
2183 ieee80211_disassociate(ieee);
2184}
2185
2186void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee)
2187{
2188 ieee->sync_scan_hurryup = 0;
2189 down(&ieee->wx_sem);
2190 ieee80211_start_protocol(ieee);
2191 up(&ieee->wx_sem);
2192}
2193
2194void ieee80211_start_protocol(struct ieee80211_device *ieee)
2195{
2196 short ch = 0;
2197 int i = 0;
2198
2199 if (ieee->proto_started)
2200 return;
2201
2202 ieee->proto_started = 1;
2203
2204 if (ieee->current_network.channel == 0) {
2205 do {
2206 ch++;
2207 if (ch > MAX_CHANNEL_NUMBER)
2208 return; /* no channel found */
2209
2210 } while (!GET_DOT11D_INFO(ieee)->channel_map[ch]);
2211
2212 ieee->current_network.channel = ch;
2213 }
2214
2215 if (ieee->current_network.beacon_interval == 0)
2216 ieee->current_network.beacon_interval = 100;
2217 ieee->set_chan(ieee->dev, ieee->current_network.channel);
2218
2219 for (i = 0; i < 17; i++) {
2220 ieee->last_rxseq_num[i] = -1;
2221 ieee->last_rxfrag_num[i] = -1;
2222 ieee->last_packet_time[i] = 0;
2223 }
2224
2225 ieee->init_wmmparam_flag = 0; /* reinitialize AC_xx_PARAM registers. */
2226
2227 /* if the user set the MAC of the ad-hoc cell and then
2228 * switches to managed mode, shall we make sure that association
2229 * attempts do not fail just because the user provided the essid
2230 * and the nic is still checking for the AP MAC ??
2231 */
2232 switch (ieee->iw_mode) {
2233 case IW_MODE_AUTO:
2234 ieee->iw_mode = IW_MODE_INFRA;
2235 /* no break here: fall through intentionally */
2236 case IW_MODE_INFRA:
2237 ieee80211_start_bss(ieee);
2238 break;
2239
2240 case IW_MODE_ADHOC:
2241 ieee80211_start_ibss(ieee);
2242 break;
2243
2244 case IW_MODE_MASTER:
2245 ieee80211_start_master_bss(ieee);
2246 break;
2247
2248 case IW_MODE_MONITOR:
2249 ieee80211_start_monitor_mode(ieee);
2250 break;
2251
2252 default:
2253 ieee->iw_mode = IW_MODE_INFRA;
2254 ieee80211_start_bss(ieee);
2255 break;
2256 }
2257}
2258
2259#define DRV_NAME "Ieee80211"
2260void ieee80211_softmac_init(struct ieee80211_device *ieee)
2261{
2262 int i;
2263 memset(&ieee->current_network, 0, sizeof(struct ieee80211_network));
2264
2265 ieee->state = IEEE80211_NOLINK;
2266 ieee->sync_scan_hurryup = 0;
2267 for (i = 0; i < 5; i++)
2268 ieee->seq_ctrl[i] = 0;
2269
2270 ieee->assoc_id = 0;
2271 ieee->queue_stop = 0;
2272 ieee->scanning = 0;
2273 ieee->softmac_features = 0; /* so IEEE2100-like drivers are happy */
2274 ieee->wap_set = 0;
2275 ieee->ssid_set = 0;
2276 ieee->proto_started = 0;
2277 ieee->basic_rate = IEEE80211_DEFAULT_BASIC_RATE;
2278 ieee->rate = 3;
2279 ieee->ps = IEEE80211_PS_MBCAST|IEEE80211_PS_UNICAST;
2280 ieee->sta_sleep = 0;
2281 ieee->bInactivePs = false;
2282 ieee->actscanning = false;
2283 ieee->ListenInterval = 2;
2284 ieee->NumRxDataInPeriod = 0;
2285 ieee->NumRxBcnInPeriod = 0;
2286 ieee->NumRxOkTotal = 0;
2287 ieee->NumRxUnicast = 0; /* for keep alive */
2288 ieee->beinretry = false;
2289 ieee->bHwRadioOff = false;
2290
2291 init_mgmt_queue(ieee);
2292
2293 ieee->tx_pending.txb = NULL;
2294
2295 init_timer(&ieee->associate_timer);
2296 ieee->associate_timer.data = (unsigned long)ieee;
2297 ieee->associate_timer.function = ieee80211_associate_abort_cb;
2298
2299 init_timer(&ieee->beacon_timer);
2300 ieee->beacon_timer.data = (unsigned long) ieee;
2301 ieee->beacon_timer.function = ieee80211_send_beacon_cb;
2302
2303 ieee->wq = create_workqueue(DRV_NAME);
2304
2305 INIT_DELAYED_WORK(&ieee->start_ibss_wq, (void *) ieee80211_start_ibss_wq);
2306 INIT_WORK(&ieee->associate_complete_wq, (void *) ieee80211_associate_complete_wq);
2307 INIT_WORK(&ieee->associate_procedure_wq, (void *) ieee80211_associate_procedure_wq);
2308 INIT_DELAYED_WORK(&ieee->softmac_scan_wq, (void *) ieee80211_softmac_scan_wq);
2309 INIT_DELAYED_WORK(&ieee->associate_retry_wq, (void *) ieee80211_associate_retry_wq);
2310 INIT_WORK(&ieee->wx_sync_scan_wq, (void *) ieee80211_wx_sync_scan_wq);
2311
2312 sema_init(&ieee->wx_sem, 1);
2313 sema_init(&ieee->scan_sem, 1);
2314
2315 spin_lock_init(&ieee->mgmt_tx_lock);
2316 spin_lock_init(&ieee->beacon_lock);
2317
2318 tasklet_init(&ieee->ps_task,
2319 (void(*)(unsigned long)) ieee80211_sta_ps,
2320 (unsigned long)ieee);
2321 ieee->pDot11dInfo = kmalloc(sizeof(RT_DOT11D_INFO), GFP_ATOMIC);
2322}
2323
2324void ieee80211_softmac_free(struct ieee80211_device *ieee)
2325{
2326 down(&ieee->wx_sem);
2327
2328 del_timer_sync(&ieee->associate_timer);
2329 cancel_delayed_work(&ieee->associate_retry_wq);
2330
2331 /* added for RF power on / power off */
2332 cancel_delayed_work(&ieee->GPIOChangeRFWorkItem);
2333
2334 destroy_workqueue(ieee->wq);
2335 kfree(ieee->pDot11dInfo);
2336 up(&ieee->wx_sem);
2337}
2338
2339/* Start of WPA code. This is stolen from the ipw2200 driver */
2340static int ieee80211_wpa_enable(struct ieee80211_device *ieee, int value)
2341{
2342 /* This is called when wpa_supplicant loads and closes the driver
2343 * interface. */
2344 printk("%s WPA\n", value ? "enabling" : "disabling");
2345 ieee->wpa_enabled = value;
2346 return 0;
2347}
2348
2349static void ieee80211_wpa_assoc_frame(struct ieee80211_device *ieee, char *wpa_ie,
2350 int wpa_ie_len)
2351{
2352 /* make sure WPA is enabled */
2353 ieee80211_wpa_enable(ieee, 1);
2354
2355 ieee80211_disassociate(ieee);
2356}
2357
2358static int ieee80211_wpa_mlme(struct ieee80211_device *ieee, int command,
2359 int reason)
2360{
2361 int ret = 0;
2362
2363 switch (command) {
2364 case IEEE_MLME_STA_DEAUTH:
2365 /* silently ignore */
2366 break;
2367
2368 case IEEE_MLME_STA_DISASSOC:
2369 ieee80211_disassociate(ieee);
2370 break;
2371
2372 default:
2373 printk("Unknown MLME request: %d\n", command);
2374 ret = -EOPNOTSUPP;
2375 }
2376
2377 return ret;
2378}
2379
2380static int ieee80211_wpa_set_wpa_ie(struct ieee80211_device *ieee,
2381 struct ieee_param *param, int plen)
2382{
2383 u8 *buf;
2384
2385 if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
2386 (param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
2387 return -EINVAL;
2388
2389 if (param->u.wpa_ie.len) {
2390 buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len,
2391 GFP_KERNEL);
2392 if (buf == NULL)
2393 return -ENOMEM;
2394
2395 kfree(ieee->wpa_ie);
2396 ieee->wpa_ie = buf;
2397 ieee->wpa_ie_len = param->u.wpa_ie.len;
2398 } else {
2399 kfree(ieee->wpa_ie);
2400 ieee->wpa_ie = NULL;
2401 ieee->wpa_ie_len = 0;
2402 }
2403
2404 ieee80211_wpa_assoc_frame(ieee, ieee->wpa_ie, ieee->wpa_ie_len);
2405 return 0;
2406}
2407
2408#define AUTH_ALG_OPEN_SYSTEM 0x1
2409#define AUTH_ALG_SHARED_KEY 0x2
2410
2411static int ieee80211_wpa_set_auth_algs(struct ieee80211_device *ieee, int value)
2412{
2413 struct ieee80211_security sec = {
2414 .flags = SEC_AUTH_MODE,
2415 };
2416 int ret = 0;
2417
2418 if (value & AUTH_ALG_SHARED_KEY) {
2419 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
2420 ieee->open_wep = 0;
2421 } else {
2422 sec.auth_mode = WLAN_AUTH_OPEN;
2423 ieee->open_wep = 1;
2424 }
2425
2426 if (ieee->set_security)
2427 ieee->set_security(ieee->dev, &sec);
2428 else
2429 ret = -EOPNOTSUPP;
2430
2431 return ret;
2432}
2433
2434static int ieee80211_wpa_set_param(struct ieee80211_device *ieee, u8 name,
2435 u32 value)
2436{
2437 int ret = 0;
2438 unsigned long flags;
2439
2440 switch (name) {
2441 case IEEE_PARAM_WPA_ENABLED:
2442 ret = ieee80211_wpa_enable(ieee, value);
2443 break;
2444
2445 case IEEE_PARAM_TKIP_COUNTERMEASURES:
2446 ieee->tkip_countermeasures = value;
2447 break;
2448
2449 case IEEE_PARAM_DROP_UNENCRYPTED: {
2450 /* HACK:
2451 *
2452 * wpa_supplicant calls set_wpa_enabled when the driver
2453 * is loaded and unloaded, regardless of if WPA is being
2454 * used. No other calls are made which can be used to
2455 * determine if encryption will be used or not prior to
2456 * association being expected. If encryption is not being
2457 * used, drop_unencrypted is set to false, else true -- we
2458 * can use this to determine if the CAP_PRIVACY_ON bit should
2459 * be set.
2460 */
2461 struct ieee80211_security sec = {
2462 .flags = SEC_ENABLED,
2463 .enabled = value,
2464 };
2465 ieee->drop_unencrypted = value;
2466 /* We only change SEC_LEVEL for open mode. Others
2467 * are set by ipw_wpa_set_encryption.
2468 */
2469 if (!value) {
2470 sec.flags |= SEC_LEVEL;
2471 sec.level = SEC_LEVEL_0;
2472 } else {
2473 sec.flags |= SEC_LEVEL;
2474 sec.level = SEC_LEVEL_1;
2475 }
2476 if (ieee->set_security)
2477 ieee->set_security(ieee->dev, &sec);
2478 break;
2479 }
2480
2481 case IEEE_PARAM_PRIVACY_INVOKED:
2482 ieee->privacy_invoked = value;
2483 break;
2484 case IEEE_PARAM_AUTH_ALGS:
2485 ret = ieee80211_wpa_set_auth_algs(ieee, value);
2486 break;
2487 case IEEE_PARAM_IEEE_802_1X:
2488 ieee->ieee802_1x = value;
2489 break;
2490 case IEEE_PARAM_WPAX_SELECT:
2491 spin_lock_irqsave(&ieee->wpax_suitlist_lock, flags);
2492 ieee->wpax_type_set = 1;
2493 ieee->wpax_type_notify = value;
2494 spin_unlock_irqrestore(&ieee->wpax_suitlist_lock, flags);
2495 break;
2496 default:
2497 printk("Unknown WPA param: %d\n", name);
2498 ret = -EOPNOTSUPP;
2499 }
2500
2501 return ret;
2502}
2503
2504/* implementation borrowed from hostap driver */
2505
2506static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee,
2507 struct ieee_param *param, int param_len)
2508{
2509 int ret = 0;
2510
2511 struct ieee80211_crypto_ops *ops;
2512 struct ieee80211_crypt_data **crypt;
2513
2514 struct ieee80211_security sec = {
2515 .flags = 0,
2516 };
2517
2518 param->u.crypt.err = 0;
2519 param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
2520
2521 if (param_len !=
2522 (int) ((char *) param->u.crypt.key - (char *) param) +
2523 param->u.crypt.key_len) {
2524 printk("Len mismatch %d, %d\n", param_len,
2525 param->u.crypt.key_len);
2526 return -EINVAL;
2527 }
2528 if (is_broadcast_ether_addr(param->sta_addr)) {
2529 if (param->u.crypt.idx >= WEP_KEYS)
2530 return -EINVAL;
2531 crypt = &ieee->crypt[param->u.crypt.idx];
2532 } else {
2533 return -EINVAL;
2534 }
2535
2536 if (strcmp(param->u.crypt.alg, "none") == 0) {
2537 if (crypt) {
2538 sec.enabled = 0;
2539 /* FIXME FIXME */
2540 sec.level = SEC_LEVEL_0;
2541 sec.flags |= SEC_ENABLED | SEC_LEVEL;
2542 ieee80211_crypt_delayed_deinit(ieee, crypt);
2543 }
2544 goto done;
2545 }
2546 sec.enabled = 1;
2547 /* FIXME FIXME */
2548 sec.flags |= SEC_ENABLED;
2549
2550 /* IPW HW cannot build TKIP MIC, host decryption still needed. */
2551 if (!(ieee->host_encrypt || ieee->host_decrypt) &&
2552 strcmp(param->u.crypt.alg, "TKIP"))
2553 goto skip_host_crypt;
2554
2555 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
2556 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0)
2557 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
2558 else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0)
2559 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
2560 else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0)
2561 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
2562 if (ops == NULL) {
2563 printk("unknown crypto alg '%s'\n", param->u.crypt.alg);
2564 param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG;
2565 ret = -EINVAL;
2566 goto done;
2567 }
2568
2569 if (*crypt == NULL || (*crypt)->ops != ops) {
2570 struct ieee80211_crypt_data *new_crypt;
2571
2572 ieee80211_crypt_delayed_deinit(ieee, crypt);
2573
2574 new_crypt = kmalloc(sizeof(*new_crypt), GFP_KERNEL);
2575 if (new_crypt == NULL) {
2576 ret = -ENOMEM;
2577 goto done;
2578 }
2579 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
2580 new_crypt->ops = ops;
2581 if (new_crypt->ops)
2582 new_crypt->priv =
2583 new_crypt->ops->init(param->u.crypt.idx);
2584
2585 if (new_crypt->priv == NULL) {
2586 kfree(new_crypt);
2587 param->u.crypt.err = IEEE_CRYPT_ERR_CRYPT_INIT_FAILED;
2588 ret = -EINVAL;
2589 goto done;
2590 }
2591
2592 *crypt = new_crypt;
2593 }
2594
2595 if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
2596 (*crypt)->ops->set_key(param->u.crypt.key,
2597 param->u.crypt.key_len, param->u.crypt.seq,
2598 (*crypt)->priv) < 0) {
2599 printk("key setting failed\n");
2600 param->u.crypt.err = IEEE_CRYPT_ERR_KEY_SET_FAILED;
2601 ret = -EINVAL;
2602 goto done;
2603 }
2604
2605 skip_host_crypt:
2606 if (param->u.crypt.set_tx) {
2607 ieee->tx_keyidx = param->u.crypt.idx;
2608 sec.active_key = param->u.crypt.idx;
2609 sec.flags |= SEC_ACTIVE_KEY;
2610 } else
2611 sec.flags &= ~SEC_ACTIVE_KEY;
2612
2613 if (param->u.crypt.alg != NULL) {
2614 memcpy(sec.keys[param->u.crypt.idx],
2615 param->u.crypt.key,
2616 param->u.crypt.key_len);
2617 sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len;
2618 sec.flags |= (1 << param->u.crypt.idx);
2619
2620 if (strcmp(param->u.crypt.alg, "WEP") == 0) {
2621 sec.flags |= SEC_LEVEL;
2622 sec.level = SEC_LEVEL_1;
2623 } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
2624 sec.flags |= SEC_LEVEL;
2625 sec.level = SEC_LEVEL_2;
2626 } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
2627 sec.flags |= SEC_LEVEL;
2628 sec.level = SEC_LEVEL_3;
2629 }
2630 }
2631 done:
2632 if (ieee->set_security)
2633 ieee->set_security(ieee->dev, &sec);
2634
2635 /* Do not reset port if card is in Managed mode since resetting will
2636 * generate new IEEE 802.11 authentication which may end up in looping
2637 * with IEEE 802.1X. If your hardware requires a reset after WEP
2638 * configuration (for example... Prism2), implement the reset_port in
2639 * the callbacks structures used to initialize the 802.11 stack. */
2640 if (ieee->reset_on_keychange &&
2641 ieee->iw_mode != IW_MODE_INFRA &&
2642 ieee->reset_port &&
2643 ieee->reset_port(ieee->dev)) {
2644 printk("reset_port failed\n");
2645 param->u.crypt.err = IEEE_CRYPT_ERR_CARD_CONF_FAILED;
2646 return -EINVAL;
2647 }
2648
2649 return ret;
2650}
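/* Illustrative sketch (not from the original driver): how a low-level
 * driver whose hardware needs a reset after key changes (e.g. a
 * Prism2-class NIC, as the comment above mentions) might wire the
 * callbacks.  example_reset_port() and its body are hypothetical;
 * reset_on_keychange and reset_port are the real struct
 * ieee80211_device fields tested above.
 */
static int example_reset_port(struct net_device *dev)
{
	/* hardware-specific re-initialisation with the new keys would go
	 * here; return 0 on success, non-zero on failure
	 */
	return 0;
}

static void example_setup_keychange_reset(struct ieee80211_device *ieee)
{
	ieee->reset_on_keychange = 1;
	ieee->reset_port = example_reset_port;
}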
2651
2652int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
2653 struct iw_point *p)
2654{
2655 struct ieee_param *param;
2656 int ret = 0;
2657
2658 down(&ieee->wx_sem);
2659
2660 if (p->length < sizeof(struct ieee_param) || !p->pointer) {
2661 ret = -EINVAL;
2662 goto out;
2663 }
2664
2665 param = memdup_user(p->pointer, p->length);
2666 if (IS_ERR(param)) {
2667 ret = PTR_ERR(param);
2668 goto out;
2669 }
2670
2671 switch (param->cmd) {
2672 case IEEE_CMD_SET_WPA_PARAM:
2673 ret = ieee80211_wpa_set_param(ieee, param->u.wpa_param.name,
2674 param->u.wpa_param.value);
2675 break;
2676 case IEEE_CMD_SET_WPA_IE:
2677 ret = ieee80211_wpa_set_wpa_ie(ieee, param, p->length);
2678 break;
2679 case IEEE_CMD_SET_ENCRYPTION:
2680 ret = ieee80211_wpa_set_encryption(ieee, param, p->length);
2681 break;
2682 case IEEE_CMD_MLME:
2683 ret = ieee80211_wpa_mlme(ieee, param->u.mlme.command,
2684 param->u.mlme.reason_code);
2685 break;
2686 default:
2687 printk("Unknown WPA supplicant request: %d\n", param->cmd);
2688 ret = -EOPNOTSUPP;
2689 break;
2690 }
2691
2692 if (ret == 0 && copy_to_user(p->pointer, param, p->length))
2693 ret = -EFAULT;
2694
2695 kfree(param);
2696out:
2697 up(&ieee->wx_sem);
2698
2699 return ret;
2700}
2701
2702void notify_wx_assoc_event(struct ieee80211_device *ieee)
2703{
2704 union iwreq_data wrqu;
2705 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
2706 if (ieee->state == IEEE80211_LINKED)
2707 memcpy(wrqu.ap_addr.sa_data, ieee->current_network.bssid, ETH_ALEN);
2708 else
2709 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
2710 wireless_send_event(ieee->dev, SIOCGIWAP, &wrqu, NULL);
2711}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
deleted file mode 100644
index 46f35644126c..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
+++ /dev/null
@@ -1,567 +0,0 @@
1/* IEEE 802.11 SoftMAC layer
2 * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
3 *
4 * Mostly extracted from the rtl8180-sa2400 driver for the
5 * in-kernel generic ieee802.11 stack.
6 *
7 * Some pieces of code might be stolen from ipw2100 driver
8 * copyright of who own it's copyright ;-)
9 *
10 * PS wx handler mostly stolen from hostap, copyright who
11 * own it's copyright ;-)
12 *
13 * released under the GPL
14 */
15
16
17#include <linux/etherdevice.h>
18
19#include "ieee80211.h"
20
21/* FIXME: add A freqs */
22
23const long ieee80211_wlan_frequencies[] = {
24 2412, 2417, 2422, 2427,
25 2432, 2437, 2442, 2447,
26 2452, 2457, 2462, 2467,
27 2472, 2484
28};
29
30
31int ieee80211_wx_set_freq(struct ieee80211_device *ieee,
32 struct iw_request_info *a, union iwreq_data *wrqu,
33 char *b)
34{
35 int ret;
36 struct iw_freq *fwrq = &wrqu->freq;
37// printk("in %s\n",__func__);
38 down(&ieee->wx_sem);
39
40 if (ieee->iw_mode == IW_MODE_INFRA) {
41 ret = -EOPNOTSUPP;
42 goto out;
43 }
44
45 /* if setting by freq convert to channel */
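/* e.g. "iwconfig <if> freq 2.437G" arrives as m = 243700000, e = 1;
 * 243700000 / 100000 = 2437 matches ieee80211_wlan_frequencies[5],
 * so the request is rewritten below as channel m = 6, e = 0
 */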
46 if (fwrq->e == 1) {
47 if ((fwrq->m >= (int) 2.412e8 &&
48 fwrq->m <= (int) 2.487e8)) {
49 int f = fwrq->m / 100000;
50 int c = 0;
51
52 while ((c < 14) && (f != ieee80211_wlan_frequencies[c]))
53 c++;
54
55 /* hack to fall through */
56 fwrq->e = 0;
57 fwrq->m = c + 1;
58 }
59 }
60
61 if (fwrq->e > 0 || fwrq->m > 14 || fwrq->m < 1) {
62 ret = -EOPNOTSUPP;
63 goto out;
64
65 } else { /* Set the channel */
66
67
68 ieee->current_network.channel = fwrq->m;
69 ieee->set_chan(ieee->dev, ieee->current_network.channel);
70
71 if (ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER)
72 if (ieee->state == IEEE80211_LINKED) {
73 ieee80211_stop_send_beacons(ieee);
74 ieee80211_start_send_beacons(ieee);
75 }
76 }
77
78 ret = 0;
79out:
80 up(&ieee->wx_sem);
81 return ret;
82}
83
84
85int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
86 struct iw_request_info *a, union iwreq_data *wrqu,
87 char *b)
88{
89 struct iw_freq *fwrq = &wrqu->freq;
90
91 if (ieee->current_network.channel == 0)
92 return -1;
93
94 fwrq->m = ieee->current_network.channel;
95 fwrq->e = 0;
96
97 return 0;
98}
99
100int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
101 struct iw_request_info *info, union iwreq_data *wrqu,
102 char *extra)
103{
104 unsigned long flags;
105
106 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
107
108 if (ieee->iw_mode == IW_MODE_MONITOR)
109 return -1;
110
111 /* We want to avoid giving the user inconsistent info */
112 spin_lock_irqsave(&ieee->lock, flags);
113
114 if (ieee->state != IEEE80211_LINKED &&
115 ieee->state != IEEE80211_LINKED_SCANNING &&
116 ieee->wap_set == 0)
117
118 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
119 else
120 memcpy(wrqu->ap_addr.sa_data,
121 ieee->current_network.bssid, ETH_ALEN);
122
123 spin_unlock_irqrestore(&ieee->lock, flags);
124
125 return 0;
126}
127
128
129int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
130 struct iw_request_info *info, union iwreq_data *awrq,
131 char *extra)
132{
133
134 int ret = 0;
135 unsigned long flags;
136
137 short ifup = ieee->proto_started;//dev->flags & IFF_UP;
138 struct sockaddr *temp = (struct sockaddr *)awrq;
139
140 //printk("=======Set WAP:");
141 ieee->sync_scan_hurryup = 1;
142
143 down(&ieee->wx_sem);
144 /* use ifconfig hw ether */
145 if (ieee->iw_mode == IW_MODE_MASTER) {
146 ret = -1;
147 goto out;
148 }
149
150 if (temp->sa_family != ARPHRD_ETHER) {
151 ret = -EINVAL;
152 goto out;
153 }
154
155 if (ifup)
156 ieee80211_stop_protocol(ieee);
157
158 /* just to avoid giving inconsistent info in the
159 * get wx method. not really needed otherwise
160 */
161 spin_lock_irqsave(&ieee->lock, flags);
162
163 memcpy(ieee->current_network.bssid, temp->sa_data, ETH_ALEN);
164 ieee->wap_set = !is_zero_ether_addr(temp->sa_data);
165 //printk(" %x:%x:%x:%x:%x:%x\n", ieee->current_network.bssid[0],ieee->current_network.bssid[1],ieee->current_network.bssid[2],ieee->current_network.bssid[3],ieee->current_network.bssid[4],ieee->current_network.bssid[5]);
166
167 spin_unlock_irqrestore(&ieee->lock, flags);
168
169 if (ifup)
170 ieee80211_start_protocol(ieee);
171
172out:
173 up(&ieee->wx_sem);
174 return ret;
175}
176
177int ieee80211_wx_get_essid(struct ieee80211_device *ieee,
178 struct iw_request_info *a, union iwreq_data *wrqu,
179 char *b)
180{
181 int len, ret = 0;
182 unsigned long flags;
183
184 if (ieee->iw_mode == IW_MODE_MONITOR)
185 return -1;
186
187 /* We want to avoid giving the user inconsistent info */
188 spin_lock_irqsave(&ieee->lock, flags);
189
190 if (ieee->current_network.ssid[0] == '\0' ||
191 ieee->current_network.ssid_len == 0){
192 ret = -1;
193 goto out;
194 }
195
196 if (ieee->state != IEEE80211_LINKED &&
197 ieee->state != IEEE80211_LINKED_SCANNING &&
198 ieee->ssid_set == 0){
199 ret = -1;
200 goto out;
201 }
202 len = ieee->current_network.ssid_len;
203 wrqu->essid.length = len;
204 strncpy(b, ieee->current_network.ssid, len);
205 wrqu->essid.flags = 1;
206
207out:
208 spin_unlock_irqrestore(&ieee->lock, flags);
209
210 return ret;
211
212}
213
214int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
215 struct iw_request_info *info, union iwreq_data *wrqu,
216 char *extra)
217{
218
219 u32 target_rate = wrqu->bitrate.value;
220
221 //added by lizhaoming for auto mode
222 if (target_rate == -1)
223 ieee->rate = 110;
224 else
225 ieee->rate = target_rate/100000;
226
227 //FIXME: we might want to limit rate also in management protocols.
228 return 0;
229}
230
231
232
233int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
234 struct iw_request_info *info, union iwreq_data *wrqu,
235 char *extra)
236{
237
238 wrqu->bitrate.value = ieee->rate * 100000;
239
240 return 0;
241}
242
243int ieee80211_wx_set_mode(struct ieee80211_device *ieee,
244 struct iw_request_info *a, union iwreq_data *wrqu,
245 char *b)
246{
247
248 ieee->sync_scan_hurryup = 1;
249
250 down(&ieee->wx_sem);
251
252 if (wrqu->mode == ieee->iw_mode)
253 goto out;
254
255 if (wrqu->mode == IW_MODE_MONITOR)
256 ieee->dev->type = ARPHRD_IEEE80211;
257 else
258 ieee->dev->type = ARPHRD_ETHER;
259
260 if (!ieee->proto_started) {
261 ieee->iw_mode = wrqu->mode;
262 } else {
263 ieee80211_stop_protocol(ieee);
264 ieee->iw_mode = wrqu->mode;
265 ieee80211_start_protocol(ieee);
266 }
267
268out:
269 up(&ieee->wx_sem);
270 return 0;
271}
272
273
274void ieee80211_wx_sync_scan_wq(struct work_struct *work)
275{
276 struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wx_sync_scan_wq);
277 short chan;
278
279 chan = ieee->current_network.channel;
280
281 if (ieee->data_hard_stop)
282 ieee->data_hard_stop(ieee->dev);
283
284 ieee80211_stop_send_beacons(ieee);
285
286 ieee->state = IEEE80211_LINKED_SCANNING;
287 ieee->link_change(ieee->dev);
288
289 ieee80211_start_scan_syncro(ieee);
290
291 ieee->set_chan(ieee->dev, chan);
292
293 ieee->state = IEEE80211_LINKED;
294 ieee->link_change(ieee->dev);
295
296 if (ieee->data_hard_resume)
297 ieee->data_hard_resume(ieee->dev);
298
299 if (ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER)
300 ieee80211_start_send_beacons(ieee);
301
302 //YJ,add,080828, to prevent losing ping packets during scanning
303 //ieee80211_sta_ps_send_null_frame(ieee, false);
304 //YJ,add,080828,end
305
306 up(&ieee->wx_sem);
307
308}
309
310int ieee80211_wx_set_scan(struct ieee80211_device *ieee,
311 struct iw_request_info *a, union iwreq_data *wrqu,
312 char *b)
313{
314 int ret = 0;
315
316 down(&ieee->wx_sem);
317
318 if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)) {
319 ret = -1;
320 goto out;
321 }
322 //YJ,add,080828
323 //To prevent losing ping packets during scanning
324 //ieee80211_sta_ps_send_null_frame(ieee, true);
325 //YJ,add,080828,end
326
327 if (ieee->state == IEEE80211_LINKED) {
328 queue_work(ieee->wq, &ieee->wx_sync_scan_wq);
329 /* intentionally leave the sem down; ieee80211_wx_sync_scan_wq will up it */
330 return 0;
331 }
332
333out:
334 up(&ieee->wx_sem);
335 return ret;
336}
337
338int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
339 struct iw_request_info *a, union iwreq_data *wrqu,
340 char *extra)
341{
342
343 int ret = 0, len;
344 short proto_started;
345 unsigned long flags;
346
347 ieee->sync_scan_hurryup = 1;
348
349 down(&ieee->wx_sem);
350
351 proto_started = ieee->proto_started;
352
353 if (wrqu->essid.length > IW_ESSID_MAX_SIZE) {
354 ret = -E2BIG;
355 goto out;
356 }
357
358 if (ieee->iw_mode == IW_MODE_MONITOR) {
359 ret = -1;
360 goto out;
361 }
362
363 if (proto_started)
364 ieee80211_stop_protocol(ieee);
365
366 /* this is just to be sure that the GET wx callback
367 * has consistent info. not needed otherwise
368 */
369 spin_lock_irqsave(&ieee->lock, flags);
370
371 if (wrqu->essid.flags && wrqu->essid.length) {
372//YJ,modified,080819
373 len = (wrqu->essid.length < IW_ESSID_MAX_SIZE) ? (wrqu->essid.length) : IW_ESSID_MAX_SIZE;
374 memset(ieee->current_network.ssid, 0, ieee->current_network.ssid_len); //YJ,add,080819
375 strncpy(ieee->current_network.ssid, extra, len);
376 ieee->current_network.ssid_len = len;
377 ieee->ssid_set = 1;
378//YJ,modified,080819,end
379
380 //YJ,add,080819,for hidden ap
381 if (len == 0) {
382 memset(ieee->current_network.bssid, 0, ETH_ALEN);
383 ieee->current_network.capability = 0;
384 }
385 //YJ,add,080819,for hidden ap,end
386 } else {
387 ieee->ssid_set = 0;
388 ieee->current_network.ssid[0] = '\0';
389 ieee->current_network.ssid_len = 0;
390 }
391 //printk("==========set essid %s!\n",ieee->current_network.ssid);
392 spin_unlock_irqrestore(&ieee->lock, flags);
393
394 if (proto_started)
395 ieee80211_start_protocol(ieee);
396out:
397 up(&ieee->wx_sem);
398 return ret;
399}
400
401int ieee80211_wx_get_mode(struct ieee80211_device *ieee,
402 struct iw_request_info *a, union iwreq_data *wrqu,
403 char *b)
404{
405
406 wrqu->mode = ieee->iw_mode;
407 return 0;
408}
409
410int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
411 struct iw_request_info *info, union iwreq_data *wrqu,
412 char *extra)
413{
414
415 int *parms = (int *)extra;
416 int enable = (parms[0] > 0);
417 short prev = ieee->raw_tx;
418
419 down(&ieee->wx_sem);
420
421 if (enable)
422 ieee->raw_tx = 1;
423 else
424 ieee->raw_tx = 0;
425
426 netdev_info(ieee->dev, "raw TX is %s\n",
427 ieee->raw_tx ? "enabled" : "disabled");
428
429 if (ieee->iw_mode == IW_MODE_MONITOR) {
430 if (prev == 0 && ieee->raw_tx) {
431 if (ieee->data_hard_resume)
432 ieee->data_hard_resume(ieee->dev);
433
434 netif_carrier_on(ieee->dev);
435 }
436
437 if (prev && ieee->raw_tx == 0)
438 netif_carrier_off(ieee->dev);
439 }
440
441 up(&ieee->wx_sem);
442
443 return 0;
444}
445
446int ieee80211_wx_get_name(struct ieee80211_device *ieee,
447 struct iw_request_info *info, union iwreq_data *wrqu,
448 char *extra)
449{
450 strlcpy(wrqu->name, "802.11", IFNAMSIZ);
451 if (ieee->modulation & IEEE80211_CCK_MODULATION) {
452 strlcat(wrqu->name, "b", IFNAMSIZ);
453 if (ieee->modulation & IEEE80211_OFDM_MODULATION)
454 strlcat(wrqu->name, "/g", IFNAMSIZ);
455 } else if (ieee->modulation & IEEE80211_OFDM_MODULATION)
456 strlcat(wrqu->name, "g", IFNAMSIZ);
457
458 if ((ieee->state == IEEE80211_LINKED) ||
459 (ieee->state == IEEE80211_LINKED_SCANNING))
460 strlcat(wrqu->name, " link", IFNAMSIZ);
461 else if (ieee->state != IEEE80211_NOLINK)
462 strlcat(wrqu->name, " .....", IFNAMSIZ);
463
464
465 return 0;
466}
467
468
469/* this is mostly stolen from hostap */
470int ieee80211_wx_set_power(struct ieee80211_device *ieee,
471 struct iw_request_info *info, union iwreq_data *wrqu,
472 char *extra)
473{
474 int ret = 0;
475
476 if ((!ieee->sta_wake_up) ||
477 (!ieee->ps_request_tx_ack) ||
478 (!ieee->enter_sleep_state) ||
479 (!ieee->ps_is_queue_empty)) {
480
481 printk("ERROR. PS mode tried to be use but driver missed a callback\n\n");
482
483 return -1;
484 }
485
486 down(&ieee->wx_sem);
487
488 if (wrqu->power.disabled) {
489 ieee->ps = IEEE80211_PS_DISABLED;
490
491 goto exit;
492 }
493 switch (wrqu->power.flags & IW_POWER_MODE) {
494 case IW_POWER_UNICAST_R:
495 ieee->ps = IEEE80211_PS_UNICAST;
496
497 break;
498 case IW_POWER_ALL_R:
499 ieee->ps = IEEE80211_PS_UNICAST | IEEE80211_PS_MBCAST;
500 break;
501
502 case IW_POWER_ON:
503 ieee->ps = IEEE80211_PS_DISABLED;
504 break;
505
506 default:
507 ret = -EINVAL;
508 goto exit;
509 }
510
511 if (wrqu->power.flags & IW_POWER_TIMEOUT) {
512
513 ieee->ps_timeout = wrqu->power.value / 1000;
514 printk("Timeout %d\n", ieee->ps_timeout);
515 }
516
517 if (wrqu->power.flags & IW_POWER_PERIOD) {
518
519 ret = -EOPNOTSUPP;
520 goto exit;
521 //wrq->value / 1024;
522
523 }
524exit:
525 up(&ieee->wx_sem);
526 return ret;
527
528}
529
530/* this is stolen from hostap */
531int ieee80211_wx_get_power(struct ieee80211_device *ieee,
532 struct iw_request_info *info, union iwreq_data *wrqu,
533 char *extra)
534{
535 int ret = 0;
536
537 down(&ieee->wx_sem);
538
539 if (ieee->ps == IEEE80211_PS_DISABLED) {
540 wrqu->power.disabled = 1;
541 goto exit;
542 }
543
544 wrqu->power.disabled = 0;
545
546// if ((wrqu->power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
547 wrqu->power.flags = IW_POWER_TIMEOUT;
548 wrqu->power.value = ieee->ps_timeout * 1000;
549// } else {
550// ret = -EOPNOTSUPP;
551// goto exit;
552 //wrqu->power.flags = IW_POWER_PERIOD;
553 //wrqu->power.value = ieee->current_network.dtim_period *
554 // ieee->current_network.beacon_interval * 1024;
555// }
556
557
558 if (ieee->ps & IEEE80211_PS_MBCAST)
559 wrqu->power.flags |= IW_POWER_ALL_R;
560 else
561 wrqu->power.flags |= IW_POWER_UNICAST_R;
562
563exit:
564 up(&ieee->wx_sem);
565 return ret;
566
567}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
deleted file mode 100644
index 0dc5ae414270..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
+++ /dev/null
@@ -1,591 +0,0 @@
1/******************************************************************************
2
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18 The full GNU General Public License is included in this distribution in the
19 file called LICENSE.
20
21 Contact Information:
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25******************************************************************************
26
27 Few modifications for Realtek's Wi-Fi drivers by
28 Andrea Merello <andrea.merello@gmail.com>
29
30 A special thanks goes to Realtek for their support !
31
32******************************************************************************/
33
34#include <linux/compiler.h>
35#include <linux/errno.h>
36#include <linux/if_arp.h>
37#include <linux/in6.h>
38#include <linux/in.h>
39#include <linux/ip.h>
40#include <linux/kernel.h>
41#include <linux/module.h>
42#include <linux/netdevice.h>
43#include <linux/pci.h>
44#include <linux/proc_fs.h>
45#include <linux/skbuff.h>
46#include <linux/slab.h>
47#include <linux/tcp.h>
48#include <linux/types.h>
49#include <linux/wireless.h>
50#include <linux/etherdevice.h>
51#include <asm/uaccess.h>
52#include <linux/if_vlan.h>
53
54#include "ieee80211.h"
55
56
57/*
58
59
60802.11 Data Frame
61
62
 63802.11 frame_control for data frames - 2 bytes
64 ,-----------------------------------------------------------------------------------------.
65bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e |
66 |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
67val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x |
68 |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
69desc | ^-ver-^ | ^type-^ | ^-----subtype-----^ | to |from |more |retry| pwr |more |wep |
70 | | | x=0 data,x=1 data+ack | DS | DS |frag | | mgm |data | |
71 '-----------------------------------------------------------------------------------------'
72 /\
73 |
74802.11 Data Frame |
75 ,--------- 'ctrl' expands to >-----------'
76 |
77 ,--'---,-------------------------------------------------------------.
78Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
79 |------|------|---------|---------|---------|------|---------|------|
80Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
81 | | tion | (BSSID) | | | ence | data | |
82 `--------------------------------------------------| |------'
83Total: 28 non-data bytes `----.----'
84 |
85 .- 'Frame data' expands to <---------------------------'
86 |
87 V
88 ,---------------------------------------------------.
89Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
90 |------|------|---------|----------|------|---------|
91Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
92 | DSAP | SSAP | | | | Packet |
93 | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
94 `-----------------------------------------| |
95Total: 8 non-data bytes `----.----'
96 |
97 .- 'IP Packet' expands, if WEP enabled, to <--'
98 |
99 V
100 ,-----------------------.
101Bytes | 4 | 0-2296 | 4 |
102 |-----|-----------|-----|
103Desc. | IV | Encrypted | ICV |
104 | | IP Packet | |
105 `-----------------------'
106Total: 8 non-data bytes
107
108
109802.3 Ethernet Data Frame
110
111 ,-----------------------------------------.
112Bytes | 6 | 6 | 2 | Variable | 4 |
113 |-------|-------|------|-----------|------|
114Desc. | Dest. | Source| Type | IP Packet | fcs |
115 | MAC | MAC | | | |
116 `-----------------------------------------'
117Total: 18 non-data bytes
118
119In the event that fragmentation is required, the incoming payload is split into
120N parts of size ieee->fts. The first fragment contains the SNAP header and the
121remaining packets are just data.
122
123If encryption is enabled, each fragment payload size is reduced by enough space
124to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP)
125So if you have 1500 bytes of payload with ieee->fts set to 500 without
126encryption it will take 3 frames. With WEP it will take 4 frames as the
127payload of each frame is reduced to 492 bytes.
128
129* SKB visualization
130*
131* ,- skb->data
132* |
133* | ETHERNET HEADER ,-<-- PAYLOAD
134* | | 14 bytes from skb->data
135* | 2 bytes for Type --> ,T. | (sizeof ethhdr)
136* | | | |
137* |,-Dest.--. ,--Src.---. | | |
138* | 6 bytes| | 6 bytes | | | |
139* v | | | | | |
140* 0 | v 1 | v | v 2
141* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
142* ^ | ^ | ^ |
143* | | | | | |
144* | | | | `T' <---- 2 bytes for Type
145* | | | |
146* | | '---SNAP--' <-------- 6 bytes for SNAP
147* | |
148* `-IV--' <-------------------- 4 bytes for IV (WEP)
149*
150* SNAP HEADER
151*
152*/
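
/*
 * A worked version of the fragmentation arithmetic described above, as a
 * standalone sketch (assuming WEP's 4-byte IV + 4-byte ICV, i.e. 8 bytes of
 * per-fragment overhead). The helper is illustrative only:
 * frags_needed(1500, 500, 0) == 3 and frags_needed(1500, 500, 8) == 4,
 * since with WEP each fragment carries only 500 - 8 = 492 payload bytes.
 */
static inline int frags_needed(int payload, int fts, int crypt_overhead)
{
	int per_frag = fts - crypt_overhead;	/* usable payload per fragment */

	return (payload + per_frag - 1) / per_frag;	/* round up */
}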
153
154static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
155static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
156
157static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
158{
159 struct ieee80211_snap_hdr *snap;
160 u8 *oui;
161
162 snap = (struct ieee80211_snap_hdr *)data;
163 snap->dsap = 0xaa;
164 snap->ssap = 0xaa;
165 snap->ctrl = 0x03;
166
167 if (h_proto == 0x8137 || h_proto == 0x80f3)
168 oui = P802_1H_OUI;
169 else
170 oui = RFC1042_OUI;
171 snap->oui[0] = oui[0];
172 snap->oui[1] = oui[1];
173 snap->oui[2] = oui[2];
174
175 *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
176
177 return SNAP_SIZE + sizeof(u16);
178}
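
/*
 * Worked example for the function above (illustrative only): encapsulating
 * an IPv4 frame, i.e. ieee80211_put_snap(data, ETH_P_IP), writes the classic
 * RFC 1042 header
 *
 *	aa aa 03 00 00 00 08 00
 *	DSAP SSAP UI '--OUI--' type
 *
 * and returns SNAP_SIZE + sizeof(u16) == 8 bytes consumed.
 */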
179
180int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
181 struct sk_buff *frag, int hdr_len)
182{
 183	struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
184 int res;
185
186 /*
 187	 * Bail out when there is no crypt context; this avoids the system
 188	 * hang seen on shared-key errors.
189 */
190 if (!crypt || !crypt->ops)
191 return -1;
192
 193#ifdef CONFIG_IEEE80211_CRYPT_TKIP
 194	if (ieee->tkip_countermeasures &&
 195	    strcmp(crypt->ops->name, "TKIP") == 0) {
 196		struct ieee80211_hdr_4addr *header =
 197			(struct ieee80211_hdr_4addr *)frag->data;
 198
199 if (net_ratelimit()) {
200 netdev_dbg(ieee->dev, "TKIP countermeasures: dropped "
201 "TX packet to %pM\n", header->addr1);
202 }
203 return -1;
204 }
205#endif
206 /*
207 * To encrypt, frame format is:
208 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes)
209 *
210 * PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU
211 * encryption.
212 *
213 * Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
214 * call both MSDU and MPDU encryption functions from here.
215 */
216 atomic_inc(&crypt->refcnt);
217 res = 0;
218 if (crypt->ops->encrypt_msdu)
219 res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
220 if (res == 0 && crypt->ops->encrypt_mpdu)
221 res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
222
223 atomic_dec(&crypt->refcnt);
224 if (res < 0) {
225 netdev_info(ieee->dev, "Encryption failed: len=%d.\n", frag->len);
226 ieee->ieee_stats.tx_discards++;
227 return -1;
228 }
229
230 return 0;
231}
232
233
234void ieee80211_txb_free(struct ieee80211_txb *txb)
235{
236 int i;
237 if (unlikely(!txb))
238 return;
239 for (i = 0; i < txb->nr_frags; i++)
240 if (txb->fragments[i])
241 dev_kfree_skb_any(txb->fragments[i]);
242 kfree(txb);
243}
244
245static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
246 gfp_t gfp_mask)
247{
248 struct ieee80211_txb *txb;
249 int i;
 250	txb = kzalloc(sizeof(struct ieee80211_txb) +
 251		      (sizeof(u8 *) * nr_frags),
 252		      gfp_mask);
 253	if (!txb)
 254		return NULL;
 255
 256	/* kzalloc() has already zeroed the descriptor and fragment pointers */
257 txb->nr_frags = nr_frags;
258 txb->frag_size = txb_size;
259
260 for (i = 0; i < nr_frags; i++) {
261 txb->fragments[i] = dev_alloc_skb(txb_size);
262 if (unlikely(!txb->fragments[i])) {
263 i--;
264 break;
265 }
266 }
267 if (unlikely(i != nr_frags)) {
268 while (i >= 0)
269 dev_kfree_skb_any(txb->fragments[i--]);
270 kfree(txb);
271 return NULL;
272 }
273 return txb;
274}
275
276/*
 277 * Classify the data packet to be sent in order to
 278 * determine its transmit queue (user priority).
279 */
280static int ieee80211_classify(struct sk_buff *skb,
281 struct ieee80211_network *network)
282{
283 struct ether_header *eh = (struct ether_header *)skb->data;
284 unsigned int wme_UP = 0;
285
286 if (!network->QoS_Enable) {
287 skb->priority = 0;
 288		return wme_UP;
289 }
290
 291	if (eh->ether_type == htons(ETHERTYPE_IP)) {
292 const struct iphdr *ih = (struct iphdr *)(skb->data +
293 sizeof(struct ether_header));
294 wme_UP = (ih->tos >> 5)&0x07;
295 } else if (vlan_tx_tag_present(skb)) {/* vtag packet */
296#ifndef VLAN_PRI_SHIFT
297#define VLAN_PRI_SHIFT 13 /* Shift to find VLAN user priority */
298#define VLAN_PRI_MASK 7 /* Mask for user priority bits in VLAN */
299#endif
300 u32 tag = vlan_tx_tag_get(skb);
301 wme_UP = (tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
302 } else if (ETH_P_PAE == ntohs(((struct ethhdr *)skb->data)->h_proto)) {
303 wme_UP = 7;
304 }
305
306 skb->priority = wme_UP;
 307	return wme_UP;
308}
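
/*
 * Worked example for the classification above (values are illustrative):
 * with QoS enabled, an IPv4 packet whose TOS byte is 0xB8 (DSCP EF) maps to
 * UP (0xB8 >> 5) & 0x07 = 5, a VLAN tag with PCP 6 in bits 15:13 maps to
 * UP 6, and EAPOL frames are always forced to UP 7.
 */
static inline unsigned int tos_to_up_example(u8 tos)
{
	return (tos >> 5) & 0x07;	/* e.g. TOS 0xB8 (DSCP EF) -> UP 5 */
}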
309
310/* SKBs are added to the ieee->tx_queue. */
311int ieee80211_rtl_xmit(struct sk_buff *skb, struct net_device *dev)
312{
313 struct ieee80211_device *ieee = netdev_priv(dev);
314 struct ieee80211_txb *txb = NULL;
315 struct ieee80211_hdr_3addrqos *frag_hdr;
316 int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
317 unsigned long flags;
318 struct net_device_stats *stats = &ieee->stats;
319 int ether_type, encrypt;
320 int bytes, fc, qos_ctl, hdr_len;
321 struct sk_buff *skb_frag;
322 struct ieee80211_hdr_3addrqos header = { /* Ensure zero initialized */
323 .duration_id = 0,
324 .seq_ctl = 0,
325 .qos_ctl = 0
326 };
327 u8 dest[ETH_ALEN], src[ETH_ALEN];
328
 329	struct ieee80211_crypt_data *crypt;
330
331 spin_lock_irqsave(&ieee->lock, flags);
332
333 /*
334 * If there is no driver handler to take the TXB, don't bother
335 * creating it...
336 */
337 if ((!ieee->hard_start_xmit &&
338 !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)) ||
339 ((!ieee->softmac_data_hard_start_xmit &&
340 (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
341 netdev_warn(ieee->dev, "No xmit handler.\n");
342 goto success;
343 }
344
 345	ieee80211_classify(skb, &ieee->current_network);
 346	if (likely(ieee->raw_tx == 0)) {
347
348 if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
349 netdev_warn(ieee->dev, "skb too small (%d).\n", skb->len);
350 goto success;
351 }
352
353 ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
354
355 crypt = ieee->crypt[ieee->tx_keyidx];
356
357 encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
358 ieee->host_encrypt && crypt && crypt->ops;
359
360 if (!encrypt && ieee->ieee802_1x &&
361 ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
362 stats->tx_dropped++;
363 goto success;
364 }
365
366 #ifdef CONFIG_IEEE80211_DEBUG
367 if (crypt && !encrypt && ether_type == ETH_P_PAE) {
368 struct eapol *eap = (struct eapol *)(skb->data +
369 sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
370 IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
371 eap_get_type(eap->type));
372 }
373 #endif
374
375 /* Save source and destination addresses */
376 memcpy(&dest, skb->data, ETH_ALEN);
377 memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);
378
379 /* Advance the SKB to the start of the payload */
380 skb_pull(skb, sizeof(struct ethhdr));
381
382 /* Determine total amount of storage required for TXB packets */
383 bytes = skb->len + SNAP_SIZE + sizeof(u16);
384
385 if (ieee->current_network.QoS_Enable) {
386 if (encrypt)
387 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA |
388 IEEE80211_FCTL_WEP;
389 else
390 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA;
391
392 } else {
393 if (encrypt)
394 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
395 IEEE80211_FCTL_WEP;
396 else
397 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
398 }
399
400 if (ieee->iw_mode == IW_MODE_INFRA) {
401 fc |= IEEE80211_FCTL_TODS;
402 /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
403 memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
404 memcpy(&header.addr2, &src, ETH_ALEN);
405 memcpy(&header.addr3, &dest, ETH_ALEN);
406 } else if (ieee->iw_mode == IW_MODE_ADHOC) {
407 /*
408 * not From/To DS: Addr1 = DA, Addr2 = SA,
409 * Addr3 = BSSID
410 */
411 memcpy(&header.addr1, dest, ETH_ALEN);
412 memcpy(&header.addr2, src, ETH_ALEN);
413 memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
414 }
415 header.frame_ctl = cpu_to_le16(fc);
416
417 /*
418 * Determine fragmentation size based on destination (multicast
419 * and broadcast are not fragmented)
420 */
421 if (is_multicast_ether_addr(header.addr1)) {
422 frag_size = MAX_FRAG_THRESHOLD;
423 qos_ctl = QOS_CTL_NOTCONTAIN_ACK;
424 } else {
425 /* default:392 */
426 frag_size = ieee->fts;
427 qos_ctl = 0;
428 }
429
430 if (ieee->current_network.QoS_Enable) {
431 hdr_len = IEEE80211_3ADDR_LEN + 2;
432 /* skb->priority is set in the ieee80211_classify() */
433 qos_ctl |= skb->priority;
434 header.qos_ctl = cpu_to_le16(qos_ctl);
435 } else {
436 hdr_len = IEEE80211_3ADDR_LEN;
437 }
438
439 /*
 440	 * Determine amount of payload per fragment. Regardless of whether
441 * this stack is providing the full 802.11 header, one will
442 * eventually be affixed to this fragment -- so we must account
443 * for it when determining the amount of payload space.
444 */
445 bytes_per_frag = frag_size - hdr_len;
446 if (ieee->config &
447 (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
448 bytes_per_frag -= IEEE80211_FCS_LEN;
449
450 /* Each fragment may need to have room for encryption pre/postfix */
451 if (encrypt)
452 bytes_per_frag -= crypt->ops->extra_prefix_len +
453 crypt->ops->extra_postfix_len;
454
455 /*
 456	 * Number of fragments is the total payload size divided by the
 457	 * payload per fragment (rounded up)
458 */
459 nr_frags = bytes / bytes_per_frag;
460 bytes_last_frag = bytes % bytes_per_frag;
461 if (bytes_last_frag)
462 nr_frags++;
463 else
464 bytes_last_frag = bytes_per_frag;
465
466 /*
467 * When we allocate the TXB we allocate enough space for the
468 * reserve and full fragment bytes (bytes_per_frag doesn't
469 * include prefix, postfix, header, FCS, etc.)
470 */
471 txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC);
472 if (unlikely(!txb)) {
473 netdev_warn(ieee->dev, "Could not allocate TXB\n");
474 goto failed;
475 }
476 txb->encrypted = encrypt;
477 txb->payload_size = bytes;
478
479 for (i = 0; i < nr_frags; i++) {
480 skb_frag = txb->fragments[i];
481 skb_frag->priority = UP2AC(skb->priority);
482 if (encrypt)
483 skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
484
485 frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(
486 skb_frag, hdr_len);
487 memcpy(frag_hdr, &header, hdr_len);
488
489 /*
490 * If this is not the last fragment, then add the MOREFRAGS
491 * bit to the frame control
492 */
493 if (i != nr_frags - 1) {
494 frag_hdr->frame_ctl = cpu_to_le16(
495 fc | IEEE80211_FCTL_MOREFRAGS);
496 bytes = bytes_per_frag;
497
498 } else {
499 /* The last fragment takes the remaining length */
500 bytes = bytes_last_frag;
501 }
502 if (ieee->current_network.QoS_Enable) {
503 /*
 504			 * the +1 selects the per-AC sequence counter;
 505			 * index 0 is reserved for non-QoS frames
506 */
507 frag_hdr->seq_ctl = cpu_to_le16(
508 ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
509 } else {
510 frag_hdr->seq_ctl = cpu_to_le16(
511 ieee->seq_ctrl[0]<<4 | i);
512 }
513
514 /* Put a SNAP header on the first fragment */
515 if (i == 0) {
516 ieee80211_put_snap(
517 skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
518 ether_type);
519 bytes -= SNAP_SIZE + sizeof(u16);
520 }
521
522 memcpy(skb_put(skb_frag, bytes), skb->data, bytes);
523
524 /* Advance the SKB... */
525 skb_pull(skb, bytes);
526
527 /*
528 * Encryption routine will move the header forward in
529 * order to insert the IV between the header and the
530 * payload
531 */
532 if (encrypt)
533 ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
534 if (ieee->config &
535 (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
536 skb_put(skb_frag, 4);
537 }
538 /* Advance sequence number in data frame. */
539 if (ieee->current_network.QoS_Enable) {
540 if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
541 ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
542 else
543 ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
544 } else {
545 if (ieee->seq_ctrl[0] == 0xFFF)
546 ieee->seq_ctrl[0] = 0;
547 else
548 ieee->seq_ctrl[0]++;
549 }
550 } else {
551 if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
552 netdev_warn(ieee->dev, "skb too small (%d).\n", skb->len);
553 goto success;
554 }
555
556 txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
557 if (!txb) {
558 netdev_warn(ieee->dev, "Could not allocate TXB\n");
559 goto failed;
560 }
561
562 txb->encrypted = 0;
563 txb->payload_size = skb->len;
564 memcpy(skb_put(txb->fragments[0], skb->len), skb->data, skb->len);
565 }
566
567 success:
568 spin_unlock_irqrestore(&ieee->lock, flags);
569 dev_kfree_skb_any(skb);
570 if (txb) {
571 if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
572 ieee80211_softmac_xmit(txb, ieee);
573 } else {
574 if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
575 stats->tx_packets++;
576 stats->tx_bytes += txb->payload_size;
577 return NETDEV_TX_OK;
578 }
579 ieee80211_txb_free(txb);
580 }
581 }
582
583 return NETDEV_TX_OK;
584
585 failed:
586 spin_unlock_irqrestore(&ieee->lock, flags);
587 netif_stop_queue(dev);
588 stats->tx_errors++;
589 return NETDEV_TX_BUSY;
590
591}
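
/*
 * The seq_ctl packing used above, as a standalone sketch: the 12-bit
 * sequence number occupies bits 15:4 and the fragment number bits 3:0,
 * which is why the per-AC counters above wrap at 0xFFF. Illustrative only.
 */
static inline u16 pack_seq_ctl_example(u16 seq, u8 frag)
{
	return (u16)(((seq & 0xFFF) << 4) | (frag & 0xF));
}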
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
deleted file mode 100644
index 07c3f715a6f5..000000000000
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c
+++ /dev/null
@@ -1,713 +0,0 @@
1/*
2 * Copyright(c) 2004 Intel Corporation. All rights reserved.
3 *
4 * Portions of this file are based on the WEP enablement code provided by the
5 * Host AP project hostap-drivers v0.1.3
6 * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
7 * <jkmaline@cc.hut.fi>
8 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 *
23 * The full GNU General Public License is included in this distribution in the
24 * file called LICENSE.
25 *
26 * Contact Information:
27 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
28 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
29 */
30
31#include <linux/wireless.h>
32#include <linux/kmod.h>
33#include <linux/slab.h>
34#include <linux/module.h>
35#include <linux/etherdevice.h>
36
37#include "ieee80211.h"
38static const char *ieee80211_modes[] = {
39 "?", "a", "b", "ab", "g", "ag", "bg", "abg"
40};
41
42#define MAX_CUSTOM_LEN 64
43static inline char *rtl818x_translate_scan(struct ieee80211_device *ieee,
44 char *start, char *stop,
45 struct ieee80211_network *network,
46 struct iw_request_info *info)
47{
48 char custom[MAX_CUSTOM_LEN];
49 char *p;
50 struct iw_event iwe;
51 int i, j;
52 u8 max_rate, rate;
53
54 /* First entry *MUST* be the AP MAC address */
55 iwe.cmd = SIOCGIWAP;
56 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
57 ether_addr_copy(iwe.u.ap_addr.sa_data, network->bssid);
58 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
59
60 /* Remaining entries will be displayed in the order we provide them */
61
62 /* Add the ESSID */
63 iwe.cmd = SIOCGIWESSID;
64 iwe.u.data.flags = 1;
65 if (network->ssid_len == 0) {
66 iwe.u.data.length = sizeof("<hidden>");
67 start = iwe_stream_add_point(info, start, stop, &iwe, "<hidden>");
68 } else {
69 iwe.u.data.length = min_t(u8, network->ssid_len, 32);
70 start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
71 }
72 /* Add the protocol name */
73 iwe.cmd = SIOCGIWNAME;
74 snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s", ieee80211_modes[network->mode]);
75 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
76
77 /* Add mode */
78 iwe.cmd = SIOCGIWMODE;
79 if (network->capability &
80 (WLAN_CAPABILITY_BSS | WLAN_CAPABILITY_IBSS)) {
81 if (network->capability & WLAN_CAPABILITY_BSS)
82 iwe.u.mode = IW_MODE_MASTER;
83 else
84 iwe.u.mode = IW_MODE_ADHOC;
85
86 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);
87 }
88
89 /* Add frequency/channel */
90 iwe.cmd = SIOCGIWFREQ;
91 iwe.u.freq.m = network->channel;
92 iwe.u.freq.e = 0;
93 iwe.u.freq.i = 0;
94 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);
95
96 /* Add encryption capability */
97 iwe.cmd = SIOCGIWENCODE;
98 if (network->capability & WLAN_CAPABILITY_PRIVACY)
99 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
100 else
101 iwe.u.data.flags = IW_ENCODE_DISABLED;
102 iwe.u.data.length = 0;
103 start = iwe_stream_add_point(info, start, stop, &iwe, network->ssid);
104
105 /* Add basic and extended rates */
106 max_rate = 0;
107 p = custom;
108 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
109 for (i = 0, j = 0; i < network->rates_len; ) {
110 if (j < network->rates_ex_len &&
111 ((network->rates_ex[j] & 0x7F) <
112 (network->rates[i] & 0x7F)))
113 rate = network->rates_ex[j++] & 0x7F;
114 else
115 rate = network->rates[i++] & 0x7F;
116 if (rate > max_rate)
117 max_rate = rate;
118 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
119 "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
120 }
121 for (; j < network->rates_ex_len; j++) {
122 rate = network->rates_ex[j] & 0x7F;
123 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
124 "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
125 if (rate > max_rate)
126 max_rate = rate;
127 }
128
129 iwe.cmd = SIOCGIWRATE;
130 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
131 iwe.u.bitrate.value = max_rate * 500000;
132 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_PARAM_LEN);
133
134 iwe.cmd = IWEVCUSTOM;
135 iwe.u.data.length = p - custom;
136 if (iwe.u.data.length)
137 start = iwe_stream_add_point(info, start, stop, &iwe, custom);
138
139 /* Add quality statistics */
140 /* TODO: Fix these values... */
141 if (network->stats.signal == 0 || network->stats.rssi == 0)
 142		netdev_info(ieee->dev, "signal:%d, rssi:%d\n",
143 network->stats.signal, network->stats.rssi);
144 iwe.cmd = IWEVQUAL;
145 iwe.u.qual.qual = network->stats.signalstrength;
146 iwe.u.qual.level = network->stats.signal;
147 iwe.u.qual.noise = network->stats.noise;
148 iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK;
149 if (!(network->stats.mask & IEEE80211_STATMASK_RSSI))
150 iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
151 if (!(network->stats.mask & IEEE80211_STATMASK_NOISE))
152 iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
153 if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL))
154 iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
155 iwe.u.qual.updated = 7;
156 start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
157
158 iwe.cmd = IWEVCUSTOM;
159 p = custom;
160
161 iwe.u.data.length = p - custom;
162 if (iwe.u.data.length)
163 start = iwe_stream_add_point(info, start, stop, &iwe, custom);
164
165 memset(&iwe, 0, sizeof(iwe));
166 if (network->wpa_ie_len) {
167 char buf[MAX_WPA_IE_LEN];
168 memcpy(buf, network->wpa_ie, network->wpa_ie_len);
169 iwe.cmd = IWEVGENIE;
170 iwe.u.data.length = network->wpa_ie_len;
171 start = iwe_stream_add_point(info, start, stop, &iwe, buf);
172 }
173
174 memset(&iwe, 0, sizeof(iwe));
175 if (network->rsn_ie_len) {
176 char buf[MAX_WPA_IE_LEN];
177 memcpy(buf, network->rsn_ie, network->rsn_ie_len);
178 iwe.cmd = IWEVGENIE;
179 iwe.u.data.length = network->rsn_ie_len;
180 start = iwe_stream_add_point(info, start, stop, &iwe, buf);
181 }
182
 183	/* Add EXTRA: Age to display the time since the last beacon/probe response
184 * for given network.
185 */
186 iwe.cmd = IWEVCUSTOM;
187 p = custom;
188 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
 189		      " Last beacon: %ums ago", jiffies_to_msecs(jiffies - network->last_scanned));
190 iwe.u.data.length = p - custom;
191 if (iwe.u.data.length)
192 start = iwe_stream_add_point(info, start, stop, &iwe, custom);
193
194 return start;
195}
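
/*
 * Rate formatting used above, as a sketch: supported-rate octets count in
 * 500 kb/s units once bit 7 (the "basic rate" flag) is masked off, so
 * 0x96 & 0x7F == 22 prints as "11" and 0x0B prints as "5.5"; the bitrate
 * reported via SIOCGIWRATE is simply max_rate * 500000 bit/s. The helper
 * name is illustrative only.
 */
static inline void format_rate_example(char *buf, size_t len, u8 rate_octet)
{
	u8 rate = rate_octet & 0x7F;	/* strip the basic-rate flag */

	snprintf(buf, len, "%d%s", rate >> 1, (rate & 1) ? ".5" : "");
}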
196
197int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
198 struct iw_request_info *info,
199 union iwreq_data *wrqu, char *extra)
200{
201 struct ieee80211_network *network;
202 unsigned long flags;
203 int err = 0;
204 char *ev = extra;
205 char *stop = ev + wrqu->data.length;
206 int i = 0;
207
208 IEEE80211_DEBUG_WX("Getting scan\n");
209 down(&ieee->wx_sem);
210 spin_lock_irqsave(&ieee->lock, flags);
211
212 if (!ieee->bHwRadioOff) {
213 list_for_each_entry(network, &ieee->network_list, list) {
214 i++;
215
216 if ((stop-ev) < 200) {
217 err = -E2BIG;
218 break;
219 }
220 if (ieee->scan_age == 0 ||
221 time_after(network->last_scanned + ieee->scan_age, jiffies)) {
222 ev = rtl818x_translate_scan(ieee, ev, stop, network, info);
223 } else
224 IEEE80211_DEBUG_SCAN(
225 "Not showing network '%s ("
 226				"%pM)' due to age (%ums).\n",
227 escape_essid(network->ssid,
228 network->ssid_len),
229 network->bssid,
 230				jiffies_to_msecs(jiffies - network->last_scanned));
231 }
232 }
233 spin_unlock_irqrestore(&ieee->lock, flags);
234 up(&ieee->wx_sem);
235 wrqu->data.length = ev - extra;
236 wrqu->data.flags = 0;
237 IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
238
239 return err;
240}
241
242int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
243 struct iw_request_info *info,
244 union iwreq_data *wrqu, char *keybuf)
245{
246 struct iw_point *erq = &(wrqu->encoding);
247 struct net_device *dev = ieee->dev;
248 struct ieee80211_security sec = {
249 .flags = 0
250 };
251 int i, key, key_provided, len;
252 struct ieee80211_crypt_data **crypt;
253
254 IEEE80211_DEBUG_WX("SET_ENCODE\n");
255
256 key = erq->flags & IW_ENCODE_INDEX;
257 if (key) {
258 if (key > WEP_KEYS)
259 return -EINVAL;
260 key--;
261 key_provided = 1;
262 } else {
263 key_provided = 0;
264 key = ieee->tx_keyidx;
265 }
266
267 IEEE80211_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
268 "provided" : "default");
269
270 crypt = &ieee->crypt[key];
271
272 if (erq->flags & IW_ENCODE_DISABLED) {
273 if (key_provided && *crypt) {
274 IEEE80211_DEBUG_WX("Disabling encryption on key %d.\n",
275 key);
276 ieee80211_crypt_delayed_deinit(ieee, crypt);
277 } else
278 IEEE80211_DEBUG_WX("Disabling encryption.\n");
279
280 /* Check all the keys to see if any are still configured,
281 * and if no key index was provided, de-init them all.
282 */
283 for (i = 0; i < WEP_KEYS; i++) {
284 if (ieee->crypt[i] != NULL) {
285 if (key_provided)
286 break;
287 ieee80211_crypt_delayed_deinit(
288 ieee, &ieee->crypt[i]);
289 }
290 }
291
292 if (i == WEP_KEYS) {
293 sec.enabled = 0;
294 sec.level = SEC_LEVEL_0;
295 sec.flags |= SEC_ENABLED | SEC_LEVEL;
296 }
297
298 goto done;
299 }
300
301 sec.enabled = 1;
302 sec.flags |= SEC_ENABLED;
303
304 if (*crypt != NULL && (*crypt)->ops != NULL &&
305 strcmp((*crypt)->ops->name, "WEP") != 0) {
306 /* changing to use WEP; deinit previously used algorithm
307 * on this key.
308 */
309 ieee80211_crypt_delayed_deinit(ieee, crypt);
310 }
311
312 if (*crypt == NULL) {
313 struct ieee80211_crypt_data *new_crypt;
314
315 /* take WEP into use */
316 new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data),
317 GFP_KERNEL);
318 if (new_crypt == NULL)
319 return -ENOMEM;
 320		new_crypt->ops = ieee80211_get_crypto_ops("WEP");
 321		/* (the WEP ops come from the ieee80211_crypt_wep module;
 322		 *  a failed lookup is handled just below) */
323
324 if (new_crypt->ops)
325 new_crypt->priv = new_crypt->ops->init(key);
326
327 if (!new_crypt->ops || !new_crypt->priv) {
328 kfree(new_crypt);
329 new_crypt = NULL;
330
331 netdev_warn(ieee->dev,
332 "could not initialize WEP: load module ieee80211_crypt_wep\n");
333 return -EOPNOTSUPP;
334 }
335 *crypt = new_crypt;
336 }
337
338 /* If a new key was provided, set it up */
339 if (erq->length > 0) {
340 len = erq->length <= 5 ? 5 : 13;
341 memcpy(sec.keys[key], keybuf, erq->length);
342 if (len > erq->length)
343 memset(sec.keys[key] + erq->length, 0,
344 len - erq->length);
345 IEEE80211_DEBUG_WX("Setting key %d to '%s' (%d:%d bytes)\n",
346 key, escape_essid(sec.keys[key], len),
347 erq->length, len);
348 sec.key_sizes[key] = len;
349 (*crypt)->ops->set_key(sec.keys[key], len, NULL,
350 (*crypt)->priv);
351 sec.flags |= (1 << key);
352 /* This ensures a key will be activated if no key is
353 * explicitly set.
354 */
355 if (key == sec.active_key)
356 sec.flags |= SEC_ACTIVE_KEY;
357 ieee->tx_keyidx = key;
358 } else {
359 len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
360 NULL, (*crypt)->priv);
361 if (len == 0) {
362 /* Set a default key of all 0 */
363 IEEE80211_DEBUG_WX("Setting key %d to all zero.\n",
364 key);
365 memset(sec.keys[key], 0, 13);
366 (*crypt)->ops->set_key(sec.keys[key], 13, NULL,
367 (*crypt)->priv);
368 sec.key_sizes[key] = 13;
369 sec.flags |= (1 << key);
370 }
371
372 /* No key data - just set the default TX key index */
373 if (key_provided) {
374 IEEE80211_DEBUG_WX(
375 "Setting key %d to default Tx key.\n", key);
376 ieee->tx_keyidx = key;
377 sec.active_key = key;
378 sec.flags |= SEC_ACTIVE_KEY;
379 }
380 }
381
382 done:
383 ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
384 sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
385 sec.flags |= SEC_AUTH_MODE;
386 IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ?
387 "OPEN" : "SHARED KEY");
388
389 /* For now we just support WEP, so only set that security level...
390 * TODO: When WPA is added this is one place that needs to change
391 */
392 sec.flags |= SEC_LEVEL;
393 sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
394
395 if (ieee->set_security)
396 ieee->set_security(dev, &sec);
397
398 /* Do not reset port if card is in Managed mode since resetting will
399 * generate new IEEE 802.11 authentication which may end up in looping
400 * with IEEE 802.1X. If your hardware requires a reset after WEP
401 * configuration (for example... Prism2), implement the reset_port in
402 * the callbacks structures used to initialize the 802.11 stack.
403 */
404 if (ieee->reset_on_keychange &&
405 ieee->iw_mode != IW_MODE_INFRA &&
406 ieee->reset_port && ieee->reset_port(dev)) {
407 netdev_dbg(ieee->dev, "reset_port failed\n");
408 return -EINVAL;
409 }
410 return 0;
411}
412
413int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
414 struct iw_request_info *info,
415 union iwreq_data *wrqu, char *keybuf)
416{
417 struct iw_point *erq = &(wrqu->encoding);
418 int len, key;
419 struct ieee80211_crypt_data *crypt;
420
421 IEEE80211_DEBUG_WX("GET_ENCODE\n");
422
423 if (ieee->iw_mode == IW_MODE_MONITOR)
424 return -1;
425
426 key = erq->flags & IW_ENCODE_INDEX;
427 if (key) {
428 if (key > WEP_KEYS)
429 return -EINVAL;
430 key--;
431 } else
432 key = ieee->tx_keyidx;
433
434 crypt = ieee->crypt[key];
435 erq->flags = key + 1;
436
437 if (crypt == NULL || crypt->ops == NULL) {
438 erq->length = 0;
439 erq->flags |= IW_ENCODE_DISABLED;
440 return 0;
441 }
442
443 if (strcmp(crypt->ops->name, "WEP") != 0) {
444 /* only WEP is supported with wireless extensions, so just
445 * report that encryption is used.
446 */
447 erq->length = 0;
448 erq->flags |= IW_ENCODE_ENABLED;
449 return 0;
450 }
451
452 len = crypt->ops->get_key(keybuf, WEP_KEY_LEN, NULL, crypt->priv);
453 erq->length = (len >= 0 ? len : 0);
454
455 erq->flags |= IW_ENCODE_ENABLED;
456
457 if (ieee->open_wep)
458 erq->flags |= IW_ENCODE_OPEN;
459 else
460 erq->flags |= IW_ENCODE_RESTRICTED;
461
462 return 0;
463}
464
465int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
466 struct iw_request_info *info,
467 union iwreq_data *wrqu, char *extra)
468{
469 struct net_device *dev = ieee->dev;
470 struct iw_point *encoding = &wrqu->encoding;
471 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
472 int i, idx, ret = 0;
473 int group_key = 0;
474 const char *alg;
475 struct ieee80211_crypto_ops *ops;
476 struct ieee80211_crypt_data **crypt;
477
478 struct ieee80211_security sec = {
479 .flags = 0,
480 };
481 idx = encoding->flags & IW_ENCODE_INDEX;
482 if (idx) {
483 if (idx < 1 || idx > WEP_KEYS)
484 return -EINVAL;
485 idx--;
486 } else
487 idx = ieee->tx_keyidx;
488
489 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
490 crypt = &ieee->crypt[idx];
491 group_key = 1;
492 } else {
493 /* some Cisco APs use idx>0 for unicast in dynamic WEP */
494 if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
495 return -EINVAL;
496 if (ieee->iw_mode == IW_MODE_INFRA)
497 crypt = &ieee->crypt[idx];
498 else
499 return -EINVAL;
500 }
501
502 sec.flags |= SEC_ENABLED;
503 if ((encoding->flags & IW_ENCODE_DISABLED) ||
504 ext->alg == IW_ENCODE_ALG_NONE) {
505 if (*crypt)
506 ieee80211_crypt_delayed_deinit(ieee, crypt);
507
508 for (i = 0; i < WEP_KEYS; i++)
509 if (ieee->crypt[i] != NULL)
510 break;
511
512 if (i == WEP_KEYS) {
513 sec.enabled = 0;
514 sec.level = SEC_LEVEL_0;
515 sec.flags |= SEC_LEVEL;
516 }
517 goto done;
518 }
519
520 sec.enabled = 1;
521
522 switch (ext->alg) {
523 case IW_ENCODE_ALG_WEP:
524 alg = "WEP";
525 break;
526 case IW_ENCODE_ALG_TKIP:
527 alg = "TKIP";
528 break;
529 case IW_ENCODE_ALG_CCMP:
530 alg = "CCMP";
531 break;
532 default:
533 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
534 dev->name, ext->alg);
535 ret = -EINVAL;
536 goto done;
537 }
538
 539	ops = ieee80211_get_crypto_ops(alg);
 540	/* (a failed lookup means the matching ieee80211_crypt_* module is
 541	 *  not registered; the error path below reports it) */
542 if (ops == NULL) {
543 IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
544 dev->name, ext->alg);
 545		netdev_err(ieee->dev, "unknown crypto alg %d\n",
546 ext->alg);
547 ret = -EINVAL;
548 goto done;
549 }
550
551 if (*crypt == NULL || (*crypt)->ops != ops) {
552 struct ieee80211_crypt_data *new_crypt;
553
554 ieee80211_crypt_delayed_deinit(ieee, crypt);
555
556 new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
557 if (new_crypt == NULL) {
558 ret = -ENOMEM;
559 goto done;
560 }
561 new_crypt->ops = ops;
562 if (new_crypt->ops)
563 new_crypt->priv = new_crypt->ops->init(idx);
564 if (new_crypt->priv == NULL) {
565 kfree(new_crypt);
566 ret = -EINVAL;
567 goto done;
568 }
569 *crypt = new_crypt;
570
571 }
572
573 if (ext->key_len > 0 && (*crypt)->ops->set_key &&
574 (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
575 (*crypt)->priv) < 0) {
576 IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
577 netdev_err(ieee->dev, "key setting failed\n");
578 ret = -EINVAL;
579 goto done;
580 }
 581
582 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
583 ieee->tx_keyidx = idx;
584 sec.active_key = idx;
585 sec.flags |= SEC_ACTIVE_KEY;
586 }
587
588 if (ext->alg != IW_ENCODE_ALG_NONE) {
589 memcpy(sec.keys[idx], ext->key, ext->key_len);
590 sec.key_sizes[idx] = ext->key_len;
591 sec.flags |= (1 << idx);
592 if (ext->alg == IW_ENCODE_ALG_WEP) {
593 sec.flags |= SEC_LEVEL;
594 sec.level = SEC_LEVEL_1;
595 } else if (ext->alg == IW_ENCODE_ALG_TKIP) {
596 sec.flags |= SEC_LEVEL;
597 sec.level = SEC_LEVEL_2;
598 } else if (ext->alg == IW_ENCODE_ALG_CCMP) {
599 sec.flags |= SEC_LEVEL;
600 sec.level = SEC_LEVEL_3;
601 }
602 /* Don't set sec level for group keys. */
603 if (group_key)
604 sec.flags &= ~SEC_LEVEL;
605 }
 606
607done:
608 if (ieee->set_security)
609 ieee->set_security(ieee->dev, &sec);
610
611 if (ieee->reset_on_keychange &&
612 ieee->iw_mode != IW_MODE_INFRA &&
613 ieee->reset_port && ieee->reset_port(dev)) {
614 IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
615 return -EINVAL;
616 }
617
618 return ret;
619}
620
621int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
622 struct iw_request_info *info,
623 union iwreq_data *wrqu, char *extra)
624{
625 struct iw_mlme *mlme = (struct iw_mlme *) extra;
 626
627 switch (mlme->cmd) {
628 case IW_MLME_DEAUTH:
629 case IW_MLME_DISASSOC:
630 ieee80211_disassociate(ieee);
631 break;
632 default:
633 return -EOPNOTSUPP;
634 }
 635
636 return 0;
637}
638
639int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
640 struct iw_request_info *info,
641 struct iw_param *data, char *extra)
642{
643 switch (data->flags & IW_AUTH_INDEX) {
644 case IW_AUTH_WPA_VERSION:
645 /* need to support wpa2 here */
646 break;
647 case IW_AUTH_CIPHER_PAIRWISE:
648 case IW_AUTH_CIPHER_GROUP:
649 case IW_AUTH_KEY_MGMT:
650 /* Host AP driver does not use these parameters and allows
651 * wpa_supplicant to control them internally.
652 */
653 break;
654 case IW_AUTH_TKIP_COUNTERMEASURES:
655 ieee->tkip_countermeasures = data->value;
656 break;
657 case IW_AUTH_DROP_UNENCRYPTED:
658 ieee->drop_unencrypted = data->value;
659 break;
660
661 case IW_AUTH_80211_AUTH_ALG:
662 ieee->open_wep = (data->value&IW_AUTH_ALG_OPEN_SYSTEM) ? 1 : 0;
663 break;
664
 665
666 case IW_AUTH_WPA_ENABLED:
667 ieee->wpa_enabled = (data->value) ? 1 : 0;
668 break;
669
 670
671 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
672 ieee->ieee802_1x = data->value;
673 break;
674 case IW_AUTH_PRIVACY_INVOKED:
675 ieee->privacy_invoked = data->value;
676 break;
677 default:
678 return -EOPNOTSUPP;
679 }
680 return 0;
681}
682
 683
684int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len)
685{
686 u8 *buf = NULL;
687
688 if (len > MAX_WPA_IE_LEN || (len && ie == NULL)) {
 689		netdev_err(ieee->dev, "invalid IE, len:%zu\n", len);
690 return -EINVAL;
691 }
692
693 if (len) {
694 if (len != ie[1]+2) {
695 netdev_err(ieee->dev, "len:%zu, ie:%d\n", len, ie[1]);
696 return -EINVAL;
697 }
698 buf = kmemdup(ie, len, GFP_KERNEL);
699 if (buf == NULL)
700 return -ENOMEM;
701 kfree(ieee->wpa_ie);
702 ieee->wpa_ie = buf;
703 ieee->wpa_ie_len = len;
704 } else {
705 kfree(ieee->wpa_ie);
706 ieee->wpa_ie = NULL;
707 ieee->wpa_ie_len = 0;
708 }
709
710 return 0;
711
712}
 713
diff --git a/drivers/staging/rtl8187se/r8180.h b/drivers/staging/rtl8187se/r8180.h
deleted file mode 100644
index 9f931dba1d82..000000000000
--- a/drivers/staging/rtl8187se/r8180.h
+++ /dev/null
@@ -1,640 +0,0 @@
1/*
2 * This is part of rtl8180 OpenSource driver.
3 * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
 4 * Released under the terms of GPL (General Public License)
5 *
6 * Parts of this driver are based on the GPL part of the official realtek driver
7 *
8 * Parts of this driver are based on the rtl8180 driver skeleton from Patric
9 * Schenke & Andres Salomon
10 *
11 * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
12 *
 13 * We want to thank the authors of those projects and the Ndiswrapper
 14 * project authors.
15 */
16
17#ifndef R8180H
18#define R8180H
19
20#include <linux/interrupt.h>
21
22#define RTL8180_MODULE_NAME "r8180"
23#define DMESG(x, a...) printk(KERN_INFO RTL8180_MODULE_NAME ": " x "\n", ## a)
24#define DMESGW(x, a...) printk(KERN_WARNING RTL8180_MODULE_NAME ": WW:" x "\n", ## a)
25#define DMESGE(x, a...) printk(KERN_WARNING RTL8180_MODULE_NAME ": EE:" x "\n", ## a)
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/ioport.h>
30#include <linux/sched.h>
31#include <linux/types.h>
32#include <linux/slab.h>
33#include <linux/netdevice.h>
34#include <linux/pci.h>
35#include <linux/etherdevice.h>
36#include <linux/delay.h>
37#include <linux/rtnetlink.h> /* for rtnl_lock() */
38#include <linux/wireless.h>
39#include <linux/timer.h>
40#include <linux/proc_fs.h> /* Necessary because we use the proc fs. */
41#include <linux/if_arp.h>
42#include "ieee80211/ieee80211.h"
43#include <asm/io.h>
44
45#define EPROM_93c46 0
46#define EPROM_93c56 1
47
48#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
49
50#define DEFAULT_FRAG_THRESHOLD 2342U
51#define MIN_FRAG_THRESHOLD 256U
52#define DEFAULT_RTS_THRESHOLD 2342U
53#define MIN_RTS_THRESHOLD 0U
54#define MAX_RTS_THRESHOLD 2342U
55#define DEFAULT_BEACONINTERVAL 0x64U
56
57#define DEFAULT_RETRY_RTS 7
58#define DEFAULT_RETRY_DATA 7
59
60#define BEACON_QUEUE 6
61
62#define aSifsTime 10
63
64#define sCrcLng 4
65#define sAckCtsLng 112 /* bits in ACK and CTS frames. */
66/* +by amy 080312. */
67#define RATE_ADAPTIVE_TIMER_PERIOD 300
68
69enum wireless_mode {
70 WIRELESS_MODE_UNKNOWN = 0x00,
71 WIRELESS_MODE_A = 0x01,
72 WIRELESS_MODE_B = 0x02,
73 WIRELESS_MODE_G = 0x04,
74 WIRELESS_MODE_AUTO = 0x08,
75};
76
77struct chnl_access_setting {
78 u16 sifs_timer;
79 u16 difs_timer;
80 u16 slot_time_timer;
81 u16 eifs_timer;
82 u16 cwmin_index;
83 u16 cwmax_index;
84};
85
86enum nic_t {
87 NIC_8185 = 1,
88 NIC_8185B
89};
90
91typedef u32 AC_CODING;
92#define AC0_BE 0 /* ACI: 0x00 */ /* Best Effort. */
93#define AC1_BK 1 /* ACI: 0x01 */ /* Background. */
94#define AC2_VI 2 /* ACI: 0x10 */ /* Video. */
95#define AC3_VO 3 /* ACI: 0x11 */ /* Voice. */
 96#define AC_MAX 4 /* Max: defines the total number; should not be used as a real
97 * enum.
98 */
99
100/*
101 * ECWmin/ECWmax field.
102 * Ref: WMM spec 2.2.2: WME Parameter Element, p.13.
103 */
104typedef union _ECW {
105 u8 charData;
106 struct {
107 u8 ECWmin:4;
108 u8 ECWmax:4;
109 } f; /* Field */
110} ECW, *PECW;
111
112/*
113 * ACI/AIFSN Field. Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
114 */
115typedef union _ACI_AIFSN {
116 u8 charData;
117
118 struct {
119 u8 AIFSN:4;
120 u8 ACM:1;
121 u8 ACI:2;
122 u8 Reserved:1;
123 } f; /* Field */
124} ACI_AIFSN, *PACI_AIFSN;
125
126/*
127 * AC Parameters Record Format.
128 * Ref: WMM spec 2.2.2: WME Parameter Element, p.12.
129 */
130typedef union _AC_PARAM {
131 u32 longData;
132 u8 charData[4];
133
134 struct {
135 ACI_AIFSN AciAifsn;
136 ECW Ecw;
137 u16 TXOPLimit;
138 } f; /* Field */
139} AC_PARAM, *PAC_PARAM;
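
/*
 * Decoding sketch for the WMM records above, assuming the usual WMM
 * conventions: the contention windows derive from the ECW nibbles as
 * CW = 2^ECW - 1 (e.g. ECWmin 4 -> CWmin 15), and TXOPLimit counts
 * 32-microsecond units. Illustrative only, not used by the driver.
 */
static inline u16 ecw_to_cw_example(u8 ecw)
{
	return (u16)((1 << ecw) - 1);
}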
140
141struct buffer {
142 struct buffer *next;
143 u32 *buf;
144 dma_addr_t dma;
145};
146
147/* YJ,modified,080828. */
148struct stats {
149 unsigned long txrdu;
150 unsigned long rxrdu;
151 unsigned long rxnolast;
152 unsigned long rxnodata;
153 unsigned long rxnopointer;
154 unsigned long txnperr;
155 unsigned long txresumed;
156 unsigned long rxerr;
157 unsigned long rxoverflow;
158 unsigned long rxint;
159 unsigned long txbkpokint;
160 unsigned long txbepoking;
161 unsigned long txbkperr;
162 unsigned long txbeperr;
163 unsigned long txnpokint;
164 unsigned long txhpokint;
165 unsigned long txhperr;
166 unsigned long ints;
167 unsigned long shints;
168 unsigned long txoverflow;
169 unsigned long rxdmafail;
170 unsigned long txbeacon;
171 unsigned long txbeaconerr;
172 unsigned long txlpokint;
173 unsigned long txlperr;
174 unsigned long txretry; /* retry number tony 20060601 */
175 unsigned long rxcrcerrmin; /* crc error (0-500) */
176 unsigned long rxcrcerrmid; /* crc error (500-1000) */
177 unsigned long rxcrcerrmax; /* crc error (>1000) */
178 unsigned long rxicverr; /* ICV error */
179};
180
181#define MAX_LD_SLOT_NUM 10
182#define KEEP_ALIVE_INTERVAL 20 /* in seconds. */
183#define CHECK_FOR_HANG_PERIOD 2 /* be equal to watchdog check time. */
184#define DEFAULT_KEEP_ALIVE_LEVEL 1
185#define DEFAULT_SLOT_NUM 2
186#define POWER_PROFILE_AC 0
187#define POWER_PROFILE_BATTERY 1
188
189struct link_detect_t {
 190	u32 rx_frame_num[MAX_LD_SLOT_NUM]; /* number of Rx frames per
 191					 * CheckForHang period, used to
 192					 * determine the link status.
193 */
194 u16 slot_num; /* number of CheckForHang period to determine link status,
195 * default is 2.
196 */
197 u16 slot_index;
198 u32 num_tx_ok_in_period; /* number of packet transmitted during
199 * CheckForHang.
200 */
201 u32 num_rx_ok_in_period; /* number of packet received during
202 * CheckForHang.
203 */
204 u8 idle_count; /* (KEEP_ALIVE_INTERVAL / CHECK_FOR_HANG_PERIOD) */
205 u32 last_num_tx_unicast;
206 u32 last_num_rx_unicast;
207
 208	bool b_busy_traffic; /* when set to 1, the UI can't scan at will. */
209};
210
211/* YJ,modified,080828,end */
212
213/* by amy for led
214 * ==========================================================================
215 * LED customization.
216 * ==========================================================================
217 */
218enum led_strategy_8185 {
219 SW_LED_MODE0,
220 SW_LED_MODE1,
 221	HW_LED, /* HW controls 2 LEDs, LED0 and LED1 (there are 4 different
222 * control modes). */
223};
224
225enum rt_rf_power_state {
226 RF_ON,
227 RF_SLEEP,
228 RF_OFF
229};
230
231enum _ReasonCode {
232 unspec_reason = 0x1,
233 auth_not_valid = 0x2,
234 deauth_lv_ss = 0x3,
235 inactivity = 0x4,
236 ap_overload = 0x5,
237 class2_err = 0x6,
238 class3_err = 0x7,
239 disas_lv_ss = 0x8,
240 asoc_not_auth = 0x9,
241
242 /* ----MIC_CHECK */
243 mic_failure = 0xe,
244 /* ----END MIC_CHECK */
245
246 /* Reason code defined in 802.11i D10.0 p.28. */
247 invalid_IE = 0x0d,
248 four_way_tmout = 0x0f,
249 two_way_tmout = 0x10,
250 IE_dismatch = 0x11,
251 invalid_Gcipher = 0x12,
252 invalid_Pcipher = 0x13,
253 invalid_AKMP = 0x14,
254 unsup_RSNIEver = 0x15,
255 invalid_RSNIE = 0x16,
256 auth_802_1x_fail = 0x17,
257 ciper_reject = 0x18,
258
259 /* Reason code defined in 7.3.1.7, 802.1e D13.0, p.42. Added by Annie,
260 * 2005-11-15.
261 */
262 QoS_unspec = 0x20, /* 32 */
263 QAP_bandwidth = 0x21, /* 33 */
264 poor_condition = 0x22, /* 34 */
265 no_facility = 0x23, /* 35 */
266 /* Where is 36??? */
267 req_declined = 0x25, /* 37 */
268 invalid_param = 0x26, /* 38 */
269 req_not_honored = 0x27, /* 39 */
270 TS_not_created = 0x2F, /* 47 */
271 DL_not_allowed = 0x30, /* 48 */
272 dest_not_exist = 0x31, /* 49 */
273 dest_not_QSTA = 0x32, /* 50 */
274};
275
276enum rt_ps_mode {
277 ACTIVE, /* Active/Continuous access. */
278 MAX_PS, /* Max power save mode. */
279 FAST_PS /* Fast power save mode. */
280};
281
282/* by amy for power save. */
283struct r8180_priv {
284 struct pci_dev *pdev;
285
286 short epromtype;
287 int irq;
288 struct ieee80211_device *ieee80211;
289
290 short plcp_preamble_mode; /* 0:auto 1:short 2:long */
291
292 spinlock_t irq_th_lock;
293 spinlock_t tx_lock;
294 spinlock_t ps_lock;
295 spinlock_t rf_ps_lock;
296
297 u16 irq_mask;
298 short irq_enabled;
299 struct net_device *dev;
300 short chan;
301 short sens;
302 short max_sens;
303 u8 chtxpwr[15]; /* channels from 1 to 14, 0 not used. */
304 u8 chtxpwr_ofdm[15]; /* channels from 1 to 14, 0 not used. */
305 u8 channel_plan; /* it's the channel plan index. */
306 short up;
307 short crcmon; /* if 1 allow bad crc frame reception in monitor mode. */
308
309 struct timer_list scan_timer;
310 spinlock_t scan_lock;
311 u8 active_probe;
312 struct semaphore wx_sem;
313 short hw_wep;
314
315 short digphy;
316 short antb;
317 short diversity;
318 u32 key0[4];
319 short (*rf_set_sens)(struct net_device *dev, short sens);
320 void (*rf_set_chan)(struct net_device *dev, short ch);
321 void (*rf_close)(struct net_device *dev);
322 void (*rf_init)(struct net_device *dev);
323 void (*rf_sleep)(struct net_device *dev);
324 void (*rf_wakeup)(struct net_device *dev);
325 /* short rate; */
326 short promisc;
327 /* stats */
328 struct stats stats;
329 struct link_detect_t link_detect; /* YJ,add,080828 */
330 struct iw_statistics wstats;
331
332 /* RX stuff. */
333 u32 *rxring;
334 u32 *rxringtail;
335 dma_addr_t rxringdma;
336 struct buffer *rxbuffer;
337 struct buffer *rxbufferhead;
338 int rxringcount;
339 u16 rxbuffersize;
340
341 struct sk_buff *rx_skb;
342
343 short rx_skb_complete;
344
345 u32 rx_prevlen;
346
347 u32 *txmapring;
348 u32 *txbkpring;
349 u32 *txbepring;
350 u32 *txvipring;
351 u32 *txvopring;
352 u32 *txhpring;
353 dma_addr_t txmapringdma;
354 dma_addr_t txbkpringdma;
355 dma_addr_t txbepringdma;
356 dma_addr_t txvipringdma;
357 dma_addr_t txvopringdma;
358 dma_addr_t txhpringdma;
359 u32 *txmapringtail;
360 u32 *txbkpringtail;
361 u32 *txbepringtail;
362 u32 *txvipringtail;
363 u32 *txvopringtail;
364 u32 *txhpringtail;
365 u32 *txmapringhead;
366 u32 *txbkpringhead;
367 u32 *txbepringhead;
368 u32 *txvipringhead;
369 u32 *txvopringhead;
370 u32 *txhpringhead;
371 struct buffer *txmapbufs;
372 struct buffer *txbkpbufs;
373 struct buffer *txbepbufs;
374 struct buffer *txvipbufs;
375 struct buffer *txvopbufs;
376 struct buffer *txhpbufs;
377 struct buffer *txmapbufstail;
378 struct buffer *txbkpbufstail;
379 struct buffer *txbepbufstail;
380 struct buffer *txvipbufstail;
381 struct buffer *txvopbufstail;
382 struct buffer *txhpbufstail;
383
384 int txringcount;
385 int txbuffsize;
386 struct tasklet_struct irq_rx_tasklet;
387 u8 dma_poll_mask;
388
389 /* adhoc/master mode stuff. */
390 u32 *txbeaconringtail;
391 dma_addr_t txbeaconringdma;
392 u32 *txbeaconring;
393 int txbeaconcount;
394 struct buffer *txbeaconbufs;
395 struct buffer *txbeaconbufstail;
396
397 u8 retry_data;
398 u8 retry_rts;
399 u16 rts;
400
401 /* by amy for led. */
402 enum led_strategy_8185 led_strategy;
403 /* by amy for led. */
404
405 /* by amy for power save. */
406 struct timer_list watch_dog_timer;
407 bool bInactivePs;
408 bool bSwRfProcessing;
409 enum rt_rf_power_state eInactivePowerState;
410 enum rt_rf_power_state eRFPowerState;
411 u32 RfOffReason;
412 bool RFChangeInProgress;
413 bool SetRFPowerStateInProgress;
414 u8 RFProgType;
415 bool bLeisurePs;
416 enum rt_ps_mode dot11PowerSaveMode;
417 u8 TxPollingTimes;
418
 419	bool bApBufOurFrame; /* TRUE if the AP buffers our unicast data; we will
 420				 * stay awake until we receive data or time out.
421 */
422 u8 WaitBufDataBcnCount;
423 u8 WaitBufDataTimeOut;
424
425 /* by amy for power save. */
426 /* by amy for antenna. */
427 u8 EEPROMSwAntennaDiversity;
428 bool EEPROMDefaultAntenna1;
429 u8 RegSwAntennaDiversityMechanism;
430 bool bSwAntennaDiverity;
431 u8 RegDefaultAntenna;
432 bool bDefaultAntenna1;
433 u8 SignalStrength;
434 long Stats_SignalStrength;
435 long LastSignalStrengthInPercent; /* In percentage, used for smoothing,
436 * e.g. Moving Average.
437 */
438 u8 SignalQuality; /* in 0-100 index. */
439 long Stats_SignalQuality;
440 long RecvSignalPower; /* in dBm. */
441 long Stats_RecvSignalPower;
 442	u8 LastRxPktAntenna; /* +by amy 080312 Antenna which received the latest
443 * packet. 0: Aux, 1:Main. Added by Roger,
444 * 2008.01.25.
445 */
446 u32 AdRxOkCnt;
447 long AdRxSignalStrength;
448 u8 CurrAntennaIndex; /* Index to current Antenna (both Tx and Rx). */
449 u8 AdTickCount; /* Times of SwAntennaDiversityTimer happened. */
450 u8 AdCheckPeriod; /* # of period SwAntennaDiversityTimer to check Rx
451 * signal strength for SW Antenna Diversity.
452 */
453 u8 AdMinCheckPeriod; /* Min value of AdCheckPeriod. */
454 u8 AdMaxCheckPeriod; /* Max value of AdCheckPeriod. */
455 long AdRxSsThreshold; /* Signal strength threshold to switch antenna. */
456 long AdMaxRxSsThreshold; /* Max value of AdRxSsThreshold. */
 457	bool bAdSwitchedChecking; /* TRUE if we shall check Rx signal
 458				   * strength after the last antenna switch.
459 */
460 long AdRxSsBeforeSwitched; /* Rx signal strength before we switched
461 * antenna.
462 */
463 struct timer_list SwAntennaDiversityTimer;
464 /* by amy for antenna {by amy 080312 */
465
466 /* Crystal calibration. Added by Roger, 2007.12.11. */
467
468 bool bXtalCalibration; /* Crystal calibration.*/
469 u8 XtalCal_Xin; /* Crystal calibration for Xin. 0~7.5pF */
470 u8 XtalCal_Xout; /* Crystal calibration for Xout. 0~7.5pF */
471
472 /* Tx power tracking with thermal meter indication.
473 * Added by Roger, 2007.12.11.
474 */
475
476 bool bTxPowerTrack; /* Tx Power tracking. */
477 u8 ThermalMeter; /* Thermal meter reference indication. */
478
479 /* Dynamic Initial Gain Adjustment Mechanism. Added by Bruce,
480 * 2007-02-14.
481 */
 482	bool bDigMechanism; /* TRUE if DIG is enabled, FALSE otherwise. */
483 bool bRegHighPowerMechanism; /* For High Power Mechanism. 061010,
484 * by rcnjko.
485 */
486 u32 FalseAlarmRegValue;
487 u8 RegDigOfdmFaUpTh; /* Upper threshold of OFDM false alarm, which is
488 * used in DIG.
489 */
490 u8 DIG_NumberFallbackVote;
491 u8 DIG_NumberUpgradeVote;
492 /* For HW antenna diversity, added by Roger, 2008.01.30. */
493 u32 AdMainAntennaRxOkCnt; /* Main antenna Rx OK count. */
494 u32 AdAuxAntennaRxOkCnt; /* Aux antenna Rx OK count. */
 495	bool bHWAdSwitched; /* TRUE if we have switched the default antenna by HW
496 * evaluation.
497 */
498 /* RF High Power upper/lower threshold. */
499 u8 RegHiPwrUpperTh;
500 u8 RegHiPwrLowerTh;
501 /* RF RSSI High Power upper/lower Threshold. */
502 u8 RegRSSIHiPwrUpperTh;
503 u8 RegRSSIHiPwrLowerTh;
504 /* Current CCK RSSI value to determine CCK high power, asked by SD3 DZ,
505 * by Bruce, 2007-04-12.
506 */
507 u8 CurCCKRSSI;
508 bool bCurCCKPkt;
509 /* High Power Mechanism. Added by amy, 080312. */
510 bool bToUpdateTxPwr;
511 long UndecoratedSmoothedSS;
512 long UndecoratedSmoothedRxPower;
513 u8 RSSI;
514 char RxPower;
515 u8 InitialGain;
516 /* For adjust Dig Threshold during Legacy/Leisure Power Save Mode. */
517 u32 DozePeriodInPast2Sec;
518 /* Don't access BB/RF under disable PLL situation. */
519 u8 InitialGainBackUp;
520 u8 RegBModeGainStage;
521 /* by amy for rate adaptive */
522 struct timer_list rateadapter_timer;
523 u32 RateAdaptivePeriod;
524 bool bEnhanceTxPwr;
525 bool bUpdateARFR;
 526	int ForcedDataRate; /* Forced data rate. 0: Auto, 0x02: 1M ~ 0x6C: 54M.
527 */
528 u32 NumTxUnicast; /* YJ,add,080828,for keep alive. */
529 u8 keepAliveLevel; /*YJ,add,080828,for KeepAlive. */
530 unsigned long NumTxOkTotal;
531 u16 LastRetryCnt;
532 u16 LastRetryRate;
533 unsigned long LastTxokCnt;
534 unsigned long LastRxokCnt;
535 u16 CurrRetryCnt;
536 unsigned long LastTxOKBytes;
537 unsigned long NumTxOkBytesTotal;
538 u8 LastFailTxRate;
539 long LastFailTxRateSS;
540 u8 FailTxRateCount;
541 u32 LastTxThroughput;
542 /* for up rate. */
543 unsigned short bTryuping;
544 u8 CurrTxRate; /* the rate before up. */
545 u16 CurrRetryRate;
546 u16 TryupingCount;
547 u8 TryDownCountLowData;
548 u8 TryupingCountNoData;
549
550 u8 CurrentOperaRate;
551 struct work_struct reset_wq;
552 struct work_struct watch_dog_wq;
553 short ack_tx_to_ieee;
554
555 u8 dma_poll_stop_mask;
556
557 u16 ShortRetryLimit;
558 u16 LongRetryLimit;
559 u16 EarlyRxThreshold;
560 u32 TransmitConfig;
561 u32 ReceiveConfig;
562 u32 IntrMask;
563
564 struct chnl_access_setting ChannelAccessSetting;
565};
566
567#define MANAGE_PRIORITY 0
568#define BK_PRIORITY 1
569#define BE_PRIORITY 2
570#define VI_PRIORITY 3
571#define VO_PRIORITY 4
572#define HI_PRIORITY 5
573#define BEACON_PRIORITY 6
574
575#define LOW_PRIORITY VI_PRIORITY
576#define NORM_PRIORITY VO_PRIORITY
577/* AC2Queue mapping. */
578#define AC2Q(_ac) (((_ac) == WME_AC_VO) ? VO_PRIORITY : \
579 ((_ac) == WME_AC_VI) ? VI_PRIORITY : \
580 ((_ac) == WME_AC_BK) ? BK_PRIORITY : \
581 BE_PRIORITY)
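
/*
 * Usage sketch for the AC2Q mapping above (the WME_AC_* values are presumed
 * to come from the bundled ieee80211 headers): AC2Q(WME_AC_VO) selects
 * VO_PRIORITY (4), AC2Q(WME_AC_VI) selects VI_PRIORITY (3), AC2Q(WME_AC_BK)
 * selects BK_PRIORITY (1), and anything else falls through to
 * BE_PRIORITY (2).
 */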
582
583short rtl8180_tx(struct net_device *dev, u8 *skbuf, int len, int priority,
584 bool morefrag, short fragdesc, int rate);
585
586u8 read_nic_byte(struct net_device *dev, int x);
587u32 read_nic_dword(struct net_device *dev, int x);
588u16 read_nic_word(struct net_device *dev, int x);
589void write_nic_byte(struct net_device *dev, int x, u8 y);
590void write_nic_word(struct net_device *dev, int x, u16 y);
591void write_nic_dword(struct net_device *dev, int x, u32 y);
592void force_pci_posting(struct net_device *dev);
593
594void rtl8180_rtx_disable(struct net_device *);
595void rtl8180_set_anaparam(struct net_device *dev, u32 a);
596void rtl8185_set_anaparam2(struct net_device *dev, u32 a);
597void rtl8180_set_hw_wep(struct net_device *dev);
598void rtl8180_no_hw_wep(struct net_device *dev);
599void rtl8180_update_msr(struct net_device *dev);
600void rtl8180_beacon_tx_disable(struct net_device *dev);
601void rtl8180_beacon_rx_disable(struct net_device *dev);
602int rtl8180_down(struct net_device *dev);
603int rtl8180_up(struct net_device *dev);
604void rtl8180_commit(struct net_device *dev);
605void rtl8180_set_chan(struct net_device *dev, short ch);
606void write_phy(struct net_device *dev, u8 adr, u8 data);
607void write_phy_cck(struct net_device *dev, u8 adr, u32 data);
608void write_phy_ofdm(struct net_device *dev, u8 adr, u32 data);
609void rtl8185_tx_antenna(struct net_device *dev, u8 ant);
610void rtl8185_rf_pins_enable(struct net_device *dev);
611void IPSEnter(struct net_device *dev);
612void IPSLeave(struct net_device *dev);
613int get_curr_tx_free_desc(struct net_device *dev, int priority);
614void UpdateInitialGain(struct net_device *dev);
615bool SetAntennaConfig87SE(struct net_device *dev, u8 DefaultAnt,
616 bool bAntDiversity);
617
618void rtl8185b_adapter_start(struct net_device *dev);
619void rtl8185b_rx_enable(struct net_device *dev);
620void rtl8185b_tx_enable(struct net_device *dev);
621void rtl8180_reset(struct net_device *dev);
622void rtl8185b_irq_enable(struct net_device *dev);
623void fix_rx_fifo(struct net_device *dev);
624void fix_tx_fifo(struct net_device *dev);
625void rtl8225z2_SetTXPowerLevel(struct net_device *dev, short ch);
626void rtl8180_rate_adapter(struct work_struct *work);
627bool MgntActSet_RF_State(struct net_device *dev, enum rt_rf_power_state StateToSet,
628 u32 ChangeSource);
629
630#endif
631
632/* fun with the built-in ieee80211 stack... */
633extern int ieee80211_crypto_init(void);
634extern void ieee80211_crypto_deinit(void);
635extern int ieee80211_crypto_tkip_init(void);
636extern void ieee80211_crypto_tkip_exit(void);
637extern int ieee80211_crypto_ccmp_init(void);
638extern void ieee80211_crypto_ccmp_exit(void);
639extern int ieee80211_crypto_wep_init(void);
640extern void ieee80211_crypto_wep_exit(void);
diff --git a/drivers/staging/rtl8187se/r8180_93cx6.h b/drivers/staging/rtl8187se/r8180_93cx6.h
deleted file mode 100644
index b52b5b0610ab..000000000000
--- a/drivers/staging/rtl8187se/r8180_93cx6.h
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 This is part of rtl8180 OpenSource driver
3 Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
4 Released under the terms of GPL (General Public Licence)
5
6 Parts of this driver are based on the GPL part of the official realtek driver
7 Parts of this driver are based on the rtl8180 driver skeleton from Patric Schenke & Andres Salomon
8 Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
9
 10 We want to thank the Authors of such projects and the Ndiswrapper project Authors.
11*/
12
 13/*This file contains card eeprom (93c46 or 93c56) programming routines*/
14/*memory is addressed by WORDS*/
15
16#include "r8180.h"
17#include "r8180_hw.h"
18
19#define EPROM_DELAY 10
20
21#define EPROM_ANAPARAM_ADDRLWORD 0xd
22#define EPROM_ANAPARAM_ADDRHWORD 0xe
23
24#define RFCHIPID 0x6
25#define RFCHIPID_INTERSIL 1
26#define RFCHIPID_RFMD 2
27#define RFCHIPID_PHILIPS 3
28#define RFCHIPID_MAXIM 4
29#define RFCHIPID_GCT 5
30#define RFCHIPID_RTL8225 9
31#define RF_ZEBRA2 11
32#define EPROM_TXPW_BASE 0x05
33#define RF_ZEBRA4 12
34#define RFCHIPID_RTL8255 0xa
35#define RF_PARAM 0x19
36#define RF_PARAM_DIGPHY_SHIFT 0
37#define RF_PARAM_ANTBDEFAULT_SHIFT 1
38#define RF_PARAM_CARRIERSENSE_SHIFT 2
39#define RF_PARAM_CARRIERSENSE_MASK (3<<2)
40#define ENERGY_TRESHOLD 0x17
41#define EPROM_VERSION 0x1E
42#define MAC_ADR 0x7
43
44#define CIS 0x18
45
46#define EPROM_TXPW_OFDM_CH1_2 0x20
47
48#define EPROM_TXPW_CH1_2 0x30
49
50#define RTL818X_EEPROM_CMD_READ (1 << 0)
51#define RTL818X_EEPROM_CMD_WRITE (1 << 1)
52#define RTL818X_EEPROM_CMD_CK (1 << 2)
53#define RTL818X_EEPROM_CMD_CS (1 << 3)
54
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
deleted file mode 100644
index a6022d4e7573..000000000000
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ /dev/null
@@ -1,3775 +0,0 @@
1/*
2 * This is part of rtl818x pci OpenSource driver - v 0.1
3 * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
4 * Released under the terms of GPL (General Public License)
5 *
6 * Parts of this driver are based on the GPL part of the official
7 * Realtek driver.
8 *
9 * Parts of this driver are based on the rtl8180 driver skeleton
10 * from Patric Schenke & Andres Salomon.
11 *
12 * Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
13 *
 14 * Parts of BB/RF code are derived from David Young's rtl8180 netbsd driver.
15 *
16 * RSSI calc function from 'The Deuce'
17 *
18 * Some ideas borrowed from the 8139too.c driver included in linux kernel.
19 *
 20 * We (I?) want to thank the Authors of those projects and also the
 21 * Ndiswrapper project's Authors.
22 *
23 * A big big thanks goes also to Realtek corp. for their help in my attempt to
24 * add RTL8185 and RTL8225 support, and to David Young also.
25 *
26 * Power management interface routines.
27 * Written by Mariusz Matuszek.
28 */
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#undef RX_DONT_PASS_UL
33#undef DUMMY_RX
34
35#include <linux/slab.h>
36#include <linux/syscalls.h>
37#include <linux/eeprom_93cx6.h>
38#include <linux/interrupt.h>
39#include <linux/proc_fs.h>
40#include <linux/seq_file.h>
41
42#include "r8180_hw.h"
43#include "r8180.h"
44#include "r8180_rtl8225.h" /* RTL8225 Radio frontend */
45#include "r8180_93cx6.h" /* Card EEPROM */
46#include "r8180_wx.h"
47#include "r8180_dm.h"
48
49#include "ieee80211/dot11d.h"
50
51static struct pci_device_id rtl8180_pci_id_tbl[] = {
52 {
53 .vendor = PCI_VENDOR_ID_REALTEK,
54 .device = 0x8199,
55 .subvendor = PCI_ANY_ID,
56 .subdevice = PCI_ANY_ID,
57 .driver_data = 0,
58 },
59 {
60 .vendor = 0,
61 .device = 0,
62 .subvendor = 0,
63 .subdevice = 0,
64 .driver_data = 0,
65 }
66};
67
68static char ifname[IFNAMSIZ] = "wlan%d";
69static int hwwep;
70
71MODULE_LICENSE("GPL");
72MODULE_DEVICE_TABLE(pci, rtl8180_pci_id_tbl);
73MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
74MODULE_DESCRIPTION("Linux driver for Realtek RTL8187SE WiFi cards");
75
76module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR);
77module_param(hwwep, int, S_IRUGO|S_IWUSR);
78
79MODULE_PARM_DESC(hwwep, " Try to use hardware WEP support. Still broken and not available on all cards");
80
81static int rtl8180_pci_probe(struct pci_dev *pdev,
82 const struct pci_device_id *id);
83
84static void rtl8180_pci_remove(struct pci_dev *pdev);
85
86static void rtl8180_shutdown(struct pci_dev *pdev)
87{
88 struct net_device *dev = pci_get_drvdata(pdev);
89 if (dev->netdev_ops->ndo_stop)
90 dev->netdev_ops->ndo_stop(dev);
91 pci_disable_device(pdev);
92}
93
94static int rtl8180_suspend(struct pci_dev *pdev, pm_message_t state)
95{
96 struct net_device *dev = pci_get_drvdata(pdev);
97
98 if (!netif_running(dev))
99 goto out_pci_suspend;
100
101 if (dev->netdev_ops->ndo_stop)
102 dev->netdev_ops->ndo_stop(dev);
103
104 netif_device_detach(dev);
105
106out_pci_suspend:
107 pci_save_state(pdev);
108 pci_disable_device(pdev);
109 pci_set_power_state(pdev, pci_choose_state(pdev, state));
110 return 0;
111}
112
113static int rtl8180_resume(struct pci_dev *pdev)
114{
115 struct net_device *dev = pci_get_drvdata(pdev);
116 int err;
117 u32 val;
118
119 pci_set_power_state(pdev, PCI_D0);
120
121 err = pci_enable_device(pdev);
122 if (err) {
123 dev_err(&pdev->dev, "pci_enable_device failed on resume\n");
124
125 return err;
126 }
127
128 pci_restore_state(pdev);
129
130 /*
131 * Suspend/Resume resets the PCI configuration space, so we have to
132 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
133 * from interfering with C3 CPU state. pci_restore_state won't help
 134	 * here since it only restores the first 64 bytes of the PCI config header.
135 */
136 pci_read_config_dword(pdev, 0x40, &val);
137 if ((val & 0x0000ff00) != 0)
138 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
139
140 if (!netif_running(dev))
141 goto out;
142
143 if (dev->netdev_ops->ndo_open)
144 dev->netdev_ops->ndo_open(dev);
145
146 netif_device_attach(dev);
147out:
148 return 0;
149}
150
151static struct pci_driver rtl8180_pci_driver = {
152 .name = RTL8180_MODULE_NAME,
153 .id_table = rtl8180_pci_id_tbl,
154 .probe = rtl8180_pci_probe,
155 .remove = rtl8180_pci_remove,
156 .suspend = rtl8180_suspend,
157 .resume = rtl8180_resume,
158 .shutdown = rtl8180_shutdown,
159};
160
161u8 read_nic_byte(struct net_device *dev, int x)
162{
163 return 0xff&readb((u8 __iomem *)dev->mem_start + x);
164}
165
166u32 read_nic_dword(struct net_device *dev, int x)
167{
168 return readl((u8 __iomem *)dev->mem_start + x);
169}
170
171u16 read_nic_word(struct net_device *dev, int x)
172{
173 return readw((u8 __iomem *)dev->mem_start + x);
174}
175
176void write_nic_byte(struct net_device *dev, int x, u8 y)
177{
178 writeb(y, (u8 __iomem *)dev->mem_start + x);
179 udelay(20);
180}
181
182void write_nic_dword(struct net_device *dev, int x, u32 y)
183{
184 writel(y, (u8 __iomem *)dev->mem_start + x);
185 udelay(20);
186}
187
188void write_nic_word(struct net_device *dev, int x, u16 y)
189{
190 writew(y, (u8 __iomem *)dev->mem_start + x);
191 udelay(20);
192}
193
194inline void force_pci_posting(struct net_device *dev)
195{
196 read_nic_byte(dev, EPROM_CMD);
197 mb();
198}
199
200static irqreturn_t rtl8180_interrupt(int irq, void *netdev);
201void set_nic_rxring(struct net_device *dev);
202void set_nic_txring(struct net_device *dev);
203static struct net_device_stats *rtl8180_stats(struct net_device *dev);
204void rtl8180_commit(struct net_device *dev);
205void rtl8180_start_tx_beacon(struct net_device *dev);
206
207static struct proc_dir_entry *rtl8180_proc;
208
209static int proc_get_registers(struct seq_file *m, void *v)
210{
211 struct net_device *dev = m->private;
212 int i, n, max = 0xff;
213
 214	/* This dumps the current register page */
215 for (n = 0; n <= max;) {
216 seq_printf(m, "\nD: %2x > ", n);
217
218 for (i = 0; i < 16 && n <= max; i++, n++)
219 seq_printf(m, "%2x ", read_nic_byte(dev, n));
220 }
221 seq_putc(m, '\n');
222 return 0;
223}
224
225int get_curr_tx_free_desc(struct net_device *dev, int priority);
226
227static int proc_get_stats_hw(struct seq_file *m, void *v)
228{
229 return 0;
230}
231
232static int proc_get_stats_rx(struct seq_file *m, void *v)
233{
234 struct net_device *dev = m->private;
235 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
236
237 seq_printf(m,
238 "RX OK: %lu\n"
239 "RX Retry: %lu\n"
240 "RX CRC Error(0-500): %lu\n"
241 "RX CRC Error(500-1000): %lu\n"
242 "RX CRC Error(>1000): %lu\n"
243 "RX ICV Error: %lu\n",
244 priv->stats.rxint,
245 priv->stats.rxerr,
246 priv->stats.rxcrcerrmin,
247 priv->stats.rxcrcerrmid,
248 priv->stats.rxcrcerrmax,
249 priv->stats.rxicverr
250 );
251
252 return 0;
253}
254
255static int proc_get_stats_tx(struct seq_file *m, void *v)
256{
257 struct net_device *dev = m->private;
258 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
259 unsigned long totalOK;
260
261 totalOK = priv->stats.txnpokint + priv->stats.txhpokint +
262 priv->stats.txlpokint;
263
264 seq_printf(m,
265 "TX OK: %lu\n"
266 "TX Error: %lu\n"
267 "TX Retry: %lu\n"
268 "TX beacon OK: %lu\n"
269 "TX beacon error: %lu\n",
270 totalOK,
271 priv->stats.txnperr+priv->stats.txhperr+priv->stats.txlperr,
272 priv->stats.txretry,
273 priv->stats.txbeacon,
274 priv->stats.txbeaconerr
275 );
276
277 return 0;
278}
279
280static void rtl8180_proc_module_init(void)
281{
282 DMESG("Initializing proc filesystem");
283 rtl8180_proc = proc_mkdir(RTL8180_MODULE_NAME, init_net.proc_net);
284}
285
286static void rtl8180_proc_module_remove(void)
287{
288 remove_proc_entry(RTL8180_MODULE_NAME, init_net.proc_net);
289}
290
291static void rtl8180_proc_remove_one(struct net_device *dev)
292{
293 remove_proc_subtree(dev->name, rtl8180_proc);
294}
295
296/*
297 * seq_file wrappers for procfile show routines.
298 */
299static int rtl8180_proc_open(struct inode *inode, struct file *file)
300{
301 struct net_device *dev = proc_get_parent_data(inode);
302 int (*show)(struct seq_file *, void *) = PDE_DATA(inode);
303
304 return single_open(file, show, dev);
305}
306
307static const struct file_operations rtl8180_proc_fops = {
308 .open = rtl8180_proc_open,
309 .read = seq_read,
310 .llseek = seq_lseek,
311 .release = single_release,
312};
313
314/*
315 * Table of proc files we need to create.
316 */
317struct rtl8180_proc_file {
318 char name[12];
319 int (*show)(struct seq_file *, void *);
320};
321
322static const struct rtl8180_proc_file rtl8180_proc_files[] = {
323 { "stats-hw", &proc_get_stats_hw },
324 { "stats-rx", &proc_get_stats_rx },
325 { "stats-tx", &proc_get_stats_tx },
326 { "registers", &proc_get_registers },
327 { "" }
328};
329
330static void rtl8180_proc_init_one(struct net_device *dev)
331{
332 const struct rtl8180_proc_file *f;
333 struct proc_dir_entry *dir;
334
335 dir = proc_mkdir_data(dev->name, 0, rtl8180_proc, dev);
336 if (!dir) {
337 DMESGE("Unable to initialize /proc/net/r8180/%s\n", dev->name);
338 return;
339 }
340
341 for (f = rtl8180_proc_files; f->name[0]; f++) {
342 if (!proc_create_data(f->name, S_IFREG | S_IRUGO, dir,
343 &rtl8180_proc_fops, f->show)) {
344 DMESGE("Unable to initialize /proc/net/r8180/%s/%s\n",
345 dev->name, f->name);
346 return;
347 }
348 }
349}
350
351/*
352 * FIXME: check if we can use some standard already-existent
353 * data type+functions in kernel.
354 */
355
356static short buffer_add(struct buffer **buffer, u32 *buf, dma_addr_t dma,
357 struct buffer **bufferhead)
358{
359 struct buffer *tmp;
360
361 if (!*buffer) {
362
363 *buffer = kmalloc(sizeof(struct buffer), GFP_KERNEL);
364
365 if (*buffer == NULL) {
366 DMESGE("Failed to kmalloc head of TX/RX struct");
367 return -1;
368 }
369 (*buffer)->next = *buffer;
370 (*buffer)->buf = buf;
371 (*buffer)->dma = dma;
372 if (bufferhead != NULL)
373 (*bufferhead) = (*buffer);
374 return 0;
375 }
376 tmp = *buffer;
377
378 while (tmp->next != (*buffer))
379 tmp = tmp->next;
380 tmp->next = kmalloc(sizeof(struct buffer), GFP_KERNEL);
381 if (tmp->next == NULL) {
382 DMESGE("Failed to kmalloc TX/RX struct");
383 return -1;
384 }
385 tmp->next->buf = buf;
386 tmp->next->dma = dma;
387 tmp->next->next = *buffer;
388
389 return 0;
390}
391
392static void buffer_free(struct net_device *dev, struct buffer **buffer, int len,
393 short consistent)
394{
395
396 struct buffer *tmp, *next;
397 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
398 struct pci_dev *pdev = priv->pdev;
399
400 if (!*buffer)
401 return;
402
403 tmp = *buffer;
404
405 do {
406 next = tmp->next;
407 if (consistent) {
408 pci_free_consistent(pdev, len,
409 tmp->buf, tmp->dma);
410 } else {
411 pci_unmap_single(pdev, tmp->dma,
412 len, PCI_DMA_FROMDEVICE);
413 kfree(tmp->buf);
414 }
415 kfree(tmp);
416 tmp = next;
417 } while (next != *buffer);
418
419 *buffer = NULL;
420}
421
422int get_curr_tx_free_desc(struct net_device *dev, int priority)
423{
424 struct r8180_priv *priv = ieee80211_priv(dev);
425 u32 *tail;
426 u32 *head;
427 int ret;
428
429 switch (priority) {
430 case MANAGE_PRIORITY:
431 head = priv->txmapringhead;
432 tail = priv->txmapringtail;
433 break;
434 case BK_PRIORITY:
435 head = priv->txbkpringhead;
436 tail = priv->txbkpringtail;
437 break;
438 case BE_PRIORITY:
439 head = priv->txbepringhead;
440 tail = priv->txbepringtail;
441 break;
442 case VI_PRIORITY:
443 head = priv->txvipringhead;
444 tail = priv->txvipringtail;
445 break;
446 case VO_PRIORITY:
447 head = priv->txvopringhead;
448 tail = priv->txvopringtail;
449 break;
450 case HI_PRIORITY:
451 head = priv->txhpringhead;
452 tail = priv->txhpringtail;
453 break;
454 default:
455 return -1;
456 }
457
458 if (head <= tail)
459 ret = priv->txringcount - (tail - head)/8;
460 else
461 ret = (head - tail)/8;
462
463 if (ret > priv->txringcount)
464 DMESG("BUG");
465
466 return ret;
467}
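/*
 * Illustrative note (annotation, not part of the original file): each TX
 * descriptor above is 8 dwords (32 bytes), so the head/tail pointer
 * difference in u32 units is divided by 8 to yield a descriptor count.
 * For example, with txringcount = 64, head == ring and tail == ring + 3*8
 * (three descriptors queued), the function returns 64 - (24 / 8) = 61
 * free descriptors.
 */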
468
469static short check_nic_enought_desc(struct net_device *dev, int priority)
470{
471 struct r8180_priv *priv = ieee80211_priv(dev);
472 struct ieee80211_device *ieee = netdev_priv(dev);
473 int requiredbyte;
474 int required;
475
476 requiredbyte = priv->ieee80211->fts +
477 sizeof(struct ieee80211_header_data);
478
479 if (ieee->current_network.QoS_Enable)
480 requiredbyte += 2;
481
482 required = requiredbyte / (priv->txbuffsize-4);
483
484 if (requiredbyte % priv->txbuffsize)
485 required++;
486
 487	/* for now we keep two free descriptors as a safety boundary
488 * between the tail and the head
489 */
490
491 return required + 2 < get_curr_tx_free_desc(dev, priority);
492}
493
494void fix_tx_fifo(struct net_device *dev)
495{
496 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
497 u32 *tmp;
498 int i;
499
500 for (tmp = priv->txmapring, i = 0;
501 i < priv->txringcount;
502 tmp += 8, i++) {
503 *tmp = *tmp & ~(1<<31);
504 }
505
506 for (tmp = priv->txbkpring, i = 0;
507 i < priv->txringcount;
508 tmp += 8, i++) {
509 *tmp = *tmp & ~(1<<31);
510 }
511
512 for (tmp = priv->txbepring, i = 0;
513 i < priv->txringcount;
514 tmp += 8, i++) {
515 *tmp = *tmp & ~(1<<31);
516 }
517 for (tmp = priv->txvipring, i = 0;
518 i < priv->txringcount;
519 tmp += 8, i++) {
520 *tmp = *tmp & ~(1<<31);
521 }
522
523 for (tmp = priv->txvopring, i = 0;
524 i < priv->txringcount;
525 tmp += 8, i++) {
526 *tmp = *tmp & ~(1<<31);
527 }
528
529 for (tmp = priv->txhpring, i = 0;
530 i < priv->txringcount;
531 tmp += 8, i++) {
532 *tmp = *tmp & ~(1<<31);
533 }
534
535 for (tmp = priv->txbeaconring, i = 0;
536 i < priv->txbeaconcount;
537 tmp += 8, i++) {
538 *tmp = *tmp & ~(1<<31);
539 }
540
541 priv->txmapringtail = priv->txmapring;
542 priv->txmapringhead = priv->txmapring;
543 priv->txmapbufstail = priv->txmapbufs;
544
545 priv->txbkpringtail = priv->txbkpring;
546 priv->txbkpringhead = priv->txbkpring;
547 priv->txbkpbufstail = priv->txbkpbufs;
548
549 priv->txbepringtail = priv->txbepring;
550 priv->txbepringhead = priv->txbepring;
551 priv->txbepbufstail = priv->txbepbufs;
552
553 priv->txvipringtail = priv->txvipring;
554 priv->txvipringhead = priv->txvipring;
555 priv->txvipbufstail = priv->txvipbufs;
556
557 priv->txvopringtail = priv->txvopring;
558 priv->txvopringhead = priv->txvopring;
559 priv->txvopbufstail = priv->txvopbufs;
560
561 priv->txhpringtail = priv->txhpring;
562 priv->txhpringhead = priv->txhpring;
563 priv->txhpbufstail = priv->txhpbufs;
564
565 priv->txbeaconringtail = priv->txbeaconring;
566 priv->txbeaconbufstail = priv->txbeaconbufs;
567 set_nic_txring(dev);
568
569 ieee80211_reset_queue(priv->ieee80211);
570 priv->ack_tx_to_ieee = 0;
571}
572
573void fix_rx_fifo(struct net_device *dev)
574{
575 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
576 u32 *tmp;
577 struct buffer *rxbuf;
578 u8 rx_desc_size;
579
580 rx_desc_size = 8; /* 4*8 = 32 bytes */
581
582 for (tmp = priv->rxring, rxbuf = priv->rxbufferhead;
583 (tmp < (priv->rxring)+(priv->rxringcount)*rx_desc_size);
584 tmp += rx_desc_size, rxbuf = rxbuf->next) {
585 *(tmp+2) = rxbuf->dma;
586 *tmp = *tmp & ~0xfff;
587 *tmp = *tmp | priv->rxbuffersize;
588 *tmp |= (1<<31);
589 }
590
591 priv->rxringtail = priv->rxring;
592 priv->rxbuffer = priv->rxbufferhead;
593 priv->rx_skb_complete = 1;
594 set_nic_rxring(dev);
595}
596
597static void rtl8180_irq_disable(struct net_device *dev)
598{
599 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
600
601 write_nic_dword(dev, IMR, 0);
602 force_pci_posting(dev);
603 priv->irq_enabled = 0;
604}
605
606void rtl8180_set_mode(struct net_device *dev, int mode)
607{
608 u8 ecmd;
609
610 ecmd = read_nic_byte(dev, EPROM_CMD);
611 ecmd = ecmd & ~EPROM_CMD_OPERATING_MODE_MASK;
612 ecmd = ecmd | (mode<<EPROM_CMD_OPERATING_MODE_SHIFT);
613 ecmd = ecmd & ~(1<<EPROM_CS_SHIFT);
614 ecmd = ecmd & ~(1<<EPROM_CK_SHIFT);
615 write_nic_byte(dev, EPROM_CMD, ecmd);
616}
617
618void rtl8180_beacon_tx_enable(struct net_device *dev);
619
620void rtl8180_update_msr(struct net_device *dev)
621{
622 struct r8180_priv *priv = ieee80211_priv(dev);
623 u8 msr;
624 u32 rxconf;
625
626 msr = read_nic_byte(dev, MSR);
627 msr &= ~MSR_LINK_MASK;
628
629 rxconf = read_nic_dword(dev, RX_CONF);
630
631 if (priv->ieee80211->state == IEEE80211_LINKED) {
632 if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
633 msr |= (MSR_LINK_ADHOC<<MSR_LINK_SHIFT);
634 else if (priv->ieee80211->iw_mode == IW_MODE_MASTER)
635 msr |= (MSR_LINK_MASTER<<MSR_LINK_SHIFT);
636 else if (priv->ieee80211->iw_mode == IW_MODE_INFRA)
637 msr |= (MSR_LINK_MANAGED<<MSR_LINK_SHIFT);
638 else
639 msr |= (MSR_LINK_NONE<<MSR_LINK_SHIFT);
640 rxconf |= (1<<RX_CHECK_BSSID_SHIFT);
641
642 } else {
643 msr |= (MSR_LINK_NONE<<MSR_LINK_SHIFT);
644 rxconf &= ~(1<<RX_CHECK_BSSID_SHIFT);
645 }
646
647 write_nic_byte(dev, MSR, msr);
648 write_nic_dword(dev, RX_CONF, rxconf);
649}
650
651void rtl8180_set_chan(struct net_device *dev, short ch)
652{
653 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
654
655 if ((ch > 14) || (ch < 1)) {
656 netdev_err(dev, "In %s: Invalid channel %d\n", __func__, ch);
657 return;
658 }
659
660 priv->chan = ch;
661 priv->rf_set_chan(dev, priv->chan);
662}
663
664void set_nic_txring(struct net_device *dev)
665{
666 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
667
668 write_nic_dword(dev, TX_MANAGEPRIORITY_RING_ADDR, priv->txmapringdma);
669 write_nic_dword(dev, TX_BKPRIORITY_RING_ADDR, priv->txbkpringdma);
670 write_nic_dword(dev, TX_BEPRIORITY_RING_ADDR, priv->txbepringdma);
671 write_nic_dword(dev, TX_VIPRIORITY_RING_ADDR, priv->txvipringdma);
672 write_nic_dword(dev, TX_VOPRIORITY_RING_ADDR, priv->txvopringdma);
673 write_nic_dword(dev, TX_HIGHPRIORITY_RING_ADDR, priv->txhpringdma);
674 write_nic_dword(dev, TX_BEACON_RING_ADDR, priv->txbeaconringdma);
675}
676
677void rtl8180_beacon_tx_enable(struct net_device *dev)
678{
679 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
680
681 rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
682 priv->dma_poll_stop_mask &= ~(TPPOLLSTOP_BQ);
683 write_nic_byte(dev, TPPollStop, priv->dma_poll_mask);
684 rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
685}
686
687void rtl8180_beacon_tx_disable(struct net_device *dev)
688{
689 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
690
691 rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
692 priv->dma_poll_stop_mask |= TPPOLLSTOP_BQ;
693 write_nic_byte(dev, TPPollStop, priv->dma_poll_stop_mask);
694 rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
695
696}
697
698void rtl8180_rtx_disable(struct net_device *dev)
699{
700 u8 cmd;
701 struct r8180_priv *priv = ieee80211_priv(dev);
702
703 cmd = read_nic_byte(dev, CMD);
704 write_nic_byte(dev, CMD, cmd &
705 ~((1<<CMD_RX_ENABLE_SHIFT)|(1<<CMD_TX_ENABLE_SHIFT)));
706 force_pci_posting(dev);
707 mdelay(10);
708
709 if (!priv->rx_skb_complete)
710 dev_kfree_skb_any(priv->rx_skb);
711}
712
713static short alloc_tx_desc_ring(struct net_device *dev, int bufsize, int count,
714 int addr)
715{
716 int i;
717 u32 *desc;
718 u32 *tmp;
719 dma_addr_t dma_desc, dma_tmp;
720 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
721 struct pci_dev *pdev = priv->pdev;
722 void *buf;
723
724 if ((bufsize & 0xfff) != bufsize) {
725 DMESGE("TX buffer allocation too large");
726 return 0;
727 }
728 desc = (u32 *)pci_alloc_consistent(pdev,
729 sizeof(u32)*8*count+256, &dma_desc);
730 if (desc == NULL)
731 return -1;
732
733 if (dma_desc & 0xff)
734 /*
 735		 * the descriptor's buffer must be 256-byte aligned;
 736		 * we shouldn't be here, since we set the DMA mask!
737 */
738 WARN(1, "DMA buffer is not aligned\n");
739
740 tmp = desc;
741
742 for (i = 0; i < count; i++) {
743 buf = (void *)pci_alloc_consistent(pdev, bufsize, &dma_tmp);
744 if (buf == NULL)
745 return -ENOMEM;
746
747 switch (addr) {
748 case TX_MANAGEPRIORITY_RING_ADDR:
749 if (-1 == buffer_add(&priv->txmapbufs,
750 buf, dma_tmp, NULL)) {
751 DMESGE("Unable to allocate mem for buffer NP");
752 return -ENOMEM;
753 }
754 break;
755 case TX_BKPRIORITY_RING_ADDR:
756 if (-1 == buffer_add(&priv->txbkpbufs,
757 buf, dma_tmp, NULL)) {
758 DMESGE("Unable to allocate mem for buffer LP");
759 return -ENOMEM;
760 }
761 break;
762 case TX_BEPRIORITY_RING_ADDR:
763 if (-1 == buffer_add(&priv->txbepbufs,
764 buf, dma_tmp, NULL)) {
765 DMESGE("Unable to allocate mem for buffer NP");
766 return -ENOMEM;
767 }
768 break;
769 case TX_VIPRIORITY_RING_ADDR:
770 if (-1 == buffer_add(&priv->txvipbufs,
771 buf, dma_tmp, NULL)) {
772 DMESGE("Unable to allocate mem for buffer LP");
773 return -ENOMEM;
774 }
775 break;
776 case TX_VOPRIORITY_RING_ADDR:
777 if (-1 == buffer_add(&priv->txvopbufs,
778 buf, dma_tmp, NULL)) {
779 DMESGE("Unable to allocate mem for buffer NP");
780 return -ENOMEM;
781 }
782 break;
783 case TX_HIGHPRIORITY_RING_ADDR:
784 if (-1 == buffer_add(&priv->txhpbufs,
785 buf, dma_tmp, NULL)) {
786 DMESGE("Unable to allocate mem for buffer HP");
787 return -ENOMEM;
788 }
789 break;
790 case TX_BEACON_RING_ADDR:
791 if (-1 == buffer_add(&priv->txbeaconbufs,
792 buf, dma_tmp, NULL)) {
793 DMESGE("Unable to allocate mem for buffer BP");
794 return -ENOMEM;
795 }
796 break;
797 }
798 *tmp = *tmp & ~(1<<31); /* descriptor empty, owned by the drv */
799 *(tmp+2) = (u32)dma_tmp;
800 *(tmp+3) = bufsize;
801
802 if (i+1 < count)
803 *(tmp+4) = (u32)dma_desc+((i+1)*8*4);
804 else
805 *(tmp+4) = (u32)dma_desc;
806
807 tmp = tmp+8;
808 }
809
810 switch (addr) {
811 case TX_MANAGEPRIORITY_RING_ADDR:
812 priv->txmapringdma = dma_desc;
813 priv->txmapring = desc;
814 break;
815 case TX_BKPRIORITY_RING_ADDR:
816 priv->txbkpringdma = dma_desc;
817 priv->txbkpring = desc;
818 break;
819 case TX_BEPRIORITY_RING_ADDR:
820 priv->txbepringdma = dma_desc;
821 priv->txbepring = desc;
822 break;
823 case TX_VIPRIORITY_RING_ADDR:
824 priv->txvipringdma = dma_desc;
825 priv->txvipring = desc;
826 break;
827 case TX_VOPRIORITY_RING_ADDR:
828 priv->txvopringdma = dma_desc;
829 priv->txvopring = desc;
830 break;
831 case TX_HIGHPRIORITY_RING_ADDR:
832 priv->txhpringdma = dma_desc;
833 priv->txhpring = desc;
834 break;
835 case TX_BEACON_RING_ADDR:
836 priv->txbeaconringdma = dma_desc;
837 priv->txbeaconring = desc;
838 break;
839
840 }
841
842 return 0;
843}
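/*
 * Summary of the descriptor layout built above (annotation, not original
 * code): each TX descriptor is 8 dwords; dword 0 holds the status flags
 * (bit 31 = owned by the NIC), dword 2 the buffer DMA address, dword 3 the
 * buffer size, and dword 4 the DMA address of the next descriptor, with
 * the last entry pointing back to the first to close the ring.
 */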
844
845static void free_tx_desc_rings(struct net_device *dev)
846{
847 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
848 struct pci_dev *pdev = priv->pdev;
849 int count = priv->txringcount;
850
851 pci_free_consistent(pdev, sizeof(u32)*8*count+256,
852 priv->txmapring, priv->txmapringdma);
853 buffer_free(dev, &(priv->txmapbufs), priv->txbuffsize, 1);
854
855 pci_free_consistent(pdev, sizeof(u32)*8*count+256,
856 priv->txbkpring, priv->txbkpringdma);
857 buffer_free(dev, &(priv->txbkpbufs), priv->txbuffsize, 1);
858
859 pci_free_consistent(pdev, sizeof(u32)*8*count+256,
860 priv->txbepring, priv->txbepringdma);
861 buffer_free(dev, &(priv->txbepbufs), priv->txbuffsize, 1);
862
863 pci_free_consistent(pdev, sizeof(u32)*8*count+256,
864 priv->txvipring, priv->txvipringdma);
865 buffer_free(dev, &(priv->txvipbufs), priv->txbuffsize, 1);
866
867 pci_free_consistent(pdev, sizeof(u32)*8*count+256,
868 priv->txvopring, priv->txvopringdma);
869 buffer_free(dev, &(priv->txvopbufs), priv->txbuffsize, 1);
870
871 pci_free_consistent(pdev, sizeof(u32)*8*count+256,
872 priv->txhpring, priv->txhpringdma);
873 buffer_free(dev, &(priv->txhpbufs), priv->txbuffsize, 1);
874
875 count = priv->txbeaconcount;
876 pci_free_consistent(pdev, sizeof(u32)*8*count+256,
877 priv->txbeaconring, priv->txbeaconringdma);
878 buffer_free(dev, &(priv->txbeaconbufs), priv->txbuffsize, 1);
879}
880
881static void free_rx_desc_ring(struct net_device *dev)
882{
883 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
884 struct pci_dev *pdev = priv->pdev;
885 int count = priv->rxringcount;
886
887 pci_free_consistent(pdev, sizeof(u32)*8*count+256,
888 priv->rxring, priv->rxringdma);
889
890 buffer_free(dev, &(priv->rxbuffer), priv->rxbuffersize, 0);
891}
892
893static short alloc_rx_desc_ring(struct net_device *dev, u16 bufsize, int count)
894{
895 int i;
896 u32 *desc;
897 u32 *tmp;
898 dma_addr_t dma_desc, dma_tmp;
899 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
900 struct pci_dev *pdev = priv->pdev;
901 void *buf;
902 u8 rx_desc_size;
903
904 rx_desc_size = 8; /* 4*8 = 32 bytes */
905
906 if ((bufsize & 0xfff) != bufsize) {
907 DMESGE("RX buffer allocation too large");
908 return -1;
909 }
910
911 desc = (u32 *)pci_alloc_consistent(pdev,
912 sizeof(u32) * rx_desc_size * count + 256, &dma_desc);
913
914 if (dma_desc & 0xff)
915 /*
 916	 * the descriptor's buffer must be 256-byte aligned;
 917	 * this should never happen since we specify the DMA mask
918 */
919 WARN(1, "DMA buffer is not aligned\n");
920
921 priv->rxring = desc;
922 priv->rxringdma = dma_desc;
923 tmp = desc;
924
925 for (i = 0; i < count; i++) {
926 buf = kmalloc(bufsize * sizeof(u8), GFP_ATOMIC);
927 if (buf == NULL) {
928 DMESGE("Failed to kmalloc RX buffer");
929 return -1;
930 }
931
932 dma_tmp = pci_map_single(pdev, buf, bufsize * sizeof(u8),
933 PCI_DMA_FROMDEVICE);
934 if (pci_dma_mapping_error(pdev, dma_tmp))
935 return -1;
936 if (-1 == buffer_add(&(priv->rxbuffer), buf, dma_tmp,
937 &(priv->rxbufferhead))) {
938 DMESGE("Unable to allocate mem RX buf");
939 return -1;
940 }
941 *tmp = 0; /* zero pads the header of the descriptor */
942 *tmp = *tmp | (bufsize&0xfff);
943 *(tmp+2) = (u32)dma_tmp;
944 *tmp = *tmp | (1<<31); /* descriptor void, owned by the NIC */
945
946 tmp = tmp+rx_desc_size;
947 }
948
949 /* this is the last descriptor */
950 *(tmp - rx_desc_size) = *(tmp - rx_desc_size) | (1 << 30);
951
952 return 0;
953}
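/*
 * Summary of the RX descriptor fields initialized above (annotation, not
 * original code): dword 0 carries the buffer size in its low 12 bits,
 * bit 31 marks the descriptor as owned by the NIC, and bit 30 on the
 * final entry marks the end of the ring; dword 2 holds the DMA address
 * of the receive buffer.
 */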
954
955
956void set_nic_rxring(struct net_device *dev)
957{
958 u8 pgreg;
959 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
960
961 pgreg = read_nic_byte(dev, PGSELECT);
962 write_nic_byte(dev, PGSELECT, pgreg & ~(1<<PGSELECT_PG_SHIFT));
963
964 write_nic_dword(dev, RXRING_ADDR, priv->rxringdma);
965}
966
967void rtl8180_reset(struct net_device *dev)
968{
969 u8 cr;
970
971 rtl8180_irq_disable(dev);
972
973 cr = read_nic_byte(dev, CMD);
974 cr = cr & 2;
975 cr = cr | (1<<CMD_RST_SHIFT);
976 write_nic_byte(dev, CMD, cr);
977
978 force_pci_posting(dev);
979
980 mdelay(200);
981
982 if (read_nic_byte(dev, CMD) & (1<<CMD_RST_SHIFT))
983 DMESGW("Card reset timeout!");
984 else
985 DMESG("Card successfully reset");
986
987 rtl8180_set_mode(dev, EPROM_CMD_LOAD);
988 force_pci_posting(dev);
989 mdelay(200);
990}
991
992inline u16 ieeerate2rtlrate(int rate)
993{
994 switch (rate) {
995 case 10:
996 return 0;
997 case 20:
998 return 1;
999 case 55:
1000 return 2;
1001 case 110:
1002 return 3;
1003 case 60:
1004 return 4;
1005 case 90:
1006 return 5;
1007 case 120:
1008 return 6;
1009 case 180:
1010 return 7;
1011 case 240:
1012 return 8;
1013 case 360:
1014 return 9;
1015 case 480:
1016 return 10;
1017 case 540:
1018 return 11;
1019 default:
1020 return 3;
1021 }
1022}
1023
1024static u16 rtl_rate[] = {10, 20, 55, 110, 60,
1025 90, 120, 180, 240, 360, 480, 540, 720};
1026
1027inline u16 rtl8180_rate2rate(short rate)
1028{
1029 if (rate > 12)
1030 return 10;
1031 return rtl_rate[rate];
1032}
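/*
 * Illustrative note (not part of the original file): rates are kept in
 * units of 100 kbit/s, so the two helpers above invert each other for
 * valid codes, e.g. ieeerate2rtlrate(540) == 11 and
 * rtl8180_rate2rate(11) == 540 (54 Mbps).  Unknown inputs fall back to
 * code 3 (11 Mbps) and to 10 (1 Mbps) respectively.
 */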
1033
1034inline u8 rtl8180_IsWirelessBMode(u16 rate)
1035{
1036 if (((rate <= 110) && (rate != 60) && (rate != 90)) || (rate == 220))
1037 return 1;
1038 else
1039 return 0;
1040}
1041
1042u16 N_DBPSOfRate(u16 DataRate);
1043
1044static u16 ComputeTxTime(u16 FrameLength, u16 DataRate, u8 bManagementFrame,
1045 u8 bShortPreamble)
1046{
1047 u16 FrameTime;
1048 u16 N_DBPS;
1049 u16 Ceiling;
1050
1051 if (rtl8180_IsWirelessBMode(DataRate)) {
1052 if (bManagementFrame || !bShortPreamble || DataRate == 10)
1053 /* long preamble */
1054 FrameTime = (u16)(144+48+(FrameLength*8/(DataRate/10)));
1055 else
1056 /* short preamble */
1057 FrameTime = (u16)(72+24+(FrameLength*8/(DataRate/10)));
1058
 1059		if ((FrameLength*8 % (DataRate/10)) != 0) /* get the ceiling */
1060 FrameTime++;
1061 } else { /* 802.11g DSSS-OFDM PLCP length field calculation. */
1062 N_DBPS = N_DBPSOfRate(DataRate);
1063 Ceiling = (16 + 8*FrameLength + 6) / N_DBPS
1064 + (((16 + 8*FrameLength + 6) % N_DBPS) ? 1 : 0);
1065 FrameTime = (u16)(16 + 4 + 4*Ceiling + 6);
1066 }
1067 return FrameTime;
1068}
1069
1070u16 N_DBPSOfRate(u16 DataRate)
1071{
1072 u16 N_DBPS = 24;
1073
1074 switch (DataRate) {
1075 case 60:
1076 N_DBPS = 24;
1077 break;
1078 case 90:
1079 N_DBPS = 36;
1080 break;
1081 case 120:
1082 N_DBPS = 48;
1083 break;
1084 case 180:
1085 N_DBPS = 72;
1086 break;
1087 case 240:
1088 N_DBPS = 96;
1089 break;
1090 case 360:
1091 N_DBPS = 144;
1092 break;
1093 case 480:
1094 N_DBPS = 192;
1095 break;
1096 case 540:
1097 N_DBPS = 216;
1098 break;
1099 default:
1100 break;
1101 }
1102
1103 return N_DBPS;
1104}
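/*
 * Sketch for clarity (not part of the original driver): the OFDM branch of
 * ComputeTxTime() above boils down to the arithmetic below, shown as a
 * self-contained helper.  Worked example: a 1500-byte frame at 54 Mbps has
 * N_DBPS = 216, so nsym = ceil((16 + 12000 + 6) / 216) = 56 and the frame
 * time is 16 + 4 + 4*56 + 6 = 250 microseconds.
 */
static unsigned int ofdm_frame_time_us(unsigned int frame_len_bytes,
				       unsigned int n_dbps)
{
	/* SERVICE (16) + payload bits + tail (6) */
	unsigned int bits = 16 + 8 * frame_len_bytes + 6;
	/* number of OFDM symbols, rounded up */
	unsigned int nsym = (bits + n_dbps - 1) / n_dbps;

	/* preamble + SIGNAL + data symbols + the trailing 6 us used above */
	return 16 + 4 + 4 * nsym + 6;
}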
1105
1106/*
 1107 * For the Netgear case, they want good-looking signal strength.
1108 */
1109static long NetgearSignalStrengthTranslate(long LastSS, long CurrSS)
1110{
1111 long RetSS;
1112
1113 /* Step 1. Scale mapping. */
1114 if (CurrSS >= 71 && CurrSS <= 100)
1115 RetSS = 90 + ((CurrSS - 70) / 3);
1116 else if (CurrSS >= 41 && CurrSS <= 70)
1117 RetSS = 78 + ((CurrSS - 40) / 3);
1118 else if (CurrSS >= 31 && CurrSS <= 40)
1119 RetSS = 66 + (CurrSS - 30);
1120 else if (CurrSS >= 21 && CurrSS <= 30)
1121 RetSS = 54 + (CurrSS - 20);
1122 else if (CurrSS >= 5 && CurrSS <= 20)
1123 RetSS = 42 + (((CurrSS - 5) * 2) / 3);
1124 else if (CurrSS == 4)
1125 RetSS = 36;
1126 else if (CurrSS == 3)
1127 RetSS = 27;
1128 else if (CurrSS == 2)
1129 RetSS = 18;
1130 else if (CurrSS == 1)
1131 RetSS = 9;
1132 else
1133 RetSS = CurrSS;
1134
1135 /* Step 2. Smoothing. */
1136 if (LastSS > 0)
1137 RetSS = ((LastSS * 5) + (RetSS) + 5) / 6;
1138
1139 return RetSS;
1140}
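/*
 * Worked example (annotation, not original code): a raw strength of 50
 * falls in the 41..70 band, so RetSS = 78 + (50 - 40) / 3 = 81; with a
 * previous value of 75 the smoothing step yields (75*5 + 81 + 5) / 6 = 76.
 */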
1141
1142/*
1143 * Translate 0-100 signal strength index into dBm.
1144 */
1145static long TranslateToDbm8185(u8 SignalStrengthIndex)
1146{
1147 long SignalPower;
1148
1149 /* Translate to dBm (x=0.5y-95). */
1150 SignalPower = (long)((SignalStrengthIndex + 1) >> 1);
1151 SignalPower -= 95;
1152
1153 return SignalPower;
1154}
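/*
 * Worked example (annotation, not original code): an index of 60 maps to
 * ((60 + 1) >> 1) - 95 = 30 - 95 = -65 dBm.
 */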
1155
1156/*
1157 * Perform signal smoothing for dynamic mechanism.
 1158 * This differs from PerformSignalSmoothing8185 in the smoothing formula.
 1159 * No dramatic adjustment is applied because the dynamic mechanism needs
 1160 * some degree of correctness. Ported from 8187B.
1161 */
1162static void PerformUndecoratedSignalSmoothing8185(struct r8180_priv *priv,
1163 bool bCckRate)
1164{
1165 long smoothedSS;
1166 long smoothedRx;
1167
 1168	/* Determine whether the current packet is a CCK-rate packet. */
1169 priv->bCurCCKPkt = bCckRate;
1170
1171 smoothedSS = priv->SignalStrength * 10;
1172
1173 if (priv->UndecoratedSmoothedSS >= 0)
1174 smoothedSS = ((priv->UndecoratedSmoothedSS * 5) +
1175 smoothedSS) / 6;
1176
1177 priv->UndecoratedSmoothedSS = smoothedSS;
1178
1179 smoothedRx = ((priv->UndecoratedSmoothedRxPower * 50) +
1180 (priv->RxPower * 11)) / 60;
1181
1182 priv->UndecoratedSmoothedRxPower = smoothedRx;
1183
1184 if (bCckRate)
1185 priv->CurCCKRSSI = priv->RSSI;
1186 else
1187 priv->CurCCKRSSI = 0;
1188}
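/*
 * Illustrative note (not part of the original file): both updates above
 * are simple IIR filters.  The smoothed signal strength keeps 5/6 of the
 * old value (scaled by 10), e.g. an old value of 400 and a new sample of
 * 70 give (400*5 + 700) / 6 = 450, while the RX power weighs the old and
 * new values by 50/60 and 11/60 respectively.
 */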
1189
1190
1191/*
1192 * This is rough RX isr handling routine
1193 */
1194static void rtl8180_rx(struct net_device *dev)
1195{
1196 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1197 struct sk_buff *tmp_skb;
1198 short first, last;
1199 u32 len;
1200 int lastlen;
1201 unsigned char quality, signal;
1202 u8 rate;
1203 u32 *tmp, *tmp2;
1204 u8 rx_desc_size;
1205 u8 padding;
1206 char rxpower = 0;
1207 u32 RXAGC = 0;
1208 long RxAGC_dBm = 0;
1209 u8 LNA = 0, BB = 0;
1210 u8 LNA_gain[4] = {02, 17, 29, 39};
1211 u8 Antenna = 0;
1212 struct ieee80211_hdr_4addr *hdr;
1213 u16 fc, type;
1214 u8 bHwError = 0, bCRC = 0, bICV = 0;
1215 bool bCckRate = false;
1216 u8 RSSI = 0;
1217 long SignalStrengthIndex = 0;
1218 struct ieee80211_rx_stats stats = {
1219 .signal = 0,
1220 .noise = -98,
1221 .rate = 0,
1222 .freq = IEEE80211_24GHZ_BAND,
1223 };
1224
1225 stats.nic_type = NIC_8185B;
1226 rx_desc_size = 8;
1227
1228 if ((*(priv->rxringtail)) & (1<<31)) {
 1229		/* we have got an RX int, but the descriptor we are pointing
 1230		 * to is empty.
1231 */
1232
1233 priv->stats.rxnodata++;
1234 priv->ieee80211->stats.rx_errors++;
1235
1236 tmp2 = NULL;
1237 tmp = priv->rxringtail;
1238 do {
1239 if (tmp == priv->rxring)
1240 tmp = priv->rxring + (priv->rxringcount - 1) *
1241 rx_desc_size;
1242 else
1243 tmp -= rx_desc_size;
1244
1245 if (!(*tmp & (1<<31)))
1246 tmp2 = tmp;
1247 } while (tmp != priv->rxring);
1248
1249 if (tmp2)
1250 priv->rxringtail = tmp2;
1251 }
1252
1253 /* while there are filled descriptors */
1254 while (!(*(priv->rxringtail) & (1<<31))) {
1255 if (*(priv->rxringtail) & (1<<26))
1256 DMESGW("RX buffer overflow");
1257 if (*(priv->rxringtail) & (1<<12))
1258 priv->stats.rxicverr++;
1259
1260 if (*(priv->rxringtail) & (1<<27)) {
1261 priv->stats.rxdmafail++;
1262 goto drop;
1263 }
1264
1265 pci_dma_sync_single_for_cpu(priv->pdev,
1266 priv->rxbuffer->dma,
1267 priv->rxbuffersize * sizeof(u8),
1268 PCI_DMA_FROMDEVICE);
1269
1270 first = *(priv->rxringtail) & (1<<29) ? 1 : 0;
1271 if (first)
1272 priv->rx_prevlen = 0;
1273
1274 last = *(priv->rxringtail) & (1<<28) ? 1 : 0;
1275 if (last) {
1276 lastlen = ((*priv->rxringtail) & 0xfff);
1277
1278 /* if the last descriptor (that should tell us the total
 1279			 * packet len) tells us something less than the
 1280			 * descriptors' len we had until now, then there is some
 1281			 * problem.
 1282			 * workaround to prevent a kernel panic
1283 */
1284 if (lastlen < priv->rx_prevlen)
1285 len = 0;
1286 else
1287 len = lastlen-priv->rx_prevlen;
1288
1289 if (*(priv->rxringtail) & (1<<13)) {
1290 if ((*(priv->rxringtail) & 0xfff) < 500)
1291 priv->stats.rxcrcerrmin++;
1292 else if ((*(priv->rxringtail) & 0x0fff) > 1000)
1293 priv->stats.rxcrcerrmax++;
1294 else
1295 priv->stats.rxcrcerrmid++;
1296
1297 }
1298
1299 } else {
1300 len = priv->rxbuffersize;
1301 }
1302
1303 if (first && last) {
1304 padding = ((*(priv->rxringtail+3))&(0x04000000))>>26;
1305 } else if (first) {
1306 padding = ((*(priv->rxringtail+3))&(0x04000000))>>26;
1307 if (padding)
1308 len -= 2;
1309 } else {
1310 padding = 0;
1311 }
1312 padding = 0;
1313 priv->rx_prevlen += len;
1314
1315 if (priv->rx_prevlen > MAX_FRAG_THRESHOLD + 100) {
1316 /* HW is probably passing several buggy frames without
1317 * FD or LD flag set.
1318 * Throw this garbage away to prevent skb memory
 1319			 * exhaustion
1320 */
1321 if (!priv->rx_skb_complete)
1322 dev_kfree_skb_any(priv->rx_skb);
1323 priv->rx_skb_complete = 1;
1324 }
1325
1326 signal = (unsigned char)((*(priv->rxringtail + 3) &
1327 0x00ff0000) >> 16);
1328 signal = (signal & 0xfe) >> 1;
1329
1330 quality = (unsigned char)((*(priv->rxringtail+3)) & (0xff));
1331
1332 stats.mac_time[0] = *(priv->rxringtail+1);
1333 stats.mac_time[1] = *(priv->rxringtail+2);
1334
1335 rxpower = ((char)((*(priv->rxringtail + 4) &
1336 0x00ff0000) >> 16)) / 2 - 42;
1337
1338 RSSI = ((u8)((*(priv->rxringtail + 3) &
1339 0x0000ff00) >> 8)) & 0x7f;
1340
1341 rate = ((*(priv->rxringtail)) &
1342 ((1<<23)|(1<<22)|(1<<21)|(1<<20)))>>20;
1343
1344 stats.rate = rtl8180_rate2rate(rate);
1345 Antenna = (*(priv->rxringtail + 3) & 0x00008000) == 0 ? 0 : 1;
1346 if (!rtl8180_IsWirelessBMode(stats.rate)) { /* OFDM rate. */
1347 RxAGC_dBm = rxpower+1; /* bias */
1348 } else { /* CCK rate. */
1349 RxAGC_dBm = signal; /* bit 0 discard */
1350
1351 LNA = (u8) (RxAGC_dBm & 0x60) >> 5; /* bit 6~ bit 5 */
1352 BB = (u8) (RxAGC_dBm & 0x1F); /* bit 4 ~ bit 0 */
1353
1354 /* Pin_11b=-(LNA_gain+BB_gain) (dBm) */
1355 RxAGC_dBm = -(LNA_gain[LNA] + (BB * 2));
1356
1357 RxAGC_dBm += 4; /* bias */
1358 }
1359
1360 if (RxAGC_dBm & 0x80) /* absolute value */
1361 RXAGC = ~(RxAGC_dBm)+1;
1362 bCckRate = rtl8180_IsWirelessBMode(stats.rate);
1363 /* Translate RXAGC into 1-100. */
1364 if (!rtl8180_IsWirelessBMode(stats.rate)) { /* OFDM rate. */
1365 if (RXAGC > 90)
1366 RXAGC = 90;
1367 else if (RXAGC < 25)
1368 RXAGC = 25;
1369 RXAGC = (90-RXAGC)*100/65;
1370 } else { /* CCK rate. */
1371 if (RXAGC > 95)
1372 RXAGC = 95;
1373 else if (RXAGC < 30)
1374 RXAGC = 30;
1375 RXAGC = (95-RXAGC)*100/65;
1376 }
1377 priv->SignalStrength = (u8)RXAGC;
1378 priv->RecvSignalPower = RxAGC_dBm;
1379 priv->RxPower = rxpower;
1380 priv->RSSI = RSSI;
1381 /* SQ translation formula is provided by SD3 DZ. 2006.06.27 */
1382 if (quality >= 127)
 1383			/* 0 causes epc to show signal zero; work around it for now */
1384 quality = 1;
1385 else if (quality < 27)
1386 quality = 100;
1387 else
1388 quality = 127 - quality;
1389 priv->SignalQuality = quality;
1390
1391 stats.signal = (u8) quality;
1392
1393 stats.signalstrength = RXAGC;
1394 if (stats.signalstrength > 100)
1395 stats.signalstrength = 100;
1396 stats.signalstrength = (stats.signalstrength * 70) / 100 + 30;
1397 stats.rssi = priv->wstats.qual.qual = priv->SignalQuality;
1398 stats.noise = priv->wstats.qual.noise =
1399 100 - priv->wstats.qual.qual;
1400 bHwError = (((*(priv->rxringtail)) & (0x00000fff)) == 4080) |
1401 (((*(priv->rxringtail)) & (0x04000000)) != 0) |
1402 (((*(priv->rxringtail)) & (0x08000000)) != 0) |
1403 (((~(*(priv->rxringtail))) & (0x10000000)) != 0) |
1404 (((~(*(priv->rxringtail))) & (0x20000000)) != 0);
1405 bCRC = ((*(priv->rxringtail)) & (0x00002000)) >> 13;
1406 bICV = ((*(priv->rxringtail)) & (0x00001000)) >> 12;
1407 hdr = (struct ieee80211_hdr_4addr *)priv->rxbuffer->buf;
1408 fc = le16_to_cpu(hdr->frame_ctl);
1409 type = WLAN_FC_GET_TYPE(fc);
1410
1411 if (IEEE80211_FTYPE_CTL != type &&
1412 !bHwError && !bCRC && !bICV &&
1413 eqMacAddr(priv->ieee80211->current_network.bssid,
1414 fc & IEEE80211_FCTL_TODS ? hdr->addr1 :
1415 fc & IEEE80211_FCTL_FROMDS ? hdr->addr2 :
1416 hdr->addr3)) {
1417
 1418			/* Perform signal smoothing for the dynamic
 1419			 * mechanism on demand. This differs
 1420			 * from PerformSignalSmoothing8185 in the smoothing
 1421			 * formula. No dramatic adjustment is applied
 1422			 * because the dynamic mechanism needs some degree
 1423			 * of correctness. */
1424 PerformUndecoratedSignalSmoothing8185(priv, bCckRate);
1425
 1426			/* For good-looking signal strength. */
1427 SignalStrengthIndex = NetgearSignalStrengthTranslate(
1428 priv->LastSignalStrengthInPercent,
1429 priv->SignalStrength);
1430
1431 priv->LastSignalStrengthInPercent = SignalStrengthIndex;
1432 priv->Stats_SignalStrength =
1433 TranslateToDbm8185((u8)SignalStrengthIndex);
1434
1435 /*
1436 * We need more correct power of received packets and
1437 * the "SignalStrength" of RxStats is beautified, so we
1438 * record the correct power here.
1439 */
1440
1441 priv->Stats_SignalQuality = (long)(
1442 priv->Stats_SignalQuality * 5 +
1443 (long)priv->SignalQuality + 5) / 6;
1444
1445 priv->Stats_RecvSignalPower = (long)(
1446 priv->Stats_RecvSignalPower * 5 +
1447 priv->RecvSignalPower - 1) / 6;
1448
1449 /*
1450 * Figure out which antenna received the last packet.
1451 * 0: aux, 1: main
1452 */
1453 priv->LastRxPktAntenna = Antenna ? 1 : 0;
1454 SwAntennaDiversityRxOk8185(dev, priv->SignalStrength);
1455 }
1456
1457 if (first) {
1458 if (!priv->rx_skb_complete) {
1459 /* seems that HW sometimes fails to receive and
1460 * doesn't provide the last descriptor.
1461 */
1462 dev_kfree_skb_any(priv->rx_skb);
1463 priv->stats.rxnolast++;
1464 }
1465 priv->rx_skb = dev_alloc_skb(len+2);
1466 if (!priv->rx_skb)
1467 goto drop;
1468
1469 priv->rx_skb_complete = 0;
1470 priv->rx_skb->dev = dev;
1471 } else {
1472 /* if we are here we should have already RXed the first
1473 * frame.
1474 * If we get here and the skb is not allocated then
 1475			 * we have just thrown out garbage (skb not allocated)
 1476			 * and we are still RXing garbage....
1477 */
1478 if (!priv->rx_skb_complete) {
1479
1480 tmp_skb = dev_alloc_skb(
1481 priv->rx_skb->len + len + 2);
1482
1483 if (!tmp_skb)
1484 goto drop;
1485
1486 tmp_skb->dev = dev;
1487
1488 memcpy(skb_put(tmp_skb, priv->rx_skb->len),
1489 priv->rx_skb->data,
1490 priv->rx_skb->len);
1491
1492 dev_kfree_skb_any(priv->rx_skb);
1493
1494 priv->rx_skb = tmp_skb;
1495 }
1496 }
1497
1498 if (!priv->rx_skb_complete) {
1499 memcpy(skb_put(priv->rx_skb, len), ((unsigned char *)
1500 priv->rxbuffer->buf) + (padding ? 2 : 0), len);
1501 }
1502
1503 if (last && !priv->rx_skb_complete) {
1504 if (priv->rx_skb->len > 4)
1505 skb_trim(priv->rx_skb, priv->rx_skb->len-4);
1506 if (!ieee80211_rtl_rx(priv->ieee80211,
1507 priv->rx_skb, &stats))
1508 dev_kfree_skb_any(priv->rx_skb);
1509 priv->rx_skb_complete = 1;
1510 }
1511
1512 pci_dma_sync_single_for_device(priv->pdev,
1513 priv->rxbuffer->dma,
1514 priv->rxbuffersize * sizeof(u8),
1515 PCI_DMA_FROMDEVICE);
1516
 1517drop: /* this is used when we do not have enough memory */
1518 /* restore the descriptor */
1519 *(priv->rxringtail+2) = priv->rxbuffer->dma;
1520 *(priv->rxringtail) = *(priv->rxringtail) & ~0xfff;
1521 *(priv->rxringtail) =
1522 *(priv->rxringtail) | priv->rxbuffersize;
1523
1524 *(priv->rxringtail) =
1525 *(priv->rxringtail) | (1<<31);
1526
1527 priv->rxringtail += rx_desc_size;
1528 if (priv->rxringtail >=
1529 (priv->rxring)+(priv->rxringcount)*rx_desc_size)
1530 priv->rxringtail = priv->rxring;
1531
1532 priv->rxbuffer = (priv->rxbuffer->next);
1533 }
1534}
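/*
 * Summary of the RX status bits consumed above (annotation, not original
 * code): in dword 0, bit 31 = still owned by the NIC, bit 29 = first
 * segment, bit 28 = last segment, bit 27 = DMA failure, bit 26 = buffer
 * overflow, bit 13 = CRC error, bit 12 = ICV error, bits 20-23 = RX rate
 * and the low 12 bits = length; dword 3 packs quality, RSSI, antenna and
 * signal, and dword 4 carries the RX power byte.
 */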
1535
1536
1537static void rtl8180_dma_kick(struct net_device *dev, int priority)
1538{
1539 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1540
1541 rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
1542 write_nic_byte(dev, TX_DMA_POLLING,
1543 (1 << (priority + 1)) | priv->dma_poll_mask);
1544 rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
1545
1546 force_pci_posting(dev);
1547}
1548
1549static void rtl8180_data_hard_stop(struct net_device *dev)
1550{
1551 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1552
1553 rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
1554 priv->dma_poll_stop_mask |= TPPOLLSTOP_AC_VIQ;
1555 write_nic_byte(dev, TPPollStop, priv->dma_poll_stop_mask);
1556 rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
1557}
1558
1559static void rtl8180_data_hard_resume(struct net_device *dev)
1560{
1561 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1562
1563 rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
1564 priv->dma_poll_stop_mask &= ~(TPPOLLSTOP_AC_VIQ);
1565 write_nic_byte(dev, TPPollStop, priv->dma_poll_stop_mask);
1566 rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
1567}
1568
1569/*
 1570 * This function TXes data frames when the ieee80211 stack requires it.
 1571 * It also checks whether we need to stop the ieee tx queue and, if so, does it.
1572 */
1573static void rtl8180_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
1574 int rate)
1575{
1576 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1577 int mode;
1578 struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *)skb->data;
1579 bool morefrag = le16_to_cpu(h->frame_control) & IEEE80211_FCTL_MOREFRAGS;
1580 unsigned long flags;
1581 int priority;
1582
1583 mode = priv->ieee80211->iw_mode;
1584
1585 rate = ieeerate2rtlrate(rate);
1586 /*
 1587	 * This function doesn't require a lock because we make sure it's called
1588 * with the tx_lock already acquired.
 1589	 * This comes from the kernel's hard_xmit callback (through the ieee
 1590	 * stack), or from try_wake_queue (again through the ieee stack).
1591 */
1592 priority = AC2Q(skb->priority);
1593 spin_lock_irqsave(&priv->tx_lock, flags);
1594
1595 if (priv->ieee80211->bHwRadioOff) {
1596 spin_unlock_irqrestore(&priv->tx_lock, flags);
1597
1598 return;
1599 }
1600
1601 if (!check_nic_enought_desc(dev, priority)) {
1602 DMESGW("Error: no descriptor left by previous TX (avail %d) ",
1603 get_curr_tx_free_desc(dev, priority));
1604 ieee80211_rtl_stop_queue(priv->ieee80211);
1605 }
1606 rtl8180_tx(dev, skb->data, skb->len, priority, morefrag, 0, rate);
1607 if (!check_nic_enought_desc(dev, priority))
1608 ieee80211_rtl_stop_queue(priv->ieee80211);
1609
1610 spin_unlock_irqrestore(&priv->tx_lock, flags);
1611}
1612
1613/*
1614 * This is a rough attempt to TX a frame
1615 * This is called by the ieee 80211 stack to TX management frames.
 1616 * If the ring is full, packets are dropped (for data frames the queue
1617 * is stopped before this can happen). For this reason it is better
1618 * if the descriptors are larger than the largest management frame
 1619 * we intend to TX: I'm unsure what the HW does if it does not find
1620 * the last fragment of a frame because it has been dropped...
1621 * Since queues for Management and Data frames are different we
1622 * might use a different lock than tx_lock (for example mgmt_tx_lock)
1623 */
 1624/* this function may loop if invoked with 0 descriptors or a 0-length buffer */
1625static int rtl8180_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1626{
1627 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1628 unsigned long flags;
1629 int priority;
1630
1631 priority = MANAGE_PRIORITY;
1632
1633 spin_lock_irqsave(&priv->tx_lock, flags);
1634
1635 if (priv->ieee80211->bHwRadioOff) {
1636 spin_unlock_irqrestore(&priv->tx_lock, flags);
1637 dev_kfree_skb_any(skb);
1638 return NETDEV_TX_OK;
1639 }
1640
1641 rtl8180_tx(dev, skb->data, skb->len, priority,
1642 0, 0, ieeerate2rtlrate(priv->ieee80211->basic_rate));
1643
1644 priv->ieee80211->stats.tx_bytes += skb->len;
1645 priv->ieee80211->stats.tx_packets++;
1646 spin_unlock_irqrestore(&priv->tx_lock, flags);
1647
1648 dev_kfree_skb_any(skb);
1649 return NETDEV_TX_OK;
1650}
1651
1652static void rtl8180_prepare_beacon(struct net_device *dev)
1653{
1654 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1655 struct sk_buff *skb;
1656
1657 u16 word = read_nic_word(dev, BcnItv);
1658 word &= ~BcnItv_BcnItv; /* clear Bcn_Itv */
1659
1660 /* word |= 0x64; */
1661 word |= cpu_to_le16(priv->ieee80211->current_network.beacon_interval);
1662
1663 write_nic_word(dev, BcnItv, word);
1664
1665 skb = ieee80211_get_beacon(priv->ieee80211);
1666 if (skb) {
1667 rtl8180_tx(dev, skb->data, skb->len, BEACON_PRIORITY,
1668 0, 0, ieeerate2rtlrate(priv->ieee80211->basic_rate));
1669 dev_kfree_skb_any(skb);
1670 }
1671}
1672
1673/*
 1674 * This function does the real dirty work: it enqueues a TX command descriptor in
 1675 * the ring buffer, copies the frame into a TX buffer and kicks the NIC to start
 1676 * the DMA transfer.
1677 */
1678short rtl8180_tx(struct net_device *dev, u8 *txbuf, int len, int priority,
1679 bool morefrag, short descfrag, int rate)
1680{
1681 struct r8180_priv *priv = ieee80211_priv(dev);
1682 u32 *tail, *temp_tail;
1683 u32 *begin;
1684 u32 *buf;
1685 int i;
1686 int remain;
1687 int buflen;
1688 int count;
1689 struct buffer *buflist;
1690 struct ieee80211_hdr_3addr *frag_hdr =
1691 (struct ieee80211_hdr_3addr *)txbuf;
1692 u8 dest[ETH_ALEN];
1693 u8 bUseShortPreamble = 0;
1694 u8 bCTSEnable = 0;
1695 u8 bRTSEnable = 0;
1696 u16 Duration = 0;
1697 u16 RtsDur = 0;
1698 u16 ThisFrameTime = 0;
1699 u16 TxDescDuration = 0;
1700 bool ownbit_flag = false;
1701
1702 switch (priority) {
1703 case MANAGE_PRIORITY:
1704 tail = priv->txmapringtail;
1705 begin = priv->txmapring;
1706 buflist = priv->txmapbufstail;
1707 count = priv->txringcount;
1708 break;
1709 case BK_PRIORITY:
1710 tail = priv->txbkpringtail;
1711 begin = priv->txbkpring;
1712 buflist = priv->txbkpbufstail;
1713 count = priv->txringcount;
1714 break;
1715 case BE_PRIORITY:
1716 tail = priv->txbepringtail;
1717 begin = priv->txbepring;
1718 buflist = priv->txbepbufstail;
1719 count = priv->txringcount;
1720 break;
1721 case VI_PRIORITY:
1722 tail = priv->txvipringtail;
1723 begin = priv->txvipring;
1724 buflist = priv->txvipbufstail;
1725 count = priv->txringcount;
1726 break;
1727 case VO_PRIORITY:
1728 tail = priv->txvopringtail;
1729 begin = priv->txvopring;
1730 buflist = priv->txvopbufstail;
1731 count = priv->txringcount;
1732 break;
1733 case HI_PRIORITY:
1734 tail = priv->txhpringtail;
1735 begin = priv->txhpring;
1736 buflist = priv->txhpbufstail;
1737 count = priv->txringcount;
1738 break;
1739 case BEACON_PRIORITY:
1740 tail = priv->txbeaconringtail;
1741 begin = priv->txbeaconring;
1742 buflist = priv->txbeaconbufstail;
1743 count = priv->txbeaconcount;
1744 break;
1745 default:
1746 return -1;
1747 break;
1748 }
1749
1750 memcpy(&dest, frag_hdr->addr1, ETH_ALEN);
1751 if (is_multicast_ether_addr(dest)) {
1752 Duration = 0;
1753 RtsDur = 0;
1754 bRTSEnable = 0;
1755 bCTSEnable = 0;
1756
1757 ThisFrameTime = ComputeTxTime(len + sCrcLng,
1758 rtl8180_rate2rate(rate), 0, bUseShortPreamble);
1759 TxDescDuration = ThisFrameTime;
1760 } else { /* Unicast packet */
1761 u16 AckTime;
1762
1763 /* for Keep alive */
1764 priv->NumTxUnicast++;
1765
1766 /* Figure out ACK rate according to BSS basic rate
1767 * and Tx rate.
1768 * AckCTSLng = 14 use 1M bps send
1769 */
1770 AckTime = ComputeTxTime(14, 10, 0, 0);
1771
1772 if (((len + sCrcLng) > priv->rts) && priv->rts) { /* RTS/CTS. */
1773 u16 RtsTime, CtsTime;
1774 bRTSEnable = 1;
1775 bCTSEnable = 0;
1776
1777 /* Rate and time required for RTS. */
1778 RtsTime = ComputeTxTime(sAckCtsLng / 8,
1779 priv->ieee80211->basic_rate, 0, 0);
1780
1781 /* Rate and time required for CTS.
1782 * AckCTSLng = 14 use 1M bps send
1783 */
1784 CtsTime = ComputeTxTime(14, 10, 0, 0);
1785
1786 /* Figure out time required to transmit this frame. */
1787 ThisFrameTime = ComputeTxTime(len + sCrcLng,
1788 rtl8180_rate2rate(rate), 0,
1789 bUseShortPreamble);
1790
1791 /* RTS-CTS-ThisFrame-ACK. */
1792 RtsDur = CtsTime + ThisFrameTime +
1793 AckTime + 3 * aSifsTime;
1794
1795 TxDescDuration = RtsTime + RtsDur;
1796 } else { /* Normal case. */
1797 bCTSEnable = 0;
1798 bRTSEnable = 0;
1799 RtsDur = 0;
1800
1801 ThisFrameTime = ComputeTxTime(len + sCrcLng,
1802 rtl8180_rate2rate(rate), 0, bUseShortPreamble);
1803 TxDescDuration = ThisFrameTime + aSifsTime + AckTime;
1804 }
1805
1806 if (!(le16_to_cpu(frag_hdr->frame_control) & IEEE80211_FCTL_MOREFRAGS)) {
1807 /* ThisFrame-ACK. */
1808 Duration = aSifsTime + AckTime;
1809 } else { /* One or more fragments remained. */
1810 u16 NextFragTime;
1811
1812 /* pretend following packet length = current packet */
1813 NextFragTime = ComputeTxTime(len + sCrcLng,
1814 rtl8180_rate2rate(rate), 0, bUseShortPreamble);
1815
 1816			/* ThisFrag-ACK-NextFrag-ACK. */
1817 Duration = NextFragTime + 3 * aSifsTime + 2 * AckTime;
1818 }
1819
1820 } /* End of Unicast packet */
1821
1822 frag_hdr->duration_id = Duration;
1823
1824 buflen = priv->txbuffsize;
1825 remain = len;
1826 temp_tail = tail;
1827
1828 while (remain != 0) {
1829 mb();
1830 if (!buflist) {
1831 DMESGE("TX buffer error, cannot TX frames. pri %d.",
1832 priority);
1833 return -1;
1834 }
1835 buf = buflist->buf;
1836
1837 if ((*tail & (1 << 31)) && (priority != BEACON_PRIORITY)) {
1838 DMESGW("No more TX desc, returning %x of %x",
1839 remain, len);
1840 priv->stats.txrdu++;
1841 return remain;
1842 }
1843
1844 *tail = 0; /* zeroes header */
1845 *(tail+1) = 0;
1846 *(tail+3) = 0;
1847 *(tail+5) = 0;
1848 *(tail+6) = 0;
1849 *(tail+7) = 0;
1850
1851 /* FIXME: should be triggered by HW encryption parameters.*/
1852 *tail |= (1<<15); /* no encrypt */
1853
1854 if (remain == len && !descfrag) {
1855 ownbit_flag = false;
1856 *tail = *tail | (1 << 29); /* first segment of packet */
1857 *tail = *tail | (len);
1858 } else {
1859 ownbit_flag = true;
1860 }
1861
1862 for (i = 0; i < buflen && remain > 0; i++, remain--) {
1863 /* copy data into descriptor pointed DMAble buffer */
1864 ((u8 *)buf)[i] = txbuf[i];
1865
1866 if (remain == 4 && i+4 >= buflen)
1867 break;
1868 /* ensure the last desc has at least 4 bytes payload */
1869 }
1870 txbuf = txbuf + i;
1871 *(tail+3) = *(tail+3) & ~0xfff;
1872 *(tail+3) = *(tail+3) | i; /* buffer length */
1873
1874 if (bCTSEnable)
1875 *tail |= (1<<18);
1876
1877 if (bRTSEnable) { /* rts enable */
1878 /* RTS RATE */
1879 *tail |= (ieeerate2rtlrate(
1880 priv->ieee80211->basic_rate) << 19);
1881
1882 *tail |= (1<<23); /* rts enable */
1883 *(tail+1) |= (RtsDur&0xffff); /* RTS Duration */
1884 }
1885 *(tail+3) |= ((TxDescDuration&0xffff)<<16); /* DURATION */
1886
1887 *(tail + 5) |= (11 << 8); /* retry lim; */
1888
1889 *tail = *tail | ((rate&0xf) << 24);
1890
1891 if (morefrag)
1892 *tail = (*tail) | (1<<17); /* more fragment */
1893 if (!remain)
1894 *tail = (*tail) | (1<<28); /* last segment of frame */
1895
1896 *(tail+5) = *(tail+5)|(2<<27);
1897 *(tail+7) = *(tail+7)|(1<<4);
1898
1899 wmb();
1900 if (ownbit_flag)
1901 /* descriptor ready to be txed */
1902 *tail |= (1 << 31);
1903
1904 if ((tail - begin)/8 == count-1)
1905 tail = begin;
1906 else
1907 tail = tail+8;
1908
1909 buflist = buflist->next;
1910
1911 mb();
1912
1913 switch (priority) {
1914 case MANAGE_PRIORITY:
1915 priv->txmapringtail = tail;
1916 priv->txmapbufstail = buflist;
1917 break;
1918 case BK_PRIORITY:
1919 priv->txbkpringtail = tail;
1920 priv->txbkpbufstail = buflist;
1921 break;
1922 case BE_PRIORITY:
1923 priv->txbepringtail = tail;
1924 priv->txbepbufstail = buflist;
1925 break;
1926 case VI_PRIORITY:
1927 priv->txvipringtail = tail;
1928 priv->txvipbufstail = buflist;
1929 break;
1930 case VO_PRIORITY:
1931 priv->txvopringtail = tail;
1932 priv->txvopbufstail = buflist;
1933 break;
1934 case HI_PRIORITY:
1935 priv->txhpringtail = tail;
1936 priv->txhpbufstail = buflist;
1937 break;
1938 case BEACON_PRIORITY:
1939 /*
1940 * The HW seems to be happy with the 1st
1941 * descriptor filled and the 2nd empty...
1942 * So always update descriptor 1 and never
1943 * touch 2nd
1944 */
1945 break;
1946 }
1947 }
1948 *temp_tail = *temp_tail | (1<<31); /* descriptor ready to be txed */
1949 rtl8180_dma_kick(dev, priority);
1950
1951 return 0;
1952}
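/*
 * Summary of the TX descriptor fields written above (annotation, not
 * original code): dword 0 packs the frame length (low 12 bits), the
 * no-encrypt flag (bit 15), morefrag (bit 17), CTS enable (bit 18), the
 * RTS rate (bits 19-22), RTS enable (bit 23), the TX rate (bits 24-27),
 * last/first segment (bits 28/29) and the ownership bit (31); dword 1
 * holds the RTS duration, dword 3 the buffer length and the descriptor
 * duration, and dword 5 the retry limit.
 */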
1953
1954void rtl8180_irq_rx_tasklet(struct r8180_priv *priv);
1955
1956static void rtl8180_link_change(struct net_device *dev)
1957{
1958 struct r8180_priv *priv = ieee80211_priv(dev);
1959 u16 beacon_interval;
1960 struct ieee80211_network *net = &priv->ieee80211->current_network;
1961
1962 rtl8180_update_msr(dev);
1963
1964 rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
1965
1966 write_nic_dword(dev, BSSID, ((u32 *)net->bssid)[0]);
1967 write_nic_word(dev, BSSID+4, ((u16 *)net->bssid)[2]);
1968
1969 beacon_interval = read_nic_word(dev, BEACON_INTERVAL);
1970 beacon_interval &= ~BEACON_INTERVAL_MASK;
1971 beacon_interval |= net->beacon_interval;
1972 write_nic_word(dev, BEACON_INTERVAL, beacon_interval);
1973
1974 rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
1975
1976 rtl8180_set_chan(dev, priv->chan);
1977}
1978
1979static void rtl8180_rq_tx_ack(struct net_device *dev)
1980{
1981
1982 struct r8180_priv *priv = ieee80211_priv(dev);
1983
1984 write_nic_byte(dev, CONFIG4,
1985 read_nic_byte(dev, CONFIG4) | CONFIG4_PWRMGT);
1986 priv->ack_tx_to_ieee = 1;
1987}
1988
1989static short rtl8180_is_tx_queue_empty(struct net_device *dev)
1990{
1991
1992 struct r8180_priv *priv = ieee80211_priv(dev);
1993 u32 *d;
1994
1995 for (d = priv->txmapring;
1996 d < priv->txmapring + priv->txringcount; d += 8)
1997 if (*d & (1<<31))
1998 return 0;
1999
2000 for (d = priv->txbkpring;
2001 d < priv->txbkpring + priv->txringcount; d += 8)
2002 if (*d & (1<<31))
2003 return 0;
2004
2005 for (d = priv->txbepring;
2006 d < priv->txbepring + priv->txringcount; d += 8)
2007 if (*d & (1<<31))
2008 return 0;
2009
2010 for (d = priv->txvipring;
2011 d < priv->txvipring + priv->txringcount; d += 8)
2012 if (*d & (1<<31))
2013 return 0;
2014
2015 for (d = priv->txvopring;
2016 d < priv->txvopring + priv->txringcount; d += 8)
2017 if (*d & (1<<31))
2018 return 0;
2019
2020 for (d = priv->txhpring;
2021 d < priv->txhpring + priv->txringcount; d += 8)
2022 if (*d & (1<<31))
2023 return 0;
2024 return 1;
2025}
2026
2027static void rtl8180_hw_wakeup(struct net_device *dev)
2028{
2029 unsigned long flags;
2030 struct r8180_priv *priv = ieee80211_priv(dev);
2031
2032 spin_lock_irqsave(&priv->ps_lock, flags);
2033 write_nic_byte(dev, CONFIG4,
2034 read_nic_byte(dev, CONFIG4) & ~CONFIG4_PWRMGT);
2035 if (priv->rf_wakeup)
2036 priv->rf_wakeup(dev);
2037 spin_unlock_irqrestore(&priv->ps_lock, flags);
2038}
2039
2040static void rtl8180_hw_sleep_down(struct net_device *dev)
2041{
2042 unsigned long flags;
2043 struct r8180_priv *priv = ieee80211_priv(dev);
2044
2045 spin_lock_irqsave(&priv->ps_lock, flags);
2046 if (priv->rf_sleep)
2047 priv->rf_sleep(dev);
2048 spin_unlock_irqrestore(&priv->ps_lock, flags);
2049}
2050
2051static void rtl8180_hw_sleep(struct net_device *dev, u32 th, u32 tl)
2052{
2053 struct r8180_priv *priv = ieee80211_priv(dev);
2054 u32 rb = jiffies;
2055 unsigned long flags;
2056
2057 spin_lock_irqsave(&priv->ps_lock, flags);
2058
2059 /*
2060 * Writing 0 to the HW register disables the
2061 * timer, which is not really what we want
2062 */
2063 tl -= MSECS(4+16+7);
2064
2065 /*
2066 * If the interval in which we are requested to sleep is too
2067 * short then give up and remain awake
2068 */
2069 if (((tl >= rb) && (tl-rb) <= MSECS(MIN_SLEEP_TIME))
2070 || ((rb > tl) && (rb-tl) < MSECS(MIN_SLEEP_TIME))) {
2071 spin_unlock_irqrestore(&priv->ps_lock, flags);
2072 netdev_warn(dev, "too short to sleep\n");
2073 return;
2074 }
2075
2076 {
2077 u32 tmp = (tl > rb) ? (tl-rb) : (rb-tl);
2078
2079 priv->DozePeriodInPast2Sec += jiffies_to_msecs(tmp);
2080 /* as tl may be less than rb */
2081 queue_delayed_work(priv->ieee80211->wq,
2082 &priv->ieee80211->hw_wakeup_wq, tmp);
2083 }
2084 /*
2085 * If we suspect that TimerInt has already gone past tl
2086 * while we were setting it, then give up
2087 */
2088
2089 if (((tl > rb) && ((tl-rb) > MSECS(MAX_SLEEP_TIME))) ||
2090 ((tl < rb) && ((rb-tl) > MSECS(MAX_SLEEP_TIME)))) {
2091 spin_unlock_irqrestore(&priv->ps_lock, flags);
2092 return;
2093 }
2094
2095 queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->hw_sleep_wq);
2096 spin_unlock_irqrestore(&priv->ps_lock, flags);
2097}
2098
2099static void rtl8180_wmm_single_param_update(struct net_device *dev,
2100 u8 mode, AC_CODING eACI, PAC_PARAM param)
2101{
2102 u8 u1bAIFS;
2103 u32 u4bAcParam;
2104
2105 /* Retrieve parameters to update. */
2106 /* Mode G/A: slot time = 9; Mode B: 20 */
2107 u1bAIFS = param->f.AciAifsn.f.AIFSN * ((mode & IEEE_G) == IEEE_G ?
2108 9 : 20) + aSifsTime;
2109 u4bAcParam = (((u32)param->f.TXOPLimit << AC_PARAM_TXOP_LIMIT_OFFSET) |
2110 ((u32)param->f.Ecw.f.ECWmax << AC_PARAM_ECW_MAX_OFFSET) |
2111 ((u32)param->f.Ecw.f.ECWmin << AC_PARAM_ECW_MIN_OFFSET) |
2112 ((u32)u1bAIFS << AC_PARAM_AIFS_OFFSET));
2113
2114 switch (eACI) {
2115 case AC1_BK:
2116 write_nic_dword(dev, AC_BK_PARAM, u4bAcParam);
2117 return;
2118 case AC0_BE:
2119 write_nic_dword(dev, AC_BE_PARAM, u4bAcParam);
2120 return;
2121 case AC2_VI:
2122 write_nic_dword(dev, AC_VI_PARAM, u4bAcParam);
2123 return;
2124 case AC3_VO:
2125 write_nic_dword(dev, AC_VO_PARAM, u4bAcParam);
2126 return;
2127 default:
2128 pr_warn("SetHwReg8185(): invalid ACI: %d!\n", eACI);
2129 return;
2130 }
2131}
2132
2133static void rtl8180_wmm_param_update(struct work_struct *work)
2134{
2135 struct ieee80211_device *ieee = container_of(work,
2136 struct ieee80211_device, wmm_param_update_wq);
2137 struct net_device *dev = ieee->dev;
2138 u8 *ac_param = (u8 *)(ieee->current_network.wmm_param);
2139 u8 mode = ieee->current_network.mode;
2140 AC_CODING eACI;
2141 AC_PARAM AcParam;
2142
2143 if (!ieee->current_network.QoS_Enable) {
2144 /* legacy ac_xx_param update */
2145 AcParam.longData = 0;
2146 AcParam.f.AciAifsn.f.AIFSN = 2; /* Follow 802.11 DIFS. */
2147 AcParam.f.AciAifsn.f.ACM = 0;
2148 AcParam.f.Ecw.f.ECWmin = 3; /* Follow 802.11 CWmin. */
2149 AcParam.f.Ecw.f.ECWmax = 7; /* Follow 802.11 CWmax. */
2150 AcParam.f.TXOPLimit = 0;
2151
2152 for (eACI = 0; eACI < AC_MAX; eACI++) {
2153 AcParam.f.AciAifsn.f.ACI = (u8)eACI;
2154
2155 rtl8180_wmm_single_param_update(dev, mode, eACI,
2156 (PAC_PARAM)&AcParam);
2157 }
2158 return;
2159 }
2160
2161 for (eACI = 0; eACI < AC_MAX; eACI++) {
2162 rtl8180_wmm_single_param_update(dev, mode,
2163 ((PAC_PARAM)ac_param)->f.AciAifsn.f.ACI,
2164 (PAC_PARAM)ac_param);
2165
2166 ac_param += sizeof(AC_PARAM);
2167 }
2168}
2169
2170void rtl8180_restart_wq(struct work_struct *work);
2171void rtl8180_watch_dog_wq(struct work_struct *work);
2172void rtl8180_hw_wakeup_wq(struct work_struct *work);
2173void rtl8180_hw_sleep_wq(struct work_struct *work);
2174void rtl8180_sw_antenna_wq(struct work_struct *work);
2175void rtl8180_watch_dog(struct net_device *dev);
2176
2177static void watch_dog_adaptive(unsigned long data)
2178{
2179 struct r8180_priv *priv = ieee80211_priv((struct net_device *)data);
2180
2181 if (!priv->up) {
2182 DMESG("<----watch_dog_adaptive():driver is not up!\n");
2183 return;
2184 }
2185
2186 /* Tx High Power Mechanism. */
2187 if (CheckHighPower((struct net_device *)data))
2188 queue_work(priv->ieee80211->wq,
2189 (void *)&priv->ieee80211->tx_pw_wq);
2190
2191 /* Tx Power Tracking on 87SE. */
2192 if (CheckTxPwrTracking((struct net_device *)data))
2193 TxPwrTracking87SE((struct net_device *)data);
2194
2195 /* Perform DIG immediately. */
2196 if (CheckDig((struct net_device *)data))
2197 queue_work(priv->ieee80211->wq,
2198 (void *)&priv->ieee80211->hw_dig_wq);
2199
2200 rtl8180_watch_dog((struct net_device *)data);
2201
2202 queue_work(priv->ieee80211->wq,
2203 (void *)&priv->ieee80211->GPIOChangeRFWorkItem);
2204
2205 priv->watch_dog_timer.expires = jiffies +
2206 MSECS(IEEE80211_WATCH_DOG_TIME);
2207
2208 add_timer(&priv->watch_dog_timer);
2209}
2210
2211static struct rtl8187se_channel_list channel_plan_list[] = {
2212 /* FCC */
2213 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40,
2214 44, 48, 52, 56, 60, 64}, 19},
2215
2216 /* IC */
2217 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11},
2218
2219 /* ETSI */
2220 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40,
2221 44, 48, 52, 56, 60, 64}, 21},
2222
2223 /* Spain. Change to ETSI. */
2224 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40,
2225 44, 48, 52, 56, 60, 64}, 21},
2226
2227 /* France. Change to ETSI. */
2228 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40,
2229 44, 48, 52, 56, 60, 64}, 21},
2230
2231 /* MKK */
2232 {{14, 36, 40, 44, 48, 52, 56, 60, 64}, 9},
2233
2234 /* MKK1 */
2235 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36,
2236 40, 44, 48, 52, 56, 60, 64}, 22},
2237
2238 /* Israel. */
2239 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40,
2240 44, 48, 52, 56, 60, 64}, 21},
2241
2242 /* For 11a, TELEC */
2243 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 34, 38, 42, 46}, 17},
2244
2245 /* For Global Domain. 1-11 active, 12-14 passive. */
2246 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14},
2247
2248 /* world wide 13: ch1~ch11 active, ch12~13 passive */
2249 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}
2250};
2251
2252static void rtl8180_set_channel_map(u8 channel_plan,
2253 struct ieee80211_device *ieee)
2254{
2255 int i;
2256
2257 ieee->MinPassiveChnlNum = MAX_CHANNEL_NUMBER+1;
2258 ieee->IbssStartChnl = 0;
2259
2260 switch (channel_plan) {
2261 case COUNTRY_CODE_FCC:
2262 case COUNTRY_CODE_IC:
2263 case COUNTRY_CODE_ETSI:
2264 case COUNTRY_CODE_SPAIN:
2265 case COUNTRY_CODE_FRANCE:
2266 case COUNTRY_CODE_MKK:
2267 case COUNTRY_CODE_MKK1:
2268 case COUNTRY_CODE_ISRAEL:
2269 case COUNTRY_CODE_TELEC:
2270 {
2271 Dot11d_Init(ieee);
2272 ieee->bGlobalDomain = false;
2273 if (channel_plan_list[channel_plan].len != 0) {
2274 /* Clear old channel map */
2275 memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
2276 /* Set new channel map */
2277 for (i = 0; i < channel_plan_list[channel_plan].len; i++) {
2278 if (channel_plan_list[channel_plan].channel[i] <= 14)
2279 GET_DOT11D_INFO(ieee)->channel_map[channel_plan_list[channel_plan].channel[i]] = 1;
2280 }
2281 }
2282 break;
2283 }
2284 case COUNTRY_CODE_GLOBAL_DOMAIN:
2285 {
2286 GET_DOT11D_INFO(ieee)->bEnabled = false;
2287 Dot11d_Reset(ieee);
2288 ieee->bGlobalDomain = true;
2289 break;
2290 }
2291 case COUNTRY_CODE_WORLD_WIDE_13_INDEX:
2292 {
2293 ieee->MinPassiveChnlNum = 12;
2294 ieee->IbssStartChnl = 10;
2295 break;
2296 }
2297 default:
2298 {
2299 Dot11d_Init(ieee);
2300 ieee->bGlobalDomain = false;
2301 memset(GET_DOT11D_INFO(ieee)->channel_map, 0, sizeof(GET_DOT11D_INFO(ieee)->channel_map));
2302 for (i = 1; i <= 14; i++)
2303 GET_DOT11D_INFO(ieee)->channel_map[i] = 1;
2304 break;
2305 }
2306 }
2307}
2308
2309void GPIOChangeRFWorkItemCallBack(struct work_struct *work);
2310
2311static void rtl8180_statistics_init(struct stats *pstats)
2312{
2313 memset(pstats, 0, sizeof(struct stats));
2314}
2315
2316static void rtl8180_link_detect_init(struct link_detect_t *plink_detect)
2317{
2318 memset(plink_detect, 0, sizeof(struct link_detect_t));
2319 plink_detect->slot_num = DEFAULT_SLOT_NUM;
2320}
2321
2322static void rtl8187se_eeprom_register_read(struct eeprom_93cx6 *eeprom)
2323{
2324 struct net_device *dev = eeprom->data;
2325 u8 reg = read_nic_byte(dev, EPROM_CMD);
2326
2327 eeprom->reg_data_in = reg & RTL818X_EEPROM_CMD_WRITE;
2328 eeprom->reg_data_out = reg & RTL818X_EEPROM_CMD_READ;
2329 eeprom->reg_data_clock = reg & RTL818X_EEPROM_CMD_CK;
2330 eeprom->reg_chip_select = reg & RTL818X_EEPROM_CMD_CS;
2331}
2332
2333static void rtl8187se_eeprom_register_write(struct eeprom_93cx6 *eeprom)
2334{
2335 struct net_device *dev = eeprom->data;
2336 u8 reg = 2 << 6;
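	/* 2 << 6 selects the EEPROM programming mode in the EPROM_CMD operating-mode bits [7:6]. */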
2337
2338 if (eeprom->reg_data_in)
2339 reg |= RTL818X_EEPROM_CMD_WRITE;
2340 if (eeprom->reg_data_out)
2341 reg |= RTL818X_EEPROM_CMD_READ;
2342 if (eeprom->reg_data_clock)
2343 reg |= RTL818X_EEPROM_CMD_CK;
2344 if (eeprom->reg_chip_select)
2345 reg |= RTL818X_EEPROM_CMD_CS;
2346
2347 write_nic_byte(dev, EPROM_CMD, reg);
2348 read_nic_byte(dev, EPROM_CMD);
2349 udelay(10);
2350}
2351
2352static short rtl8180_init(struct net_device *dev)
2353{
2354 struct r8180_priv *priv = ieee80211_priv(dev);
2355 u16 word;
2356 u16 usValue;
2357 u16 tmpu16;
2358 int i, j;
2359 struct eeprom_93cx6 eeprom;
2360 u16 eeprom_val;
2361
2362 eeprom.data = dev;
2363 eeprom.register_read = rtl8187se_eeprom_register_read;
2364 eeprom.register_write = rtl8187se_eeprom_register_write;
2365 eeprom.width = PCI_EEPROM_WIDTH_93C46;
2366
2367 eeprom_93cx6_read(&eeprom, EEPROM_COUNTRY_CODE>>1, &eeprom_val);
2368 priv->channel_plan = eeprom_val & 0xFF;
2369 if (priv->channel_plan > COUNTRY_CODE_GLOBAL_DOMAIN) {
2370 netdev_err(dev, "rtl8180_init: Invalid channel plan! Set to default.\n");
2371 priv->channel_plan = 0;
2372 }
2373
2374 DMESG("Channel plan is %d\n", priv->channel_plan);
2375 rtl8180_set_channel_map(priv->channel_plan, priv->ieee80211);
2376
2377 /* FIXME: these constants are placed in a bad place. */
2378 priv->txbuffsize = 2048; /* 1024; */
2379 priv->txringcount = 32; /* 32; */
2380 priv->rxbuffersize = 2048; /* 1024; */
2381 priv->rxringcount = 64; /* 32; */
2382 priv->txbeaconcount = 2;
2383 priv->rx_skb_complete = 1;
2384
2385 priv->RFChangeInProgress = false;
2386 priv->SetRFPowerStateInProgress = false;
2387 priv->RFProgType = 0;
2388
2389 priv->irq_enabled = 0;
2390
2391 rtl8180_statistics_init(&priv->stats);
2392 rtl8180_link_detect_init(&priv->link_detect);
2393
2394 priv->ack_tx_to_ieee = 0;
2395 priv->ieee80211->current_network.beacon_interval =
2396 DEFAULT_BEACONINTERVAL;
2397 priv->ieee80211->iw_mode = IW_MODE_INFRA;
2398 priv->ieee80211->softmac_features = IEEE_SOFTMAC_SCAN |
2399 IEEE_SOFTMAC_ASSOCIATE | IEEE_SOFTMAC_PROBERQ |
2400 IEEE_SOFTMAC_PROBERS | IEEE_SOFTMAC_TX_QUEUE;
2401 priv->ieee80211->active_scan = 1;
2402 priv->ieee80211->rate = 110; /* 11 Mbps */
2403 priv->ieee80211->modulation = IEEE80211_CCK_MODULATION;
2404 priv->ieee80211->host_encrypt = 1;
2405 priv->ieee80211->host_decrypt = 1;
2406 priv->ieee80211->sta_wake_up = rtl8180_hw_wakeup;
2407 priv->ieee80211->ps_request_tx_ack = rtl8180_rq_tx_ack;
2408 priv->ieee80211->enter_sleep_state = rtl8180_hw_sleep;
2409 priv->ieee80211->ps_is_queue_empty = rtl8180_is_tx_queue_empty;
2410
2411 priv->hw_wep = hwwep;
2412 priv->dev = dev;
2413 priv->retry_rts = DEFAULT_RETRY_RTS;
2414 priv->retry_data = DEFAULT_RETRY_DATA;
2415 priv->RFChangeInProgress = false;
2416 priv->SetRFPowerStateInProgress = false;
2417 priv->RFProgType = 0;
2418 priv->bInactivePs = true; /* false; */
2419 priv->ieee80211->bInactivePs = priv->bInactivePs;
2420 priv->bSwRfProcessing = false;
2421 priv->eRFPowerState = RF_OFF;
2422 priv->RfOffReason = 0;
2423 priv->led_strategy = SW_LED_MODE0;
2424 priv->TxPollingTimes = 0;
2425 priv->bLeisurePs = true;
2426 priv->dot11PowerSaveMode = ACTIVE;
2427 priv->AdMinCheckPeriod = 5;
2428 priv->AdMaxCheckPeriod = 10;
2429 priv->AdMaxRxSsThreshold = 30; /* 60->30 */
2430 priv->AdRxSsThreshold = 20; /* 50->20 */
2431 priv->AdCheckPeriod = priv->AdMinCheckPeriod;
2432 priv->AdTickCount = 0;
2433 priv->AdRxSignalStrength = -1;
2434 priv->RegSwAntennaDiversityMechanism = 0;
2435 priv->RegDefaultAntenna = 0;
2436 priv->SignalStrength = 0;
2437 priv->AdRxOkCnt = 0;
2438 priv->CurrAntennaIndex = 0;
2439 priv->AdRxSsBeforeSwitched = 0;
2440 init_timer(&priv->SwAntennaDiversityTimer);
2441 priv->SwAntennaDiversityTimer.data = (unsigned long)dev;
2442 priv->SwAntennaDiversityTimer.function =
2443 (void *)SwAntennaDiversityTimerCallback;
2444 priv->bDigMechanism = true;
2445 priv->InitialGain = 6;
2446 priv->bXtalCalibration = false;
2447 priv->XtalCal_Xin = 0;
2448 priv->XtalCal_Xout = 0;
2449 priv->bTxPowerTrack = false;
2450 priv->ThermalMeter = 0;
2451 priv->FalseAlarmRegValue = 0;
2452 priv->RegDigOfdmFaUpTh = 0xc; /* Upper threshold of OFDM false alarm,
2453 which is used in DIG. */
2454 priv->DIG_NumberFallbackVote = 0;
2455 priv->DIG_NumberUpgradeVote = 0;
2456 priv->LastSignalStrengthInPercent = 0;
2457 priv->Stats_SignalStrength = 0;
2458 priv->LastRxPktAntenna = 0;
2459 priv->SignalQuality = 0; /* in 0-100 index. */
2460 priv->Stats_SignalQuality = 0;
2461 priv->RecvSignalPower = 0; /* in dBm. */
2462 priv->Stats_RecvSignalPower = 0;
2463 priv->AdMainAntennaRxOkCnt = 0;
2464 priv->AdAuxAntennaRxOkCnt = 0;
2465 priv->bHWAdSwitched = false;
2466 priv->bRegHighPowerMechanism = true;
2467 priv->RegHiPwrUpperTh = 77;
2468 priv->RegHiPwrLowerTh = 75;
2469 priv->RegRSSIHiPwrUpperTh = 70;
2470 priv->RegRSSIHiPwrLowerTh = 20;
2471 priv->bCurCCKPkt = false;
2472 priv->UndecoratedSmoothedSS = -1;
2473 priv->bToUpdateTxPwr = false;
2474 priv->CurCCKRSSI = 0;
2475 priv->RxPower = 0;
2476 priv->RSSI = 0;
2477 priv->NumTxOkTotal = 0;
2478 priv->NumTxUnicast = 0;
2479 priv->keepAliveLevel = DEFAULT_KEEP_ALIVE_LEVEL;
2480 priv->CurrRetryCnt = 0;
2481 priv->LastRetryCnt = 0;
2482 priv->LastTxokCnt = 0;
2483 priv->LastRxokCnt = 0;
2484 priv->LastRetryRate = 0;
2485 priv->bTryuping = 0;
2486 priv->CurrTxRate = 0;
2487 priv->CurrRetryRate = 0;
2488 priv->TryupingCount = 0;
2489 priv->TryupingCountNoData = 0;
2490 priv->TryDownCountLowData = 0;
2491 priv->LastTxOKBytes = 0;
2492 priv->LastFailTxRate = 0;
2493 priv->LastFailTxRateSS = 0;
2494 priv->FailTxRateCount = 0;
2495 priv->LastTxThroughput = 0;
2496 priv->NumTxOkBytesTotal = 0;
2497 priv->ForcedDataRate = 0;
2498 priv->RegBModeGainStage = 1;
2499
2500 priv->promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
2501 spin_lock_init(&priv->irq_th_lock);
2502 spin_lock_init(&priv->tx_lock);
2503 spin_lock_init(&priv->ps_lock);
2504 spin_lock_init(&priv->rf_ps_lock);
2505 sema_init(&priv->wx_sem, 1);
2506 INIT_WORK(&priv->reset_wq, (void *)rtl8180_restart_wq);
2507 INIT_DELAYED_WORK(&priv->ieee80211->hw_wakeup_wq,
2508 (void *)rtl8180_hw_wakeup_wq);
2509 INIT_DELAYED_WORK(&priv->ieee80211->hw_sleep_wq,
2510 (void *)rtl8180_hw_sleep_wq);
2511 INIT_WORK(&priv->ieee80211->wmm_param_update_wq,
2512 (void *)rtl8180_wmm_param_update);
2513 INIT_DELAYED_WORK(&priv->ieee80211->rate_adapter_wq,
2514 (void *)rtl8180_rate_adapter);
2515 INIT_DELAYED_WORK(&priv->ieee80211->hw_dig_wq,
2516 (void *)rtl8180_hw_dig_wq);
2517 INIT_DELAYED_WORK(&priv->ieee80211->tx_pw_wq,
2518 (void *)rtl8180_tx_pw_wq);
2519 INIT_DELAYED_WORK(&priv->ieee80211->GPIOChangeRFWorkItem,
2520 (void *) GPIOChangeRFWorkItemCallBack);
2521 tasklet_init(&priv->irq_rx_tasklet,
2522 (void(*)(unsigned long)) rtl8180_irq_rx_tasklet,
2523 (unsigned long)priv);
2524
2525 init_timer(&priv->watch_dog_timer);
2526 priv->watch_dog_timer.data = (unsigned long)dev;
2527 priv->watch_dog_timer.function = watch_dog_adaptive;
2528
2529 init_timer(&priv->rateadapter_timer);
2530 priv->rateadapter_timer.data = (unsigned long)dev;
2531 priv->rateadapter_timer.function = timer_rate_adaptive;
2532 priv->RateAdaptivePeriod = RATE_ADAPTIVE_TIMER_PERIOD;
2533 priv->bEnhanceTxPwr = false;
2534
2535 priv->ieee80211->softmac_hard_start_xmit = rtl8180_hard_start_xmit;
2536 priv->ieee80211->set_chan = rtl8180_set_chan;
2537 priv->ieee80211->link_change = rtl8180_link_change;
2538 priv->ieee80211->softmac_data_hard_start_xmit = rtl8180_hard_data_xmit;
2539 priv->ieee80211->data_hard_stop = rtl8180_data_hard_stop;
2540 priv->ieee80211->data_hard_resume = rtl8180_data_hard_resume;
2541
2542 priv->ieee80211->init_wmmparam_flag = 0;
2543
2544 priv->ieee80211->start_send_beacons = rtl8180_start_tx_beacon;
2545 priv->ieee80211->stop_send_beacons = rtl8180_beacon_tx_disable;
2546 priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD;
2547
2548 priv->ShortRetryLimit = 7;
2549 priv->LongRetryLimit = 7;
2550 priv->EarlyRxThreshold = 7;
2551
2552 priv->TransmitConfig = (1<<TCR_DurProcMode_OFFSET) |
2553 (7<<TCR_MXDMA_OFFSET) |
2554 (priv->ShortRetryLimit<<TCR_SRL_OFFSET) |
2555 (priv->LongRetryLimit<<TCR_LRL_OFFSET);
2556
2557 priv->ReceiveConfig = RCR_AMF | RCR_ADF | RCR_ACF |
2558 RCR_AB | RCR_AM | RCR_APM |
2559 (7<<RCR_MXDMA_OFFSET) |
2560 (priv->EarlyRxThreshold<<RCR_FIFO_OFFSET) |
2561 (priv->EarlyRxThreshold == 7 ?
2562 RCR_ONLYERLPKT : 0);
2563
2564 priv->IntrMask = IMR_TMGDOK | IMR_TBDER |
2565 IMR_THPDER | IMR_THPDOK |
2566 IMR_TVODER | IMR_TVODOK |
2567 IMR_TVIDER | IMR_TVIDOK |
2568 IMR_TBEDER | IMR_TBEDOK |
2569 IMR_TBKDER | IMR_TBKDOK |
2570 IMR_RDU |
2571 IMR_RER | IMR_ROK |
2572 IMR_RQoSOK;
2573
2574 priv->InitialGain = 6;
2575
2576 DMESG("MAC controller is a RTL8187SE b/g");
2577
2578 priv->ieee80211->modulation |= IEEE80211_OFDM_MODULATION;
2579 priv->ieee80211->short_slot = 1;
2580
2581 eeprom_93cx6_read(&eeprom, EEPROM_SW_REVD_OFFSET, &usValue);
2582 DMESG("usValue is %#hx\n", usValue);
2583 /* 3Read AntennaDiversity */
2584
2585 /* SW Antenna Diversity. */
2586 priv->EEPROMSwAntennaDiversity = (usValue & EEPROM_SW_AD_MASK) ==
2587 EEPROM_SW_AD_ENABLE;
2588
2589 /* Default Antenna to use. */
2590 priv->EEPROMDefaultAntenna1 = (usValue & EEPROM_DEF_ANT_MASK) ==
2591 EEPROM_DEF_ANT_1;
2592
2593 if (priv->RegSwAntennaDiversityMechanism == 0) /* Auto */
2594 /* 0: default from EEPROM. */
2595 priv->bSwAntennaDiverity = priv->EEPROMSwAntennaDiversity;
2596 else
2597 /* 1:disable antenna diversity, 2: enable antenna diversity. */
2598 priv->bSwAntennaDiverity =
2599 priv->RegSwAntennaDiversityMechanism == 2;
2600
2601 if (priv->RegDefaultAntenna == 0)
2602 /* 0: default from EEPROM. */
2603 priv->bDefaultAntenna1 = priv->EEPROMDefaultAntenna1;
2604 else
2605 /* 1: main, 2: aux. */
2606 priv->bDefaultAntenna1 = priv->RegDefaultAntenna == 2;
2607
2608 priv->plcp_preamble_mode = 2;
2609 /* the eeprom type is stored in RCR register bit #6 */
2610 if (RCR_9356SEL & read_nic_dword(dev, RCR))
2611 priv->epromtype = EPROM_93c56;
2612 else
2613 priv->epromtype = EPROM_93c46;
2614
2615 eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)
2616 dev->dev_addr, 3);
2617
2618 for (i = 1, j = 0; i < 14; i += 2, j++) {
2619 eeprom_93cx6_read(&eeprom, EPROM_TXPW_CH1_2 + j, &word);
2620 priv->chtxpwr[i] = word & 0xff;
2621 priv->chtxpwr[i+1] = (word & 0xff00)>>8;
2622 }
2623 for (i = 1, j = 0; i < 14; i += 2, j++) {
2624 eeprom_93cx6_read(&eeprom, EPROM_TXPW_OFDM_CH1_2 + j, &word);
2625 priv->chtxpwr_ofdm[i] = word & 0xff;
2626 priv->chtxpwr_ofdm[i+1] = (word & 0xff00) >> 8;
2627 }
2628
2629 /* 3Read crystal calibration and thermal meter indication on 87SE. */
2630 eeprom_93cx6_read(&eeprom, EEPROM_RSV>>1, &tmpu16);
2631
2632 /* Crystal calibration for Xin and Xout resp. */
2633 priv->XtalCal_Xout = tmpu16 & EEPROM_XTAL_CAL_XOUT_MASK;
2634 priv->XtalCal_Xin = (tmpu16 & EEPROM_XTAL_CAL_XIN_MASK) >> 4;
2635 if ((tmpu16 & EEPROM_XTAL_CAL_ENABLE) >> 12)
2636 priv->bXtalCalibration = true;
2637
2638 /* Thermal meter reference indication. */
2639 priv->ThermalMeter = (u8)((tmpu16 & EEPROM_THERMAL_METER_MASK) >> 8);
2640 if ((tmpu16 & EEPROM_THERMAL_METER_ENABLE) >> 13)
2641 priv->bTxPowerTrack = true;
2642
2643 priv->rf_sleep = rtl8225z4_rf_sleep;
2644 priv->rf_wakeup = rtl8225z4_rf_wakeup;
2645 DMESGW("**PLEASE** REPORT SUCCESSFUL/UNSUCCESSFUL TO Realtek!");
2646
2647 priv->rf_close = rtl8225z2_rf_close;
2648 priv->rf_init = rtl8225z2_rf_init;
2649 priv->rf_set_chan = rtl8225z2_rf_set_chan;
2650 priv->rf_set_sens = NULL;
2651
2652 if (0 != alloc_rx_desc_ring(dev, priv->rxbuffersize, priv->rxringcount))
2653 return -ENOMEM;
2654
2655 if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
2656 TX_MANAGEPRIORITY_RING_ADDR))
2657 return -ENOMEM;
2658
2659 if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
2660 TX_BKPRIORITY_RING_ADDR))
2661 return -ENOMEM;
2662
2663 if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
2664 TX_BEPRIORITY_RING_ADDR))
2665 return -ENOMEM;
2666
2667 if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
2668 TX_VIPRIORITY_RING_ADDR))
2669 return -ENOMEM;
2670
2671 if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
2672 TX_VOPRIORITY_RING_ADDR))
2673 return -ENOMEM;
2674
2675 if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txringcount,
2676 TX_HIGHPRIORITY_RING_ADDR))
2677 return -ENOMEM;
2678
2679 if (0 != alloc_tx_desc_ring(dev, priv->txbuffsize, priv->txbeaconcount,
2680 TX_BEACON_RING_ADDR))
2681 return -ENOMEM;
2682
2683 if (request_irq(dev->irq, rtl8180_interrupt,
2684 IRQF_SHARED, dev->name, dev)) {
2685 DMESGE("Error allocating IRQ %d", dev->irq);
2686 return -1;
2687 } else {
2688 priv->irq = dev->irq;
2689 DMESG("IRQ %d", dev->irq);
2690 }
2691
2692 return 0;
2693}
2694
2695void rtl8180_no_hw_wep(struct net_device *dev)
2696{
2697}
2698
2699void rtl8180_set_hw_wep(struct net_device *dev)
2700{
2701 struct r8180_priv *priv = ieee80211_priv(dev);
2702 u8 pgreg;
2703 u8 security;
2704 u32 key0_word4;
2705
2706 pgreg = read_nic_byte(dev, PGSELECT);
2707 write_nic_byte(dev, PGSELECT, pgreg & ~(1<<PGSELECT_PG_SHIFT));
2708
2709 key0_word4 = read_nic_dword(dev, KEY0+4+4+4);
2710 key0_word4 &= ~0xff;
2711 key0_word4 |= priv->key0[3] & 0xff;
2712 write_nic_dword(dev, KEY0, (priv->key0[0]));
2713 write_nic_dword(dev, KEY0+4, (priv->key0[1]));
2714 write_nic_dword(dev, KEY0+4+4, (priv->key0[2]));
2715 write_nic_dword(dev, KEY0+4+4+4, (key0_word4));
2716
2717 security = read_nic_byte(dev, SECURITY);
2718 security |= (1<<SECURITY_WEP_TX_ENABLE_SHIFT);
2719 security |= (1<<SECURITY_WEP_RX_ENABLE_SHIFT);
2720 security &= ~SECURITY_ENCRYP_MASK;
2721 security |= (SECURITY_ENCRYP_104<<SECURITY_ENCRYP_SHIFT);
2722
2723 write_nic_byte(dev, SECURITY, security);
2724
2725 DMESG("key %x %x %x %x", read_nic_dword(dev, KEY0+4+4+4),
2726 read_nic_dword(dev, KEY0+4+4), read_nic_dword(dev, KEY0+4),
2727 read_nic_dword(dev, KEY0));
2728}
2729
2730
2731void rtl8185_rf_pins_enable(struct net_device *dev)
2732{
2733 write_nic_word(dev, RFPinsEnable, 0x1fff); /* | tmp); */
2734}
2735
2736void rtl8185_set_anaparam2(struct net_device *dev, u32 a)
2737{
2738 u8 conf3;
2739
2740 rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
2741
2742 conf3 = read_nic_byte(dev, CONFIG3);
2743 write_nic_byte(dev, CONFIG3, conf3 | (1<<CONFIG3_ANAPARAM_W_SHIFT));
2744 write_nic_dword(dev, ANAPARAM2, a);
2745
2746 conf3 = read_nic_byte(dev, CONFIG3);
2747 write_nic_byte(dev, CONFIG3, conf3 & ~(1<<CONFIG3_ANAPARAM_W_SHIFT));
2748 rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
2749}
2750
2751void rtl8180_set_anaparam(struct net_device *dev, u32 a)
2752{
2753 u8 conf3;
2754
2755 rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
2756
2757 conf3 = read_nic_byte(dev, CONFIG3);
2758 write_nic_byte(dev, CONFIG3, conf3 | (1<<CONFIG3_ANAPARAM_W_SHIFT));
2759 write_nic_dword(dev, ANAPARAM, a);
2760
2761 conf3 = read_nic_byte(dev, CONFIG3);
2762 write_nic_byte(dev, CONFIG3, conf3 & ~(1<<CONFIG3_ANAPARAM_W_SHIFT));
2763 rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
2764}
2765
2766void rtl8185_tx_antenna(struct net_device *dev, u8 ant)
2767{
2768 write_nic_byte(dev, TX_ANTENNA, ant);
2769 force_pci_posting(dev);
2770 mdelay(1);
2771}
2772
2773static void rtl8185_write_phy(struct net_device *dev, u8 adr, u32 data)
2774{
2775 u32 phyw;
2776
2777 adr |= 0x80;
2778
2779 phyw = ((data<<8) | adr);
2780
2781 /* Note: we must write 0xff7c after 0x7d-0x7f to write BB register. */
2782 write_nic_byte(dev, 0x7f, ((phyw & 0xff000000) >> 24));
2783 write_nic_byte(dev, 0x7e, ((phyw & 0x00ff0000) >> 16));
2784 write_nic_byte(dev, 0x7d, ((phyw & 0x0000ff00) >> 8));
2785 write_nic_byte(dev, 0x7c, ((phyw & 0x000000ff)));
2786}
2787
2788inline void write_phy_ofdm(struct net_device *dev, u8 adr, u32 data)
2789{
2790 data = data & 0xff;
2791 rtl8185_write_phy(dev, adr, data);
2792}
2793
2794void write_phy_cck(struct net_device *dev, u8 adr, u32 data)
2795{
2796 data = data & 0xff;
2797 rtl8185_write_phy(dev, adr, data | 0x10000);
2798}
2799
2800/*
2801 * This configures registers for beacon tx and enables it via
2802 * rtl8180_beacon_tx_enable(). rtl8180_beacon_tx_disable() might
2803 * be used to stop beacon transmission
2804 */
2805void rtl8180_start_tx_beacon(struct net_device *dev)
2806{
2807 u16 word;
2808
2809 DMESG("Enabling beacon TX");
2810 rtl8180_prepare_beacon(dev);
2811 rtl8180_irq_disable(dev);
2812 rtl8180_beacon_tx_enable(dev);
2813
2814 word = read_nic_word(dev, AtimWnd) & ~AtimWnd_AtimWnd;
2815 write_nic_word(dev, AtimWnd, word); /* word |= */
2816
2817 word = read_nic_word(dev, BintrItv);
2818 word &= ~BintrItv_BintrItv;
2819 word |= 1000; /* priv->ieee80211->current_network.beacon_interval *
2820 * ((priv->txbeaconcount > 1)?(priv->txbeaconcount-1):1);
2821 * FIXME: check if correct ^^ worked with 0x3e8;
2822 */
2823 write_nic_word(dev, BintrItv, word);
2824
2825 rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
2826
2827 rtl8185b_irq_enable(dev);
2828}
2829
2830static struct net_device_stats *rtl8180_stats(struct net_device *dev)
2831{
2832 struct r8180_priv *priv = ieee80211_priv(dev);
2833
2834 return &priv->ieee80211->stats;
2835}
2836
2837/*
2838 * Set the 802.11 power save mode.
2839 */
2840static bool MgntActSet_802_11_PowerSaveMode(struct r8180_priv *priv,
2841 enum rt_ps_mode rtPsMode)
2842{
2843 /* Currently, we do not change power save mode on IBSS mode. */
2844 if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
2845 return false;
2846
2847 priv->ieee80211->ps = rtPsMode;
2848
2849 return true;
2850}
2851
2852static void LeisurePSEnter(struct r8180_priv *priv)
2853{
2854 if (priv->bLeisurePs)
2855 if (priv->ieee80211->ps == IEEE80211_PS_DISABLED)
2856 /* IEEE80211_PS_ENABLE */
2857 MgntActSet_802_11_PowerSaveMode(priv,
2858 IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST);
2859}
2860
2861static void LeisurePSLeave(struct r8180_priv *priv)
2862{
2863 if (priv->bLeisurePs)
2864 if (priv->ieee80211->ps != IEEE80211_PS_DISABLED)
2865 MgntActSet_802_11_PowerSaveMode(
2866 priv, IEEE80211_PS_DISABLED);
2867}
2868
2869void rtl8180_hw_wakeup_wq(struct work_struct *work)
2870{
2871 struct delayed_work *dwork = to_delayed_work(work);
2872 struct ieee80211_device *ieee = container_of(
2873 dwork, struct ieee80211_device, hw_wakeup_wq);
2874 struct net_device *dev = ieee->dev;
2875
2876 rtl8180_hw_wakeup(dev);
2877}
2878
2879void rtl8180_hw_sleep_wq(struct work_struct *work)
2880{
2881 struct delayed_work *dwork = to_delayed_work(work);
2882 struct ieee80211_device *ieee = container_of(
2883 dwork, struct ieee80211_device, hw_sleep_wq);
2884 struct net_device *dev = ieee->dev;
2885
2886 rtl8180_hw_sleep_down(dev);
2887}
2888
2889static void MgntLinkKeepAlive(struct r8180_priv *priv)
2890{
2891 if (priv->keepAliveLevel == 0)
2892 return;
2893
2894 if (priv->ieee80211->state == IEEE80211_LINKED) {
2895 /*
2896 * Keep-Alive.
2897 */
2898
2899 if ((priv->keepAliveLevel == 2) ||
2900 (priv->link_detect.last_num_tx_unicast ==
2901 priv->NumTxUnicast &&
2902 priv->link_detect.last_num_rx_unicast ==
2903 priv->ieee80211->NumRxUnicast)
2904 ) {
2905 priv->link_detect.idle_count++;
2906
2907 /*
2908 * Send a Keep-Alive packet to the AP if we had
2909 * been idle for a while.
2910 */
2911 if (priv->link_detect.idle_count >=
2912 KEEP_ALIVE_INTERVAL /
2913 CHECK_FOR_HANG_PERIOD - 1) {
2914 priv->link_detect.idle_count = 0;
2915 ieee80211_sta_ps_send_null_frame(
2916 priv->ieee80211, false);
2917 }
2918 } else {
2919 priv->link_detect.idle_count = 0;
2920 }
2921 priv->link_detect.last_num_tx_unicast = priv->NumTxUnicast;
2922 priv->link_detect.last_num_rx_unicast =
2923 priv->ieee80211->NumRxUnicast;
2924 }
2925}
2926
2927void rtl8180_watch_dog(struct net_device *dev)
2928{
2929 struct r8180_priv *priv = ieee80211_priv(dev);
2930 bool bEnterPS = false;
2931 bool bBusyTraffic = false;
2932 u32 TotalRxNum = 0;
2933 u16 SlotIndex = 0;
2934 u16 i = 0;
2935 if (priv->ieee80211->actscanning == false) {
2936 if ((priv->ieee80211->iw_mode != IW_MODE_ADHOC) &&
2937 (priv->ieee80211->state == IEEE80211_NOLINK) &&
2938 (priv->ieee80211->beinretry == false) &&
2939 (priv->eRFPowerState == RF_ON))
2940 IPSEnter(dev);
2941 }
2942 if ((priv->ieee80211->state == IEEE80211_LINKED) &&
2943 (priv->ieee80211->iw_mode == IW_MODE_INFRA)) {
2944 SlotIndex = (priv->link_detect.slot_index++) %
2945 priv->link_detect.slot_num;
2946
2947 priv->link_detect.rx_frame_num[SlotIndex] =
2948 priv->ieee80211->NumRxDataInPeriod +
2949 priv->ieee80211->NumRxBcnInPeriod;
2950
2951 for (i = 0; i < priv->link_detect.slot_num; i++)
2952 TotalRxNum += priv->link_detect.rx_frame_num[i];
2953
2954 if (TotalRxNum == 0) {
2955 priv->ieee80211->state = IEEE80211_ASSOCIATING;
2956 queue_work(priv->ieee80211->wq,
2957 &priv->ieee80211->associate_procedure_wq);
2958 }
2959 }
2960
2961 MgntLinkKeepAlive(priv);
2962
2963 LeisurePSLeave(priv);
2964
2965 if (priv->ieee80211->state == IEEE80211_LINKED) {
2966 priv->link_detect.num_rx_ok_in_period =
2967 priv->ieee80211->NumRxDataInPeriod;
2968 if (priv->link_detect.num_rx_ok_in_period > 666 ||
2969 priv->link_detect.num_tx_ok_in_period > 666) {
2970 bBusyTraffic = true;
2971 }
2972 if ((priv->link_detect.num_rx_ok_in_period +
2973 priv->link_detect.num_tx_ok_in_period > 8)
2974 || (priv->link_detect.num_rx_ok_in_period > 2)) {
2975 bEnterPS = false;
2976 } else
2977 bEnterPS = true;
2978
2979 if (bEnterPS)
2980 LeisurePSEnter(priv);
2981 else
2982 LeisurePSLeave(priv);
2983 } else
2984 LeisurePSLeave(priv);
2985 priv->link_detect.b_busy_traffic = bBusyTraffic;
2986 priv->link_detect.num_rx_ok_in_period = 0;
2987 priv->link_detect.num_tx_ok_in_period = 0;
2988 priv->ieee80211->NumRxDataInPeriod = 0;
2989 priv->ieee80211->NumRxBcnInPeriod = 0;
2990}
2991
2992static int _rtl8180_up(struct net_device *dev)
2993{
2994 struct r8180_priv *priv = ieee80211_priv(dev);
2995
2996 priv->up = 1;
2997
2998 DMESG("Bringing up iface");
2999 rtl8185b_adapter_start(dev);
3000 rtl8185b_rx_enable(dev);
3001 rtl8185b_tx_enable(dev);
3002 if (priv->bInactivePs) {
3003 if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
3004 IPSLeave(dev);
3005 }
3006 timer_rate_adaptive((unsigned long)dev);
3007 watch_dog_adaptive((unsigned long)dev);
3008 if (priv->bSwAntennaDiverity)
3009 SwAntennaDiversityTimerCallback(dev);
3010 ieee80211_softmac_start_protocol(priv->ieee80211);
3011 return 0;
3012}
3013
3014static int rtl8180_open(struct net_device *dev)
3015{
3016 struct r8180_priv *priv = ieee80211_priv(dev);
3017 int ret;
3018
3019 down(&priv->wx_sem);
3020 ret = rtl8180_up(dev);
3021 up(&priv->wx_sem);
3022 return ret;
3023}
3024
3025int rtl8180_up(struct net_device *dev)
3026{
3027 struct r8180_priv *priv = ieee80211_priv(dev);
3028
3029 if (priv->up == 1)
3030 return -1;
3031
3032 return _rtl8180_up(dev);
3033}
3034
3035static int rtl8180_close(struct net_device *dev)
3036{
3037 struct r8180_priv *priv = ieee80211_priv(dev);
3038 int ret;
3039
3040 down(&priv->wx_sem);
3041 ret = rtl8180_down(dev);
3042 up(&priv->wx_sem);
3043
3044 return ret;
3045}
3046
3047int rtl8180_down(struct net_device *dev)
3048{
3049 struct r8180_priv *priv = ieee80211_priv(dev);
3050
3051 if (priv->up == 0)
3052 return -1;
3053
3054 priv->up = 0;
3055
3056 ieee80211_softmac_stop_protocol(priv->ieee80211);
3057 /* FIXME */
3058 if (!netif_queue_stopped(dev))
3059 netif_stop_queue(dev);
3060 rtl8180_rtx_disable(dev);
3061 rtl8180_irq_disable(dev);
3062 del_timer_sync(&priv->watch_dog_timer);
3063 del_timer_sync(&priv->rateadapter_timer);
3064 cancel_delayed_work(&priv->ieee80211->rate_adapter_wq);
3065 cancel_delayed_work(&priv->ieee80211->hw_wakeup_wq);
3066 cancel_delayed_work(&priv->ieee80211->hw_sleep_wq);
3067 cancel_delayed_work(&priv->ieee80211->hw_dig_wq);
3068 cancel_delayed_work(&priv->ieee80211->tx_pw_wq);
3069 del_timer_sync(&priv->SwAntennaDiversityTimer);
3070 SetZebraRFPowerState8185(dev, RF_OFF);
3071 memset(&priv->ieee80211->current_network,
3072 0, sizeof(struct ieee80211_network));
3073 priv->ieee80211->state = IEEE80211_NOLINK;
3074 return 0;
3075}
3076
3077void rtl8180_restart_wq(struct work_struct *work)
3078{
3079 struct r8180_priv *priv = container_of(
3080 work, struct r8180_priv, reset_wq);
3081 struct net_device *dev = priv->dev;
3082
3083 down(&priv->wx_sem);
3084
3085 rtl8180_commit(dev);
3086
3087 up(&priv->wx_sem);
3088}
3089
3090static void rtl8180_restart(struct net_device *dev)
3091{
3092 struct r8180_priv *priv = ieee80211_priv(dev);
3093
3094 schedule_work(&priv->reset_wq);
3095}
3096
3097void rtl8180_commit(struct net_device *dev)
3098{
3099 struct r8180_priv *priv = ieee80211_priv(dev);
3100
3101 if (priv->up == 0)
3102 return;
3103
3104 del_timer_sync(&priv->watch_dog_timer);
3105 del_timer_sync(&priv->rateadapter_timer);
3106 cancel_delayed_work(&priv->ieee80211->rate_adapter_wq);
3107 cancel_delayed_work(&priv->ieee80211->hw_wakeup_wq);
3108 cancel_delayed_work(&priv->ieee80211->hw_sleep_wq);
3109 cancel_delayed_work(&priv->ieee80211->hw_dig_wq);
3110 cancel_delayed_work(&priv->ieee80211->tx_pw_wq);
3111 del_timer_sync(&priv->SwAntennaDiversityTimer);
3112 ieee80211_softmac_stop_protocol(priv->ieee80211);
3113 rtl8180_irq_disable(dev);
3114 rtl8180_rtx_disable(dev);
3115 _rtl8180_up(dev);
3116}
3117
3118static void r8180_set_multicast(struct net_device *dev)
3119{
3120 struct r8180_priv *priv = ieee80211_priv(dev);
3121 short promisc;
3122
3123 promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
3124
3125 if (promisc != priv->promisc)
3126 rtl8180_restart(dev);
3127
3128 priv->promisc = promisc;
3129}
3130
3131static int r8180_set_mac_adr(struct net_device *dev, void *mac)
3132{
3133 struct r8180_priv *priv = ieee80211_priv(dev);
3134 struct sockaddr *addr = mac;
3135
3136 down(&priv->wx_sem);
3137
3138 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
3139
3140 if (priv->ieee80211->iw_mode == IW_MODE_MASTER)
3141 memcpy(priv->ieee80211->current_network.bssid,
3142 dev->dev_addr, ETH_ALEN);
3143
3144 if (priv->up) {
3145 rtl8180_down(dev);
3146 rtl8180_up(dev);
3147 }
3148
3149 up(&priv->wx_sem);
3150
3151 return 0;
3152}
3153
3154/* based on ipw2200 driver */
3155static int rtl8180_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3156{
3157 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
3158 struct iwreq *wrq = (struct iwreq *) rq;
3159 int ret = -1;
3160
3161 switch (cmd) {
3162 case RTL_IOCTL_WPA_SUPPLICANT:
3163 ret = ieee80211_wpa_supplicant_ioctl(
3164 priv->ieee80211, &wrq->u.data);
3165 return ret;
3166 default:
3167 return -EOPNOTSUPP;
3168 }
3169
3170 return -EOPNOTSUPP;
3171}
3172
3173static const struct net_device_ops rtl8180_netdev_ops = {
3174 .ndo_open = rtl8180_open,
3175 .ndo_stop = rtl8180_close,
3176 .ndo_get_stats = rtl8180_stats,
3177 .ndo_tx_timeout = rtl8180_restart,
3178 .ndo_do_ioctl = rtl8180_ioctl,
3179 .ndo_set_rx_mode = r8180_set_multicast,
3180 .ndo_set_mac_address = r8180_set_mac_adr,
3181 .ndo_validate_addr = eth_validate_addr,
3182 .ndo_change_mtu = eth_change_mtu,
3183 .ndo_start_xmit = ieee80211_rtl_xmit,
3184};
3185
3186static int rtl8180_pci_probe(struct pci_dev *pdev,
3187 const struct pci_device_id *id)
3188{
3189 unsigned long ioaddr = 0;
3190 struct net_device *dev = NULL;
3191 struct r8180_priv *priv = NULL;
3192 u8 unit = 0;
3193 int ret = -ENODEV;
3194
3195 unsigned long pmem_start, pmem_len, pmem_flags;
3196
3197 DMESG("Configuring chip resources");
3198
3199 if (pci_enable_device(pdev)) {
3200 DMESG("Failed to enable PCI device");
3201 return -EIO;
3202 }
3203
3204 pci_set_master(pdev);
3205 pci_set_dma_mask(pdev, 0xffffff00ULL);
3206 pci_set_consistent_dma_mask(pdev, 0xffffff00ULL);
3207 dev = alloc_ieee80211(sizeof(struct r8180_priv));
3208 if (!dev) {
3209 ret = -ENOMEM;
3210 goto fail_free;
3211 }
3212 priv = ieee80211_priv(dev);
3213 priv->ieee80211 = netdev_priv(dev);
3214
3215 pci_set_drvdata(pdev, dev);
3216 SET_NETDEV_DEV(dev, &pdev->dev);
3217
3218 priv = ieee80211_priv(dev);
3219 priv->pdev = pdev;
3220
3221 pmem_start = pci_resource_start(pdev, 1);
3222 pmem_len = pci_resource_len(pdev, 1);
3223 pmem_flags = pci_resource_flags(pdev, 1);
3224
3225 if (!(pmem_flags & IORESOURCE_MEM)) {
3226 DMESG("region #1 not a MMIO resource, aborting");
3227 goto fail;
3228 }
3229
3230 if (!request_mem_region(pmem_start, pmem_len, RTL8180_MODULE_NAME)) {
3231 DMESG("request_mem_region failed!");
3232 goto fail;
3233 }
3234
3235 ioaddr = (unsigned long)ioremap_nocache(pmem_start, pmem_len);
3236 if (ioaddr == (unsigned long)NULL) {
3237 DMESG("ioremap failed!");
3238 goto fail1;
3239 }
3240
3241 dev->mem_start = ioaddr; /* shared mem start */
3242 dev->mem_end = ioaddr + pci_resource_len(pdev, 0); /* shared mem end */
3243
3244 pci_read_config_byte(pdev, 0x05, &unit);
3245 pci_write_config_byte(pdev, 0x05, unit & (~0x04));
3246
3247 dev->irq = pdev->irq;
3248 priv->irq = 0;
3249
3250 dev->netdev_ops = &rtl8180_netdev_ops;
3251 dev->wireless_handlers = &r8180_wx_handlers_def;
3252
3253 dev->type = ARPHRD_ETHER;
3254 dev->watchdog_timeo = HZ*3;
3255
3256 if (dev_alloc_name(dev, ifname) < 0) {
3257 DMESG("Oops: devname already taken! Trying wlan%%d...\n");
3258 strcpy(ifname, "wlan%d");
3259 dev_alloc_name(dev, ifname);
3260 }
3261
3262 if (rtl8180_init(dev) != 0) {
3263 DMESG("Initialization failed");
3264 goto fail1;
3265 }
3266
3267 netif_carrier_off(dev);
3268
3269 if (register_netdev(dev))
3270 goto fail1;
3271
3272 rtl8180_proc_init_one(dev);
3273
3274 DMESG("Driver probe completed\n");
3275 return 0;
3276fail1:
3277 if (dev->mem_start != (unsigned long)NULL) {
3278 iounmap((void __iomem *)dev->mem_start);
3279 release_mem_region(pci_resource_start(pdev, 1),
3280 pci_resource_len(pdev, 1));
3281 }
3282fail:
3283 if (dev) {
3284 if (priv->irq) {
3285 free_irq(dev->irq, dev);
3286 dev->irq = 0;
3287 }
3288 free_ieee80211(dev);
3289 }
3290
3291fail_free:
3292 pci_disable_device(pdev);
3293
3294 DMESG("wlan driver load failed\n");
3295 return ret;
3296}
3297
3298static void rtl8180_pci_remove(struct pci_dev *pdev)
3299{
3300 struct r8180_priv *priv;
3301 struct net_device *dev = pci_get_drvdata(pdev);
3302
3303 if (dev) {
3304 unregister_netdev(dev);
3305
3306 priv = ieee80211_priv(dev);
3307
3308 rtl8180_proc_remove_one(dev);
3309 rtl8180_down(dev);
3310 priv->rf_close(dev);
3311 rtl8180_reset(dev);
3312 mdelay(10);
3313
3314 if (priv->irq) {
3315 DMESG("Freeing irq %d", dev->irq);
3316 free_irq(dev->irq, dev);
3317 priv->irq = 0;
3318 }
3319
3320 free_rx_desc_ring(dev);
3321 free_tx_desc_rings(dev);
3322
3323 if (dev->mem_start != (unsigned long)NULL) {
3324 iounmap((void __iomem *)dev->mem_start);
3325 release_mem_region(pci_resource_start(pdev, 1),
3326 pci_resource_len(pdev, 1));
3327 }
3328
3329 free_ieee80211(dev);
3330 }
3331 pci_disable_device(pdev);
3332
3333 DMESG("wlan driver removed\n");
3334}
3335
3336static int __init rtl8180_pci_module_init(void)
3337{
3338 int ret;
3339
3340 ret = ieee80211_crypto_init();
3341 if (ret) {
3342 pr_err("ieee80211_crypto_init() failed %d\n", ret);
3343 return ret;
3344 }
3345 ret = ieee80211_crypto_tkip_init();
3346 if (ret) {
3347 pr_err("ieee80211_crypto_tkip_init() failed %d\n", ret);
3348 return ret;
3349 }
3350 ret = ieee80211_crypto_ccmp_init();
3351 if (ret) {
3352 pr_err("ieee80211_crypto_ccmp_init() failed %d\n", ret);
3353 return ret;
3354 }
3355 ret = ieee80211_crypto_wep_init();
3356 if (ret) {
3357 pr_err("ieee80211_crypto_wep_init() failed %d\n", ret);
3358 return ret;
3359 }
3360
3361 pr_info("\nLinux kernel driver for RTL8180 / RTL8185 based WLAN cards\n");
3362 pr_info("Copyright (c) 2004-2005, Andrea Merello\n");
3363 DMESG("Initializing module");
3364 DMESG("Wireless extensions version %d", WIRELESS_EXT);
3365 rtl8180_proc_module_init();
3366
3367 if (pci_register_driver(&rtl8180_pci_driver)) {
3368 DMESG("No device found");
3369 return -ENODEV;
3370 }
3371 return 0;
3372}
3373
3374static void __exit rtl8180_pci_module_exit(void)
3375{
3376 pci_unregister_driver(&rtl8180_pci_driver);
3377 rtl8180_proc_module_remove();
3378 ieee80211_crypto_tkip_exit();
3379 ieee80211_crypto_ccmp_exit();
3380 ieee80211_crypto_wep_exit();
3381 ieee80211_crypto_deinit();
3382 DMESG("Exiting");
3383}
3384
3385static void rtl8180_try_wake_queue(struct net_device *dev, int pri)
3386{
3387 unsigned long flags;
3388 short enough_desc;
3389 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
3390
3391 spin_lock_irqsave(&priv->tx_lock, flags);
3392 enough_desc = check_nic_enought_desc(dev, pri);
3393 spin_unlock_irqrestore(&priv->tx_lock, flags);
3394
3395 if (enough_desc)
3396 ieee80211_rtl_wake_queue(priv->ieee80211);
3397}
3398
3399static void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
3400{
3401 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
3402 u32 *tail; /* tail virtual addr */
3403 u32 *head; /* head virtual addr */
3404 u32 *begin; /* start of ring virtual addr */
3405 u32 *nicv; /* nic pointer virtual addr */
3406 u32 nic; /* nic pointer physical addr */
3407 u32 nicbegin; /* start of ring physical addr */
3408 unsigned long flag;
3409 /* physical addr are ok on 32 bits since we set DMA mask */
3410 int offs;
3411 int j, i;
3412 int hd;
3413 if (error)
3414 priv->stats.txretry++;
3415 spin_lock_irqsave(&priv->tx_lock, flag);
3416 switch (pri) {
3417 case MANAGE_PRIORITY:
3418 tail = priv->txmapringtail;
3419 begin = priv->txmapring;
3420 head = priv->txmapringhead;
3421 nic = read_nic_dword(dev, TX_MANAGEPRIORITY_RING_ADDR);
3422 nicbegin = priv->txmapringdma;
3423 break;
3424 case BK_PRIORITY:
3425 tail = priv->txbkpringtail;
3426 begin = priv->txbkpring;
3427 head = priv->txbkpringhead;
3428 nic = read_nic_dword(dev, TX_BKPRIORITY_RING_ADDR);
3429 nicbegin = priv->txbkpringdma;
3430 break;
3431 case BE_PRIORITY:
3432 tail = priv->txbepringtail;
3433 begin = priv->txbepring;
3434 head = priv->txbepringhead;
3435 nic = read_nic_dword(dev, TX_BEPRIORITY_RING_ADDR);
3436 nicbegin = priv->txbepringdma;
3437 break;
3438 case VI_PRIORITY:
3439 tail = priv->txvipringtail;
3440 begin = priv->txvipring;
3441 head = priv->txvipringhead;
3442 nic = read_nic_dword(dev, TX_VIPRIORITY_RING_ADDR);
3443 nicbegin = priv->txvipringdma;
3444 break;
3445 case VO_PRIORITY:
3446 tail = priv->txvopringtail;
3447 begin = priv->txvopring;
3448 head = priv->txvopringhead;
3449 nic = read_nic_dword(dev, TX_VOPRIORITY_RING_ADDR);
3450 nicbegin = priv->txvopringdma;
3451 break;
3452 case HI_PRIORITY:
3453 tail = priv->txhpringtail;
3454 begin = priv->txhpring;
3455 head = priv->txhpringhead;
3456 nic = read_nic_dword(dev, TX_HIGHPRIORITY_RING_ADDR);
3457 nicbegin = priv->txhpringdma;
3458 break;
3459
3460 default:
3461 spin_unlock_irqrestore(&priv->tx_lock, flag);
3462 return;
3463 }
3464
3465 nicv = (u32 *)((nic - nicbegin) + (u8 *)begin);
3466 if ((head <= tail && (nicv > tail || nicv < head)) ||
3467 (head > tail && (nicv > tail && nicv < head))) {
3468 DMESGW("nic has lost pointer");
3469 spin_unlock_irqrestore(&priv->tx_lock, flag);
3470 rtl8180_restart(dev);
3471 return;
3472 }
3473
3474 /*
3475 * We check all the descriptors between the head and the nic,
3476 * but not the one currently pointed to by the nic (the next to be
3477 * TXed) nor the one before it (which might still be in process)
3478 */
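	/*
	 * nic - nicbegin is a byte offset into the ring; each descriptor
	 * is 8 dwords of 4 bytes, so dividing the byte offset by 8 and
	 * then by 4 yields the index of the descriptor the nic is
	 * currently pointing to.
	 */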
3479 offs = (nic - nicbegin);
3480 offs = offs / 8 / 4;
3481 hd = (head - begin) / 8;
3482
3483 if (offs >= hd)
3484 j = offs - hd;
3485 else
3486 j = offs + (priv->txringcount-1-hd);
3487
3488 j -= 2;
3489 if (j < 0)
3490 j = 0;
3491
3492 for (i = 0; i < j; i++) {
3493 if ((*head) & (1<<31))
3494 break;
3495 if (((*head)&(0x10000000)) != 0) {
3496 priv->CurrRetryCnt += (u16)((*head) & (0x000000ff));
3497 if (!error)
3498 priv->NumTxOkTotal++;
3499 }
3500
3501 if (!error)
3502 priv->NumTxOkBytesTotal += (*(head+3)) & (0x00000fff);
3503
3504 *head = *head & ~(1<<31);
3505
3506 if ((head - begin)/8 == priv->txringcount-1)
3507 head = begin;
3508 else
3509 head += 8;
3510 }
3511
3512 /*
3513 * The head has been moved to the last certainly TXed
3514 * (or at least processed by the nic) packet.
3515 * The driver forcefully takes ownership of all these packets.
3516 * If the packet before the nic pointer has already been
3517 * processed it does not matter: it will be checked
3518 * here on the next round. In any case, if no more packets are
3519 * TXed no memory leak occurs at all.
3520 */
3521
3522 switch (pri) {
3523 case MANAGE_PRIORITY:
3524 priv->txmapringhead = head;
3525
3526 if (priv->ack_tx_to_ieee) {
3527 if (rtl8180_is_tx_queue_empty(dev)) {
3528 priv->ack_tx_to_ieee = 0;
3529 ieee80211_ps_tx_ack(priv->ieee80211, !error);
3530 }
3531 }
3532 break;
3533 case BK_PRIORITY:
3534 priv->txbkpringhead = head;
3535 break;
3536 case BE_PRIORITY:
3537 priv->txbepringhead = head;
3538 break;
3539 case VI_PRIORITY:
3540 priv->txvipringhead = head;
3541 break;
3542 case VO_PRIORITY:
3543 priv->txvopringhead = head;
3544 break;
3545 case HI_PRIORITY:
3546 priv->txhpringhead = head;
3547 break;
3548 }
3549
3550 spin_unlock_irqrestore(&priv->tx_lock, flag);
3551}
3552
3553static irqreturn_t rtl8180_interrupt(int irq, void *netdev)
3554{
3555 struct net_device *dev = (struct net_device *) netdev;
3556 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
3557 unsigned long flags;
3558 u32 inta;
3559
3560 /* We should return IRQ_NONE, but for now let me keep this */
3561 if (priv->irq_enabled == 0)
3562 return IRQ_HANDLED;
3563
3564 spin_lock_irqsave(&priv->irq_th_lock, flags);
3565
3566 /* ISR: 4bytes */
3567 inta = read_nic_dword(dev, ISR);
3568 write_nic_dword(dev, ISR, inta); /* reset int situation */
3569
3570 priv->stats.shints++;
3571
3572 if (!inta) {
3573 spin_unlock_irqrestore(&priv->irq_th_lock, flags);
3574 return IRQ_HANDLED;
3575 /*
3576 * most probably we can safely return IRQ_NONE,
3577 * but for now it is better to avoid problems
3578 */
3579 }
3580
3581 if (inta == 0xffff) {
3582 /* HW disappeared */
3583 spin_unlock_irqrestore(&priv->irq_th_lock, flags);
3584 return IRQ_HANDLED;
3585 }
3586
3587 priv->stats.ints++;
3588
3589 if (!netif_running(dev)) {
3590 spin_unlock_irqrestore(&priv->irq_th_lock, flags);
3591 return IRQ_HANDLED;
3592 }
3593
3594 if (inta & ISR_TimeOut)
3595 write_nic_dword(dev, TimerInt, 0);
3596
3597 if (inta & ISR_TBDOK)
3598 priv->stats.txbeacon++;
3599
3600 if (inta & ISR_TBDER)
3601 priv->stats.txbeaconerr++;
3602
3603 if (inta & IMR_TMGDOK)
3604 rtl8180_tx_isr(dev, MANAGE_PRIORITY, 0);
3605
3606 if (inta & ISR_THPDER) {
3607 priv->stats.txhperr++;
3608 rtl8180_tx_isr(dev, HI_PRIORITY, 1);
3609 priv->ieee80211->stats.tx_errors++;
3610 }
3611
3612 if (inta & ISR_THPDOK) { /* High priority tx ok */
3613 priv->link_detect.num_tx_ok_in_period++;
3614 priv->stats.txhpokint++;
3615 rtl8180_tx_isr(dev, HI_PRIORITY, 0);
3616 }
3617
3618 if (inta & ISR_RER)
3619 priv->stats.rxerr++;
3620
3621 if (inta & ISR_TBKDER) { /* corresponding to BK_PRIORITY */
3622 priv->stats.txbkperr++;
3623 priv->ieee80211->stats.tx_errors++;
3624 rtl8180_tx_isr(dev, BK_PRIORITY, 1);
3625 rtl8180_try_wake_queue(dev, BK_PRIORITY);
3626 }
3627
3628 if (inta & ISR_TBEDER) { /* corresponding to BE_PRIORITY */
3629 priv->stats.txbeperr++;
3630 priv->ieee80211->stats.tx_errors++;
3631 rtl8180_tx_isr(dev, BE_PRIORITY, 1);
3632 rtl8180_try_wake_queue(dev, BE_PRIORITY);
3633 }
3634 if (inta & ISR_TNPDER) { /* corresponding to VO_PRIORITY */
3635 priv->stats.txnperr++;
3636 priv->ieee80211->stats.tx_errors++;
3637 rtl8180_tx_isr(dev, NORM_PRIORITY, 1);
3638 rtl8180_try_wake_queue(dev, NORM_PRIORITY);
3639 }
3640
3641 if (inta & ISR_TLPDER) { /* corresponding to VI_PRIORITY */
3642 priv->stats.txlperr++;
3643 priv->ieee80211->stats.tx_errors++;
3644 rtl8180_tx_isr(dev, LOW_PRIORITY, 1);
3645 rtl8180_try_wake_queue(dev, LOW_PRIORITY);
3646 }
3647
3648 if (inta & ISR_ROK) {
3649 priv->stats.rxint++;
3650 tasklet_schedule(&priv->irq_rx_tasklet);
3651 }
3652
3653 if (inta & ISR_RQoSOK) {
3654 priv->stats.rxint++;
3655 tasklet_schedule(&priv->irq_rx_tasklet);
3656 }
3657
3658 if (inta & ISR_BcnInt)
3659 rtl8180_prepare_beacon(dev);
3660
3661 if (inta & ISR_RDU) {
3662 DMESGW("No RX descriptor available");
3663 priv->stats.rxrdu++;
3664 tasklet_schedule(&priv->irq_rx_tasklet);
3665 }
3666
3667 if (inta & ISR_RXFOVW) {
3668 priv->stats.rxoverflow++;
3669 tasklet_schedule(&priv->irq_rx_tasklet);
3670 }
3671
3672 if (inta & ISR_TXFOVW)
3673 priv->stats.txoverflow++;
3674
3675 if (inta & ISR_TNPDOK) { /* Normal priority tx ok */
3676 priv->link_detect.num_tx_ok_in_period++;
3677 priv->stats.txnpokint++;
3678 rtl8180_tx_isr(dev, NORM_PRIORITY, 0);
3679 rtl8180_try_wake_queue(dev, NORM_PRIORITY);
3680 }
3681
3682 if (inta & ISR_TLPDOK) { /* Low priority tx ok */
3683 priv->link_detect.num_tx_ok_in_period++;
3684 priv->stats.txlpokint++;
3685 rtl8180_tx_isr(dev, LOW_PRIORITY, 0);
3686 rtl8180_try_wake_queue(dev, LOW_PRIORITY);
3687 }
3688
3689 if (inta & ISR_TBKDOK) { /* corresponding to BK_PRIORITY */
3690 priv->stats.txbkpokint++;
3691 priv->link_detect.num_tx_ok_in_period++;
3692 rtl8180_tx_isr(dev, BK_PRIORITY, 0);
3693 rtl8180_try_wake_queue(dev, BK_PRIORITY);
3694 }
3695
3696 if (inta & ISR_TBEDOK) { /* corresponding to BE_PRIORITY */
3697 priv->stats.txbeperr++;
3698 priv->link_detect.num_tx_ok_in_period++;
3699 rtl8180_tx_isr(dev, BE_PRIORITY, 0);
3700 rtl8180_try_wake_queue(dev, BE_PRIORITY);
3701 }
3702 force_pci_posting(dev);
3703 spin_unlock_irqrestore(&priv->irq_th_lock, flags);
3704
3705 return IRQ_HANDLED;
3706}
3707
3708void rtl8180_irq_rx_tasklet(struct r8180_priv *priv)
3709{
3710 rtl8180_rx(priv->dev);
3711}
3712
3713void GPIOChangeRFWorkItemCallBack(struct work_struct *work)
3714{
3715 struct ieee80211_device *ieee = container_of(
3716 work, struct ieee80211_device, GPIOChangeRFWorkItem.work);
3717 struct net_device *dev = ieee->dev;
3718 struct r8180_priv *priv = ieee80211_priv(dev);
3719 u8 btPSR;
3720 u8 btConfig0;
3721 enum rt_rf_power_state eRfPowerStateToSet;
3722 bool bActuallySet = false;
3723
3724 char *argv[3];
3725 static char *RadioPowerPath = "/etc/acpi/events/RadioPower.sh";
3726 static char *envp[] = {"HOME=/", "TERM=linux",
3727 "PATH=/usr/bin:/bin", NULL};
3728 static int readf_count;
3729
3730 readf_count = (readf_count+1)%0xffff;
3731 /* We should turn off LED before polling FF51[4]. */
3732
3733 /* Turn off LED. */
3734 btPSR = read_nic_byte(dev, PSR);
3735 write_nic_byte(dev, PSR, (btPSR & ~BIT3));
3736
3737 /* A delay of 4us is suggested here. */
3738 udelay(4);
3739
3740 /* HW radio On/Off according to the value of FF51[4](config0) */
3741 btConfig0 = btPSR = read_nic_byte(dev, CONFIG0);
3742
3743 eRfPowerStateToSet = (btConfig0 & BIT4) ? RF_ON : RF_OFF;
3744
3745 /* Turn LED back on when radio enabled */
3746 if (eRfPowerStateToSet == RF_ON)
3747 write_nic_byte(dev, PSR, btPSR | BIT3);
3748
3749 if ((priv->ieee80211->bHwRadioOff == true) &&
3750 (eRfPowerStateToSet == RF_ON)) {
3751 priv->ieee80211->bHwRadioOff = false;
3752 bActuallySet = true;
3753 } else if ((priv->ieee80211->bHwRadioOff == false) &&
3754 (eRfPowerStateToSet == RF_OFF)) {
3755 priv->ieee80211->bHwRadioOff = true;
3756 bActuallySet = true;
3757 }
3758
3759 if (bActuallySet) {
3760 MgntActSet_RF_State(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW);
3761
3762 /* Update the UI status for the power status change */
3763 if (priv->ieee80211->bHwRadioOff == true)
3764 argv[1] = "RFOFF";
3765 else
3766 argv[1] = "RFON";
3767 argv[0] = RadioPowerPath;
3768 argv[2] = NULL;
3769
3770 call_usermodehelper(RadioPowerPath, argv, envp, UMH_WAIT_PROC);
3771 }
3772}
3773
3774module_init(rtl8180_pci_module_init);
3775module_exit(rtl8180_pci_module_exit);
diff --git a/drivers/staging/rtl8187se/r8180_dm.c b/drivers/staging/rtl8187se/r8180_dm.c
deleted file mode 100644
index 8c020e064869..000000000000
--- a/drivers/staging/rtl8187se/r8180_dm.c
+++ /dev/null
@@ -1,1139 +0,0 @@
1#include "r8180_dm.h"
2#include "r8180_hw.h"
3#include "r8180_93cx6.h"
4
5 /* Return TRUE if we shall perform High Power Mechanism, FALSE otherwise. */
6#define RATE_ADAPTIVE_TIMER_PERIOD 300
7
8bool CheckHighPower(struct net_device *dev)
9{
10 struct r8180_priv *priv = ieee80211_priv(dev);
11 struct ieee80211_device *ieee = priv->ieee80211;
12
13 if (!priv->bRegHighPowerMechanism)
14 return false;
15
16 if (ieee->state == IEEE80211_LINKED_SCANNING)
17 return false;
18
19 return true;
20}
21
22/*
23 * Description:
24 * Update Tx power level if necessary.
25 * See also DoRxHighPower() and SetTxPowerLevel8185() for reference.
26 *
27 * Note:
28 * The reason why we update the Tx power level here instead of in
29 * DoRxHighPower() is that changing the Tx power takes many more IO
30 * operations than a channel TR switch, and they involve OFDM and MAC registers.
31 * So we don't want to update it as frequently as on a per-Rx-packet basis.
32 */
33static void DoTxHighPower(struct net_device *dev)
34{
35 struct r8180_priv *priv = ieee80211_priv(dev);
36 u16 HiPwrUpperTh = 0;
37 u16 HiPwrLowerTh = 0;
38 u8 RSSIHiPwrUpperTh;
39 u8 RSSIHiPwrLowerTh;
40 u8 u1bTmp;
41 char OfdmTxPwrIdx, CckTxPwrIdx;
42
43 HiPwrUpperTh = priv->RegHiPwrUpperTh;
44 HiPwrLowerTh = priv->RegHiPwrLowerTh;
45
46 HiPwrUpperTh = HiPwrUpperTh * 10;
47 HiPwrLowerTh = HiPwrLowerTh * 10;
48 RSSIHiPwrUpperTh = priv->RegRSSIHiPwrUpperTh;
49 RSSIHiPwrLowerTh = priv->RegRSSIHiPwrLowerTh;
50
51 /* lzm add 080826 */
52 OfdmTxPwrIdx = priv->chtxpwr_ofdm[priv->ieee80211->current_network.channel];
53 CckTxPwrIdx = priv->chtxpwr[priv->ieee80211->current_network.channel];
54
55 if ((priv->UndecoratedSmoothedSS > HiPwrUpperTh) ||
56 (priv->bCurCCKPkt && (priv->CurCCKRSSI > RSSIHiPwrUpperTh))) {
57			/* Stevenl suggested degrading by 8 dBm in the high power state. 2007-12-04 Isaiah */
58
59 priv->bToUpdateTxPwr = true;
60 u1bTmp = read_nic_byte(dev, CCK_TXAGC);
61
62 /* If it never enter High Power. */
63 if (CckTxPwrIdx == u1bTmp) {
64 u1bTmp = (u1bTmp > 16) ? (u1bTmp - 16) : 0; /* 8dbm */
65 write_nic_byte(dev, CCK_TXAGC, u1bTmp);
66
67 u1bTmp = read_nic_byte(dev, OFDM_TXAGC);
68 u1bTmp = (u1bTmp > 16) ? (u1bTmp - 16) : 0; /* 8dbm */
69 write_nic_byte(dev, OFDM_TXAGC, u1bTmp);
70 }
71
72 } else if ((priv->UndecoratedSmoothedSS < HiPwrLowerTh) &&
73 (!priv->bCurCCKPkt || priv->CurCCKRSSI < RSSIHiPwrLowerTh)) {
74 if (priv->bToUpdateTxPwr) {
75 priv->bToUpdateTxPwr = false;
76 /* SD3 required. */
77 u1bTmp = read_nic_byte(dev, CCK_TXAGC);
78 if (u1bTmp < CckTxPwrIdx) {
79 write_nic_byte(dev, CCK_TXAGC, CckTxPwrIdx);
80 }
81
82 u1bTmp = read_nic_byte(dev, OFDM_TXAGC);
83 if (u1bTmp < OfdmTxPwrIdx) {
84 write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
85 }
86 }
87 }
88}
89
90
91/*
92 * Description:
93 * Callback function of UpdateTxPowerWorkItem.
94 * Because of some event happened, e.g. CCX TPC, High Power Mechanism,
95 * We update Tx power of current channel again.
96 */
97void rtl8180_tx_pw_wq(struct work_struct *work)
98{
99 struct delayed_work *dwork = to_delayed_work(work);
100 struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, tx_pw_wq);
101 struct net_device *dev = ieee->dev;
102
103 DoTxHighPower(dev);
104}
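/*
 * A minimal sketch (assumptions, not original code) of how a delayed-work
 * callback such as rtl8180_tx_pw_wq() is typically bound and armed.
 * INIT_DELAYED_WORK() and schedule_delayed_work() come from
 * <linux/workqueue.h>; the helper name and the 300 ms delay here are
 * illustrative, while the tx_pw_wq member mirrors this driver.
 */
static void example_arm_tx_pw_work(struct ieee80211_device *ieee)
{
	/* Bind the callback to the delayed_work member once, at init time. */
	INIT_DELAYED_WORK(&ieee->tx_pw_wq, rtl8180_tx_pw_wq);

	/* Defer DoTxHighPower() to process context after roughly 300 ms. */
	schedule_delayed_work(&ieee->tx_pw_wq, msecs_to_jiffies(300));
}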
105
106
107/*
108 * Return TRUE if we shall perform DIG Mechanism, FALSE otherwise.
109 */
110bool CheckDig(struct net_device *dev)
111{
112 struct r8180_priv *priv = ieee80211_priv(dev);
113 struct ieee80211_device *ieee = priv->ieee80211;
114
115 if (!priv->bDigMechanism)
116 return false;
117
118 if (ieee->state != IEEE80211_LINKED)
119 return false;
120
121 if ((priv->ieee80211->rate / 5) < 36) /* Schedule Dig under all OFDM rates. By Bruce, 2007-06-01. */
122 return false;
123 return true;
124}
125/*
126 * Implementation of DIG for Zebra and Zebra2.
127 */
128static void DIG_Zebra(struct net_device *dev)
129{
130 struct r8180_priv *priv = ieee80211_priv(dev);
131 u16 CCKFalseAlarm, OFDMFalseAlarm;
132 u16 OfdmFA1, OfdmFA2;
133 int InitialGainStep = 7; /* The number of initial gain stages. */
134	int LowestGainStage = 4;		/* The lowest gain stage the DIG workitem may fall back to. */
135 u32 AwakePeriodIn2Sec = 0;
136
137 CCKFalseAlarm = (u16)(priv->FalseAlarmRegValue & 0x0000ffff);
138 OFDMFalseAlarm = (u16)((priv->FalseAlarmRegValue >> 16) & 0x0000ffff);
139 OfdmFA1 = 0x15;
140 OfdmFA2 = ((u16)(priv->RegDigOfdmFaUpTh)) << 8;
141
142 /* The number of initial gain steps is different, by Bruce, 2007-04-13. */
143 if (priv->InitialGain == 0) { /* autoDIG */
144 /* Advised from SD3 DZ */
145 priv->InitialGain = 4; /* In 87B, m74dBm means State 4 (m82dBm) */
146 }
147 /* Advised from SD3 DZ */
148 OfdmFA1 = 0x20;
149
150#if 1 /* lzm reserved 080826 */
151 AwakePeriodIn2Sec = (2000 - priv->DozePeriodInPast2Sec);
152 priv->DozePeriodInPast2Sec = 0;
153
154 if (AwakePeriodIn2Sec) {
155 OfdmFA1 = (u16)((OfdmFA1 * AwakePeriodIn2Sec) / 2000);
156 OfdmFA2 = (u16)((OfdmFA2 * AwakePeriodIn2Sec) / 2000);
157	}
160#endif
161
162 InitialGainStep = 8;
163 LowestGainStage = priv->RegBModeGainStage; /* Lowest gain stage. */
164
165 if (OFDMFalseAlarm > OfdmFA1) {
166 if (OFDMFalseAlarm > OfdmFA2) {
167 priv->DIG_NumberFallbackVote++;
168 if (priv->DIG_NumberFallbackVote > 1) {
169 /* serious OFDM False Alarm, need fallback */
170 if (priv->InitialGain < InitialGainStep) {
171 priv->InitialGainBackUp = priv->InitialGain;
172
173 priv->InitialGain = (priv->InitialGain + 1);
174 UpdateInitialGain(dev);
175 }
176 priv->DIG_NumberFallbackVote = 0;
177 priv->DIG_NumberUpgradeVote = 0;
178 }
179 } else {
180 if (priv->DIG_NumberFallbackVote)
181 priv->DIG_NumberFallbackVote--;
182 }
183 priv->DIG_NumberUpgradeVote = 0;
184 } else {
185 if (priv->DIG_NumberFallbackVote)
186 priv->DIG_NumberFallbackVote--;
187 priv->DIG_NumberUpgradeVote++;
188
189 if (priv->DIG_NumberUpgradeVote > 9) {
190 if (priv->InitialGain > LowestGainStage) { /* In 87B, m78dBm means State 4 (m864dBm) */
191 priv->InitialGainBackUp = priv->InitialGain;
192
193 priv->InitialGain = (priv->InitialGain - 1);
194 UpdateInitialGain(dev);
195 }
196 priv->DIG_NumberFallbackVote = 0;
197 priv->DIG_NumberUpgradeVote = 0;
198 }
199 }
200}
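/*
 * DIG_Zebra() above is essentially a hysteresis controller: repeated high
 * false-alarm counts vote the initial gain up (less sensitive), repeated low
 * counts vote it down (more sensitive). A driver-independent sketch of that
 * voting idea, with a single threshold for brevity and hypothetical names:
 */
struct dig_state {
	int gain;		/* current initial-gain stage */
	int fallback_votes;	/* consecutive "too many false alarms" periods */
	int upgrade_votes;	/* consecutive "few false alarms" periods */
};

static void dig_vote(struct dig_state *s, unsigned int false_alarms,
		     unsigned int threshold, int max_gain, int min_gain)
{
	if (false_alarms > threshold) {
		s->upgrade_votes = 0;
		if (++s->fallback_votes > 1) {
			if (s->gain < max_gain)
				s->gain++;	/* desensitise the receiver */
			s->fallback_votes = 0;
		}
	} else {
		if (s->fallback_votes)
			s->fallback_votes--;
		if (++s->upgrade_votes > 9) {
			if (s->gain > min_gain)
				s->gain--;	/* recover sensitivity */
			s->upgrade_votes = 0;
		}
	}
}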
201
202/*
203 * Dispatch DIG implementation according to RF.
204 */
205static void DynamicInitGain(struct net_device *dev)
206{
207 DIG_Zebra(dev);
208}
209
210void rtl8180_hw_dig_wq(struct work_struct *work)
211{
212 struct delayed_work *dwork = to_delayed_work(work);
213 struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, hw_dig_wq);
214 struct net_device *dev = ieee->dev;
215 struct r8180_priv *priv = ieee80211_priv(dev);
216
217 /* Read CCK and OFDM False Alarm. */
218 priv->FalseAlarmRegValue = read_nic_dword(dev, CCK_FALSE_ALARM);
219
220
221 /* Adjust Initial Gain dynamically. */
222 DynamicInitGain(dev);
223
224}
225
226static int IncludedInSupportedRates(struct r8180_priv *priv, u8 TxRate)
227{
228 u8 rate_len;
229 u8 rate_ex_len;
230 u8 RateMask = 0x7F;
231 u8 idx;
232 unsigned short Found = 0;
233 u8 NaiveTxRate = TxRate&RateMask;
234
235 rate_len = priv->ieee80211->current_network.rates_len;
236 rate_ex_len = priv->ieee80211->current_network.rates_ex_len;
237 for (idx = 0; idx < rate_len; idx++) {
238 if ((priv->ieee80211->current_network.rates[idx] & RateMask) == NaiveTxRate) {
239 Found = 1;
240 goto found_rate;
241 }
242 }
243 for (idx = 0; idx < rate_ex_len; idx++) {
244 if ((priv->ieee80211->current_network.rates_ex[idx] & RateMask) == NaiveTxRate) {
245 Found = 1;
246 goto found_rate;
247 }
248 }
249 return Found;
250found_rate:
251 return Found;
252}
253
254/*
255 * Get the Tx rate one degree up from the input rate among the supported rates.
256 * Return the upgraded rate if successful, otherwise return the input rate.
257 */
258static u8 GetUpgradeTxRate(struct net_device *dev, u8 rate)
259{
260 struct r8180_priv *priv = ieee80211_priv(dev);
261 u8 UpRate;
262
263 /* Upgrade 1 degree. */
264 switch (rate) {
265 case 108: /* Up to 54Mbps. */
266 UpRate = 108;
267 break;
268
269 case 96: /* Up to 54Mbps. */
270 UpRate = 108;
271 break;
272
273 case 72: /* Up to 48Mbps. */
274 UpRate = 96;
275 break;
276
277 case 48: /* Up to 36Mbps. */
278 UpRate = 72;
279 break;
280
281 case 36: /* Up to 24Mbps. */
282 UpRate = 48;
283 break;
284
285 case 22: /* Up to 18Mbps. */
286 UpRate = 36;
287 break;
288
289 case 11: /* Up to 11Mbps. */
290 UpRate = 22;
291 break;
292
293 case 4: /* Up to 5.5Mbps. */
294 UpRate = 11;
295 break;
296
297 case 2: /* Up to 2Mbps. */
298 UpRate = 4;
299 break;
300
301 default:
302 printk("GetUpgradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
303 return rate;
304 }
305 /* Check if the rate is valid. */
306 if (IncludedInSupportedRates(priv, UpRate)) {
307 return UpRate;
308	}
309	return rate;
312}
313/*
314 * Get the Tx rate one degree down from the input rate among the supported rates.
315 * Return the degraded rate if successful, otherwise return the input rate.
316 */
317
318static u8 GetDegradeTxRate(struct net_device *dev, u8 rate)
319{
320 struct r8180_priv *priv = ieee80211_priv(dev);
321 u8 DownRate;
322
323	/* Degrade 1 degree. */
324 switch (rate) {
325 case 108: /* Down to 48Mbps. */
326 DownRate = 96;
327 break;
328
329 case 96: /* Down to 36Mbps. */
330 DownRate = 72;
331 break;
332
333 case 72: /* Down to 24Mbps. */
334 DownRate = 48;
335 break;
336
337 case 48: /* Down to 18Mbps. */
338 DownRate = 36;
339 break;
340
341 case 36: /* Down to 11Mbps. */
342 DownRate = 22;
343 break;
344
345 case 22: /* Down to 5.5Mbps. */
346 DownRate = 11;
347 break;
348
349 case 11: /* Down to 2Mbps. */
350 DownRate = 4;
351 break;
352
353 case 4: /* Down to 1Mbps. */
354 DownRate = 2;
355 break;
356
357 case 2: /* Down to 1Mbps. */
358 DownRate = 2;
359 break;
360
361 default:
362 printk("GetDegradeTxRate(): Input Tx Rate(%d) is undefined!\n", rate);
363 return rate;
364 }
365 /* Check if the rate is valid. */
366 if (IncludedInSupportedRates(priv, DownRate)) {
367 return DownRate;
368	}
369	return rate;
372}
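/*
 * The switch statements in GetUpgradeTxRate()/GetDegradeTxRate() walk a fixed
 * rate ladder; rates are in 500 kbps units (108 == 54 Mbps, 22 == 11 Mbps, ...).
 * A table-driven sketch of the same ladder; step_rate() is a hypothetical
 * helper and ARRAY_SIZE() comes from <linux/kernel.h>:
 */
static const u8 rate_ladder[] = { 2, 4, 11, 22, 36, 48, 72, 96, 108 };

static u8 step_rate(u8 rate, int up)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rate_ladder); i++) {
		if (rate_ladder[i] != rate)
			continue;
		if (up && i < ARRAY_SIZE(rate_ladder) - 1)
			return rate_ladder[i + 1];
		if (!up && i > 0)
			return rate_ladder[i - 1];
		return rate;	/* already at the top/bottom of the ladder */
	}
	return rate;		/* unknown rate: leave it unchanged */
}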
373/*
374 * Helper function to determine if specified data rate is
375 * CCK rate.
376 */
377
378static bool MgntIsCckRate(u16 rate)
379{
380 bool bReturn = false;
381
382 if ((rate <= 22) && (rate != 12) && (rate != 18)) {
383 bReturn = true;
384 }
385
386 return bReturn;
387}
388/*
389 * Description:
390 * Tx Power tracking mechanism routine on 87SE.
391 */
392void TxPwrTracking87SE(struct net_device *dev)
393{
394 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
395 u8 tmpu1Byte, CurrentThermal, Idx;
396 char CckTxPwrIdx, OfdmTxPwrIdx;
397
398 tmpu1Byte = read_nic_byte(dev, EN_LPF_CAL);
399 CurrentThermal = (tmpu1Byte & 0xf0) >> 4; /*[ 7:4]: thermal meter indication. */
400 CurrentThermal = (CurrentThermal > 0x0c) ? 0x0c : CurrentThermal;/* lzm add 080826 */
401
402 if (CurrentThermal != priv->ThermalMeter) {
403 /* Update Tx Power level on each channel. */
404 for (Idx = 1; Idx < 15; Idx++) {
405 CckTxPwrIdx = priv->chtxpwr[Idx];
406 OfdmTxPwrIdx = priv->chtxpwr_ofdm[Idx];
407
408 if (CurrentThermal > priv->ThermalMeter) {
409 /* higher thermal meter. */
410 CckTxPwrIdx += (CurrentThermal - priv->ThermalMeter) * 2;
411 OfdmTxPwrIdx += (CurrentThermal - priv->ThermalMeter) * 2;
412
413 if (CckTxPwrIdx > 35)
414 CckTxPwrIdx = 35; /* Force TxPower to maximal index. */
415 if (OfdmTxPwrIdx > 35)
416 OfdmTxPwrIdx = 35;
417 } else {
418 /* lower thermal meter. */
419 CckTxPwrIdx -= (priv->ThermalMeter - CurrentThermal) * 2;
420 OfdmTxPwrIdx -= (priv->ThermalMeter - CurrentThermal) * 2;
421
422 if (CckTxPwrIdx < 0)
423 CckTxPwrIdx = 0;
424 if (OfdmTxPwrIdx < 0)
425 OfdmTxPwrIdx = 0;
426 }
427
428 /* Update TxPower level on CCK and OFDM resp. */
429 priv->chtxpwr[Idx] = CckTxPwrIdx;
430 priv->chtxpwr_ofdm[Idx] = OfdmTxPwrIdx;
431 }
432
433 /* Update TxPower level immediately. */
434 rtl8225z2_SetTXPowerLevel(dev, priv->ieee80211->current_network.channel);
435 }
436 priv->ThermalMeter = CurrentThermal;
437}
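/*
 * The loop above shifts every per-channel power index by 2 steps per unit of
 * thermal-meter change and clamps the result to [0, 35]. That arithmetic in
 * isolation (the helper name is hypothetical):
 */
static char adjust_pwr_idx(char idx, u8 new_thermal, u8 old_thermal)
{
	int v = idx + ((int)new_thermal - (int)old_thermal) * 2;

	if (v > 35)
		v = 35;		/* force TxPower to the maximal index */
	if (v < 0)
		v = 0;
	return (char)v;
}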
438static void StaRateAdaptive87SE(struct net_device *dev)
439{
440 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
441 unsigned long CurrTxokCnt;
442 u16 CurrRetryCnt;
443 u16 CurrRetryRate;
444 unsigned long CurrRxokCnt;
445 bool bTryUp = false;
446 bool bTryDown = false;
447 u8 TryUpTh = 1;
448 u8 TryDownTh = 2;
449 u32 TxThroughput;
450 long CurrSignalStrength;
451 bool bUpdateInitialGain = false;
452 u8 u1bOfdm = 0, u1bCck = 0;
453 char OfdmTxPwrIdx, CckTxPwrIdx;
454
455 priv->RateAdaptivePeriod = RATE_ADAPTIVE_TIMER_PERIOD;
456
457
458 CurrRetryCnt = priv->CurrRetryCnt;
459 CurrTxokCnt = priv->NumTxOkTotal - priv->LastTxokCnt;
460 CurrRxokCnt = priv->ieee80211->NumRxOkTotal - priv->LastRxokCnt;
461 CurrSignalStrength = priv->Stats_RecvSignalPower;
462 TxThroughput = (u32)(priv->NumTxOkBytesTotal - priv->LastTxOKBytes);
463 priv->LastTxOKBytes = priv->NumTxOkBytesTotal;
464 priv->CurrentOperaRate = priv->ieee80211->rate / 5;
465 /* 2 Compute retry ratio. */
466 if (CurrTxokCnt > 0) {
467 CurrRetryRate = (u16)(CurrRetryCnt * 100 / CurrTxokCnt);
468 } else {
469		/* It may be a serious retry condition. Distinguish serious retries from the no-packet case. Modified by Bruce. */
470 CurrRetryRate = (u16)(CurrRetryCnt * 100 / 1);
471 }
472
473 priv->LastRetryCnt = priv->CurrRetryCnt;
474 priv->LastTxokCnt = priv->NumTxOkTotal;
475 priv->LastRxokCnt = priv->ieee80211->NumRxOkTotal;
476 priv->CurrRetryCnt = 0;
477
478 /* 2No Tx packets, return to init_rate or not? */
479 if (CurrRetryRate == 0 && CurrTxokCnt == 0) {
480 /*
481 * After 9 (30*300ms) seconds in this condition, we try to raise rate.
482 */
483 priv->TryupingCountNoData++;
484
485 /* [TRC Dell Lab] Extend raised period from 4.5sec to 9sec, Isaiah 2008-02-15 18:00 */
486 if (priv->TryupingCountNoData > 30) {
487 priv->TryupingCountNoData = 0;
488 priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate);
489 /* Reset Fail Record */
490 priv->LastFailTxRate = 0;
491 priv->LastFailTxRateSS = -200;
492 priv->FailTxRateCount = 0;
493 }
494 goto SetInitialGain;
495 } else {
496 priv->TryupingCountNoData = 0; /*Reset trying up times. */
497 }
498
499
500 /*
501	 * For the Netgear case, I comment out the following signal strength estimation,
502	 * which can result in a lower transmit rate when there are not enough samples (e.g. a PING request).
503 *
504 * Restructure rate adaptive as the following main stages:
505 * (1) Add retry threshold in 54M upgrading condition with signal strength.
506 * (2) Add the mechanism to degrade to CCK rate according to signal strength
507 * and retry rate.
508	 * (3) Remove all Initial Gain Updates over OFDM rates. To avoid complicated
509	 *     situations, the Initial Gain Update relies on the DIG mechanism except at CCK rates.
510 * (4) Add the mechanism of trying to upgrade tx rate.
511	 * (5) Record tx rate upgrade information to avoid constantly trying to upgrade the tx rate.
512 *
513 */
514
515 /*
516 * 11Mbps or 36Mbps
517	 * Check more times at these (key) rates.
518 */
519 if (priv->CurrentOperaRate == 22 || priv->CurrentOperaRate == 72)
520 TryUpTh += 9;
521 /*
522	 * Make it harder to step down from these rates.
523 */
524 if (MgntIsCckRate(priv->CurrentOperaRate) || priv->CurrentOperaRate == 36)
525 TryDownTh += 1;
526
527 /* 1 Adjust Rate. */
528 if (priv->bTryuping == true) {
529 /* 2 For Test Upgrading mechanism
530 * Note:
531		 * Sometimes the throughput depends on the compatibility between the AP and NIC,
532		 * so a low data rate does not necessarily improve performance.
533 * We randomly upgrade the data rate and check if the retry rate is improved.
534 */
535
536 /* Upgrading rate did not improve the retry rate, fallback to the original rate. */
537 if ((CurrRetryRate > 25) && TxThroughput < priv->LastTxThroughput) {
538 /*Not necessary raising rate, fall back rate. */
539 bTryDown = true;
540 } else {
541 priv->bTryuping = false;
542 }
543 } else if (CurrSignalStrength > -47 && (CurrRetryRate < 50)) {
544 /*
545 * 2For High Power
546 *
547 * Return to highest data rate, if signal strength is good enough.
548 * SignalStrength threshold(-50dbm) is for RTL8186.
549 * Revise SignalStrength threshold to -51dbm.
550 */
551 /* Also need to check retry rate for safety, by Bruce, 2007-06-05. */
552 if (priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate) {
553 bTryUp = true;
554 /* Upgrade Tx Rate directly. */
555 priv->TryupingCount += TryUpTh;
556 }
557
558 } else if (CurrTxokCnt > 9 && CurrTxokCnt < 100 && CurrRetryRate >= 600) {
559 /*
560 *2 For Serious Retry
561 *
562 * Traffic is not busy but our Tx retry is serious.
563 */
564 bTryDown = true;
565 /* Let Rate Mechanism to degrade tx rate directly. */
566 priv->TryDownCountLowData += TryDownTh;
567 } else if (priv->CurrentOperaRate == 108) {
568 /* 2For 54Mbps */
569 /* Air Link */
570 if ((CurrRetryRate > 26) && (priv->LastRetryRate > 25)) {
571 bTryDown = true;
572 }
573 /* Cable Link */
574 else if ((CurrRetryRate > 17) && (priv->LastRetryRate > 16) && (CurrSignalStrength > -72)) {
575 bTryDown = true;
576 }
577
578 if (bTryDown && (CurrSignalStrength < -75)) /* cable link */
579 priv->TryDownCountLowData += TryDownTh;
580 } else if (priv->CurrentOperaRate == 96) {
581 /* 2For 48Mbps */
582 /* Air Link */
583 if (((CurrRetryRate > 48) && (priv->LastRetryRate > 47))) {
584 bTryDown = true;
585 } else if (((CurrRetryRate > 21) && (priv->LastRetryRate > 20)) && (CurrSignalStrength > -74)) { /* Cable Link */
586 /* Down to rate 36Mbps. */
587 bTryDown = true;
588 } else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
589 bTryDown = true;
590 priv->TryDownCountLowData += TryDownTh;
591 } else if ((CurrRetryRate < 8) && (priv->LastRetryRate < 8)) { /* TO DO: need to consider (RSSI) */
592 bTryUp = true;
593 }
594
595 if (bTryDown && (CurrSignalStrength < -75)) {
596 priv->TryDownCountLowData += TryDownTh;
597 }
598 } else if (priv->CurrentOperaRate == 72) {
599 /* 2For 36Mbps */
600 if ((CurrRetryRate > 43) && (priv->LastRetryRate > 41)) {
601 /* Down to rate 24Mbps. */
602 bTryDown = true;
603 } else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
604 bTryDown = true;
605 priv->TryDownCountLowData += TryDownTh;
606 } else if ((CurrRetryRate < 15) && (priv->LastRetryRate < 16)) { /* TO DO: need to consider (RSSI) */
607 bTryUp = true;
608 }
609
610 if (bTryDown && (CurrSignalStrength < -80))
611 priv->TryDownCountLowData += TryDownTh;
612
613 } else if (priv->CurrentOperaRate == 48) {
614 /* 2For 24Mbps */
615 /* Air Link */
616 if (((CurrRetryRate > 63) && (priv->LastRetryRate > 62))) {
617 bTryDown = true;
618 } else if (((CurrRetryRate > 33) && (priv->LastRetryRate > 32)) && (CurrSignalStrength > -82)) { /* Cable Link */
619 bTryDown = true;
620 } else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
621 bTryDown = true;
622 priv->TryDownCountLowData += TryDownTh;
623 } else if ((CurrRetryRate < 20) && (priv->LastRetryRate < 21)) { /* TO DO: need to consider (RSSI) */
624 bTryUp = true;
625 }
626
627 if (bTryDown && (CurrSignalStrength < -82))
628 priv->TryDownCountLowData += TryDownTh;
629
630 } else if (priv->CurrentOperaRate == 36) {
631 if (((CurrRetryRate > 85) && (priv->LastRetryRate > 86))) {
632 bTryDown = true;
633 } else if ((CurrRetryRate > (priv->LastRetryRate + 50)) && (priv->FailTxRateCount > 2)) {
634 bTryDown = true;
635 priv->TryDownCountLowData += TryDownTh;
636 } else if ((CurrRetryRate < 22) && (priv->LastRetryRate < 23)) { /* TO DO: need to consider (RSSI) */
637 bTryUp = true;
638 }
639 } else if (priv->CurrentOperaRate == 22) {
640 /* 2For 11Mbps */
641 if (CurrRetryRate > 95) {
642 bTryDown = true;
643 } else if ((CurrRetryRate < 29) && (priv->LastRetryRate < 30)) { /*TO DO: need to consider (RSSI) */
644 bTryUp = true;
645 }
646 } else if (priv->CurrentOperaRate == 11) {
647 /* 2For 5.5Mbps */
648 if (CurrRetryRate > 149) {
649 bTryDown = true;
650 } else if ((CurrRetryRate < 60) && (priv->LastRetryRate < 65)) {
651 bTryUp = true;
652 }
653 } else if (priv->CurrentOperaRate == 4) {
654 /* 2For 2 Mbps */
655 if ((CurrRetryRate > 99) && (priv->LastRetryRate > 99)) {
656 bTryDown = true;
657 } else if ((CurrRetryRate < 65) && (priv->LastRetryRate < 70)) {
658 bTryUp = true;
659 }
660 } else if (priv->CurrentOperaRate == 2) {
661 /* 2For 1 Mbps */
662 if ((CurrRetryRate < 70) && (priv->LastRetryRate < 75)) {
663 bTryUp = true;
664 }
665 }
666
667 if (bTryUp && bTryDown)
668		printk("StaRateAdaptive87SE(): Tx Rate tried upping and downing simultaneously!\n");
669
670 /* 1 Test Upgrading Tx Rate
671 * Sometimes the cause of the low throughput (high retry rate) is the compatibility between the AP and NIC.
672	 * To test whether a higher rate may lower the retry rate, this mechanism occasionally tries upgrading the tx rate.
673 */
674 if (!bTryUp && !bTryDown && (priv->TryupingCount == 0) && (priv->TryDownCountLowData == 0)
675 && priv->CurrentOperaRate != priv->ieee80211->current_network.HighestOperaRate && priv->FailTxRateCount < 2) {
676 if (jiffies % (CurrRetryRate + 101) == 0) {
677 bTryUp = true;
678 priv->bTryuping = true;
679 }
680 }
681
682 /* 1 Rate Mechanism */
683 if (bTryUp) {
684 priv->TryupingCount++;
685 priv->TryDownCountLowData = 0;
686
687 /*
688 * Check more times if we need to upgrade indeed.
689 * Because the largest value of pHalData->TryupingCount is 0xFFFF and
690 * the largest value of pHalData->FailTxRateCount is 0x14,
691 * this condition will be satisfied at most every 2 min.
692 */
693
694 if ((priv->TryupingCount > (TryUpTh + priv->FailTxRateCount * priv->FailTxRateCount)) ||
695 (CurrSignalStrength > priv->LastFailTxRateSS) || priv->bTryuping) {
696 priv->TryupingCount = 0;
697 /*
698 * When transferring from CCK to OFDM, DIG is an important issue.
699 */
700 if (priv->CurrentOperaRate == 22)
701 bUpdateInitialGain = true;
702
703 /*
704 * The difference in throughput between 48Mbps and 36Mbps is 8M.
705 * So, we must be careful in this rate scale. Isaiah 2008-02-15.
706 */
707 if (((priv->CurrentOperaRate == 72) || (priv->CurrentOperaRate == 48) || (priv->CurrentOperaRate == 36)) &&
708 (priv->FailTxRateCount > 2))
709 priv->RateAdaptivePeriod = (RATE_ADAPTIVE_TIMER_PERIOD / 2);
710
711 /* (1)To avoid upgrade frequently to the fail tx rate, add the FailTxRateCount into the threshold. */
712 /* (2)If the signal strength is increased, it may be able to upgrade. */
713
714 priv->CurrentOperaRate = GetUpgradeTxRate(dev, priv->CurrentOperaRate);
715
716 if (priv->CurrentOperaRate == 36) {
717 priv->bUpdateARFR = true;
718 write_nic_word(dev, ARFR, 0x0F8F); /* bypass 12/9/6 */
719 } else if (priv->bUpdateARFR) {
720 priv->bUpdateARFR = false;
721 write_nic_word(dev, ARFR, 0x0FFF); /* set 1M ~ 54Mbps. */
722 }
723
724 /* Update Fail Tx rate and count. */
725 if (priv->LastFailTxRate != priv->CurrentOperaRate) {
726 priv->LastFailTxRate = priv->CurrentOperaRate;
727 priv->FailTxRateCount = 0;
728 priv->LastFailTxRateSS = -200; /* Set lowest power. */
729 }
730 }
731 } else {
732 if (priv->TryupingCount > 0)
733 priv->TryupingCount--;
734 }
735
736 if (bTryDown) {
737 priv->TryDownCountLowData++;
738 priv->TryupingCount = 0;
739
740 /* Check if Tx rate can be degraded or Test trying upgrading should fallback. */
741 if (priv->TryDownCountLowData > TryDownTh || priv->bTryuping) {
742 priv->TryDownCountLowData = 0;
743 priv->bTryuping = false;
744 /* Update fail information. */
745 if (priv->LastFailTxRate == priv->CurrentOperaRate) {
746 priv->FailTxRateCount++;
747 /* Record the Tx fail rate signal strength. */
748 if (CurrSignalStrength > priv->LastFailTxRateSS)
749 priv->LastFailTxRateSS = CurrSignalStrength;
750 } else {
751 priv->LastFailTxRate = priv->CurrentOperaRate;
752 priv->FailTxRateCount = 1;
753 priv->LastFailTxRateSS = CurrSignalStrength;
754 }
755 priv->CurrentOperaRate = GetDegradeTxRate(dev, priv->CurrentOperaRate);
756
757			/* Reduce Chariot training time in weak signal strength situations. SD3 ED demand. */
758 if ((CurrSignalStrength < -80) && (priv->CurrentOperaRate > 72)) {
759 priv->CurrentOperaRate = 72;
760 }
761
762 if (priv->CurrentOperaRate == 36) {
763 priv->bUpdateARFR = true;
764 write_nic_word(dev, ARFR, 0x0F8F); /* bypass 12/9/6 */
765 } else if (priv->bUpdateARFR) {
766 priv->bUpdateARFR = false;
767 write_nic_word(dev, ARFR, 0x0FFF); /* set 1M ~ 54Mbps. */
768 }
769
770 /*
771 * When it is CCK rate, it may need to update initial gain to receive lower power packets.
772 */
773 if (MgntIsCckRate(priv->CurrentOperaRate)) {
774 bUpdateInitialGain = true;
775 }
776 }
777 } else {
778 if (priv->TryDownCountLowData > 0)
779 priv->TryDownCountLowData--;
780 }
781
782 /*
783	 * Keep the Tx fail rate count at no more than 0x15.
784	 * Reduce the fail count after at most 10 seconds if the tx rate is tending to be stable.
785 */
786 if (priv->FailTxRateCount >= 0x15 ||
787 (!bTryUp && !bTryDown && priv->TryDownCountLowData == 0 && priv->TryupingCount && priv->FailTxRateCount > 0x6)) {
788 priv->FailTxRateCount--;
789 }
790
791
792 OfdmTxPwrIdx = priv->chtxpwr_ofdm[priv->ieee80211->current_network.channel];
793 CckTxPwrIdx = priv->chtxpwr[priv->ieee80211->current_network.channel];
794
795 /* Mac0x9e increase 2 level in 36M~18M situation */
796 if ((priv->CurrentOperaRate < 96) && (priv->CurrentOperaRate > 22)) {
797 u1bCck = read_nic_byte(dev, CCK_TXAGC);
798 u1bOfdm = read_nic_byte(dev, OFDM_TXAGC);
799
800 /* case 1: Never enter High power */
801 if (u1bCck == CckTxPwrIdx) {
802 if (u1bOfdm != (OfdmTxPwrIdx + 2)) {
803 priv->bEnhanceTxPwr = true;
804 u1bOfdm = ((u1bOfdm + 2) > 35) ? 35 : (u1bOfdm + 2);
805 write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
806 }
807 } else if (u1bCck < CckTxPwrIdx) {
808 /* case 2: enter high power */
809 if (!priv->bEnhanceTxPwr) {
810 priv->bEnhanceTxPwr = true;
811 u1bOfdm = ((u1bOfdm + 2) > 35) ? 35 : (u1bOfdm + 2);
812 write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
813 }
814 }
815 } else if (priv->bEnhanceTxPwr) { /* 54/48/11/5.5/2/1 */
816 u1bCck = read_nic_byte(dev, CCK_TXAGC);
817 u1bOfdm = read_nic_byte(dev, OFDM_TXAGC);
818
819 /* case 1: Never enter High power */
820 if (u1bCck == CckTxPwrIdx) {
821 priv->bEnhanceTxPwr = false;
822 write_nic_byte(dev, OFDM_TXAGC, OfdmTxPwrIdx);
823 }
824 /* case 2: enter high power */
825 else if (u1bCck < CckTxPwrIdx) {
826 priv->bEnhanceTxPwr = false;
827 u1bOfdm = ((u1bOfdm - 2) > 0) ? (u1bOfdm - 2) : 0;
828 write_nic_byte(dev, OFDM_TXAGC, u1bOfdm);
829 }
830 }
831
832 /*
833 * We need update initial gain when we set tx rate "from OFDM to CCK" or
834 * "from CCK to OFDM".
835 */
836SetInitialGain:
837 if (bUpdateInitialGain) {
838 if (MgntIsCckRate(priv->CurrentOperaRate)) { /* CCK */
839 if (priv->InitialGain > priv->RegBModeGainStage) {
840 priv->InitialGainBackUp = priv->InitialGain;
841
842 if (CurrSignalStrength < -85) /* Low power, OFDM [0x17] = 26. */
843 /* SD3 SYs suggest that CurrSignalStrength < -65, ofdm 0x17=26. */
844 priv->InitialGain = priv->RegBModeGainStage;
845
846 else if (priv->InitialGain > priv->RegBModeGainStage + 1)
847 priv->InitialGain -= 2;
848
849 else
850 priv->InitialGain--;
851
852				printk("StaRateAdaptive87SE(): update init_gain to index %d for data rate %d\n", priv->InitialGain, priv->CurrentOperaRate);
853 UpdateInitialGain(dev);
854 }
855 } else { /* OFDM */
856 if (priv->InitialGain < 4) {
857 priv->InitialGainBackUp = priv->InitialGain;
858
859 priv->InitialGain++;
860			printk("StaRateAdaptive87SE(): update init_gain to index %d for data rate %d\n", priv->InitialGain, priv->CurrentOperaRate);
861 UpdateInitialGain(dev);
862 }
863 }
864 }
865
866 /* Record the related info */
867 priv->LastRetryRate = CurrRetryRate;
868 priv->LastTxThroughput = TxThroughput;
869 priv->ieee80211->rate = priv->CurrentOperaRate * 5;
870}
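/*
 * Stripped of the per-rate thresholds, StaRateAdaptive87SE() follows a simple
 * pattern: compute the retry ratio of the last period, vote "up" or "down",
 * and only change the rate once enough votes accumulate. A compact sketch of
 * that skeleton; all names and the vote limits are hypothetical, and
 * step_rate() refers to the ladder sketch shown earlier:
 */
struct ra_state {
	u8 rate;		/* current rate in 500 kbps units */
	u16 up_votes;
	u16 down_votes;
};

static void ra_period(struct ra_state *s, unsigned long tx_ok,
		      unsigned long retries, unsigned int up_th,
		      unsigned int down_th)
{
	/* Retries per 100 successful transmissions in this period. */
	unsigned int retry_rate = tx_ok ? retries * 100 / tx_ok : retries * 100;

	if (retry_rate < up_th) {
		s->down_votes = 0;
		if (++s->up_votes > 3) {	/* hysteresis before upgrading */
			s->rate = step_rate(s->rate, 1);
			s->up_votes = 0;
		}
	} else if (retry_rate > down_th) {
		s->up_votes = 0;
		if (++s->down_votes > 2) {	/* hysteresis before degrading */
			s->rate = step_rate(s->rate, 0);
			s->down_votes = 0;
		}
	}
}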
871
872void rtl8180_rate_adapter(struct work_struct *work)
873{
874 struct delayed_work *dwork = to_delayed_work(work);
875 struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, rate_adapter_wq);
876 struct net_device *dev = ieee->dev;
877 StaRateAdaptive87SE(dev);
878}
879void timer_rate_adaptive(unsigned long data)
880{
881 struct r8180_priv *priv = ieee80211_priv((struct net_device *)data);
882 if (!priv->up) {
883 return;
884 }
885 if ((priv->ieee80211->iw_mode != IW_MODE_MASTER)
886 && (priv->ieee80211->state == IEEE80211_LINKED) &&
887 (priv->ForcedDataRate == 0)) {
888 queue_work(priv->ieee80211->wq, (void *)&priv->ieee80211->rate_adapter_wq);
889 }
890 priv->rateadapter_timer.expires = jiffies + MSECS(priv->RateAdaptivePeriod);
891 add_timer(&priv->rateadapter_timer);
892}
893
894void SwAntennaDiversityRxOk8185(struct net_device *dev, u8 SignalStrength)
895{
896 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
897
898 priv->AdRxOkCnt++;
899
900 if (priv->AdRxSignalStrength != -1) {
901 priv->AdRxSignalStrength = ((priv->AdRxSignalStrength * 7) + (SignalStrength * 3)) / 10;
902 } else { /* Initialization case. */
903 priv->AdRxSignalStrength = SignalStrength;
904 }
905
906 if (priv->LastRxPktAntenna) /* Main antenna. */
907 priv->AdMainAntennaRxOkCnt++;
908 else /* Aux antenna. */
909 priv->AdAuxAntennaRxOkCnt++;
910}
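/*
 * The smoothing above is an exponential moving average with weights 0.7 (old)
 * and 0.3 (new sample): for an old value of 60 and a new sample of 50 it gives
 * (60 * 7 + 50 * 3) / 10 = 57. In isolation (hypothetical helper name, -1 is
 * the "no history yet" sentinel used by the driver):
 */
static long smooth_ss(long old_ss, u8 sample)
{
	if (old_ss == -1)
		return sample;
	return (old_ss * 7 + (long)sample * 3) / 10;
}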
911 /* Change Antenna Switch. */
912bool SetAntenna8185(struct net_device *dev, u8 u1bAntennaIndex)
913{
914 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
915 bool bAntennaSwitched = false;
916
917 switch (u1bAntennaIndex) {
918 case 0:
919 /* Mac register, main antenna */
920 write_nic_byte(dev, ANTSEL, 0x03);
921 /* base band */
922 write_phy_cck(dev, 0x11, 0x9b); /* Config CCK RX antenna. */
923 write_phy_ofdm(dev, 0x0d, 0x5c); /* Config OFDM RX antenna. */
924
925 bAntennaSwitched = true;
926 break;
927
928 case 1:
929 /* Mac register, aux antenna */
930 write_nic_byte(dev, ANTSEL, 0x00);
931 /* base band */
932 write_phy_cck(dev, 0x11, 0xbb); /* Config CCK RX antenna. */
933 write_phy_ofdm(dev, 0x0d, 0x54); /* Config OFDM RX antenna. */
934
935 bAntennaSwitched = true;
936
937 break;
938
939 default:
940 printk("SetAntenna8185: unknown u1bAntennaIndex(%d)\n", u1bAntennaIndex);
941 break;
942 }
943
944 if (bAntennaSwitched)
945 priv->CurrAntennaIndex = u1bAntennaIndex;
946
947 return bAntennaSwitched;
948}
949 /* Toggle Antenna switch. */
950bool SwitchAntenna(struct net_device *dev)
951{
952 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
953
954 bool bResult;
955
956 if (priv->CurrAntennaIndex == 0) {
957 bResult = SetAntenna8185(dev, 1);
958 } else {
959 bResult = SetAntenna8185(dev, 0);
960 }
961
962 return bResult;
963}
964/*
965 * Engine of SW Antenna Diversity mechanism.
966 * Since the 8187 has no Tx-side information,
967 * this implementation depends only on Rx-side information.
968 */
969void SwAntennaDiversity(struct net_device *dev)
970{
971 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
972 bool bSwCheckSS = false;
973 if (bSwCheckSS) {
974 priv->AdTickCount++;
975
976 printk("(1) AdTickCount: %d, AdCheckPeriod: %d\n",
977 priv->AdTickCount, priv->AdCheckPeriod);
978 printk("(2) AdRxSignalStrength: %ld, AdRxSsThreshold: %ld\n",
979 priv->AdRxSignalStrength, priv->AdRxSsThreshold);
980 }
981
982 /* Case 1. No Link. */
983 if (priv->ieee80211->state != IEEE80211_LINKED) {
984 priv->bAdSwitchedChecking = false;
985		/* Switch the antenna here to avoid relying on a broken antenna before the link is established. 2006.04.18, by rcnjko. */
986 SwitchAntenna(dev);
987
988	/* Case 2. Linked but no packet received. */
989 } else if (priv->AdRxOkCnt == 0) {
990 priv->bAdSwitchedChecking = false;
991 SwitchAntenna(dev);
992
993 /* Case 3. Evaluate last antenna switch action and undo it if necessary. */
994 } else if (priv->bAdSwitchedChecking == true) {
995 priv->bAdSwitchedChecking = false;
996
997 /* Adjust Rx signal strength threshold. */
998 priv->AdRxSsThreshold = (priv->AdRxSignalStrength + priv->AdRxSsBeforeSwitched) / 2;
999
1000 priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
1001 priv->AdMaxRxSsThreshold : priv->AdRxSsThreshold;
1002 if (priv->AdRxSignalStrength < priv->AdRxSsBeforeSwitched) {
1003			/* Rx signal strength did not improve after we switched antennas. => Switch back. */
1004 /* Increase Antenna Diversity checking period due to bad decision. */
1005 priv->AdCheckPeriod *= 2;
1006 /* Increase Antenna Diversity checking period. */
1007 if (priv->AdCheckPeriod > priv->AdMaxCheckPeriod)
1008 priv->AdCheckPeriod = priv->AdMaxCheckPeriod;
1009
1010 /* Wrong decision => switch back. */
1011 SwitchAntenna(dev);
1012 } else {
1013 /* Rx Signal Strength is improved. */
1014
1015 /* Reset Antenna Diversity checking period to its min value. */
1016 priv->AdCheckPeriod = priv->AdMinCheckPeriod;
1017 }
1018
1019 }
1020 /* Case 4. Evaluate if we shall switch antenna now. */
1021	/* Because Table Speed is very fast in the TRC Dell Lab, we check it every time. */
1022 else {
1023 priv->AdTickCount = 0;
1024
1025 /*
1026		 * <Roger_Notes> We evaluate RxOk counts for each antenna first and then
1027		 * evaluate signal strength.
1028		 * The following operation can work around CCA being unreliable on both antennas
1029		 * when signal strength is extremely low or high.
1030 * 2008.01.30.
1031 */
1032
1033 /*
1034 * Evaluate RxOk count from each antenna if we shall switch default antenna now.
1035 */
1036 if ((priv->AdMainAntennaRxOkCnt < priv->AdAuxAntennaRxOkCnt)
1037 && (priv->CurrAntennaIndex == 0)) {
1038 /* We set Main antenna as default but RxOk count was less than Aux ones. */
1039
1040 /* Switch to Aux antenna. */
1041 SwitchAntenna(dev);
1042 priv->bHWAdSwitched = true;
1043 } else if ((priv->AdAuxAntennaRxOkCnt < priv->AdMainAntennaRxOkCnt)
1044 && (priv->CurrAntennaIndex == 1)) {
1045 /* We set Aux antenna as default but RxOk count was less than Main ones. */
1046
1047 /* Switch to Main antenna. */
1048 SwitchAntenna(dev);
1049 priv->bHWAdSwitched = true;
1050 } else {
1051 /* Default antenna is better. */
1052
1053 /* Still need to check current signal strength. */
1054 priv->bHWAdSwitched = false;
1055 }
1056 /*
1057 * <Roger_Notes> We evaluate Rx signal strength ONLY when default antenna
1058 * didn't change by HW evaluation.
1059 * 2008.02.27.
1060 *
1061		 * [TRC Dell Lab] SignalStrength is inaccurate. Isaiah 2008-03-05
1062		 * For example, throughput on the aux antenna is better than on the main antenna (about 10M vs. 2M),
1063		 * but AdRxSignalStrength is less than the main antenna's.
1064		 * Our guess is that the main antenna has lower throughput and gets many chances
1065		 * to receive more CCK packets (e.g. beacons), which have stronger SignalStrength.
1066 */
1067 if ((!priv->bHWAdSwitched) && (bSwCheckSS)) {
1068 /* Evaluate Rx signal strength if we shall switch antenna now. */
1069 if (priv->AdRxSignalStrength < priv->AdRxSsThreshold) {
1070 /* Rx signal strength is weak => Switch Antenna. */
1071 priv->AdRxSsBeforeSwitched = priv->AdRxSignalStrength;
1072 priv->bAdSwitchedChecking = true;
1073
1074 SwitchAntenna(dev);
1075 } else {
1076 /* Rx signal strength is OK. */
1077 priv->bAdSwitchedChecking = false;
1078 /* Increase Rx signal strength threshold if necessary. */
1079 if ((priv->AdRxSignalStrength > (priv->AdRxSsThreshold + 10)) && /* Signal is much stronger than current threshold */
1080				    priv->AdRxSsThreshold <= priv->AdMaxRxSsThreshold) {	/* Current threshold has not yet reached the upper limit. */
1081
1082 priv->AdRxSsThreshold = (priv->AdRxSsThreshold + priv->AdRxSignalStrength) / 2;
1083 priv->AdRxSsThreshold = (priv->AdRxSsThreshold > priv->AdMaxRxSsThreshold) ?
1084 priv->AdMaxRxSsThreshold : priv->AdRxSsThreshold;/* +by amy 080312 */
1085 }
1086
1087 /* Reduce Antenna Diversity checking period if possible. */
1088 if (priv->AdCheckPeriod > priv->AdMinCheckPeriod)
1089 priv->AdCheckPeriod /= 2;
1090 }
1091 }
1092 }
1093 /* Reset antenna diversity Rx related statistics. */
1094 priv->AdRxOkCnt = 0;
1095 priv->AdMainAntennaRxOkCnt = 0;
1096 priv->AdAuxAntennaRxOkCnt = 0;
1097}
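/*
 * The evaluation branch above (case 3) boils down to: keep the new antenna
 * only if the smoothed RSSI improved, otherwise switch back and probe less
 * often. A condensed, driver-independent sketch of that decision (all names
 * are hypothetical):
 */
static void ad_evaluate_switch(long ss_now, long ss_before,
			       unsigned int *check_period,
			       unsigned int min_period,
			       unsigned int max_period,
			       bool *switch_back)
{
	*switch_back = ss_now < ss_before;
	if (*switch_back) {
		/* Bad decision: probe half as often, capped at max_period. */
		*check_period *= 2;
		if (*check_period > max_period)
			*check_period = max_period;
	} else {
		/* Good decision: return to the fastest probing cadence. */
		*check_period = min_period;
	}
}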
1098
1099 /* Return TRUE if we shall perform Tx Power Tracking Mechanism, FALSE otherwise. */
1100bool CheckTxPwrTracking(struct net_device *dev)
1101{
1102 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1103
1104 if (!priv->bTxPowerTrack)
1105 return false;
1106
1107 /* if 87SE is in High Power , don't do Tx Power Tracking. asked by SD3 ED. 2008-08-08 Isaiah */
1108 if (priv->bToUpdateTxPwr)
1109 return false;
1110
1111 return true;
1112}
1113
1114
1115 /* Timer callback function of SW Antenna Diversity. */
1116void SwAntennaDiversityTimerCallback(struct net_device *dev)
1117{
1118 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1119 enum rt_rf_power_state rtState;
1120
1121 /* We do NOT need to switch antenna while RF is off. */
1122 rtState = priv->eRFPowerState;
1123 do {
1124 if (rtState == RF_OFF) {
1125 break;
1126 } else if (rtState == RF_SLEEP) {
1127 /* Don't access BB/RF under Disable PLL situation. */
1128 break;
1129 }
1130 SwAntennaDiversity(dev);
1131
1132 } while (false);
1133
1134 if (priv->up) {
1135 priv->SwAntennaDiversityTimer.expires = jiffies + MSECS(ANTENNA_DIVERSITY_TIMER_PERIOD);
1136 add_timer(&priv->SwAntennaDiversityTimer);
1137 }
1138}
1139
diff --git a/drivers/staging/rtl8187se/r8180_dm.h b/drivers/staging/rtl8187se/r8180_dm.h
deleted file mode 100644
index cb4046f346ef..000000000000
--- a/drivers/staging/rtl8187se/r8180_dm.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef R8180_DM_H
2#define R8180_DM_H
3
4#include "r8180.h"
5/* #include "r8180_hw.h" */
6/* #include "r8180_93cx6.h" */
7void SwAntennaDiversityRxOk8185(struct net_device *dev, u8 SignalStrength);
8bool SetAntenna8185(struct net_device *dev, u8 u1bAntennaIndex);
9bool SwitchAntenna(struct net_device *dev);
10void SwAntennaDiversity(struct net_device *dev);
11void SwAntennaDiversityTimerCallback(struct net_device *dev);
12bool CheckDig(struct net_device *dev);
13bool CheckHighPower(struct net_device *dev);
14void rtl8180_hw_dig_wq(struct work_struct *work);
15void rtl8180_tx_pw_wq(struct work_struct *work);
16void rtl8180_rate_adapter(struct work_struct *work);
17void TxPwrTracking87SE(struct net_device *dev);
18bool CheckTxPwrTracking(struct net_device *dev);
20void timer_rate_adaptive(unsigned long data);
21
22
23#endif
diff --git a/drivers/staging/rtl8187se/r8180_hw.h b/drivers/staging/rtl8187se/r8180_hw.h
deleted file mode 100644
index e59d74f8ecfc..000000000000
--- a/drivers/staging/rtl8187se/r8180_hw.h
+++ /dev/null
@@ -1,588 +0,0 @@
1/*
2 This is part of rtl8180 OpenSource driver.
3 Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
4 Released under the terms of GPL (General Public Licence)
5
6 Parts of this driver are based on the GPL part of the
7 official Realtek driver.
8 Parts of this driver are based on the rtl8180 driver skeleton
9 from Patric Schenke & Andres Salomon.
10 Parts of this driver are based on the Intel Pro Wireless
11 2100 GPL driver.
12
13 We want to thank the authors of those projects
14 and the Ndiswrapper project authors.
15*/
16
17/* Mariusz Matuszek added full registers definition with Realtek's name */
18
19/* this file contains register definitions for the rtl8180 MAC controller */
20#ifndef R8180_HW
21#define R8180_HW
22
23
24#define BIT0 0x00000001
25#define BIT1 0x00000002
26#define BIT2 0x00000004
27#define BIT3 0x00000008
28#define BIT4 0x00000010
29#define BIT5 0x00000020
30#define BIT6 0x00000040
31#define BIT7 0x00000080
32#define BIT9 0x00000200
33#define BIT11 0x00000800
34#define BIT13 0x00002000
35#define BIT15 0x00008000
36#define BIT20 0x00100000
37#define BIT21 0x00200000
38#define BIT22 0x00400000
39#define BIT23 0x00800000
40#define BIT24 0x01000000
41#define BIT25 0x02000000
42#define BIT26 0x04000000
43#define BIT27 0x08000000
44#define BIT28 0x10000000
45#define BIT29 0x20000000
46#define BIT30 0x40000000
47#define BIT31 0x80000000
48
49#define MAX_SLEEP_TIME (10000)
50#define MIN_SLEEP_TIME (50)
51
52#define BB_HOST_BANG_EN (1<<2)
53#define BB_HOST_BANG_CLK (1<<1)
54
55#define MAC0 0
56#define MAC4 4
57
58#define CMD 0x37
59#define CMD_RST_SHIFT 4
60#define CMD_RX_ENABLE_SHIFT 3
61#define CMD_TX_ENABLE_SHIFT 2
62
63#define EPROM_CMD 0x50
64#define EPROM_CMD_RESERVED_MASK ((1<<5)|(1<<4))
65#define EPROM_CMD_OPERATING_MODE_SHIFT 6
66#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
67#define EPROM_CMD_CONFIG 0x3
68#define EPROM_CMD_NORMAL 0
69#define EPROM_CMD_LOAD 1
70#define EPROM_CMD_PROGRAM 2
71#define EPROM_CS_SHIFT 3
72#define EPROM_CK_SHIFT 2
73#define EPROM_W_SHIFT 1
74#define EPROM_R_SHIFT 0
75#define CONFIG2_DMA_POLLING_MODE_SHIFT 3
76
77#define INTA_TXOVERFLOW (1<<15)
78#define INTA_TIMEOUT (1<<14)
79#define INTA_HIPRIORITYDESCERR (1<<9)
80#define INTA_HIPRIORITYDESCOK (1<<8)
81#define INTA_NORMPRIORITYDESCERR (1<<7)
82#define INTA_NORMPRIORITYDESCOK (1<<6)
83#define INTA_RXOVERFLOW (1<<5)
84#define INTA_RXDESCERR (1<<4)
85#define INTA_LOWPRIORITYDESCERR (1<<3)
86#define INTA_LOWPRIORITYDESCOK (1<<2)
87#define INTA_RXOK (1)
88#define INTA_MASK 0x3c
89
90#define RXRING_ADDR 0xe4 /* page 0 */
91#define PGSELECT 0x5e
92#define PGSELECT_PG_SHIFT 0
93#define RX_CONF 0x44
94#define MAC_FILTER_MASK ((1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<5) | \
95(1<<12) | (1<<18) | (1<<19) | (1<<20) | (1<<21) | (1<<22) | (1<<23))
96#define RX_CHECK_BSSID_SHIFT 23
97#define ACCEPT_PWR_FRAME_SHIFT 22
98#define ACCEPT_MNG_FRAME_SHIFT 20
99#define ACCEPT_CTL_FRAME_SHIFT 19
100#define ACCEPT_DATA_FRAME_SHIFT 18
101#define ACCEPT_ICVERR_FRAME_SHIFT 12
102#define ACCEPT_CRCERR_FRAME_SHIFT 5
103#define ACCEPT_BCAST_FRAME_SHIFT 3
104#define ACCEPT_MCAST_FRAME_SHIFT 2
105#define ACCEPT_ALLMAC_FRAME_SHIFT 0
106#define ACCEPT_NICMAC_FRAME_SHIFT 1
107
108#define RX_FIFO_THRESHOLD_MASK ((1<<13) | (1<<14) | (1<<15))
109#define RX_FIFO_THRESHOLD_SHIFT 13
110#define RX_FIFO_THRESHOLD_NONE 7
111#define RX_AUTORESETPHY_SHIFT 28
112
113#define TX_CONF 0x40
114#define TX_CONF_HEADER_AUTOICREMENT_SHIFT 30
115#define TX_LOOPBACK_SHIFT 17
116#define TX_LOOPBACK_NONE 0
117#define TX_LOOPBACK_CONTINUE 3
118#define TX_LOOPBACK_MASK ((1<<17)|(1<<18))
119#define TX_DPRETRY_SHIFT 0
120#define R8180_MAX_RETRY 255
121#define TX_RTSRETRY_SHIFT 8
122#define TX_NOICV_SHIFT 19
123#define TX_NOCRC_SHIFT 16
124#define TX_DMA_POLLING 0xd9
125#define TX_DMA_POLLING_BEACON_SHIFT 7
126#define TX_DMA_POLLING_HIPRIORITY_SHIFT 6
127#define TX_DMA_POLLING_NORMPRIORITY_SHIFT 5
128#define TX_DMA_POLLING_LOWPRIORITY_SHIFT 4
129#define TX_MANAGEPRIORITY_RING_ADDR 0x0C
130#define TX_BKPRIORITY_RING_ADDR 0x10
131#define TX_BEPRIORITY_RING_ADDR 0x14
132#define TX_VIPRIORITY_RING_ADDR 0x20
133#define TX_VOPRIORITY_RING_ADDR 0x24
134#define TX_HIGHPRIORITY_RING_ADDR 0x28
135#define MAX_RX_DMA_MASK ((1<<8) | (1<<9) | (1<<10))
136#define MAX_RX_DMA_2048 7
137#define MAX_RX_DMA_1024 6
138#define MAX_RX_DMA_SHIFT 10
139#define INT_TIMEOUT 0x48
140#define CONFIG3_CLKRUN_SHIFT 2
141#define CONFIG3_ANAPARAM_W_SHIFT 6
142#define ANAPARAM 0x54
143#define BEACON_INTERVAL 0x70
144#define BEACON_INTERVAL_MASK ((1<<0)|(1<<1)|(1<<2)|(1<<3)|(1<<4)|(1<<5)| \
145(1<<6)|(1<<7)|(1<<8)|(1<<9))
146#define ATIM_MASK ((1<<0)|(1<<1)|(1<<2)|(1<<3)|(1<<4)|(1<<5)|(1<<6)|(1<<7)| \
147(1<<8)|(1<<9))
148#define ATIM 0x72
149#define EPROM_CS_SHIFT 3
150#define EPROM_CK_SHIFT 2
151#define PHY_ADR 0x7c
152#define SECURITY	0x5f	/* 1209: something is wrong here */
153#define SECURITY_WEP_TX_ENABLE_SHIFT 1
154#define SECURITY_WEP_RX_ENABLE_SHIFT 0
155#define SECURITY_ENCRYP_104 1
156#define SECURITY_ENCRYP_SHIFT 4
157#define SECURITY_ENCRYP_MASK ((1<<4)|(1<<5))
158#define KEY0	0x90	/* 1209: something is wrong here */
159#define CONFIG2_ANTENNA_SHIFT 6
160#define TX_BEACON_RING_ADDR 0x4c
161#define CONFIG0_WEP40_SHIFT 7
162#define CONFIG0_WEP104_SHIFT 6
163#define AGCRESET_SHIFT 5
164
165
166
167/*
168 * Operational registers offsets in PCI (I/O) space.
169 * RealTek names are used.
170 */
171
172#define TSFTR 0x0018
173
174#define TLPDA 0x0020
175
176#define BSSID 0x002E
177
178#define CR 0x0037
179
180#define RF_SW_CONFIG 0x8 /* store data which is transmitted to RF for driver */
181#define RF_SW_CFG_SI BIT1
182#define EIFS 0x2D /* Extended InterFrame Space Timer, in unit of 4 us. */
183
184#define BRSR 0x34 /* Basic rate set */
185
186#define IMR 0x006C
187#define ISR 0x003C
188
189#define TCR 0x0040
190
191#define RCR 0x0044
192
193#define TimerInt 0x0048
194
195#define CR9346 0x0050
196
197#define CONFIG0 0x0051
198#define CONFIG2 0x0053
199
200#define MSR 0x0058
201
202#define CONFIG3 0x0059
203#define CONFIG4 0x005A
204 /* SD3 szuyitasi: Mac0x57= CC -> B0 Mac0x60= D1 -> C6 */
205 /* Mac0x60 = 0x000004C6 power save parameters */
206 #define ANAPARM_ASIC_ON 0xB0054D00
207 #define ANAPARM2_ASIC_ON 0x000004C6
208
209 #define ANAPARM_ON ANAPARM_ASIC_ON
210 #define ANAPARM2_ON ANAPARM2_ASIC_ON
211
212#define TESTR 0x005B
213
214#define PSR 0x005E
215
216#define BcnItv 0x0070
217
218#define AtimWnd 0x0072
219
220#define BintrItv 0x0074
221
222#define PhyAddr 0x007C
223#define PhyDataR 0x007E
224
225/* following are for rtl8185 */
226#define RFPinsOutput 0x80
227#define RFPinsEnable 0x82
228#define RF_TIMING 0x8c
229#define RFPinsSelect 0x84
230#define ANAPARAM2 0x60
231#define RF_PARA 0x88
232#define RFPinsInput 0x86
233#define GP_ENABLE 0x90
234#define GPIO 0x91
235#define SW_CONTROL_GPIO 0x400
236#define TX_ANTENNA 0x9f
237#define TX_GAIN_OFDM 0x9e
238#define TX_GAIN_CCK 0x9d
239#define WPA_CONFIG 0xb0
240#define TX_AGC_CTL 0x9c
241#define TX_AGC_CTL_PERPACKET_GAIN_SHIFT 0
242#define TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT 1
243#define TX_AGC_CTL_FEEDBACK_ANT 2
244#define RESP_RATE 0x34
245#define SIFS 0xb4
246#define DIFS 0xb5
247
248#define SLOT 0xb6
249#define CW_CONF 0xbc
250#define CW_CONF_PERPACKET_RETRY_SHIFT 1
251#define CW_CONF_PERPACKET_CW_SHIFT 0
252#define CW_VAL 0xbd
253#define MAX_RESP_RATE_SHIFT 4
254#define MIN_RESP_RATE_SHIFT 0
255#define RATE_FALLBACK 0xbe
256
257#define CONFIG5 0x00D8
258
259#define PHYPR 0xDA /* 0xDA - 0x0B PHY Parameter Register. */
260
261#define FEMR 0x1D4 /* Function Event Mask register */
262
263#define FFER 0x00FC
264#define FFER_END 0x00FF
265
266
267
268/*
269 * Bitmasks for specific register functions.
270 * Names are derived from the register name and function name.
271 *
272 * <REGISTER>_<FUNCTION>[<bit>]
273 *
274 * this leads to some awkward names...
275 */
276
277#define BRSR_BPLCP ((1 << 8))
278#define BRSR_MBR ((1 << 1)|(1 << 0))
279#define BRSR_MBR_8185 ((1 << 11)|(1 << 10)|(1 << 9)|(1 << 8)|(1 << 7)|(1 << 6)|(1 << 5)|(1 << 4)|(1 << 3)|(1 << 2)|(1 << 1)|(1 << 0))
280#define BRSR_MBR0 ((1 << 0))
281#define BRSR_MBR1 ((1 << 1))
282
283#define CR_RST ((1 << 4))
284#define CR_RE ((1 << 3))
285#define CR_TE ((1 << 2))
286#define CR_MulRW ((1 << 0))
287
288#define IMR_Dot11hInt ((1 << 25)) /*802.11h Measurement Interrupt */
289#define IMR_BcnDmaInt	((1 << 24))	/* Beacon DMA Interrupt */ /* What is the difference between BcnDmaInt and BcnInt? */
290#define IMR_WakeInt ((1 << 23)) /*Wake Up Interrupt */
291#define IMR_TXFOVW ((1 << 22)) /*Tx FIFO Overflow Interrupt */
292#define IMR_TimeOut1 ((1 << 21)) /*Time Out Interrupt 1 */
293#define IMR_BcnInt ((1 << 20)) /*Beacon Time out Interrupt */
294#define IMR_ATIMInt ((1 << 19)) /*ATIM Time Out Interrupt */
295#define IMR_TBDER ((1 << 18)) /*Tx Beacon Descriptor Error Interrupt */
296#define IMR_TBDOK ((1 << 17)) /*Tx Beacon Descriptor OK Interrupt */
297#define IMR_THPDER ((1 << 16)) /*Tx High Priority Descriptor Error Interrupt */
298#define IMR_THPDOK ((1 << 15)) /*Tx High Priority Descriptor OK Interrupt */
299#define IMR_TVODER ((1 << 14)) /*Tx AC_VO Descriptor Error Interrupt */
300#define IMR_TVODOK ((1 << 13)) /*Tx AC_VO Descriptor OK Interrupt */
301#define IMR_FOVW ((1 << 12)) /*Rx FIFO Overflow Interrupt */
302#define IMR_RDU ((1 << 11)) /*Rx Descriptor Unavailable Interrupt */
303#define IMR_TVIDER ((1 << 10)) /*Tx AC_VI Descriptor Error Interrupt */
304#define IMR_TVIDOK ((1 << 9)) /*Tx AC_VI Descriptor OK Interrupt */
305#define IMR_RER ((1 << 8)) /*Rx Error Interrupt */
306#define IMR_ROK ((1 << 7)) /*Receive OK Interrupt */
307#define IMR_TBEDER ((1 << 6)) /*Tx AC_BE Descriptor Error Interrupt */
308#define IMR_TBEDOK ((1 << 5)) /*Tx AC_BE Descriptor OK Interrupt */
309#define IMR_TBKDER ((1 << 4)) /*Tx AC_BK Descriptor Error Interrupt */
310#define IMR_TBKDOK ((1 << 3)) /*Tx AC_BK Descriptor OK Interrupt */
311#define IMR_RQoSOK ((1 << 2)) /*Rx QoS OK Interrupt */
312#define IMR_TimeOut2 ((1 << 1)) /*Time Out Interrupt 2 */
313#define IMR_TimeOut3 ((1 << 0)) /*Time Out Interrupt 3 */
314#define IMR_TMGDOK ((1 << 30))
315#define ISR_Dot11hInt ((1 << 25)) /*802.11h Measurement Interrupt */
316#define ISR_BcnDmaInt	((1 << 24))	/* Beacon DMA Interrupt */ /* What is the difference between BcnDmaInt and BcnInt? */
317#define ISR_WakeInt ((1 << 23)) /*Wake Up Interrupt */
318#define ISR_TXFOVW ((1 << 22)) /*Tx FIFO Overflow Interrupt */
319#define ISR_TimeOut1 ((1 << 21)) /*Time Out Interrupt 1 */
320#define ISR_BcnInt ((1 << 20)) /*Beacon Time out Interrupt */
321#define ISR_ATIMInt ((1 << 19)) /*ATIM Time Out Interrupt */
322#define ISR_TBDER ((1 << 18)) /*Tx Beacon Descriptor Error Interrupt */
323#define ISR_TBDOK ((1 << 17)) /*Tx Beacon Descriptor OK Interrupt */
324#define ISR_THPDER ((1 << 16)) /*Tx High Priority Descriptor Error Interrupt */
325#define ISR_THPDOK ((1 << 15)) /*Tx High Priority Descriptor OK Interrupt */
326#define ISR_TVODER ((1 << 14)) /*Tx AC_VO Descriptor Error Interrupt */
327#define ISR_TVODOK ((1 << 13)) /*Tx AC_VO Descriptor OK Interrupt */
328#define ISR_FOVW ((1 << 12)) /*Rx FIFO Overflow Interrupt */
329#define ISR_RDU ((1 << 11)) /*Rx Descriptor Unavailable Interrupt */
330#define ISR_TVIDER ((1 << 10)) /*Tx AC_VI Descriptor Error Interrupt */
331#define ISR_TVIDOK ((1 << 9)) /*Tx AC_VI Descriptor OK Interrupt */
332#define ISR_RER ((1 << 8)) /*Rx Error Interrupt */
333#define ISR_ROK ((1 << 7)) /*Receive OK Interrupt */
334#define ISR_TBEDER ((1 << 6)) /*Tx AC_BE Descriptor Error Interrupt */
335#define ISR_TBEDOK ((1 << 5)) /*Tx AC_BE Descriptor OK Interrupt */
336#define ISR_TBKDER ((1 << 4)) /*Tx AC_BK Descriptor Error Interrupt */
337#define ISR_TBKDOK ((1 << 3)) /*Tx AC_BK Descriptor OK Interrupt */
338#define ISR_RQoSOK ((1 << 2)) /*Rx QoS OK Interrupt */
339#define ISR_TimeOut2 ((1 << 1)) /*Time Out Interrupt 2 */
340#define ISR_TimeOut3 ((1 << 0)) /*Time Out Interrupt 3 */
341
342/* these definitions are used temporarily for Tx/Rx tests */
343#define ISR_TLPDER ISR_TVIDER
344#define ISR_TLPDOK ISR_TVIDOK
345#define ISR_TNPDER ISR_TVODER
346#define ISR_TNPDOK ISR_TVODOK
347#define ISR_TimeOut ISR_TimeOut1
348#define ISR_RXFOVW ISR_FOVW
349
350
351#define HW_VERID_R8180_F 3
352#define HW_VERID_R8180_ABCD 2
353#define HW_VERID_R8185_ABC 4
354#define HW_VERID_R8185_D 5
355#define HW_VERID_R8185B_B 6
356
357#define TCR_CWMIN ((1 << 31))
358#define TCR_SWSEQ ((1 << 30))
359#define TCR_HWVERID_MASK ((1 << 27)|(1 << 26)|(1 << 25))
360#define TCR_HWVERID_SHIFT 25
361#define TCR_SAT ((1 << 24))
362#define TCR_PLCP_LEN TCR_SAT /* rtl8180 */
363#define TCR_MXDMA_MASK ((1 << 23)|(1 << 22)|(1 << 21))
364#define TCR_MXDMA_1024 6
365#define TCR_MXDMA_2048 7
366#define TCR_MXDMA_SHIFT 21
367#define TCR_DISCW ((1 << 20))
368#define TCR_ICV ((1 << 19))
369#define TCR_LBK ((1 << 18)|(1 << 17))
370#define TCR_LBK1 ((1 << 18))
371#define TCR_LBK0 ((1 << 17))
372#define TCR_CRC ((1 << 16))
373#define TCR_DPRETRY_MASK ((1 << 15)|(1 << 14)|(1 << 13)|(1 << 12)|(1 << 11)|(1 << 10)|(1 << 9)|(1 << 8))
374#define TCR_RTSRETRY_MASK ((1 << 0)|(1 << 1)|(1 << 2)|(1 << 3)|(1 << 4)|(1 << 5)|(1 << 6)|(1 << 7))
375#define TCR_PROBE_NOTIMESTAMP_SHIFT 29 /* rtl8185 */
376
377#define RCR_ONLYERLPKT ((1 << 31))
378#define RCR_CS_SHIFT 29
379#define RCR_CS_MASK ((1 << 30) | (1 << 29))
380#define RCR_ENMARP ((1 << 28))
381#define RCR_CBSSID ((1 << 23))
382#define RCR_APWRMGT ((1 << 22))
383#define RCR_ADD3 ((1 << 21))
384#define RCR_AMF ((1 << 20))
385#define RCR_ACF ((1 << 19))
386#define RCR_ADF ((1 << 18))
387#define RCR_RXFTH ((1 << 15)|(1 << 14)|(1 << 13))
388#define RCR_RXFTH2 ((1 << 15))
389#define RCR_RXFTH1 ((1 << 14))
390#define RCR_RXFTH0 ((1 << 13))
391#define RCR_AICV ((1 << 12))
392#define RCR_MXDMA ((1 << 10)|(1 << 9)|(1 << 8))
393#define RCR_MXDMA2 ((1 << 10))
394#define RCR_MXDMA1 ((1 << 9))
395#define RCR_MXDMA0 ((1 << 8))
396#define RCR_9356SEL ((1 << 6))
397#define RCR_ACRC32 ((1 << 5))
398#define RCR_AB ((1 << 3))
399#define RCR_AM ((1 << 2))
400#define RCR_APM ((1 << 1))
401#define RCR_AAP ((1 << 0))
402
403#define CR9346_EEM ((1 << 7)|(1 << 6))
404#define CR9346_EEM1 ((1 << 7))
405#define CR9346_EEM0 ((1 << 6))
406#define CR9346_EECS ((1 << 3))
407#define CR9346_EESK ((1 << 2))
408#define CR9346_EED1 ((1 << 1))
409#define CR9346_EED0 ((1 << 0))
410
411#define CONFIG3_PARM_En ((1 << 6))
412#define CONFIG3_FuncRegEn ((1 << 1))
413
414#define CONFIG4_PWRMGT ((1 << 5))
415
416#define MSR_LINK_MASK ((1 << 2)|(1 << 3))
417#define MSR_LINK_MANAGED 2
418#define MSR_LINK_NONE 0
419#define MSR_LINK_SHIFT 2
420#define MSR_LINK_ADHOC 1
421#define MSR_LINK_MASTER 3
422
423#define BcnItv_BcnItv (0x01FF)
424
425#define AtimWnd_AtimWnd (0x01FF)
426
427#define BintrItv_BintrItv (0x01FF)
428
429#define FEMR_INTR ((1 << 15))
430#define FEMR_WKUP ((1 << 14))
431#define FEMR_GWAKE ((1 << 4))
432
433#define FFER_INTR ((1 << 15))
434#define FFER_GWAKE ((1 << 4))
435
436/* Three wire mode. */
437#define SW_THREE_WIRE 0
438#define HW_THREE_WIRE 2
439/* RTL8187S by amy */
440#define HW_THREE_WIRE_PI 5
441#define HW_THREE_WIRE_SI 6
442/* by amy */
443#define TCR_LRL_OFFSET 0
444#define TCR_SRL_OFFSET 8
445#define TCR_MXDMA_OFFSET 21
446#define TCR_DISReqQsize_OFFSET 28
447#define TCR_DurProcMode_OFFSET 30
448
449#define RCR_MXDMA_OFFSET 8
450#define RCR_FIFO_OFFSET 13
451
452#define AckTimeOutReg 0x79 /* ACK timeout register, in unit of 4 us. */
453
454#define RFTiming 0x8C
455
456#define TPPollStop 0x93
457
458#define TXAGC_CTL	0x9C	/* <RJ_TODO_8185B> TX_AGC_CONTROL (0x9C seems to be removed in 8185B, see p37). */
459#define CCK_TXAGC 0x9D
460#define OFDM_TXAGC 0x9E
461#define ANTSEL 0x9F
462
463#define ACM_CONTROL	0x00BF	/* ACM Control Register */
464
465#define IntMig 0xE2 /* Interrupt Migration (0xE2 ~ 0xE3) */
466
467#define TID_AC_MAP 0xE8 /* TID to AC Mapping Register */
468
469#define ANAPARAM3 0xEE /* <RJ_TODO_8185B> How to use it? */
470
471#define AC_VO_PARAM 0xF0 /* AC_VO Parameters Record */
472#define AC_VI_PARAM 0xF4 /* AC_VI Parameters Record */
473#define AC_BE_PARAM 0xF8 /* AC_BE Parameters Record */
474#define AC_BK_PARAM 0xFC /* AC_BK Parameters Record */
475
476#define GPIOCtrl 0x16B /*GPIO Control Register. */
477#define ARFR 0x1E0 /* Auto Rate Fallback Register (0x1e0 ~ 0x1e2) */
478
479#define RFSW_CTRL 0x272 /* 0x272-0x273. */
480#define SW_3W_DB0 0x274 /* Software 3-wire data buffer bit 31~0. */
481#define SW_3W_DB1 0x278 /* Software 3-wire data buffer bit 63~32. */
482#define SW_3W_CMD0 0x27C /* Software 3-wire Control/Status Register. */
483#define SW_3W_CMD1 0x27D /* Software 3-wire Control/Status Register. */
484
485#define PI_DATA_READ 0X360 /* 0x360 - 0x361 Parallel Interface Data Register. */
486#define SI_DATA_READ 0x362 /* 0x362 - 0x363 Serial Interface Data Register. */
487
488/*
489----------------------------------------------------------------------------
490 8185B TPPollStop bits (offset 0x93, 1 byte)
491----------------------------------------------------------------------------
492*/
493#define TPPOLLSTOP_BQ (0x01 << 7)
494#define TPPOLLSTOP_AC_VIQ (0x01 << 4)
495
496#define MSR_LINK_ENEDCA (1<<4)
497
498/*
499----------------------------------------------------------------------------
500 8187B AC_XX_PARAM bits
501----------------------------------------------------------------------------
502*/
503#define AC_PARAM_TXOP_LIMIT_OFFSET 16
504#define AC_PARAM_ECW_MAX_OFFSET 12
505#define AC_PARAM_ECW_MIN_OFFSET 8
506#define AC_PARAM_AIFS_OFFSET 0
507
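/*
 * Illustrative composition of one AC_XX_PARAM value from the offsets above
 * (the field values here are made-up examples, not chip defaults):
 *
 *	u32 ac_be = (47 << AC_PARAM_TXOP_LIMIT_OFFSET) |
 *		    (10 << AC_PARAM_ECW_MAX_OFFSET) |
 *		    (4  << AC_PARAM_ECW_MIN_OFFSET) |
 *		    (2  << AC_PARAM_AIFS_OFFSET);
 *
 * which would then be written to the matching AC_BE_PARAM register.
 */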
508/*
509----------------------------------------------------------------------------
510 8187B ACM_CONTROL bits (Offset 0xBF, 1 Byte)
511----------------------------------------------------------------------------
512*/
513#define VOQ_ACM_EN (0x01 << 7) /*BIT7 */
514#define VIQ_ACM_EN (0x01 << 6) /*BIT6 */
515#define BEQ_ACM_EN (0x01 << 5) /*BIT5 */
516#define ACM_HW_EN (0x01 << 4) /*BIT4 */
517#define VOQ_ACM_CTL (0x01 << 2) /*BIT2 */ /* Set to 1 when AC_VO used time reaches or exceeds the admitted time */
518#define VIQ_ACM_CTL (0x01 << 1) /*BIT1 */ /* Set to 1 when AC_VI used time reaches or exceeds the admitted time */
519#define BEQ_ACM_CTL (0x01 << 0) /*BIT0 */ /* Set to 1 when AC_BE used time reaches or exceeds the admitted time */
520
521
522/*
523----------------------------------------------------------------------------
524 8185B SW_3W_CMD bits (Offset 0x27C-0x27D, 16bit)
525----------------------------------------------------------------------------
526*/
527#define SW_3W_CMD0_HOLD ((1 << 7))
528#define SW_3W_CMD1_RE ((1 << 0)) /* BIT8 */
529#define SW_3W_CMD1_WE ((1 << 1)) /* BIT9 */
530#define SW_3W_CMD1_DONE ((1 << 2)) /* BIT10 */
531
532#define BB_HOST_BANG_RW (1 << 3)
533
534/*
535----------------------------------------------------------------------------
536 8185B RATE_FALLBACK_CTL bits (Offset 0xBE, 8bit)
537----------------------------------------------------------------------------
538*/
539#define RATE_FALLBACK_CTL_ENABLE ((1 << 7))
540#define RATE_FALLBACK_CTL_ENABLE_RTSCTS ((1 << 6))
541/* Auto rate fallback per 2^n retry. */
542#define RATE_FALLBACK_CTL_AUTO_STEP0 0x00
543#define RATE_FALLBACK_CTL_AUTO_STEP1 0x01
544#define RATE_FALLBACK_CTL_AUTO_STEP2 0x02
545#define RATE_FALLBACK_CTL_AUTO_STEP3 0x03
546
547
548#define RTL8225z2_ANAPARAM_OFF 0x55480658
549#define RTL8225z2_ANAPARAM2_OFF 0x72003f70
550/* by amy for power save */
551#define RF_CHANGE_BY_HW BIT30
552#define RF_CHANGE_BY_PS BIT29
553#define RF_CHANGE_BY_IPS BIT28
554/* by amy for power save */
555/* by amy for antenna */
556#define EEPROM_SW_REVD_OFFSET 0x3f
557
558/* BIT[8-9] is for SW Antenna Diversity.
559 * Only the value EEPROM_SW_AD_ENABLE means enable, other values are disable.
560 */
561#define EEPROM_SW_AD_MASK 0x0300
562#define EEPROM_SW_AD_ENABLE 0x0100
563
564/* BIT[10-11] determine if Antenna 1 is the Default Antenna.
565 * Only the value EEPROM_DEF_ANT_1 means TRUE, other values are FALSE.
566 */
567#define EEPROM_DEF_ANT_MASK 0x0C00
568#define EEPROM_DEF_ANT_1 0x0400
569/*by amy for antenna */
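Both EEPROM bit-fields above are decoded from the 16-bit word at EEPROM_SW_REVD_OFFSET by masking and comparing against the single enabling pattern. A minimal standalone sketch of that decode, assuming a hypothetical eeprom_word already read from the EEPROM:

#include <stdbool.h>
#include <stdio.h>

#define EEPROM_SW_AD_MASK   0x0300
#define EEPROM_SW_AD_ENABLE 0x0100
#define EEPROM_DEF_ANT_MASK 0x0C00
#define EEPROM_DEF_ANT_1    0x0400

int main(void)
{
	/* eeprom_word stands in for the value the driver reads at
	 * EEPROM_SW_REVD_OFFSET; 0x0500 is just an example. */
	unsigned short eeprom_word = 0x0500;

	/* Diversity is on only for the exact pattern 01b in bits 8-9. */
	bool sw_ant_div = (eeprom_word & EEPROM_SW_AD_MASK) == EEPROM_SW_AD_ENABLE;
	/* Antenna 1 is the default only for the exact pattern 01b in bits 10-11. */
	bool def_ant_1 = (eeprom_word & EEPROM_DEF_ANT_MASK) == EEPROM_DEF_ANT_1;

	printf("sw antenna diversity: %d, default antenna 1: %d\n",
	       sw_ant_div, def_ant_1);
	return 0;
}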
570/* {by amy 080312 */
571/* 0x7C, 0x7D Crystal calibration and Tx Power tracking mechanism. Added by Roger. 2007.12.10. */
572#define EEPROM_RSV 0x7C
573#define EEPROM_XTAL_CAL_XOUT_MASK 0x0F /* 0x7C[3:0], Crystal calibration for Xout. */
574#define EEPROM_XTAL_CAL_XIN_MASK 0xF0 /* 0x7C[7:4], Crystal calibration for Xin. */
575#define EEPROM_THERMAL_METER_MASK 0x0F00 /* 0x7D[3:0], Thermal meter reference level. */
576#define EEPROM_XTAL_CAL_ENABLE 0x1000 /* 0x7D[4], Crystal calibration enabled/disabled BIT. */
577#define EEPROM_THERMAL_METER_ENABLE 0x2000 /* 0x7D[5], Thermal meter enabled/disabled BIT. */
578#define EN_LPF_CAL 0x238 /* Enable LPF Calibration. */
579#define PWR_METER_EN BIT1
580/* <RJ_TODO_8185B> where are false alarm counters in 8185B? */
581#define CCK_FALSE_ALARM 0xD0
582/* by amy 080312} */
583
584/* YJ,add for Country IE, 080630 */
585#define EEPROM_COUNTRY_CODE 0x2E
586/* YJ,add,080630,end */
587
588#endif
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225.h b/drivers/staging/rtl8187se/r8180_rtl8225.h
deleted file mode 100644
index 7df73927b3cc..000000000000
--- a/drivers/staging/rtl8187se/r8180_rtl8225.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * This is part of the rtl8180-sa2400 driver released under the GPL (See file
3 * COPYING for details).
4 *
5 * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
6 *
7 * This file contains programming code for the rtl8225 radio frontend.
8 *
9 * *Many* thanks to Realtek Corp. for their great support!
10 */
11
12#include "r8180.h"
13
14#define RTL8225_ANAPARAM_ON 0xa0000b59
15#define RTL8225_ANAPARAM_OFF 0xa00beb59
16#define RTL8225_ANAPARAM2_OFF 0x840dec11
17#define RTL8225_ANAPARAM2_ON 0x860dec11
18#define RTL8225_ANAPARAM_SLEEP 0xa00bab59
19#define RTL8225_ANAPARAM2_SLEEP 0x840dec11
20
21void rtl8225z2_rf_init(struct net_device *dev);
22void rtl8225z2_rf_set_chan(struct net_device *dev, short ch);
23void rtl8225z2_rf_close(struct net_device *dev);
24
25void RF_WriteReg(struct net_device *dev, u8 offset, u16 data);
26u16 RF_ReadReg(struct net_device *dev, u8 offset);
27
28void rtl8180_set_mode(struct net_device *dev, int mode);
30bool SetZebraRFPowerState8185(struct net_device *dev,
31 enum rt_rf_power_state eRFPowerState);
32void rtl8225z4_rf_sleep(struct net_device *dev);
33void rtl8225z4_rf_wakeup(struct net_device *dev);
34
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225z2.c b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
deleted file mode 100644
index 47104fa05c55..000000000000
--- a/drivers/staging/rtl8187se/r8180_rtl8225z2.c
+++ /dev/null
@@ -1,811 +0,0 @@
1/*
2 * This is part of the rtl8180-sa2400 driver
3 * released under the GPL (See file COPYING for details).
4 * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
5 *
6 * This file contains programming code for the rtl8225
7 * radio frontend.
8 *
9 * *Many* thanks to Realtek Corp. for their great support!
10 */
11
12#include "r8180_hw.h"
13#include "r8180_rtl8225.h"
14#include "r8180_93cx6.h"
15
16#include "ieee80211/dot11d.h"
17
18static void write_rtl8225(struct net_device *dev, u8 adr, u16 data)
19{
20 int i;
21 u16 out, select;
22 u8 bit;
23 u32 bangdata = (data << 4) | (adr & 0xf);
24
25 out = read_nic_word(dev, RFPinsOutput) & 0xfff3;
26
27 write_nic_word(dev, RFPinsEnable,
28 (read_nic_word(dev, RFPinsEnable) | 0x7));
29
30 select = read_nic_word(dev, RFPinsSelect);
31
32 write_nic_word(dev, RFPinsSelect, select | 0x7 |
33 SW_CONTROL_GPIO);
34
35 force_pci_posting(dev);
36 udelay(10);
37
38 write_nic_word(dev, RFPinsOutput, out | BB_HOST_BANG_EN);
39
40 force_pci_posting(dev);
41 udelay(2);
42
43 write_nic_word(dev, RFPinsOutput, out);
44
45 force_pci_posting(dev);
46 udelay(10);
47
48 for (i = 15; i >= 0; i--) {
49 bit = (bangdata & (1 << i)) >> i;
50
51 write_nic_word(dev, RFPinsOutput, bit | out);
52
53 write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK);
54 write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK);
55
56 i--;
57 bit = (bangdata & (1 << i)) >> i;
58
59 write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK);
60 write_nic_word(dev, RFPinsOutput, bit | out | BB_HOST_BANG_CLK);
61
62 write_nic_word(dev, RFPinsOutput, bit | out);
63
64 }
65
66 write_nic_word(dev, RFPinsOutput, out | BB_HOST_BANG_EN);
67
68 force_pci_posting(dev);
69 udelay(10);
70
71 write_nic_word(dev, RFPinsOutput, out | BB_HOST_BANG_EN);
72
73 write_nic_word(dev, RFPinsSelect, select | SW_CONTROL_GPIO);
74
75 rtl8185_rf_pins_enable(dev);
76}
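For orientation, the word that write_rtl8225() above clocks out over the software 3-wire bus is simply the 12-bit register value packed above the 4-bit register address, shifted out MSB first. A standalone sketch of that framing (the helper name is hypothetical, not part of the driver):

#include <stdio.h>

/* Hypothetical helper mirroring the bangdata composition in write_rtl8225():
 * data occupies bits 15..4, the register address bits 3..0. */
static unsigned short rtl8225_frame(unsigned char adr, unsigned short data)
{
	return (unsigned short)((data << 4) | (adr & 0xf));
}

int main(void)
{
	unsigned short word = rtl8225_frame(0x7, 0x0080); /* e.g. channel register */
	int i;

	/* Bits go out MSB first, matching the i = 15..0 loop above. */
	for (i = 15; i >= 0; i--)
		printf("%d", (word >> i) & 1);
	printf("\n");
	return 0;
}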
77
78static const u8 rtl8225_agc[] = {
79 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e,
80 0x9d, 0x9c, 0x9b, 0x9a, 0x99, 0x98, 0x97, 0x96,
81 0x95, 0x94, 0x93, 0x92, 0x91, 0x90, 0x8f, 0x8e,
82 0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86,
83 0x85, 0x84, 0x83, 0x82, 0x81, 0x80, 0x3f, 0x3e,
84 0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x37, 0x36,
85 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2f, 0x2e,
86 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26,
87 0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e,
88 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16,
89 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e,
90 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06,
91 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01,
92 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
93 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
94 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
95};
96
97static const u32 rtl8225_chan[] = {
98 0,
99 0x0080, 0x0100, 0x0180, 0x0200, 0x0280, 0x0300, 0x0380,
100 0x0400, 0x0480, 0x0500, 0x0580, 0x0600, 0x0680, 0x074A,
101};
102
103static const u8 rtl8225z2_gain_bg[] = {
104 0x23, 0x15, 0xa5, /* -82-1dBm */
105 0x23, 0x15, 0xb5, /* -82-2dBm */
106 0x23, 0x15, 0xc5, /* -82-3dBm */
107 0x33, 0x15, 0xc5, /* -78dBm */
108 0x43, 0x15, 0xc5, /* -74dBm */
109 0x53, 0x15, 0xc5, /* -70dBm */
110 0x63, 0x15, 0xc5, /* -66dBm */
111};
112
113static const u8 rtl8225z2_gain_a[] = {
114 0x13, 0x27, 0x5a, /* -82dBm */
115 0x23, 0x23, 0x58, /* -82dBm */
116 0x33, 0x1f, 0x56, /* -82dBm */
117 0x43, 0x1b, 0x54, /* -78dBm */
118 0x53, 0x17, 0x51, /* -74dBm */
119 0x63, 0x24, 0x4f, /* -70dBm */
120 0x73, 0x0f, 0x4c, /* -66dBm */
121};
122
123static const u16 rtl8225z2_rxgain[] = {
124 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409,
125 0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541,
126 0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583,
127 0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644,
128 0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688,
129 0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745,
130 0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789,
131 0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793,
132 0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d,
133 0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9,
134 0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03b0, 0x03b1, 0x03b2, 0x03b3,
135 0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb
136
137};
138
139static void rtl8225z2_set_gain(struct net_device *dev, short gain)
140{
141 const u8 *rtl8225_gain;
142 struct r8180_priv *priv = ieee80211_priv(dev);
143 u8 mode = priv->ieee80211->mode;
144
145 if (mode == IEEE_B || mode == IEEE_G)
146 rtl8225_gain = rtl8225z2_gain_bg;
147 else
148 rtl8225_gain = rtl8225z2_gain_a;
149
150 write_phy_ofdm(dev, 0x0b, rtl8225_gain[gain * 3]);
151 write_phy_ofdm(dev, 0x1b, rtl8225_gain[gain * 3 + 1]);
152 write_phy_ofdm(dev, 0x1d, rtl8225_gain[gain * 3 + 2]);
153 write_phy_ofdm(dev, 0x21, 0x37);
154}
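The gain tables above hold one triple of OFDM PHY register values (registers 0x0b, 0x1b, 0x1d) per gain step, which is why rtl8225z2_set_gain() indexes the flat array at gain * 3. A small standalone illustration of that indexing, reusing the first entries of rtl8225z2_gain_bg:

#include <stdio.h>

/* Same layout as rtl8225z2_gain_bg: three register values per gain step. */
static const unsigned char gain_bg[] = {
	0x23, 0x15, 0xa5,	/* step 0 */
	0x23, 0x15, 0xb5,	/* step 1 */
	0x23, 0x15, 0xc5,	/* step 2 */
	0x33, 0x15, 0xc5,	/* step 3 */
};

int main(void)
{
	int gain = 3;

	/* Values that would go to OFDM registers 0x0b, 0x1b and 0x1d. */
	printf("0x%02x 0x%02x 0x%02x\n",
	       gain_bg[gain * 3], gain_bg[gain * 3 + 1], gain_bg[gain * 3 + 2]);
	return 0;
}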
155
156static u32 read_rtl8225(struct net_device *dev, u8 adr)
157{
158 u32 data2Write = ((u32)(adr & 0x1f)) << 27;
159 u32 dataRead;
160 u32 mask;
161 u16 oval, oval2, oval3, tmp;
162 int i;
163 short bit, rw;
164 u8 wLength = 6;
165 u8 rLength = 12;
166 u8 low2high = 0;
167
168 oval = read_nic_word(dev, RFPinsOutput);
169 oval2 = read_nic_word(dev, RFPinsEnable);
170 oval3 = read_nic_word(dev, RFPinsSelect);
171
172 write_nic_word(dev, RFPinsEnable, (oval2|0xf));
173 write_nic_word(dev, RFPinsSelect, (oval3|0xf));
174
175 dataRead = 0;
176
177 oval &= ~0xf;
178
179 write_nic_word(dev, RFPinsOutput, oval | BB_HOST_BANG_EN);
180 udelay(4);
181
182 write_nic_word(dev, RFPinsOutput, oval);
183 udelay(5);
184
185 rw = 0;
186
187 mask = (low2high) ? 0x01 : (((u32)0x01)<<(32-1));
188
189 for (i = 0; i < wLength/2; i++) {
190 bit = ((data2Write&mask) != 0) ? 1 : 0;
191 write_nic_word(dev, RFPinsOutput, bit | oval | rw);
192 udelay(1);
193
194 write_nic_word(dev, RFPinsOutput,
195 bit | oval | BB_HOST_BANG_CLK | rw);
196 udelay(2);
197 write_nic_word(dev, RFPinsOutput,
198 bit | oval | BB_HOST_BANG_CLK | rw);
199 udelay(2);
200
201 mask = (low2high) ? (mask<<1) : (mask>>1);
202
203 if (i == 2) {
204 rw = BB_HOST_BANG_RW;
205 write_nic_word(dev, RFPinsOutput,
206 bit | oval | BB_HOST_BANG_CLK | rw);
207 udelay(2);
208 write_nic_word(dev, RFPinsOutput, bit | oval | rw);
209 udelay(2);
210 break;
211 }
212
213 bit = ((data2Write&mask) != 0) ? 1 : 0;
214
215 write_nic_word(dev, RFPinsOutput,
216 oval | bit | rw | BB_HOST_BANG_CLK);
217 udelay(2);
218 write_nic_word(dev, RFPinsOutput,
219 oval | bit | rw | BB_HOST_BANG_CLK);
220 udelay(2);
221
222 write_nic_word(dev, RFPinsOutput, oval | bit | rw);
223 udelay(1);
224
225 mask = (low2high) ? (mask<<1) : (mask>>1);
226 }
227
228 write_nic_word(dev, RFPinsOutput, rw|oval);
229 udelay(2);
230 mask = (low2high) ? 0x01 : (((u32)0x01) << (12-1));
231
232 /*
233 * We must set the data pin to HW controlled, otherwise the RF can't drive it
234 * and the RF register value won't read back properly.
235 */
236 write_nic_word(dev, RFPinsEnable, (oval2 & (~0x01)));
237
238 for (i = 0; i < rLength; i++) {
239 write_nic_word(dev, RFPinsOutput, rw|oval); udelay(1);
240
241 write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK);
242 udelay(2);
243 write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK);
244 udelay(2);
245 write_nic_word(dev, RFPinsOutput, rw|oval|BB_HOST_BANG_CLK);
246 udelay(2);
247 tmp = read_nic_word(dev, RFPinsInput);
248
249 dataRead |= (tmp & BB_HOST_BANG_CLK ? mask : 0);
250
251 write_nic_word(dev, RFPinsOutput, (rw|oval)); udelay(2);
252
253 mask = (low2high) ? (mask<<1) : (mask>>1);
254 }
255
256 write_nic_word(dev, RFPinsOutput,
257 BB_HOST_BANG_EN | BB_HOST_BANG_RW | oval);
258 udelay(2);
259
260 write_nic_word(dev, RFPinsEnable, oval2);
261 write_nic_word(dev, RFPinsSelect, oval3); /* Set To SW Switch */
262 write_nic_word(dev, RFPinsOutput, 0x3a0);
263
264 return dataRead;
265}
266
267void rtl8225z2_rf_close(struct net_device *dev)
268{
269 RF_WriteReg(dev, 0x4, 0x1f);
270
271 force_pci_posting(dev);
272 mdelay(1);
273
274 rtl8180_set_anaparam(dev, RTL8225z2_ANAPARAM_OFF);
275 rtl8185_set_anaparam2(dev, RTL8225z2_ANAPARAM2_OFF);
276}
277
278/*
279 * Map dBm into a Tx power index according to the current HW model
280 * (e.g. RF and PA) and the current wireless mode.
281 */
282static s8 DbmToTxPwrIdx(struct r8180_priv *priv,
283 enum wireless_mode mode, s32 PowerInDbm)
284{
285 bool bUseDefault = true;
286 s8 TxPwrIdx = 0;
287
288 /*
289 * OFDM Power in dBm = Index * 0.5 + 0
290 * CCK Power in dBm = Index * 0.25 + 13
291 */
292 s32 tmp = 0;
293
294 if (mode == WIRELESS_MODE_G) {
295 bUseDefault = false;
296 tmp = (2 * PowerInDbm);
297
298 if (tmp < 0)
299 TxPwrIdx = 0;
300 else if (tmp > 40) /* 40 means 20 dBm. */
301 TxPwrIdx = 40;
302 else
303 TxPwrIdx = (s8)tmp;
304 } else if (mode == WIRELESS_MODE_B) {
305 bUseDefault = false;
306 tmp = (4 * PowerInDbm) - 52;
307
308 if (tmp < 0)
309 TxPwrIdx = 0;
310 else if (tmp > 28) /* 28 means 20 dBm. */
311 TxPwrIdx = 28;
312 else
313 TxPwrIdx = (s8)tmp;
314 }
315
316 /*
317 * bUseDefault is TRUE if we want to use a default implementation.
318 * We shall set it to FALSE when we have an exact translation formula
319 * for the target IC. 070622, by rcnjko.
320 */
321 if (bUseDefault) {
322 if (PowerInDbm < 0)
323 TxPwrIdx = 0;
324 else if (PowerInDbm > 35)
325 TxPwrIdx = 35;
326 else
327 TxPwrIdx = (u8)PowerInDbm;
328 }
329
330 return TxPwrIdx;
331}
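Inverting the two formulas in the comment gives index = 2 * dBm for OFDM and index = 4 * dBm - 52 for CCK, which is what the clamped arithmetic above computes. A standalone sketch with a couple of worked values, mirroring only the clamping behaviour of DbmToTxPwrIdx():

#include <stdio.h>

/* Illustration of the G (OFDM) and B (CCK) branches of DbmToTxPwrIdx(). */
static int ofdm_idx(int dbm)
{
	int idx = 2 * dbm;		/* dBm = index * 0.5 */
	return idx < 0 ? 0 : (idx > 40 ? 40 : idx);
}

static int cck_idx(int dbm)
{
	int idx = 4 * dbm - 52;		/* dBm = index * 0.25 + 13 */
	return idx < 0 ? 0 : (idx > 28 ? 28 : idx);
}

int main(void)
{
	/* 20 dBm hits both caps; 16 dBm maps to OFDM index 32, CCK index 12. */
	printf("20 dBm: ofdm=%d cck=%d\n", ofdm_idx(20), cck_idx(20));
	printf("16 dBm: ofdm=%d cck=%d\n", ofdm_idx(16), cck_idx(16));
	return 0;
}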
332
333void rtl8225z2_SetTXPowerLevel(struct net_device *dev, short ch)
334{
335 struct r8180_priv *priv = ieee80211_priv(dev);
336 u8 max_cck_power_level;
337 u8 max_ofdm_power_level;
338 u8 min_ofdm_power_level;
339 char cck_power_level = (char)(0xff & priv->chtxpwr[ch]);
340 char ofdm_power_level = (char)(0xff & priv->chtxpwr_ofdm[ch]);
341
342 if (IS_DOT11D_ENABLE(priv->ieee80211) &&
343 IS_DOT11D_STATE_DONE(priv->ieee80211)) {
344 u8 MaxTxPwrInDbm = DOT11D_GetMaxTxPwrInDbm(priv->ieee80211, ch);
345 u8 CckMaxPwrIdx = DbmToTxPwrIdx(priv, WIRELESS_MODE_B,
346 MaxTxPwrInDbm);
347 u8 OfdmMaxPwrIdx = DbmToTxPwrIdx(priv, WIRELESS_MODE_G,
348 MaxTxPwrInDbm);
349
350 if (cck_power_level > CckMaxPwrIdx)
351 cck_power_level = CckMaxPwrIdx;
352 if (ofdm_power_level > OfdmMaxPwrIdx)
353 ofdm_power_level = OfdmMaxPwrIdx;
354 }
355
356 max_cck_power_level = 15;
357 max_ofdm_power_level = 25;
358 min_ofdm_power_level = 10;
359
360 if (cck_power_level > 35)
361 cck_power_level = 35;
362
363 write_nic_byte(dev, CCK_TXAGC, cck_power_level);
364 force_pci_posting(dev);
365 mdelay(1);
366
367 if (ofdm_power_level > 35)
368 ofdm_power_level = 35;
369
370 if (priv->up == 0) {
371 write_phy_ofdm(dev, 2, 0x42);
372 write_phy_ofdm(dev, 5, 0x00);
373 write_phy_ofdm(dev, 6, 0x40);
374 write_phy_ofdm(dev, 7, 0x00);
375 write_phy_ofdm(dev, 8, 0x40);
376 }
377
378 write_nic_byte(dev, OFDM_TXAGC, ofdm_power_level);
379
380 if (ofdm_power_level <= 11) {
381 write_phy_ofdm(dev, 0x07, 0x5c);
382 write_phy_ofdm(dev, 0x09, 0x5c);
383 }
384
385 if (ofdm_power_level <= 17) {
386 write_phy_ofdm(dev, 0x07, 0x54);
387 write_phy_ofdm(dev, 0x09, 0x54);
388 } else {
389 write_phy_ofdm(dev, 0x07, 0x50);
390 write_phy_ofdm(dev, 0x09, 0x50);
391 }
392
393 force_pci_posting(dev);
394 mdelay(1);
395}
396
397void rtl8225z2_rf_set_chan(struct net_device *dev, short ch)
398{
399 rtl8225z2_SetTXPowerLevel(dev, ch);
400
401 RF_WriteReg(dev, 0x7, rtl8225_chan[ch]);
402
403 if ((RF_ReadReg(dev, 0x7) & 0x0F80) != rtl8225_chan[ch])
404 RF_WriteReg(dev, 0x7, rtl8225_chan[ch]);
405
406 mdelay(1);
407
408 force_pci_posting(dev);
409 mdelay(10);
410}
411
412static void rtl8225_host_pci_init(struct net_device *dev)
413{
414 write_nic_word(dev, RFPinsOutput, 0x480);
415
416 rtl8185_rf_pins_enable(dev);
417
418 write_nic_word(dev, RFPinsSelect, 0x88 | SW_CONTROL_GPIO);
419
420 write_nic_byte(dev, GP_ENABLE, 0);
421
422 force_pci_posting(dev);
423 mdelay(200);
424
425 /* bit 6 is for RF on/off detection */
426 write_nic_word(dev, GP_ENABLE, 0xff & (~(1 << 6)));
427}
428
429void rtl8225z2_rf_init(struct net_device *dev)
430{
431 struct r8180_priv *priv = ieee80211_priv(dev);
432 int i;
433 short channel = 1;
434 u16 brsr;
435 u32 data;
436
437 priv->chan = channel;
438
439 rtl8225_host_pci_init(dev);
440
441 write_nic_dword(dev, RF_TIMING, 0x000a8008);
442
443 brsr = read_nic_word(dev, BRSR);
444
445 write_nic_word(dev, BRSR, 0xffff);
446
447 write_nic_dword(dev, RF_PARA, 0x100044);
448
449 rtl8180_set_mode(dev, EPROM_CMD_CONFIG);
450 write_nic_byte(dev, CONFIG3, 0x44);
451 rtl8180_set_mode(dev, EPROM_CMD_NORMAL);
452
453 rtl8185_rf_pins_enable(dev);
454
455 write_rtl8225(dev, 0x0, 0x2bf); mdelay(1);
456 write_rtl8225(dev, 0x1, 0xee0); mdelay(1);
457 write_rtl8225(dev, 0x2, 0x44d); mdelay(1);
458 write_rtl8225(dev, 0x3, 0x441); mdelay(1);
459 write_rtl8225(dev, 0x4, 0x8c3); mdelay(1);
460 write_rtl8225(dev, 0x5, 0xc72); mdelay(1);
461 write_rtl8225(dev, 0x6, 0xe6); mdelay(1);
462 write_rtl8225(dev, 0x7, rtl8225_chan[channel]); mdelay(1);
463 write_rtl8225(dev, 0x8, 0x3f); mdelay(1);
464 write_rtl8225(dev, 0x9, 0x335); mdelay(1);
465 write_rtl8225(dev, 0xa, 0x9d4); mdelay(1);
466 write_rtl8225(dev, 0xb, 0x7bb); mdelay(1);
467 write_rtl8225(dev, 0xc, 0x850); mdelay(1);
468 write_rtl8225(dev, 0xd, 0xcdf); mdelay(1);
469 write_rtl8225(dev, 0xe, 0x2b); mdelay(1);
470 write_rtl8225(dev, 0xf, 0x114);
471
472 mdelay(100);
473
474 write_rtl8225(dev, 0x0, 0x1b7);
475
476 for (i = 0; i < ARRAY_SIZE(rtl8225z2_rxgain); i++) {
477 write_rtl8225(dev, 0x1, i + 1);
478 write_rtl8225(dev, 0x2, rtl8225z2_rxgain[i]);
479 }
480
481 write_rtl8225(dev, 0x3, 0x80);
482 write_rtl8225(dev, 0x5, 0x4);
483
484 write_rtl8225(dev, 0x0, 0xb7);
485
486 write_rtl8225(dev, 0x2, 0xc4d);
487
488 /* FIXME!! rtl8187: we have to check whether calibration
489 * is successful and, if not, calibrate again (repeat
490 * the two writes on reg 2).
491 */
492 data = read_rtl8225(dev, 6);
493 if (!(data & 0x00000080)) {
494 write_rtl8225(dev, 0x02, 0x0c4d);
495 force_pci_posting(dev); mdelay(200);
496 write_rtl8225(dev, 0x02, 0x044d);
497 force_pci_posting(dev); mdelay(100);
498 data = read_rtl8225(dev, 6);
499 if (!(data & 0x00000080))
500 DMESGW("RF Calibration Failed!!!!\n");
501 }
502
503 mdelay(200);
504
505 write_rtl8225(dev, 0x0, 0x2bf);
506
507 for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) {
508 write_phy_ofdm(dev, 0xb, rtl8225_agc[i]);
509 mdelay(1);
510
511 /* enable writing AGC table */
512 write_phy_ofdm(dev, 0xa, i + 0x80);
513 mdelay(1);
514 }
515
516 force_pci_posting(dev);
517 mdelay(1);
518
519 write_phy_ofdm(dev, 0x00, 0x01); mdelay(1);
520 write_phy_ofdm(dev, 0x01, 0x02); mdelay(1);
521 write_phy_ofdm(dev, 0x02, 0x62); mdelay(1);
522 write_phy_ofdm(dev, 0x03, 0x00); mdelay(1);
523 write_phy_ofdm(dev, 0x04, 0x00); mdelay(1);
524 write_phy_ofdm(dev, 0x05, 0x00); mdelay(1);
525 write_phy_ofdm(dev, 0x06, 0x40); mdelay(1);
526 write_phy_ofdm(dev, 0x07, 0x00); mdelay(1);
527 write_phy_ofdm(dev, 0x08, 0x40); mdelay(1);
528 write_phy_ofdm(dev, 0x09, 0xfe); mdelay(1);
529 write_phy_ofdm(dev, 0x0a, 0x08); mdelay(1);
530 write_phy_ofdm(dev, 0x0b, 0x80); mdelay(1);
531 write_phy_ofdm(dev, 0x0c, 0x01); mdelay(1);
532 write_phy_ofdm(dev, 0x0d, 0x43);
533 write_phy_ofdm(dev, 0x0e, 0xd3); mdelay(1);
534 write_phy_ofdm(dev, 0x0f, 0x38); mdelay(1);
535 write_phy_ofdm(dev, 0x10, 0x84); mdelay(1);
536 write_phy_ofdm(dev, 0x11, 0x07); mdelay(1);
537 write_phy_ofdm(dev, 0x12, 0x20); mdelay(1);
538 write_phy_ofdm(dev, 0x13, 0x20); mdelay(1);
539 write_phy_ofdm(dev, 0x14, 0x00); mdelay(1);
540 write_phy_ofdm(dev, 0x15, 0x40); mdelay(1);
541 write_phy_ofdm(dev, 0x16, 0x00); mdelay(1);
542 write_phy_ofdm(dev, 0x17, 0x40); mdelay(1);
543 write_phy_ofdm(dev, 0x18, 0xef); mdelay(1);
544 write_phy_ofdm(dev, 0x19, 0x19); mdelay(1);
545 write_phy_ofdm(dev, 0x1a, 0x20); mdelay(1);
546 write_phy_ofdm(dev, 0x1b, 0x15); mdelay(1);
547 write_phy_ofdm(dev, 0x1c, 0x04); mdelay(1);
548 write_phy_ofdm(dev, 0x1d, 0xc5); mdelay(1);
549 write_phy_ofdm(dev, 0x1e, 0x95); mdelay(1);
550 write_phy_ofdm(dev, 0x1f, 0x75); mdelay(1);
551 write_phy_ofdm(dev, 0x20, 0x1f); mdelay(1);
552 write_phy_ofdm(dev, 0x21, 0x17); mdelay(1);
553 write_phy_ofdm(dev, 0x22, 0x16); mdelay(1);
554 write_phy_ofdm(dev, 0x23, 0x80); mdelay(1); /* FIXME maybe not needed */
555 write_phy_ofdm(dev, 0x24, 0x46); mdelay(1);
556 write_phy_ofdm(dev, 0x25, 0x00); mdelay(1);
557 write_phy_ofdm(dev, 0x26, 0x90); mdelay(1);
558 write_phy_ofdm(dev, 0x27, 0x88); mdelay(1);
559
560 rtl8225z2_set_gain(dev, 4);
561
562 write_phy_cck(dev, 0x0, 0x98); mdelay(1);
563 write_phy_cck(dev, 0x3, 0x20); mdelay(1);
564 write_phy_cck(dev, 0x4, 0x7e); mdelay(1);
565 write_phy_cck(dev, 0x5, 0x12); mdelay(1);
566 write_phy_cck(dev, 0x6, 0xfc); mdelay(1);
567 write_phy_cck(dev, 0x7, 0x78); mdelay(1);
568 write_phy_cck(dev, 0x8, 0x2e); mdelay(1);
569 write_phy_cck(dev, 0x10, 0x93); mdelay(1);
570 write_phy_cck(dev, 0x11, 0x88); mdelay(1);
571 write_phy_cck(dev, 0x12, 0x47); mdelay(1);
572 write_phy_cck(dev, 0x13, 0xd0);
573 write_phy_cck(dev, 0x19, 0x00);
574 write_phy_cck(dev, 0x1a, 0xa0);
575 write_phy_cck(dev, 0x1b, 0x08);
576 write_phy_cck(dev, 0x40, 0x86); /* CCK Carrier Sense Threshold */
577 write_phy_cck(dev, 0x41, 0x8d); mdelay(1);
578 write_phy_cck(dev, 0x42, 0x15); mdelay(1);
579 write_phy_cck(dev, 0x43, 0x18); mdelay(1);
580 write_phy_cck(dev, 0x44, 0x36); mdelay(1);
581 write_phy_cck(dev, 0x45, 0x35); mdelay(1);
582 write_phy_cck(dev, 0x46, 0x2e); mdelay(1);
583 write_phy_cck(dev, 0x47, 0x25); mdelay(1);
584 write_phy_cck(dev, 0x48, 0x1c); mdelay(1);
585 write_phy_cck(dev, 0x49, 0x12); mdelay(1);
586 write_phy_cck(dev, 0x4a, 0x09); mdelay(1);
587 write_phy_cck(dev, 0x4b, 0x04); mdelay(1);
588 write_phy_cck(dev, 0x4c, 0x05); mdelay(1);
589
590 write_nic_byte(dev, 0x5b, 0x0d); mdelay(1);
591
592 rtl8225z2_SetTXPowerLevel(dev, channel);
593
594 /* RX antenna default to A */
595 write_phy_cck(dev, 0x11, 0x9b); mdelay(1); /* B: 0xDB */
596 write_phy_ofdm(dev, 0x26, 0x90); mdelay(1); /* B: 0x10 */
597
598 rtl8185_tx_antenna(dev, 0x03); /* B: 0x00 */
599
600 /* Switch to high-speed 3-wire;
601 * the last digit 2 selects it for both CCK and OFDM.
602 */
603 write_nic_dword(dev, 0x94, 0x15c00002);
604 rtl8185_rf_pins_enable(dev);
605
606 rtl8225z2_rf_set_chan(dev, priv->chan);
607}
608
609#define MAX_DOZE_WAITING_TIMES_85B 20
610#define MAX_POLLING_24F_TIMES_87SE 10
611#define LPS_MAX_SLEEP_WAITING_TIMES_87SE 5
612
613bool SetZebraRFPowerState8185(struct net_device *dev,
614 enum rt_rf_power_state eRFPowerState)
615{
616 struct r8180_priv *priv = ieee80211_priv(dev);
617 u8 btCR9346, btConfig3;
618 bool bActionAllowed = true, bTurnOffBB = true;
619 u8 u1bTmp;
620 int i;
621 bool bResult = true;
622 u8 QueueID;
623
624 if (priv->SetRFPowerStateInProgress == true)
625 return false;
626
627 priv->SetRFPowerStateInProgress = true;
628
629 btCR9346 = read_nic_byte(dev, CR9346);
630 write_nic_byte(dev, CR9346, (btCR9346 | 0xC0));
631
632 btConfig3 = read_nic_byte(dev, CONFIG3);
633 write_nic_byte(dev, CONFIG3, (btConfig3 | CONFIG3_PARM_En));
634
635 switch (eRFPowerState) {
636 case RF_ON:
637 write_nic_word(dev, 0x37C, 0x00EC);
638
639 /* turn on AFE */
640 write_nic_byte(dev, 0x54, 0x00);
641 write_nic_byte(dev, 0x62, 0x00);
642
643 /* turn on RF */
644 RF_WriteReg(dev, 0x0, 0x009f); udelay(500);
645 RF_WriteReg(dev, 0x4, 0x0972); udelay(500);
646
647 /* turn on RF again */
648 RF_WriteReg(dev, 0x0, 0x009f); udelay(500);
649 RF_WriteReg(dev, 0x4, 0x0972); udelay(500);
650
651 /* turn on BB */
652 write_phy_ofdm(dev, 0x10, 0x40);
653 write_phy_ofdm(dev, 0x12, 0x40);
654
655 /* Avoid power down at init time. */
656 write_nic_byte(dev, CONFIG4, priv->RFProgType);
657
658 u1bTmp = read_nic_byte(dev, 0x24E);
659 write_nic_byte(dev, 0x24E, (u1bTmp & (~(BIT5 | BIT6))));
660 break;
661 case RF_SLEEP:
662 for (QueueID = 0, i = 0; QueueID < 6;) {
663 if (get_curr_tx_free_desc(dev, QueueID) ==
664 priv->txringcount) {
665 QueueID++;
666 continue;
667 } else {
668 priv->TxPollingTimes++;
669 if (priv->TxPollingTimes >=
670 LPS_MAX_SLEEP_WAITING_TIMES_87SE) {
671 bActionAllowed = false;
672 break;
673 } else
674 udelay(10);
675 }
676 }
677
678 if (bActionAllowed) {
679 /* turn off BB RXIQ matrix to cut off rx signal */
680 write_phy_ofdm(dev, 0x10, 0x00);
681 write_phy_ofdm(dev, 0x12, 0x00);
682
683 /* turn off RF */
684 RF_WriteReg(dev, 0x4, 0x0000);
685 RF_WriteReg(dev, 0x0, 0x0000);
686
687 /* turn off AFE except PLL */
688 write_nic_byte(dev, 0x62, 0xff);
689 write_nic_byte(dev, 0x54, 0xec);
690
691 mdelay(1);
692
693 {
694 int i = 0;
695 while (true) {
696 u8 tmp24F = read_nic_byte(dev, 0x24f);
697
698 if ((tmp24F == 0x01) ||
699 (tmp24F == 0x09)) {
700 bTurnOffBB = true;
701 break;
702 } else {
703 udelay(10);
704 i++;
705 priv->TxPollingTimes++;
706
707 if (priv->TxPollingTimes >= LPS_MAX_SLEEP_WAITING_TIMES_87SE) {
708 bTurnOffBB = false;
709 break;
710 } else
711 udelay(10);
712 }
713 }
714 }
715
716 if (bTurnOffBB) {
717 /* turn off BB */
718 u1bTmp = read_nic_byte(dev, 0x24E);
719 write_nic_byte(dev, 0x24E,
720 (u1bTmp | BIT5 | BIT6));
721
722 /* turn off AFE PLL */
723 write_nic_byte(dev, 0x54, 0xFC);
724 write_nic_word(dev, 0x37C, 0x00FC);
725 }
726 }
727 break;
728 case RF_OFF:
729 for (QueueID = 0, i = 0; QueueID < 6;) {
730 if (get_curr_tx_free_desc(dev, QueueID) ==
731 priv->txringcount) {
732 QueueID++;
733 continue;
734 } else {
735 udelay(10);
736 i++;
737 }
738
739 if (i >= MAX_DOZE_WAITING_TIMES_85B)
740 break;
741 }
742
743 /* turn off BB RXIQ matrix to cut off rx signal */
744 write_phy_ofdm(dev, 0x10, 0x00);
745 write_phy_ofdm(dev, 0x12, 0x00);
746
747 /* turn off RF */
748 RF_WriteReg(dev, 0x4, 0x0000);
749 RF_WriteReg(dev, 0x0, 0x0000);
750
751 /* turn off AFE except PLL */
752 write_nic_byte(dev, 0x62, 0xff);
753 write_nic_byte(dev, 0x54, 0xec);
754
755 mdelay(1);
756
757 {
758 int i = 0;
759
760 while (true) {
761 u8 tmp24F = read_nic_byte(dev, 0x24f);
762
763 if ((tmp24F == 0x01) || (tmp24F == 0x09)) {
764 bTurnOffBB = true;
765 break;
766 } else {
767 bTurnOffBB = false;
768 udelay(10);
769 i++;
770 }
771
772 if (i > MAX_POLLING_24F_TIMES_87SE)
773 break;
774 }
775 }
776
777 if (bTurnOffBB) {
778 /* turn off BB */
779 u1bTmp = read_nic_byte(dev, 0x24E);
780 write_nic_byte(dev, 0x24E, (u1bTmp | BIT5 | BIT6));
781
782 /* turn off AFE PLL (80M) */
783 write_nic_byte(dev, 0x54, 0xFC);
784 write_nic_word(dev, 0x37C, 0x00FC);
785 }
786 break;
787 }
788
789 btConfig3 &= ~(CONFIG3_PARM_En);
790 write_nic_byte(dev, CONFIG3, btConfig3);
791
792 btCR9346 &= ~(0xC0);
793 write_nic_byte(dev, CR9346, btCR9346);
794
795 if (bResult && bActionAllowed)
796 priv->eRFPowerState = eRFPowerState;
797
798 priv->SetRFPowerStateInProgress = false;
799
800 return bResult && bActionAllowed;
801}
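Both the RF_SLEEP and RF_OFF paths above first wait for the six Tx rings to drain, bounding the wait with a polling counter before touching the RF. A simplified standalone sketch of that bounded-poll pattern; queue_free_descs() is a hypothetical stand-in for get_curr_tx_free_desc():

#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES	6
#define RING_SIZE	32	/* stand-in for priv->txringcount */
#define MAX_POLLS	20	/* cf. MAX_DOZE_WAITING_TIMES_85B */

/* Hypothetical stand-in for get_curr_tx_free_desc(dev, queue). */
static int queue_free_descs(int queue)
{
	(void)queue;
	return RING_SIZE;	/* pretend every ring is already drained */
}

int main(void)
{
	int queue = 0, polls = 0;
	bool drained = true;

	/* Advance to the next queue only once it is completely free;
	 * otherwise poll again, giving up after MAX_POLLS attempts. */
	while (queue < NUM_QUEUES) {
		if (queue_free_descs(queue) == RING_SIZE) {
			queue++;
		} else if (++polls >= MAX_POLLS) {
			drained = false;
			break;
		}
	}
	printf("tx rings drained: %d\n", drained);
	return 0;
}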
802
803void rtl8225z4_rf_sleep(struct net_device *dev)
804{
805 MgntActSet_RF_State(dev, RF_SLEEP, RF_CHANGE_BY_PS);
806}
807
808void rtl8225z4_rf_wakeup(struct net_device *dev)
809{
810 MgntActSet_RF_State(dev, RF_ON, RF_CHANGE_BY_PS);
811}
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
deleted file mode 100644
index b55249170f18..000000000000
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ /dev/null
@@ -1,1409 +0,0 @@
1/*
2 This file contains wireless extension handlers.
3
4 This is part of rtl8180 OpenSource driver.
5 Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
6 Released under the terms of GPL (General Public Licence)
7
8 Parts of this driver are based on the GPL part
9 of the official realtek driver.
10
11 Parts of this driver are based on the rtl8180 driver skeleton
12 from Patric Schenke & Andres Salomon.
13
14 Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
15
16 We want to thank the authors of those projects and the Ndiswrapper
17 project authors.
18*/
19
20
21#include "r8180.h"
22#include "r8180_hw.h"
23
24#include <net/iw_handler.h>
25#include "ieee80211/dot11d.h"
26
27static u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
28 6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};
29
30#define RATE_COUNT ARRAY_SIZE(rtl8180_rates)
31
32static struct rtl8187se_channel_list default_channel_plan[] = {
33 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 36, 40, 44, 48, 52, 56, 60, 64}, 19}, /* FCC */
34 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* IC */
35 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* ETSI */
36 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* Spain. Change to ETSI. */
37 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* France. Change to ETSI. */
38 {{14, 36, 40, 44, 48, 52, 56, 60, 64}, 9}, /* MKK */
39 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 36, 40, 44, 48, 52, 56, 60, 64}, 22}, /* MKK1 */
40 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 36, 40, 44, 48, 52, 56, 60, 64}, 21}, /* Israel */
41 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 34, 38, 42, 46}, 17}, /* For 11a , TELEC */
42 {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14} /* For Global Domain. 1-11:active scan, 12-14 passive scan.*/ /* +YJ, 080626 */
43};
44static int r8180_wx_get_freq(struct net_device *dev,
45 struct iw_request_info *a,
46 union iwreq_data *wrqu, char *b)
47{
48 struct r8180_priv *priv = ieee80211_priv(dev);
49
50 return ieee80211_wx_get_freq(priv->ieee80211, a, wrqu, b);
51}
52
53
54static int r8180_wx_set_key(struct net_device *dev,
55 struct iw_request_info *info,
56 union iwreq_data *wrqu, char *key)
57{
58 struct r8180_priv *priv = ieee80211_priv(dev);
59 struct iw_point *erq = &(wrqu->encoding);
60
61 if (priv->ieee80211->bHwRadioOff)
62 return 0;
63
64 if (erq->length > 0) {
65 u32 *tkey = (u32 *) key;
66 priv->key0[0] = tkey[0];
67 priv->key0[1] = tkey[1];
68 priv->key0[2] = tkey[2];
69 priv->key0[3] = tkey[3] & 0xff;
70 DMESG("Setting wep key to %x %x %x %x",
71 tkey[0], tkey[1], tkey[2], tkey[3]);
72 rtl8180_set_hw_wep(dev);
73 }
74 return 0;
75}
76
77
78static int r8180_wx_set_beaconinterval(struct net_device *dev,
79 struct iw_request_info *aa,
80 union iwreq_data *wrqu, char *b)
81{
82 int *parms = (int *)b;
83 int bi = parms[0];
84
85 struct r8180_priv *priv = ieee80211_priv(dev);
86
87 if (priv->ieee80211->bHwRadioOff)
88 return 0;
89
90 down(&priv->wx_sem);
91 DMESG("setting beacon interval to %x", bi);
92
93 priv->ieee80211->current_network.beacon_interval = bi;
94 rtl8180_commit(dev);
95 up(&priv->wx_sem);
96
97 return 0;
98}
99
100
101
102static int r8180_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
103 union iwreq_data *wrqu, char *b)
104{
105 struct r8180_priv *priv = ieee80211_priv(dev);
106 return ieee80211_wx_get_mode(priv->ieee80211, a, wrqu, b);
107}
108
109
110
111static int r8180_wx_get_rate(struct net_device *dev,
112 struct iw_request_info *info,
113 union iwreq_data *wrqu, char *extra)
114{
115 struct r8180_priv *priv = ieee80211_priv(dev);
116 return ieee80211_wx_get_rate(priv->ieee80211, info, wrqu, extra);
117}
118
119
120
121static int r8180_wx_set_rate(struct net_device *dev,
122 struct iw_request_info *info,
123 union iwreq_data *wrqu, char *extra)
124{
125 int ret;
126 struct r8180_priv *priv = ieee80211_priv(dev);
127
128
129 if (priv->ieee80211->bHwRadioOff)
130 return 0;
131
132 down(&priv->wx_sem);
133
134 ret = ieee80211_wx_set_rate(priv->ieee80211, info, wrqu, extra);
135
136 up(&priv->wx_sem);
137
138 return ret;
139}
140
141
142static int r8180_wx_set_crcmon(struct net_device *dev,
143 struct iw_request_info *info,
144 union iwreq_data *wrqu, char *extra)
145{
146 struct r8180_priv *priv = ieee80211_priv(dev);
147 int *parms = (int *)extra;
148 int enable = (parms[0] > 0);
149 short prev = priv->crcmon;
150
151
152 if (priv->ieee80211->bHwRadioOff)
153 return 0;
154
155 down(&priv->wx_sem);
156
157 if (enable)
158 priv->crcmon = 1;
159 else
160 priv->crcmon = 0;
161
162 DMESG("bad CRC in monitor mode are %s",
163 priv->crcmon ? "accepted" : "rejected");
164
165 if (prev != priv->crcmon && priv->up) {
166 rtl8180_down(dev);
167 rtl8180_up(dev);
168 }
169
170 up(&priv->wx_sem);
171
172 return 0;
173}
174
175
176static int r8180_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
177 union iwreq_data *wrqu, char *b)
178{
179 struct r8180_priv *priv = ieee80211_priv(dev);
180 int ret;
181
182
183 if (priv->ieee80211->bHwRadioOff)
184 return 0;
185
186 down(&priv->wx_sem);
187 if (priv->bInactivePs) {
188 if (wrqu->mode == IW_MODE_ADHOC)
189 IPSLeave(dev);
190 }
191 ret = ieee80211_wx_set_mode(priv->ieee80211, a, wrqu, b);
192
193 up(&priv->wx_sem);
194 return ret;
195}
196
197/* YJ,add,080819,for hidden ap */
198struct iw_range_with_scan_capa {
199 /* Informative stuff (to choose between different interface) */
200
201 __u32 throughput; /* To give an idea... */
202
203 /* In theory this value should be the maximum benchmarked
204 * TCP/IP throughput, because with most of these devices the
205 * bit rate is meaningless (overhead and co) for estimating how
206 * fast the connection will go and picking the fastest one.
207 * I suggest people play with Netperf or any benchmark...
208 */
209
210 /* NWID (or domain id) */
211 __u32 min_nwid; /* Minimal NWID we are able to set */
212 __u32 max_nwid; /* Maximal NWID we are able to set */
213
214 /* Old Frequency (backward compat - moved lower ) */
215 __u16 old_num_channels;
216 __u8 old_num_frequency;
217
218 /* Scan capabilities */
219 __u8 scan_capa;
220};
221/* YJ,add,080819,for hidden ap */
222
223
224static int rtl8180_wx_get_range(struct net_device *dev,
225 struct iw_request_info *info,
226 union iwreq_data *wrqu, char *extra)
227{
228 struct iw_range *range = (struct iw_range *)extra;
229 struct r8180_priv *priv = ieee80211_priv(dev);
230 u16 val;
231 int i;
232
233 wrqu->data.length = sizeof(*range);
234 memset(range, 0, sizeof(*range));
235
236 /* Let's try to keep this struct in the same order as in
237 * linux/include/wireless.h
238 */
239
240 /* TODO: See what values we can set, and remove the ones we can't
241 * set, or fill them with some default data.
242 */
243
244 /* ~5 Mb/s real (802.11b) */
245 range->throughput = 5 * 1000 * 1000;
246
247 /* TODO: Not used in 802.11b? */
248/* range->min_nwid; */ /* Minimal NWID we are able to set */
249 /* TODO: Not used in 802.11b? */
250/* range->max_nwid; */ /* Maximal NWID we are able to set */
251
252 /* Old Frequency (backward compat - moved lower ) */
253/* range->old_num_channels; */
254/* range->old_num_frequency; */
255/* range->old_freq[6]; */ /* Filler to keep "version" at the same offset */
256 if (priv->rf_set_sens != NULL)
257 range->sensitivity = priv->max_sens; /* signal level threshold range */
258
259 range->max_qual.qual = 100;
260 /* TODO: Find real max RSSI and stick here */
261 range->max_qual.level = 0;
262 range->max_qual.noise = -98;
263 range->max_qual.updated = 7; /* Updated all three */
264
265 range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
266 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
267 range->avg_qual.level = 20 + -98;
268 range->avg_qual.noise = 0;
269 range->avg_qual.updated = 7; /* Updated all three */
270
271 range->num_bitrates = RATE_COUNT;
272
273 for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++)
274 range->bitrate[i] = rtl8180_rates[i];
275
276 range->min_frag = MIN_FRAG_THRESHOLD;
277 range->max_frag = MAX_FRAG_THRESHOLD;
278
279 range->pm_capa = 0;
280
281 range->we_version_compiled = WIRELESS_EXT;
282 range->we_version_source = 16;
283
284 range->num_channels = 14;
285
286 for (i = 0, val = 0; i < 14; i++) {
287
288 /* Include only legal frequencies for some countries */
289 if ((GET_DOT11D_INFO(priv->ieee80211)->channel_map)[i+1]) {
290 range->freq[val].i = i + 1;
291 range->freq[val].m = ieee80211_wlan_frequencies[i] * 100000;
292 range->freq[val].e = 1;
293 val++;
294 } else {
295 /* FIXME: do we need to set anything for channels
296 * we don't use? */
297 }
298
299 if (val == IW_MAX_FREQUENCIES)
300 break;
301 }
302
303 range->num_frequency = val;
304 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
305 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
306
307 return 0;
308}
309
310
311static int r8180_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
312 union iwreq_data *wrqu, char *b)
313{
314 struct r8180_priv *priv = ieee80211_priv(dev);
315 int ret;
316 struct ieee80211_device *ieee = priv->ieee80211;
317
318
319 if (priv->ieee80211->bHwRadioOff)
320 return 0;
321
322 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
323 struct iw_scan_req *req = (struct iw_scan_req *)b;
324 if (req->essid_len) {
325 ieee->current_network.ssid_len = req->essid_len;
326 memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
327 }
328 }
329
330 down(&priv->wx_sem);
331 if (priv->up) {
332 priv->ieee80211->actscanning = true;
333 if (priv->bInactivePs && (priv->ieee80211->state != IEEE80211_LINKED)) {
334 IPSLeave(dev);
335 ieee80211_softmac_ips_scan_syncro(priv->ieee80211);
336 ret = 0;
337 } else {
338 /* prevent scan in BusyTraffic */
339 /* FIXME: Need to consider last scan time */
340 if (priv->link_detect.b_busy_traffic) {
341 ret = 0;
342 printk("Now traffic is busy, please try later!\n");
343 } else
344 /* prevent scan in BusyTraffic,end */
345 ret = ieee80211_wx_set_scan(priv->ieee80211, a, wrqu, b);
346 }
347 } else
348 ret = -1;
349
350 up(&priv->wx_sem);
351
352 return ret;
353}
354
355
356static int r8180_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
357 union iwreq_data *wrqu, char *b)
358{
359
360 int ret;
361 struct r8180_priv *priv = ieee80211_priv(dev);
362
363 down(&priv->wx_sem);
364 if (priv->up)
365 ret = ieee80211_wx_get_scan(priv->ieee80211, a, wrqu, b);
366 else
367 ret = -1;
368
369 up(&priv->wx_sem);
370 return ret;
371}
372
373
374static int r8180_wx_set_essid(struct net_device *dev,
375 struct iw_request_info *a,
376 union iwreq_data *wrqu, char *b)
377{
378 struct r8180_priv *priv = ieee80211_priv(dev);
379
380 int ret;
381
382 if (priv->ieee80211->bHwRadioOff)
383 return 0;
384
385 down(&priv->wx_sem);
386 if (priv->bInactivePs)
387 IPSLeave(dev);
388
389 ret = ieee80211_wx_set_essid(priv->ieee80211, a, wrqu, b);
390
391 up(&priv->wx_sem);
392 return ret;
393}
394
395
396static int r8180_wx_get_essid(struct net_device *dev,
397 struct iw_request_info *a,
398 union iwreq_data *wrqu, char *b)
399{
400 int ret;
401 struct r8180_priv *priv = ieee80211_priv(dev);
402
403 down(&priv->wx_sem);
404
405 ret = ieee80211_wx_get_essid(priv->ieee80211, a, wrqu, b);
406
407 up(&priv->wx_sem);
408
409 return ret;
410}
411
412
413static int r8180_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
414 union iwreq_data *wrqu, char *b)
415{
416 int ret;
417 struct r8180_priv *priv = ieee80211_priv(dev);
418
419
420 if (priv->ieee80211->bHwRadioOff)
421 return 0;
422
423 down(&priv->wx_sem);
424
425 ret = ieee80211_wx_set_freq(priv->ieee80211, a, wrqu, b);
426
427 up(&priv->wx_sem);
428 return ret;
429}
430
431
432static int r8180_wx_get_name(struct net_device *dev,
433 struct iw_request_info *info,
434 union iwreq_data *wrqu, char *extra)
435{
436 struct r8180_priv *priv = ieee80211_priv(dev);
437 return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra);
438}
439
440static int r8180_wx_set_frag(struct net_device *dev,
441 struct iw_request_info *info,
442 union iwreq_data *wrqu, char *extra)
443{
444 struct r8180_priv *priv = ieee80211_priv(dev);
445
446 if (priv->ieee80211->bHwRadioOff)
447 return 0;
448
449 if (wrqu->frag.disabled)
450 priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD;
451 else {
452 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
453 wrqu->frag.value > MAX_FRAG_THRESHOLD)
454 return -EINVAL;
455
456 priv->ieee80211->fts = wrqu->frag.value & ~0x1;
457 }
458
459 return 0;
460}
461
462
463static int r8180_wx_get_frag(struct net_device *dev,
464 struct iw_request_info *info,
465 union iwreq_data *wrqu, char *extra)
466{
467 struct r8180_priv *priv = ieee80211_priv(dev);
468
469 wrqu->frag.value = priv->ieee80211->fts;
470 wrqu->frag.fixed = 0; /* no auto select */
471 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD);
472
473 return 0;
474}
475
476
477static int r8180_wx_set_wap(struct net_device *dev,
478 struct iw_request_info *info,
479 union iwreq_data *awrq, char *extra)
480{
481 int ret;
482 struct r8180_priv *priv = ieee80211_priv(dev);
483
484 if (priv->ieee80211->bHwRadioOff)
485 return 0;
486
487 down(&priv->wx_sem);
488
489 ret = ieee80211_wx_set_wap(priv->ieee80211, info, awrq, extra);
490
491 up(&priv->wx_sem);
492 return ret;
493
494}
495
496
497static int r8180_wx_get_wap(struct net_device *dev,
498 struct iw_request_info *info,
499 union iwreq_data *wrqu, char *extra)
500{
501 struct r8180_priv *priv = ieee80211_priv(dev);
502
503 return ieee80211_wx_get_wap(priv->ieee80211, info, wrqu, extra);
504}
505
506
507static int r8180_wx_set_enc(struct net_device *dev,
508 struct iw_request_info *info,
509 union iwreq_data *wrqu, char *key)
510{
511 struct r8180_priv *priv = ieee80211_priv(dev);
512 int ret;
513
514 if (priv->ieee80211->bHwRadioOff)
515 return 0;
516
517
518 down(&priv->wx_sem);
519
520 if (priv->hw_wep)
521 ret = r8180_wx_set_key(dev, info, wrqu, key);
522 else {
523 DMESG("Setting SW wep key");
524 ret = ieee80211_wx_set_encode(priv->ieee80211, info, wrqu, key);
525 }
526
527 up(&priv->wx_sem);
528 return ret;
529}
530
531
532static int r8180_wx_get_enc(struct net_device *dev,
533 struct iw_request_info *info,
534 union iwreq_data *wrqu, char *key)
535{
536 struct r8180_priv *priv = ieee80211_priv(dev);
537
538 return ieee80211_wx_get_encode(priv->ieee80211, info, wrqu, key);
539}
540
541
542static int r8180_wx_set_scan_type(struct net_device *dev,
543 struct iw_request_info *aa,
544 union iwreq_data *wrqu, char *p)
545{
546
547 struct r8180_priv *priv = ieee80211_priv(dev);
548 int *parms = (int *)p;
549 int mode = parms[0];
550
551 if (priv->ieee80211->bHwRadioOff)
552 return 0;
553
554 priv->ieee80211->active_scan = mode;
555
556 return 1;
557}
558
559static int r8180_wx_set_retry(struct net_device *dev,
560 struct iw_request_info *info,
561 union iwreq_data *wrqu, char *extra)
562{
563 struct r8180_priv *priv = ieee80211_priv(dev);
564 int err = 0;
565
566 if (priv->ieee80211->bHwRadioOff)
567 return 0;
568
569 down(&priv->wx_sem);
570
571 if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
572 wrqu->retry.disabled) {
573 err = -EINVAL;
574 goto exit;
575 }
576 if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) {
577 err = -EINVAL;
578 goto exit;
579 }
580
581 if (wrqu->retry.value > R8180_MAX_RETRY) {
582 err = -EINVAL;
583 goto exit;
584 }
585 if (wrqu->retry.flags & IW_RETRY_MAX) {
586 priv->retry_rts = wrqu->retry.value;
587 DMESG("Setting retry for RTS/CTS data to %d", wrqu->retry.value);
588
589 } else {
590 priv->retry_data = wrqu->retry.value;
591 DMESG("Setting retry for non RTS/CTS data to %d", wrqu->retry.value);
592 }
593
594 /* FIXME !
595 * We might try to write directly the TX config register
596 * or to restart just the (R)TX process.
597 * I'm unsure if whole reset is really needed
598 */
599
600 rtl8180_commit(dev);
601exit:
602 up(&priv->wx_sem);
603
604 return err;
605}
606
607static int r8180_wx_get_retry(struct net_device *dev,
608 struct iw_request_info *info,
609 union iwreq_data *wrqu, char *extra)
610{
611 struct r8180_priv *priv = ieee80211_priv(dev);
612
613
614 wrqu->retry.disabled = 0; /* can't be disabled */
615
616 if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
617 IW_RETRY_LIFETIME)
618 return -EINVAL;
619
620 if (wrqu->retry.flags & IW_RETRY_MAX) {
621 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
622 wrqu->retry.value = priv->retry_rts;
623 } else {
624 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
625 wrqu->retry.value = priv->retry_data;
626 }
627
628 return 0;
629}
630
631static int r8180_wx_get_sens(struct net_device *dev,
632 struct iw_request_info *info,
633 union iwreq_data *wrqu, char *extra)
634{
635 struct r8180_priv *priv = ieee80211_priv(dev);
636 if (priv->rf_set_sens == NULL)
637 return -1; /* we don't have this support for this radio */
638 wrqu->sens.value = priv->sens;
639 return 0;
640}
641
642
643static int r8180_wx_set_sens(struct net_device *dev,
644 struct iw_request_info *info,
645 union iwreq_data *wrqu, char *extra)
646{
647
648 struct r8180_priv *priv = ieee80211_priv(dev);
649
650 short err = 0;
651
652 if (priv->ieee80211->bHwRadioOff)
653 return 0;
654
655 down(&priv->wx_sem);
656 if (priv->rf_set_sens == NULL) {
657 err = -1; /* we don't have this support for this radio */
658 goto exit;
659 }
660 if (priv->rf_set_sens(dev, wrqu->sens.value) == 0)
661 priv->sens = wrqu->sens.value;
662 else
663 err = -EINVAL;
664
665exit:
666 up(&priv->wx_sem);
667
668 return err;
669}
670
671
672static int r8180_wx_set_rawtx(struct net_device *dev,
673 struct iw_request_info *info,
674 union iwreq_data *wrqu, char *extra)
675{
676 struct r8180_priv *priv = ieee80211_priv(dev);
677 int ret;
678
679 if (priv->ieee80211->bHwRadioOff)
680 return 0;
681
682 down(&priv->wx_sem);
683
684 ret = ieee80211_wx_set_rawtx(priv->ieee80211, info, wrqu, extra);
685
686 up(&priv->wx_sem);
687
688 return ret;
689
690}
691
692static int r8180_wx_get_power(struct net_device *dev,
693 struct iw_request_info *info,
694 union iwreq_data *wrqu, char *extra)
695{
696 int ret;
697 struct r8180_priv *priv = ieee80211_priv(dev);
698
699 down(&priv->wx_sem);
700
701 ret = ieee80211_wx_get_power(priv->ieee80211, info, wrqu, extra);
702
703 up(&priv->wx_sem);
704
705 return ret;
706}
707
708static int r8180_wx_set_power(struct net_device *dev,
709 struct iw_request_info *info,
710 union iwreq_data *wrqu, char *extra)
711{
712 int ret;
713 struct r8180_priv *priv = ieee80211_priv(dev);
714
715
716 if (priv->ieee80211->bHwRadioOff)
717 return 0;
718
719 down(&priv->wx_sem);
720 printk("=>>>>>>>>>>=============================>set power:%d, %d!\n", wrqu->power.disabled, wrqu->power.flags);
721 if (wrqu->power.disabled == 0) {
722 wrqu->power.flags |= IW_POWER_ALL_R;
723 wrqu->power.flags |= IW_POWER_TIMEOUT;
724 wrqu->power.value = 1000;
725 }
726
727 ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra);
728
729 up(&priv->wx_sem);
730
731 return ret;
732}
733
734static int r8180_wx_set_rts(struct net_device *dev,
735 struct iw_request_info *info,
736 union iwreq_data *wrqu, char *extra)
737{
738 struct r8180_priv *priv = ieee80211_priv(dev);
739
740
741 if (priv->ieee80211->bHwRadioOff)
742 return 0;
743
744 if (wrqu->rts.disabled)
745 priv->rts = DEFAULT_RTS_THRESHOLD;
746 else {
747 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
748 wrqu->rts.value > MAX_RTS_THRESHOLD)
749 return -EINVAL;
750
751 priv->rts = wrqu->rts.value;
752 }
753
754 return 0;
755}
756static int r8180_wx_get_rts(struct net_device *dev,
757 struct iw_request_info *info,
758 union iwreq_data *wrqu, char *extra)
759{
760 struct r8180_priv *priv = ieee80211_priv(dev);
761
762
763
764 wrqu->rts.value = priv->rts;
765 wrqu->rts.fixed = 0; /* no auto select */
766 wrqu->rts.disabled = (wrqu->rts.value == 0);
767
768 return 0;
769}
770static int dummy(struct net_device *dev, struct iw_request_info *a,
771 union iwreq_data *wrqu, char *b)
772{
773 return -1;
774}
775
776static int r8180_wx_get_iwmode(struct net_device *dev,
777 struct iw_request_info *info,
778 union iwreq_data *wrqu, char *extra)
779{
780 struct r8180_priv *priv = ieee80211_priv(dev);
781 struct ieee80211_device *ieee;
782 int ret = 0;
783
784
785
786 down(&priv->wx_sem);
787
788 ieee = priv->ieee80211;
789
790 strcpy(extra, "802.11");
791 if (ieee->modulation & IEEE80211_CCK_MODULATION) {
792 strcat(extra, "b");
793 if (ieee->modulation & IEEE80211_OFDM_MODULATION)
794 strcat(extra, "/g");
795 } else if (ieee->modulation & IEEE80211_OFDM_MODULATION)
796 strcat(extra, "g");
797
798 up(&priv->wx_sem);
799
800 return ret;
801}
802static int r8180_wx_set_iwmode(struct net_device *dev,
803 struct iw_request_info *info,
804 union iwreq_data *wrqu, char *extra)
805{
806 struct r8180_priv *priv = ieee80211_priv(dev);
807 struct ieee80211_device *ieee = priv->ieee80211;
808 int *param = (int *)extra;
809 int ret = 0;
810 int modulation = 0, mode = 0;
811
812
813 if (priv->ieee80211->bHwRadioOff)
814 return 0;
815
816 down(&priv->wx_sem);
817
818 if (*param == 1) {
819 modulation |= IEEE80211_CCK_MODULATION;
820 mode = IEEE_B;
821 printk(KERN_INFO "B mode!\n");
822 } else if (*param == 2) {
823 modulation |= IEEE80211_OFDM_MODULATION;
824 mode = IEEE_G;
825 printk(KERN_INFO "G mode!\n");
826 } else if (*param == 3) {
827 modulation |= IEEE80211_CCK_MODULATION;
828 modulation |= IEEE80211_OFDM_MODULATION;
829 mode = IEEE_B|IEEE_G;
830 printk(KERN_INFO "B/G mode!\n");
831 }
832
833 if (ieee->proto_started) {
834 ieee80211_stop_protocol(ieee);
835 ieee->mode = mode;
836 ieee->modulation = modulation;
837 ieee80211_start_protocol(ieee);
838 } else {
839 ieee->mode = mode;
840 ieee->modulation = modulation;
841 }
842
843 up(&priv->wx_sem);
844
845 return ret;
846}
847static int r8180_wx_get_preamble(struct net_device *dev,
848 struct iw_request_info *info,
849 union iwreq_data *wrqu, char *extra)
850{
851 struct r8180_priv *priv = ieee80211_priv(dev);
852
853
854
855 down(&priv->wx_sem);
856
857
858
859 *extra = (char) priv->plcp_preamble_mode; /* 0:auto 1:short 2:long */
860 up(&priv->wx_sem);
861
862 return 0;
863}
864static int r8180_wx_set_preamble(struct net_device *dev,
865 struct iw_request_info *info,
866 union iwreq_data *wrqu, char *extra)
867{
868 struct r8180_priv *priv = ieee80211_priv(dev);
869 int ret = 0;
870
871
872 if (priv->ieee80211->bHwRadioOff)
873 return 0;
874
875 down(&priv->wx_sem);
876 if (*extra < 0 || *extra > 2)
877 ret = -1;
878 else
879 priv->plcp_preamble_mode = *((short *)extra);
880
881
882
883 up(&priv->wx_sem);
884
885 return ret;
886}
887static int r8180_wx_get_siglevel(struct net_device *dev,
888 struct iw_request_info *info,
889 union iwreq_data *wrqu, char *extra)
890{
891 struct r8180_priv *priv = ieee80211_priv(dev);
892 int ret = 0;
893
894
895
896 down(&priv->wx_sem);
897 /* Modify by hikaru 6.5 */
898 *((int *)extra) = priv->wstats.qual.level; /* for interface test; it should be priv->wstats.qual.level */
899
900
901
902 up(&priv->wx_sem);
903
904 return ret;
905}
906static int r8180_wx_get_sigqual(struct net_device *dev,
907 struct iw_request_info *info,
908 union iwreq_data *wrqu, char *extra)
909{
910 struct r8180_priv *priv = ieee80211_priv(dev);
911 int ret = 0;
912
913
914
915 down(&priv->wx_sem);
916 /* Modify by hikaru 6.5 */
917 *((int *)extra) = priv->wstats.qual.qual; /* for interface test; it should be priv->wstats.qual.qual */
918
919
920
921 up(&priv->wx_sem);
922
923 return ret;
924}
925static int r8180_wx_reset_stats(struct net_device *dev,
926 struct iw_request_info *info,
927 union iwreq_data *wrqu, char *extra)
928{
929 struct r8180_priv *priv = ieee80211_priv(dev);
930 down(&priv->wx_sem);
931
932 priv->stats.txrdu = 0;
933 priv->stats.rxrdu = 0;
934 priv->stats.rxnolast = 0;
935 priv->stats.rxnodata = 0;
936 priv->stats.rxnopointer = 0;
937 priv->stats.txnperr = 0;
938 priv->stats.txresumed = 0;
939 priv->stats.rxerr = 0;
940 priv->stats.rxoverflow = 0;
941 priv->stats.rxint = 0;
942
943 priv->stats.txnpokint = 0;
944 priv->stats.txhpokint = 0;
945 priv->stats.txhperr = 0;
946 priv->stats.ints = 0;
947 priv->stats.shints = 0;
948 priv->stats.txoverflow = 0;
949 priv->stats.rxdmafail = 0;
950 priv->stats.txbeacon = 0;
951 priv->stats.txbeaconerr = 0;
952 priv->stats.txlpokint = 0;
953 priv->stats.txlperr = 0;
954 priv->stats.txretry = 0;/* 20060601 */
955 priv->stats.rxcrcerrmin = 0 ;
956 priv->stats.rxcrcerrmid = 0;
957 priv->stats.rxcrcerrmax = 0;
958 priv->stats.rxicverr = 0;
959
960 up(&priv->wx_sem);
961
962 return 0;
963
964}
965static int r8180_wx_radio_on(struct net_device *dev,
966 struct iw_request_info *info,
967 union iwreq_data *wrqu, char *extra)
968{
969 struct r8180_priv *priv = ieee80211_priv(dev);
970
971 if (priv->ieee80211->bHwRadioOff)
972 return 0;
973
974
975 down(&priv->wx_sem);
976 priv->rf_wakeup(dev);
977
978 up(&priv->wx_sem);
979
980 return 0;
981
982}
983
984static int r8180_wx_radio_off(struct net_device *dev,
985 struct iw_request_info *info,
986 union iwreq_data *wrqu, char *extra)
987{
988 struct r8180_priv *priv = ieee80211_priv(dev);
989
990 if (priv->ieee80211->bHwRadioOff)
991 return 0;
992
993
994 down(&priv->wx_sem);
995 priv->rf_sleep(dev);
996
997 up(&priv->wx_sem);
998
999 return 0;
1000
1001}
1002static int r8180_wx_get_channelplan(struct net_device *dev,
1003 struct iw_request_info *info,
1004 union iwreq_data *wrqu, char *extra)
1005{
1006 struct r8180_priv *priv = ieee80211_priv(dev);
1007
1008
1009
1010 down(&priv->wx_sem);
1011 *extra = priv->channel_plan;
1012
1013
1014
1015 up(&priv->wx_sem);
1016
1017 return 0;
1018}
1019static int r8180_wx_set_channelplan(struct net_device *dev,
1020 struct iw_request_info *info,
1021 union iwreq_data *wrqu, char *extra)
1022{
1023 struct r8180_priv *priv = ieee80211_priv(dev);
1024 int *val = (int *)extra;
1025 int i;
1026 printk("-----in fun %s\n", __func__);
1027
1028 if (priv->ieee80211->bHwRadioOff)
1029 return 0;
1030
1031 /* unsigned long flags; */
1032 down(&priv->wx_sem);
1033 if (default_channel_plan[*val].len != 0) {
1034 priv->channel_plan = *val;
1035 /* Clear old channel map */
1036 for (i = 1; i <= MAX_CHANNEL_NUMBER; i++)
1037 GET_DOT11D_INFO(priv->ieee80211)->channel_map[i] = 0;
1038
1039 /* Set new channel map */
1040 for (i = 1; i <= default_channel_plan[*val].len; i++)
1041 GET_DOT11D_INFO(priv->ieee80211)->channel_map[default_channel_plan[*val].channel[i-1]] = 1;
1042
1043 }
1044 up(&priv->wx_sem);
1045
1046 return 0;
1047}
1048
1049static int r8180_wx_get_version(struct net_device *dev,
1050 struct iw_request_info *info,
1051 union iwreq_data *wrqu, char *extra)
1052{
1053 struct r8180_priv *priv = ieee80211_priv(dev);
1054 /* struct ieee80211_device *ieee; */
1055
1056 down(&priv->wx_sem);
1057 strcpy(extra, "1020.0808");
1058 up(&priv->wx_sem);
1059
1060 return 0;
1061}
1062
1063/* added by amy 080818 */
1064/* Receive the data rate from user input. Valid rates are from 2 to 108 (1 - 54M); if the input is 0, return to normal rate adaptation. */
1065static int r8180_wx_set_forcerate(struct net_device *dev,
1066 struct iw_request_info *info,
1067 union iwreq_data *wrqu, char *extra)
1068{
1069 struct r8180_priv *priv = ieee80211_priv(dev);
1070 u8 forcerate = *extra;
1071
1072 down(&priv->wx_sem);
1073
1074 printk("==============>%s(): forcerate is %d\n", __func__, forcerate);
1075 if ((forcerate == 2) || (forcerate == 4) || (forcerate == 11) || (forcerate == 22) || (forcerate == 12) ||
1076 (forcerate == 18) || (forcerate == 24) || (forcerate == 36) || (forcerate == 48) || (forcerate == 72) ||
1077 (forcerate == 96) || (forcerate == 108)) {
1078 priv->ForcedDataRate = 1;
1079 priv->ieee80211->rate = forcerate * 5;
1080 } else if (forcerate == 0) {
1081 priv->ForcedDataRate = 0;
1082 printk("OK! return rate adaptive\n");
1083 } else
1084 printk("ERR: wrong rate\n");
1085 up(&priv->wx_sem);
1086 return 0;
1087}
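The accepted force-rate values are the 802.11b/g rates expressed in 500 kb/s units (2 = 1 Mb/s up to 108 = 54 Mb/s); multiplying by 5 stores them in the 100 kb/s units the ieee80211 layer uses for priv->ieee80211->rate. A standalone table of that mapping, for illustration only:

#include <stdio.h>

int main(void)
{
	/* The same rate list r8180_wx_set_forcerate() accepts, in 500 kb/s units. */
	static const int forcerate[] = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 };
	unsigned int i;

	for (i = 0; i < sizeof(forcerate) / sizeof(forcerate[0]); i++)
		printf("forcerate %3d -> %4.1f Mb/s (stored as %d)\n",
		       forcerate[i], forcerate[i] / 2.0, forcerate[i] * 5);
	return 0;
}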
1088
1089static int r8180_wx_set_enc_ext(struct net_device *dev,
1090 struct iw_request_info *info,
1091 union iwreq_data *wrqu, char *extra)
1092{
1093
1094 struct r8180_priv *priv = ieee80211_priv(dev);
1095
1096 int ret = 0;
1097
1098 if (priv->ieee80211->bHwRadioOff)
1099 return 0;
1100
1101 down(&priv->wx_sem);
1102 ret = ieee80211_wx_set_encode_ext(priv->ieee80211, info, wrqu, extra);
1103 up(&priv->wx_sem);
1104 return ret;
1105
1106}
1107static int r8180_wx_set_auth(struct net_device *dev,
1108 struct iw_request_info *info,
1109 union iwreq_data *wrqu, char *extra)
1110{
1111 struct r8180_priv *priv = ieee80211_priv(dev);
1112 int ret = 0;
1113
1114 if (priv->ieee80211->bHwRadioOff)
1115 return 0;
1116
1117 down(&priv->wx_sem);
1118 ret = ieee80211_wx_set_auth(priv->ieee80211, info, &wrqu->param, extra);
1119 up(&priv->wx_sem);
1120 return ret;
1121}
1122
1123static int r8180_wx_set_mlme(struct net_device *dev,
1124 struct iw_request_info *info,
1125 union iwreq_data *wrqu, char *extra)
1126{
1127 int ret = 0;
1128 struct r8180_priv *priv = ieee80211_priv(dev);
1129
1130
1131 if (priv->ieee80211->bHwRadioOff)
1132 return 0;
1133
1134
1135 down(&priv->wx_sem);
1136#if 1
1137 ret = ieee80211_wx_set_mlme(priv->ieee80211, info, wrqu, extra);
1138#endif
1139 up(&priv->wx_sem);
1140 return ret;
1141}
1142static int r8180_wx_set_gen_ie(struct net_device *dev,
1143 struct iw_request_info *info,
1144 union iwreq_data *wrqu, char *extra)
1145{
1146 int ret = 0;
1147 struct r8180_priv *priv = ieee80211_priv(dev);
1148
1149
1150 if (priv->ieee80211->bHwRadioOff)
1151 return 0;
1152
1153 down(&priv->wx_sem);
1154#if 1
1155 ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, wrqu->data.length);
1156#endif
1157 up(&priv->wx_sem);
1158 return ret;
1159
1160
1161}
1162
1163static const iw_handler r8180_wx_handlers[] = {
1164 IW_HANDLER(SIOCGIWNAME, r8180_wx_get_name),
1165 IW_HANDLER(SIOCSIWNWID, dummy),
1166 IW_HANDLER(SIOCGIWNWID, dummy),
1167 IW_HANDLER(SIOCSIWFREQ, r8180_wx_set_freq),
1168 IW_HANDLER(SIOCGIWFREQ, r8180_wx_get_freq),
1169 IW_HANDLER(SIOCSIWMODE, r8180_wx_set_mode),
1170 IW_HANDLER(SIOCGIWMODE, r8180_wx_get_mode),
1171 IW_HANDLER(SIOCSIWSENS, r8180_wx_set_sens),
1172 IW_HANDLER(SIOCGIWSENS, r8180_wx_get_sens),
1173 IW_HANDLER(SIOCGIWRANGE, rtl8180_wx_get_range),
1174 IW_HANDLER(SIOCSIWSPY, dummy),
1175 IW_HANDLER(SIOCGIWSPY, dummy),
1176 IW_HANDLER(SIOCSIWAP, r8180_wx_set_wap),
1177 IW_HANDLER(SIOCGIWAP, r8180_wx_get_wap),
1178 IW_HANDLER(SIOCSIWMLME, r8180_wx_set_mlme),
1179 IW_HANDLER(SIOCGIWAPLIST, dummy), /* deprecated */
1180 IW_HANDLER(SIOCSIWSCAN, r8180_wx_set_scan),
1181 IW_HANDLER(SIOCGIWSCAN, r8180_wx_get_scan),
1182 IW_HANDLER(SIOCSIWESSID, r8180_wx_set_essid),
1183 IW_HANDLER(SIOCGIWESSID, r8180_wx_get_essid),
1184 IW_HANDLER(SIOCSIWNICKN, dummy),
1185 IW_HANDLER(SIOCGIWNICKN, dummy),
1186 IW_HANDLER(SIOCSIWRATE, r8180_wx_set_rate),
1187 IW_HANDLER(SIOCGIWRATE, r8180_wx_get_rate),
1188 IW_HANDLER(SIOCSIWRTS, r8180_wx_set_rts),
1189 IW_HANDLER(SIOCGIWRTS, r8180_wx_get_rts),
1190 IW_HANDLER(SIOCSIWFRAG, r8180_wx_set_frag),
1191 IW_HANDLER(SIOCGIWFRAG, r8180_wx_get_frag),
1192 IW_HANDLER(SIOCSIWTXPOW, dummy),
1193 IW_HANDLER(SIOCGIWTXPOW, dummy),
1194 IW_HANDLER(SIOCSIWRETRY, r8180_wx_set_retry),
1195 IW_HANDLER(SIOCGIWRETRY, r8180_wx_get_retry),
1196 IW_HANDLER(SIOCSIWENCODE, r8180_wx_set_enc),
1197 IW_HANDLER(SIOCGIWENCODE, r8180_wx_get_enc),
1198 IW_HANDLER(SIOCSIWPOWER, r8180_wx_set_power),
1199 IW_HANDLER(SIOCGIWPOWER, r8180_wx_get_power),
1200 IW_HANDLER(SIOCSIWGENIE, r8180_wx_set_gen_ie),
1201 IW_HANDLER(SIOCSIWAUTH, r8180_wx_set_auth),
1202 IW_HANDLER(SIOCSIWENCODEEXT, r8180_wx_set_enc_ext),
1203};
1204
1205static const struct iw_priv_args r8180_private_args[] = {
1206 {
1207 SIOCIWFIRSTPRIV + 0x0,
1208 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "badcrc"
1209 },
1210 { SIOCIWFIRSTPRIV + 0x1,
1211 0, 0, "dummy"
1212
1213 },
1214 {
1215 SIOCIWFIRSTPRIV + 0x2,
1216 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "beaconint"
1217 },
1218 { SIOCIWFIRSTPRIV + 0x3,
1219 0, 0, "dummy"
1220
1221 },
1222 {
1223 SIOCIWFIRSTPRIV + 0x4,
1224 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan"
1225
1226 },
1227 { SIOCIWFIRSTPRIV + 0x5,
1228 0, 0, "dummy"
1229
1230 },
1231 {
1232 SIOCIWFIRSTPRIV + 0x6,
1233 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx"
1234
1235 },
1236 { SIOCIWFIRSTPRIV + 0x7,
1237 0, 0, "dummy"
1238
1239 },
1240 {
1241 SIOCIWFIRSTPRIV + 0x8,
1242 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setiwmode"
1243 },
1244 {
1245 SIOCIWFIRSTPRIV + 0x9,
1246 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 32, "getiwmode"
1247 },
1248 {
1249 SIOCIWFIRSTPRIV + 0xA,
1250 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setpreamble"
1251 },
1252 {
1253 SIOCIWFIRSTPRIV + 0xB,
1254 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpreamble"
1255 },
1256 { SIOCIWFIRSTPRIV + 0xC,
1257 0, 0, "dummy"
1258 },
1259 {
1260 SIOCIWFIRSTPRIV + 0xD,
1261 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getrssi"
1262 },
1263 { SIOCIWFIRSTPRIV + 0xE,
1264 0, 0, "dummy"
1265 },
1266 {
1267 SIOCIWFIRSTPRIV + 0xF,
1268 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getlinkqual"
1269 },
1270 {
1271 SIOCIWFIRSTPRIV + 0x10,
1272 0, 0, "resetstats"
1273 },
1274 {
1275 SIOCIWFIRSTPRIV + 0x11,
1276 0, 0, "dummy"
1277 },
1278 {
1279 SIOCIWFIRSTPRIV + 0x12,
1280 0, 0, "radioon"
1281 },
1282 {
1283 SIOCIWFIRSTPRIV + 0x13,
1284 0, 0, "radiooff"
1285 },
1286 {
1287 SIOCIWFIRSTPRIV + 0x14,
1288 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setchannel"
1289 },
1290 {
1291 SIOCIWFIRSTPRIV + 0x15,
1292 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getchannel"
1293 },
1294 {
1295 SIOCIWFIRSTPRIV + 0x16,
1296 0, 0, "dummy"
1297 },
1298 {
1299 SIOCIWFIRSTPRIV + 0x17,
1300 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 32, "getversion"
1301 },
1302 {
1303 SIOCIWFIRSTPRIV + 0x18,
1304 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "setrate"
1305 },
1306};
1307
1308
1309static iw_handler r8180_private_handler[] = {
1310 r8180_wx_set_crcmon, /*SIOCIWSECONDPRIV*/
1311 dummy,
1312 r8180_wx_set_beaconinterval,
1313 dummy,
1314 /* r8180_wx_set_monitor_type, */
1315 r8180_wx_set_scan_type,
1316 dummy,
1317 r8180_wx_set_rawtx,
1318 dummy,
1319 r8180_wx_set_iwmode,
1320 r8180_wx_get_iwmode,
1321 r8180_wx_set_preamble,
1322 r8180_wx_get_preamble,
1323 dummy,
1324 r8180_wx_get_siglevel,
1325 dummy,
1326 r8180_wx_get_sigqual,
1327 r8180_wx_reset_stats,
1328 dummy,/* r8180_wx_get_stats */
1329 r8180_wx_radio_on,
1330 r8180_wx_radio_off,
1331 r8180_wx_set_channelplan,
1332 r8180_wx_get_channelplan,
1333 dummy,
1334 r8180_wx_get_version,
1335 r8180_wx_set_forcerate,
1336};
1337
1338static inline int is_same_network(struct ieee80211_network *src,
1339 struct ieee80211_network *dst,
1340 struct ieee80211_device *ieee)
1341{
1342 /* A network is only a duplicate if the channel, BSSID, ESSID
1343 * and the capability field (in particular IBSS and BSS) all match.
1344 * We treat all <hidden> with the same BSSID and channel
1345 * as one network
1346 */
1347 if (src->channel != dst->channel)
1348 return 0;
1349
1350 if (memcmp(src->bssid, dst->bssid, ETH_ALEN) != 0)
1351 return 0;
1352
1353 if (ieee->iw_mode != IW_MODE_INFRA) {
1354 if (src->ssid_len != dst->ssid_len)
1355 return 0;
1356 if (memcmp(src->ssid, dst->ssid, src->ssid_len) != 0)
1357 return 0;
1358 }
1359
1360 if ((src->capability & WLAN_CAPABILITY_IBSS) !=
1361 (dst->capability & WLAN_CAPABILITY_IBSS))
1362 return 0;
1363 if ((src->capability & WLAN_CAPABILITY_BSS) !=
1364 (dst->capability & WLAN_CAPABILITY_BSS))
1365 return 0;
1366
1367 return 1;
1368}
1369
1370/* WB modified to show signal to GUI on 18-01-2008 */
1371static struct iw_statistics *r8180_get_wireless_stats(struct net_device *dev)
1372{
1373 struct r8180_priv *priv = ieee80211_priv(dev);
1374 struct ieee80211_device *ieee = priv->ieee80211;
1375 struct iw_statistics *wstats = &priv->wstats;
1376 int tmp_level = 0;
1377 int tmp_qual = 0;
1378 int tmp_noise = 0;
1379
1380 if (ieee->state < IEEE80211_LINKED) {
1381 wstats->qual.qual = 0;
1382 wstats->qual.level = 0;
1383 wstats->qual.noise = 0;
1384 wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
1385 return wstats;
1386 }
1387
1388 tmp_level = ieee->current_network.stats.signal;
1389 tmp_qual = ieee->current_network.stats.signalstrength;
1390 tmp_noise = ieee->current_network.stats.noise;
1391
1392 wstats->qual.level = tmp_level;
1393 wstats->qual.qual = tmp_qual;
1394 wstats->qual.noise = tmp_noise;
1395 wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
1396 return wstats;
1397}
1398
1399struct iw_handler_def r8180_wx_handlers_def = {
1400 .standard = r8180_wx_handlers,
1401 .num_standard = ARRAY_SIZE(r8180_wx_handlers),
1402 .private = r8180_private_handler,
1403 .num_private = ARRAY_SIZE(r8180_private_handler),
1404 .num_private_args = ARRAY_SIZE(r8180_private_args),
1405 .get_wireless_stats = r8180_get_wireless_stats,
1406 .private_args = (struct iw_priv_args *)r8180_private_args,
1407};
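A short editorial note on how the two private tables above pair up: the wireless-extensions core dispatches a private ioctl SIOCIWFIRSTPRIV + n to r8180_private_handler[n], and r8180_private_args describes the in/out argument types and the name iwpriv shows for each slot. The sketch below only illustrates the index arithmetic; SIOCIWFIRSTPRIV's value is taken from <linux/wireless.h> and is otherwise an assumption of this example.

#include <stdio.h>

#define SIOCIWFIRSTPRIV 0x8BE0 /* value from <linux/wireless.h> */

/* Roughly what the WE core does to pick a private handler. */
static unsigned int priv_handler_index(unsigned int cmd)
{
	return cmd - SIOCIWFIRSTPRIV; /* e.g. +0x8 -> index 8 -> "setiwmode" */
}

int main(void)
{
	printf("index for cmd 0x%X = %u\n", SIOCIWFIRSTPRIV + 0x8,
	       priv_handler_index(SIOCIWFIRSTPRIV + 0x8));
	return 0;
}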
1408
1409
diff --git a/drivers/staging/rtl8187se/r8180_wx.h b/drivers/staging/rtl8187se/r8180_wx.h
deleted file mode 100644
index d471520ac772..000000000000
--- a/drivers/staging/rtl8187se/r8180_wx.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 This is part of rtl8180 OpenSource driver - v 0.3
3 Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
4 Released under the terms of GPL (General Public Licence)
5
6 Parts of this driver are based on the GPL part of the official realtek driver
7 Parts of this driver are based on the rtl8180 driver skeleton from Patric Schenke & Andres Salomon
8 Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver
9
10 We want to thank the authors of those projects and the authors of the Ndiswrapper project.
11*/
12
13/* this file (will) contains wireless extension handlers*/
14
15#ifndef R8180_WX_H
16#define R8180_WX_H
17#include <linux/wireless.h>
18#include "ieee80211/ieee80211.h"
19extern struct iw_handler_def r8180_wx_handlers_def;
20
21#endif
diff --git a/drivers/staging/rtl8187se/r8185b_init.c b/drivers/staging/rtl8187se/r8185b_init.c
deleted file mode 100644
index cc6f100814f3..000000000000
--- a/drivers/staging/rtl8187se/r8185b_init.c
+++ /dev/null
@@ -1,1464 +0,0 @@
1/*
2 * Copyright (c) Realtek Semiconductor Corp. All rights reserved.
3 *
4 * Module Name:
5 * r8185b_init.c
6 *
7 * Abstract:
8 * Hardware Initialization and Hardware IO for RTL8185B
9 *
10 * Major Change History:
11 * When Who What
12 * ---------- --------------- -------------------------------
13 * 2006-11-15 Xiong Created
14 *
15 * Notes:
16 * This file is ported from RTL8185B Windows driver.
17 *
18 *
19 */
20
21/*--------------------------Include File------------------------------------*/
22#include <linux/spinlock.h>
23#include "r8180_hw.h"
24#include "r8180.h"
25#include "r8180_rtl8225.h" /* RTL8225 Radio frontend */
26#include "r8180_93cx6.h" /* Card EEPROM */
27#include "r8180_wx.h"
28#include "ieee80211/dot11d.h"
29/* #define CONFIG_RTL8180_IO_MAP */
30#define TC_3W_POLL_MAX_TRY_CNT 5
31
32static u8 MAC_REG_TABLE[][2] = {
33 /*
34 * PAGE 0:
35 * 0x34(BRSR), 0xBE(RATE_FALLBACK_CTL), 0x1E0(ARFR) would set in
36 * HwConfigureRTL8185()
37 * 0x272(RFSW_CTRL), 0x1CE(AESMSK_QC) set in InitializeAdapter8185().
38 * 0x1F0~0x1F8 set in MacConfig_85BASIC()
39 */
40 {0x08, 0xae}, {0x0a, 0x72}, {0x5b, 0x42},
41 {0x84, 0x88}, {0x85, 0x24}, {0x88, 0x54}, {0x8b, 0xb8}, {0x8c, 0x03},
42 {0x8d, 0x40}, {0x8e, 0x00}, {0x8f, 0x00}, {0x5b, 0x18}, {0x91, 0x03},
43 {0x94, 0x0F}, {0x95, 0x32},
44 {0x96, 0x00}, {0x97, 0x07}, {0xb4, 0x22}, {0xdb, 0x00},
45 {0xf0, 0x32}, {0xf1, 0x32}, {0xf2, 0x00}, {0xf3, 0x00}, {0xf4, 0x32},
46 {0xf5, 0x43}, {0xf6, 0x00}, {0xf7, 0x00}, {0xf8, 0x46}, {0xf9, 0xa4},
47 {0xfa, 0x00}, {0xfb, 0x00}, {0xfc, 0x96}, {0xfd, 0xa4}, {0xfe, 0x00},
48 {0xff, 0x00},
49
50 /*
51 * PAGE 1:
52 * For Flextronics system Logo PCIHCT failure:
53 * 0x1C4~0x1CD set no-zero value to avoid PCI configuration
54 * space 0x45[7]=1
55 */
56 {0x5e, 0x01},
57 {0x58, 0x00}, {0x59, 0x00}, {0x5a, 0x04}, {0x5b, 0x00}, {0x60, 0x24},
58 {0x61, 0x97}, {0x62, 0xF0}, {0x63, 0x09}, {0x80, 0x0F}, {0x81, 0xFF},
59 {0x82, 0xFF}, {0x83, 0x03},
60 /* lzm add 080826 */
61 {0xC4, 0x22}, {0xC5, 0x22}, {0xC6, 0x22}, {0xC7, 0x22}, {0xC8, 0x22},
62 /* lzm add 080826 */
63 {0xC9, 0x22}, {0xCA, 0x22}, {0xCB, 0x22}, {0xCC, 0x22}, {0xCD, 0x22},
64 {0xe2, 0x00},
65
66
67 /* PAGE 2: */
68 {0x5e, 0x02},
69 {0x0c, 0x04}, {0x4c, 0x30}, {0x4d, 0x08}, {0x50, 0x05}, {0x51, 0xf5},
70 {0x52, 0x04}, {0x53, 0xa0}, {0x54, 0xff}, {0x55, 0xff}, {0x56, 0xff},
71 {0x57, 0xff}, {0x58, 0x08}, {0x59, 0x08}, {0x5a, 0x08}, {0x5b, 0x08},
72 {0x60, 0x08}, {0x61, 0x08}, {0x62, 0x08}, {0x63, 0x08}, {0x64, 0x2f},
73 {0x8c, 0x3f}, {0x8d, 0x3f}, {0x8e, 0x3f},
74 {0x8f, 0x3f}, {0xc4, 0xff}, {0xc5, 0xff}, {0xc6, 0xff}, {0xc7, 0xff},
75 {0xc8, 0x00}, {0xc9, 0x00}, {0xca, 0x80}, {0xcb, 0x00},
76
77 /* PAGE 0: */
78 {0x5e, 0x00}, {0x9f, 0x03}
79 };
80
81
82static u8 ZEBRA_AGC[] = {
83 0,
84 0x7E, 0x7E, 0x7E, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A, 0x79, 0x78, 0x77, 0x76,
85 0x75, 0x74, 0x73, 0x72, 0x71, 0x70, 0x6F, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A,
86 0x69, 0x68, 0x67, 0x66, 0x65, 0x64, 0x63, 0x62, 0x48, 0x47, 0x46, 0x45,
87 0x44, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x08, 0x07,
88 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
90 0x0f, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x15, 0x16, 0x17, 0x17, 0x18, 0x18,
91 0x19, 0x1a, 0x1a, 0x1b, 0x1b, 0x1c, 0x1c, 0x1d, 0x1d, 0x1d, 0x1e, 0x1e,
92 0x1f, 0x1f, 0x1f, 0x20, 0x20, 0x20, 0x20, 0x21, 0x21, 0x21, 0x22, 0x22,
93 0x22, 0x23, 0x23, 0x24, 0x24, 0x25, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27,
94 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F
95 };
96
97static u32 ZEBRA_RF_RX_GAIN_TABLE[] = {
98 0x0096, 0x0076, 0x0056, 0x0036, 0x0016, 0x01f6, 0x01d6, 0x01b6,
99 0x0196, 0x0176, 0x00F7, 0x00D7, 0x00B7, 0x0097, 0x0077, 0x0057,
100 0x0037, 0x00FB, 0x00DB, 0x00BB, 0x00FF, 0x00E3, 0x00C3, 0x00A3,
101 0x0083, 0x0063, 0x0043, 0x0023, 0x0003, 0x01E3, 0x01C3, 0x01A3,
102 0x0183, 0x0163, 0x0143, 0x0123, 0x0103
103 };
104
105static u8 OFDM_CONFIG[] = {
106 /* OFDM reg0x06[7:0]=0xFF: Enable power saving mode in RX */
107 /* OFDM reg0x3C[4]=1'b1: Enable RX power saving mode */
108 /* ofdm 0x3a = 0x7b ,(original : 0xfb) For ECS shielding room TP test */
109 /* 0x00 */
110 0x10, 0x0F, 0x0A, 0x0C, 0x14, 0xFA, 0xFF, 0x50,
111 0x00, 0x50, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00,
112 /* 0x10 */
113 0x40, 0x00, 0x40, 0x00, 0x00, 0x00, 0xA8, 0x26,
114 0x32, 0x33, 0x06, 0xA5, 0x6F, 0x55, 0xC8, 0xBB,
115 /* 0x20 */
116 0x0A, 0xE1, 0x2C, 0x4A, 0x86, 0x83, 0x34, 0x00,
117 0x4F, 0x24, 0x6F, 0xC2, 0x03, 0x40, 0x80, 0x00,
118 /* 0x30 */
119 0xC0, 0xC1, 0x58, 0xF1, 0x00, 0xC4, 0x90, 0x3e,
120 0xD8, 0x3C, 0x7B, 0x10, 0x10
121 };
122
123 /*---------------------------------------------------------------
124 * Hardware IO
125 * the code is ported from Windows source code
126 *---------------------------------------------------------------
127 */
128
129static u8 PlatformIORead1Byte(struct net_device *dev, u32 offset)
130{
131 return read_nic_byte(dev, offset);
132}
133
134static void PlatformIOWrite1Byte(struct net_device *dev, u32 offset, u8 data)
135{
136 write_nic_byte(dev, offset, data);
137 /*
138 * To make sure write operation is completed,
139 * 2005.11.09, by rcnjko.
140 */
141 read_nic_byte(dev, offset);
142}
143
144static void PlatformIOWrite2Byte(struct net_device *dev, u32 offset, u16 data)
145{
146 write_nic_word(dev, offset, data);
147 /*
148 * To make sure write operation is completed,
149 * 2005.11.09, by rcnjko.
150 */
151 read_nic_word(dev, offset);
152}
153
154static void PlatformIOWrite4Byte(struct net_device *dev, u32 offset, u32 data)
155{
156 if (offset == PhyAddr) {
157 /* For Base Band configuration. */
158 unsigned char cmdByte;
159 unsigned long dataBytes;
160 unsigned char idx;
161 u8 u1bTmp;
162
163 cmdByte = (u8)(data & 0x000000ff);
164 dataBytes = data>>8;
165
166 /*
167 * 071010, rcnjko:
168 * The critical section is only BB read/write race
169 * condition. Assumption:
170 * 1. We assume NO one will access the BB at DIRQL; otherwise,
171 * the system will crash when acquiring the
172 * spinlock in such a context.
173 * 2. PlatformIOWrite4Byte() MUST NOT be recursive.
174 */
175 /* NdisAcquireSpinLock( &(pDevice->IoSpinLock) ); */
176
177 for (idx = 0; idx < 30; idx++) {
178 /* Make sure command bit is clear before access it. */
179 u1bTmp = PlatformIORead1Byte(dev, PhyAddr);
180 if ((u1bTmp & BIT7) == 0)
181 break;
182 else
183 mdelay(10);
184 }
185
186 for (idx = 0; idx < 3; idx++)
187 PlatformIOWrite1Byte(dev, offset+1+idx,
188 ((u8 *)&dataBytes)[idx]);
189
190 write_nic_byte(dev, offset, cmdByte);
191
192 /* NdisReleaseSpinLock( &(pDevice->IoSpinLock) ); */
193 } else {
194 write_nic_dword(dev, offset, data);
195 /*
196 * To make sure write operation is completed, 2005.11.09,
197 * by rcnjko.
198 */
199 read_nic_dword(dev, offset);
200 }
201}
202
203static void SetOutputEnableOfRfPins(struct net_device *dev)
204{
205 write_nic_word(dev, RFPinsEnable, 0x1bff);
206}
207
208static bool HwHSSIThreeWire(struct net_device *dev,
209 u8 *pDataBuf,
210 bool write)
211{
212 u8 TryCnt;
213 u8 u1bTmp;
214
215 /* Check if WE and RE are cleared. */
216 for (TryCnt = 0; TryCnt < TC_3W_POLL_MAX_TRY_CNT; TryCnt++) {
217 u1bTmp = read_nic_byte(dev, SW_3W_CMD1);
218 if ((u1bTmp & (SW_3W_CMD1_RE|SW_3W_CMD1_WE)) == 0)
219 break;
220
221 udelay(10);
222 }
223 if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) {
224 netdev_err(dev,
225 "HwThreeWire(): CmdReg: %#X RE|WE bits are not clear!!\n",
226 u1bTmp);
227 return false;
228 }
229
230 /* RTL8187S HSSI Read/Write Function */
231 u1bTmp = read_nic_byte(dev, RF_SW_CONFIG);
232 u1bTmp |= RF_SW_CFG_SI; /* reg08[1]=1 Serial Interface(SI) */
233 write_nic_byte(dev, RF_SW_CONFIG, u1bTmp);
234
235 /* jong: HW SI read must set reg84[3]=0. */
236 u1bTmp = read_nic_byte(dev, RFPinsSelect);
237 u1bTmp &= ~BIT3;
238 write_nic_byte(dev, RFPinsSelect, u1bTmp);
239 /* Fill up data buffer for write operation. */
240
241 /* SI - reg274[3:0] : RF register's address. The same data word is */
242 /* loaded for both directions; only the command bit (WE/RE) differs. */
243 write_nic_word(dev, SW_3W_DB0, *((u16 *)pDataBuf));
246
247 /* Set up command: WE or RE. */
248 if (write)
249 write_nic_byte(dev, SW_3W_CMD1, SW_3W_CMD1_WE);
250 else
251 write_nic_byte(dev, SW_3W_CMD1, SW_3W_CMD1_RE);
252
253
254 /* Check if DONE is set. */
255 for (TryCnt = 0; TryCnt < TC_3W_POLL_MAX_TRY_CNT; TryCnt++) {
256 u1bTmp = read_nic_byte(dev, SW_3W_CMD1);
257 if (u1bTmp & SW_3W_CMD1_DONE)
258 break;
259
260 udelay(10);
261 }
262
263 write_nic_byte(dev, SW_3W_CMD1, 0);
264
265 /* Read back data for read operation. */
266 if (!write) {
267 /* Serial Interface : reg363_362[11:0] */
268 *((u16 *)pDataBuf) = read_nic_word(dev, SI_DATA_READ);
269 *((u16 *)pDataBuf) &= 0x0FFF;
270 }
271
272 return true;
273}
274
275void RF_WriteReg(struct net_device *dev, u8 offset, u16 data)
276{
277 u16 reg = (data << 4) | (offset & 0x0f);
278 HwHSSIThreeWire(dev, (u8 *)&reg, true);
279}
280
281u16 RF_ReadReg(struct net_device *dev, u8 offset)
282{
283 u16 reg = offset & 0x0f;
284 HwHSSIThreeWire(dev, (u8 *)&reg, false);
285 return reg;
286}
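For reference, the 16-bit word handed to HwHSSIThreeWire() by RF_WriteReg()/RF_ReadReg() above carries the RF register address in bits [3:0] and the 12-bit data in the upper bits, and a read keeps only the low 12 bits of the word returned through SI_DATA_READ. A minimal standalone sketch of that packing, assuming only standard C:

#include <stdint.h>
#include <stdio.h>

/* addr goes into bits [3:0], the 12-bit data into bits [15:4],
 * mirroring the packing in RF_WriteReg(). */
static uint16_t si_write_word(uint8_t addr, uint16_t data)
{
	return (uint16_t)((data << 4) | (addr & 0x0f));
}

/* RF_ReadReg() masks the returned word down to its 12-bit payload. */
static uint16_t si_read_data(uint16_t returned)
{
	return returned & 0x0fff;
}

int main(void)
{
	printf("write word = 0x%04X\n", si_write_word(0x02, 0x88D));
	printf("read data  = 0x%03X\n", si_read_data(0xF123));
	return 0;
}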
287
288static u8 ReadBBPortUchar(struct net_device *dev, u32 addr)
289{
290 PlatformIOWrite4Byte(dev, PhyAddr, addr & 0xffffff7f);
291 return PlatformIORead1Byte(dev, PhyDataR);
292}
293
294/* by Owen on 04/07/14 for writing BB register successfully */
295static void WriteBBPortUchar(struct net_device *dev, u32 Data)
296{
297 PlatformIOWrite4Byte(dev, PhyAddr, Data);
298 ReadBBPortUchar(dev, Data);
299}
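As a reading aid for the indirect BB access used above: PlatformIOWrite4Byte(dev, PhyAddr, ...) takes a 32-bit word whose low byte is the command/address byte and whose upper three bytes are data, and ReadBBPortUchar() clears bit 7 of that word to turn the access into a read. The snippet below is only an illustration of that word layout, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Build the PhyAddr word: data in bits [31:8], command byte in [7:0]. */
static uint32_t bb_word(uint8_t cmd, uint32_t data24)
{
	return ((data24 & 0xffffff) << 8) | cmd;
}

/* Mirror ReadBBPortUchar(): clear bit 7 so the access becomes a read. */
static uint32_t bb_read_word(uint32_t addr)
{
	return addr & 0xffffff7f;
}

int main(void)
{
	printf("write word 0x%08X\n", bb_word(0x8E, 0x000080));
	printf("read  word 0x%08X\n", bb_read_word(0x00001080));
	return 0;
}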
300
301/*
302 * Description:
303 * Perform Antenna settings with antenna diversity on 87SE.
304 * Created by Roger, 2008.01.25.
305 */
306bool SetAntennaConfig87SE(struct net_device *dev,
307 u8 DefaultAnt, /* 0: Main, 1: Aux. */
308 bool bAntDiversity) /* 1:Enable, 0: Disable. */
309{
310 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
311 bool bAntennaSwitched = true;
312 /* 0x00 = disabled, 0x80 = enabled */
313 u8 ant_diversity_offset = 0x00;
314
315 /*
316 * printk("SetAntennaConfig87SE(): DefaultAnt(%d), bAntDiversity(%d)\n",
317 * DefaultAnt, bAntDiversity);
318 */
319
320 /* Threshold for antenna diversity. */
321 write_phy_cck(dev, 0x0c, 0x09); /* Reg0c : 09 */
322
323 if (bAntDiversity) /* Enable Antenna Diversity. */
324 ant_diversity_offset = 0x80;
325
326 if (DefaultAnt == 1) { /* aux Antenna */
327 /* Mac register, aux antenna */
328 write_nic_byte(dev, ANTSEL, 0x00);
329
330 /* Config CCK RX antenna. */
331 write_phy_cck(dev, 0x11, 0xbb); /* Reg11 : bb */
332
333 /* Reg01 : 47 | ant_diversity_offset */
334 write_phy_cck(dev, 0x01, 0x47|ant_diversity_offset);
335
336 /* Config OFDM RX antenna. */
337 write_phy_ofdm(dev, 0x0D, 0x54); /* Reg0d : 54 */
338 /* Reg18 : 32 */
339 write_phy_ofdm(dev, 0x18, 0x32|ant_diversity_offset);
340 } else { /* main Antenna */
341 /* Mac register, main antenna */
342 write_nic_byte(dev, ANTSEL, 0x03);
343
344 /* Config CCK RX antenna. */
345 write_phy_cck(dev, 0x11, 0x9b); /* Reg11 : 9b */
346 /* Reg01 : 47 */
347 write_phy_cck(dev, 0x01, 0x47|ant_diversity_offset);
348
349 /* Config OFDM RX antenna. */
350 write_phy_ofdm(dev, 0x0D, 0x5c); /* Reg0d : 5c */
351 /*Reg18 : 32 */
352 write_phy_ofdm(dev, 0x18, 0x32|ant_diversity_offset);
353 }
354 priv->CurrAntennaIndex = DefaultAnt; /* Update default settings. */
355 return bAntennaSwitched;
356}
357/*
358 *--------------------------------------------------------------
359 * Hardware Initialization.
360 * the code is ported from Windows source code
361 *--------------------------------------------------------------
362 */
363
364static void ZEBRA_Config_85BASIC_HardCode(struct net_device *dev)
365{
366
367 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
368 u32 i;
369 u32 addr, data;
370 u32 u4bRegOffset, u4bRegValue;
371 u16 u4bRF23, u4bRF24;
372 u8 u1b24E;
373 int d_cut = 0;
374
375
376/*
377 *===========================================================================
378 * 87S_PCIE :: RADIOCFG.TXT
379 *===========================================================================
380 */
381
382
383 /* Page1 : reg16-reg30 */
384 RF_WriteReg(dev, 0x00, 0x013f); mdelay(1); /* switch to page1 */
385 u4bRF23 = RF_ReadReg(dev, 0x08); mdelay(1);
386 u4bRF24 = RF_ReadReg(dev, 0x09); mdelay(1);
387
388 if (u4bRF23 == 0x818 && u4bRF24 == 0x70C) {
389 d_cut = 1;
390 netdev_info(dev, "card type changed from C- to D-cut\n");
391 }
392
393 /* Page0 : reg0-reg15 */
394
395 RF_WriteReg(dev, 0x00, 0x009f); mdelay(1);/* 1 */
396 RF_WriteReg(dev, 0x01, 0x06e0); mdelay(1);
397 RF_WriteReg(dev, 0x02, 0x004d); mdelay(1);/* 2 */
398 RF_WriteReg(dev, 0x03, 0x07f1); mdelay(1);/* 3 */
399 RF_WriteReg(dev, 0x04, 0x0975); mdelay(1);
400 RF_WriteReg(dev, 0x05, 0x0c72); mdelay(1);
401 RF_WriteReg(dev, 0x06, 0x0ae6); mdelay(1);
402 RF_WriteReg(dev, 0x07, 0x00ca); mdelay(1);
403 RF_WriteReg(dev, 0x08, 0x0e1c); mdelay(1);
404 RF_WriteReg(dev, 0x09, 0x02f0); mdelay(1);
405 RF_WriteReg(dev, 0x0a, 0x09d0); mdelay(1);
406 RF_WriteReg(dev, 0x0b, 0x01ba); mdelay(1);
407 RF_WriteReg(dev, 0x0c, 0x0640); mdelay(1);
408 RF_WriteReg(dev, 0x0d, 0x08df); mdelay(1);
409 RF_WriteReg(dev, 0x0e, 0x0020); mdelay(1);
410 RF_WriteReg(dev, 0x0f, 0x0990); mdelay(1);
411
412 /* Page1 : reg16-reg30 */
413 RF_WriteReg(dev, 0x00, 0x013f); mdelay(1);
414 RF_WriteReg(dev, 0x03, 0x0806); mdelay(1);
415 RF_WriteReg(dev, 0x04, 0x03a7); mdelay(1);
416 RF_WriteReg(dev, 0x05, 0x059b); mdelay(1);
417 RF_WriteReg(dev, 0x06, 0x0081); mdelay(1);
418 RF_WriteReg(dev, 0x07, 0x01A0); mdelay(1);
419/*
420 * Don't write RF23/RF24 to make a difference between 87S C cut and D cut.
421 * asked by SD3 stevenl.
422 */
423 RF_WriteReg(dev, 0x0a, 0x0001); mdelay(1);
424 RF_WriteReg(dev, 0x0b, 0x0418); mdelay(1);
425
426 if (d_cut) {
427 RF_WriteReg(dev, 0x0c, 0x0fbe); mdelay(1);
428 RF_WriteReg(dev, 0x0d, 0x0008); mdelay(1);
429 /* RX LO buffer */
430 RF_WriteReg(dev, 0x0e, 0x0807); mdelay(1);
431 } else {
432 RF_WriteReg(dev, 0x0c, 0x0fbe); mdelay(1);
433 RF_WriteReg(dev, 0x0d, 0x0008); mdelay(1);
434 /* RX LO buffer */
435 RF_WriteReg(dev, 0x0e, 0x0806); mdelay(1);
436 }
437
438 RF_WriteReg(dev, 0x0f, 0x0acc); mdelay(1);
439 RF_WriteReg(dev, 0x00, 0x01d7); mdelay(1); /* 6 */
440 RF_WriteReg(dev, 0x03, 0x0e00); mdelay(1);
441 RF_WriteReg(dev, 0x04, 0x0e50); mdelay(1);
442
443 for (i = 0; i <= 36; i++) {
444 RF_WriteReg(dev, 0x01, i); mdelay(1);
445 RF_WriteReg(dev, 0x02, ZEBRA_RF_RX_GAIN_TABLE[i]); mdelay(1);
446 }
447
448 RF_WriteReg(dev, 0x05, 0x0203); mdelay(1); /* 203, 343 */
449 RF_WriteReg(dev, 0x06, 0x0200); mdelay(1); /* 400 */
450 /* switch to reg16-reg30, and HSSI disable 137 */
451 RF_WriteReg(dev, 0x00, 0x0137); mdelay(1);
452 mdelay(10); /* Delay 10 ms. */ /* 0xfd */
453
454 /* Z4 synthesizer loop filter setting, 392 */
455 RF_WriteReg(dev, 0x0d, 0x0008); mdelay(1);
456 mdelay(10); /* Delay 10 ms. */ /* 0xfd */
457
458 /* switch to reg0-reg15, and HSSI disable */
459 RF_WriteReg(dev, 0x00, 0x0037); mdelay(1);
460 mdelay(10); /* Delay 10 ms. */ /* 0xfd */
461
462 /* CBC on, Tx Rx disable, High gain */
463 RF_WriteReg(dev, 0x04, 0x0160); mdelay(1);
464 mdelay(10); /* Delay 10 ms. */ /* 0xfd */
465
466 /* Z4 set to channel 1 */
467 RF_WriteReg(dev, 0x07, 0x0080); mdelay(1);
468 mdelay(10); /* Delay 10 ms. */ /* 0xfd */
469
470 RF_WriteReg(dev, 0x02, 0x088D); mdelay(1); /* LC calibration */
471 mdelay(200); /* Delay 200 ms. */ /* 0xfd */
472 mdelay(10); /* Delay 10 ms. */ /* 0xfd */
473 mdelay(10); /* Delay 10 ms. */ /* 0xfd */
474
475 /* switch to reg16-reg30 137, and HSSI disable 137 */
476 RF_WriteReg(dev, 0x00, 0x0137); mdelay(1);
477 mdelay(10); /* Delay 10 ms. */ /* 0xfd */
478
479 RF_WriteReg(dev, 0x07, 0x0000); mdelay(1);
480 RF_WriteReg(dev, 0x07, 0x0180); mdelay(1);
481 RF_WriteReg(dev, 0x07, 0x0220); mdelay(1);
482 RF_WriteReg(dev, 0x07, 0x03E0); mdelay(1);
483
484 /* DAC calibration off 20070702 */
485 RF_WriteReg(dev, 0x06, 0x00c1); mdelay(1);
486 RF_WriteReg(dev, 0x0a, 0x0001); mdelay(1);
487 /* For crystal calibration, added by Roger, 2007.12.11. */
488 if (priv->bXtalCalibration) { /* reg 30. */
489 /*
490 * enable crystal calibration.
491 * RF Reg[30], (1)Xin:[12:9], Xout:[8:5], addr[4:0].
492 * (2)PA Pwr delay timer[15:14], default: 2.4us,
493 * set BIT15=0
494 * (3)RF signal on/off when calibration[13], default: on,
495 * set BIT13=0.
497 * So we should subtract a 4-bit offset.
497 */
498 RF_WriteReg(dev, 0x0f, (priv->XtalCal_Xin<<5) |
499 (priv->XtalCal_Xout<<1) | BIT11 | BIT9); mdelay(1);
500 netdev_info(dev, "ZEBRA_Config_85BASIC_HardCode(): (%02x)\n",
501 (priv->XtalCal_Xin<<5) | (priv->XtalCal_Xout<<1) |
502 BIT11 | BIT9);
503 } else {
504 /* using default value. Xin=6, Xout=6. */
505 RF_WriteReg(dev, 0x0f, 0x0acc); mdelay(1);
506 }
507 /* switch to reg0-reg15, and HSSI enable */
508 RF_WriteReg(dev, 0x00, 0x00bf); mdelay(1);
509 /* Rx BB start calibration, 00c//+edward */
510 RF_WriteReg(dev, 0x0d, 0x08df); mdelay(1);
511 /* temperature meter off */
512 RF_WriteReg(dev, 0x02, 0x004d); mdelay(1);
513 RF_WriteReg(dev, 0x04, 0x0975); mdelay(1); /* Rx mode */
514 mdelay(10); /* Delay 10 ms.*/ /* 0xfe */
515 mdelay(10); /* Delay 10 ms.*/ /* 0xfe */
516 mdelay(10); /* Delay 10 ms.*/ /* 0xfe */
517 /* Rx mode*/ /*+edward */
518 RF_WriteReg(dev, 0x00, 0x0197); mdelay(1);
519 /* Rx mode*/ /*+edward */
520 RF_WriteReg(dev, 0x05, 0x05ab); mdelay(1);
521 /* Rx mode*/ /*+edward */
522 RF_WriteReg(dev, 0x00, 0x009f); mdelay(1);
523 /* Rx mode*/ /*+edward */
524 RF_WriteReg(dev, 0x01, 0x0000); mdelay(1);
525 /* Rx mode*/ /*+edward */
526 RF_WriteReg(dev, 0x02, 0x0000); mdelay(1);
527 /* power save parameters. */
528 u1b24E = read_nic_byte(dev, 0x24E);
529 write_nic_byte(dev, 0x24E, (u1b24E & (~(BIT5|BIT6))));
530
531 /*======================================================================
532 *
533 *======================================================================
534 * CCKCONF.TXT
535 *======================================================================
536 *
537 * [POWER SAVE] Power Saving Parameters by jong. 2007-11-27
538 * CCK reg0x00[7]=1'b1 :power saving for TX (default)
539 * CCK reg0x00[6]=1'b1: power saving for RX (default)
540 * CCK reg0x06[4]=1'b1: turn off channel estimation related
541 * circuits if not doing channel estimation.
542 * CCK reg0x06[3]=1'b1: turn off unused circuits before cca = 1
543 * CCK reg0x06[2]=1'b1: turn off cck's circuit if macrst =0
544 */
545
546 write_phy_cck(dev, 0x00, 0xc8);
547 write_phy_cck(dev, 0x06, 0x1c);
548 write_phy_cck(dev, 0x10, 0x78);
549 write_phy_cck(dev, 0x2e, 0xd0);
550 write_phy_cck(dev, 0x2f, 0x06);
551 write_phy_cck(dev, 0x01, 0x46);
552
553 /* power control */
554 write_nic_byte(dev, CCK_TXAGC, 0x10);
555 write_nic_byte(dev, OFDM_TXAGC, 0x1B);
556 write_nic_byte(dev, ANTSEL, 0x03);
557
558
559
560 /*
561 *======================================================================
562 * AGC.txt
563 *======================================================================
564 */
565
566 write_phy_ofdm(dev, 0x00, 0x12);
567
568 for (i = 0; i < 128; i++) {
569
570 data = ZEBRA_AGC[i+1];
571 data = data << 8;
572 data = data | 0x0000008F;
573
574 addr = i + 0x80; /* enable writing AGC table */
575 addr = addr << 8;
576 addr = addr | 0x0000008E;
577
578 WriteBBPortUchar(dev, data);
579 WriteBBPortUchar(dev, addr);
580 WriteBBPortUchar(dev, 0x0000008E);
581 }
582
583 PlatformIOWrite4Byte(dev, PhyAddr, 0x00001080); /* Annie, 2006-05-05 */
584
585 /*
586 *======================================================================
587 *
588 *======================================================================
589 * OFDMCONF.TXT
590 *======================================================================
591 */
592
593 for (i = 0; i < 60; i++) {
594 u4bRegOffset = i;
595 u4bRegValue = OFDM_CONFIG[i];
596
597 WriteBBPortUchar(dev,
598 (0x00000080 |
599 (u4bRegOffset & 0x7f) |
600 ((u4bRegValue & 0xff) << 8)));
601 }
602
603 /*
604 *======================================================================
605 * by amy for antenna
606 *======================================================================
607 */
608 /*
609 * Config Sw/Hw Combinational Antenna Diversity. Added by Roger,
610 * 2008.02.26.
611 */
612 SetAntennaConfig87SE(dev, priv->bDefaultAntenna1,
613 priv->bSwAntennaDiverity);
614}
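A note on the AGC-table loop inside the function above: each table entry appears to be pushed as a data word ending in 0x8F, followed by an address word built from the index plus the 0x80 "enable writing AGC table" offset and ending in 0x8E, and finally a bare 0x0000008E to latch/clear. A hedged, standalone sketch of just that word construction (WriteBBPortUchar() is replaced by a printf(); register roles are inferred from the code, not documented):

#include <stdint.h>
#include <stdio.h>

static void agc_words(uint8_t index, uint8_t gain,
		      uint32_t *data_word, uint32_t *addr_word)
{
	*data_word = ((uint32_t)gain << 8) | 0x0000008F;
	*addr_word = ((uint32_t)(index + 0x80) << 8) | 0x0000008E;
}

int main(void)
{
	uint32_t d, a;

	agc_words(0, 0x7E, &d, &a); /* first entry of ZEBRA_AGC[] */
	printf("data=0x%08X addr=0x%08X latch=0x0000008E\n", d, a);
	return 0;
}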
615
616
617void UpdateInitialGain(struct net_device *dev)
618{
619 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
620
621 /* lzm add 080826 */
622 if (priv->eRFPowerState != RF_ON) {
623 /* Don't access BB/RF while the PLL is disabled.
624 * RT_TRACE(COMP_DIG, DBG_LOUD, ("UpdateInitialGain -
625 * pHalData->eRFPowerState!=RF_ON\n"));
626 * Fall back to the original state.
627 */
628 priv->InitialGain = priv->InitialGainBackUp;
629 return;
630 }
631
632 switch (priv->InitialGain) {
633 case 1: /* m861dBm */
634 write_phy_ofdm(dev, 0x17, 0x26); mdelay(1);
635 write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
636 write_phy_ofdm(dev, 0x05, 0xfa); mdelay(1);
637 break;
638
639 case 2: /* m862dBm */
640 write_phy_ofdm(dev, 0x17, 0x36); mdelay(1);
641 write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
642 write_phy_ofdm(dev, 0x05, 0xfa); mdelay(1);
643 break;
644
645 case 3: /* m863dBm */
646 write_phy_ofdm(dev, 0x17, 0x36); mdelay(1);
647 write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
648 write_phy_ofdm(dev, 0x05, 0xfb); mdelay(1);
649 break;
650
651 case 4: /* m864dBm */
652 write_phy_ofdm(dev, 0x17, 0x46); mdelay(1);
653 write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
654 write_phy_ofdm(dev, 0x05, 0xfb); mdelay(1);
655 break;
656
657 case 5: /* m82dBm */
658 write_phy_ofdm(dev, 0x17, 0x46); mdelay(1);
659 write_phy_ofdm(dev, 0x24, 0x96); mdelay(1);
660 write_phy_ofdm(dev, 0x05, 0xfb); mdelay(1);
661 break;
662
663 case 6: /* m78dBm */
664 write_phy_ofdm(dev, 0x17, 0x56); mdelay(1);
665 write_phy_ofdm(dev, 0x24, 0x96); mdelay(1);
666 write_phy_ofdm(dev, 0x05, 0xfc); mdelay(1);
667 break;
668
669 case 7: /* m74dBm */
670 write_phy_ofdm(dev, 0x17, 0x56); mdelay(1);
671 write_phy_ofdm(dev, 0x24, 0xa6); mdelay(1);
672 write_phy_ofdm(dev, 0x05, 0xfc); mdelay(1);
673 break;
674
675 case 8:
676 write_phy_ofdm(dev, 0x17, 0x66); mdelay(1);
677 write_phy_ofdm(dev, 0x24, 0xb6); mdelay(1);
678 write_phy_ofdm(dev, 0x05, 0xfc); mdelay(1);
679 break;
680
681 default: /* MP */
682 write_phy_ofdm(dev, 0x17, 0x26); mdelay(1);
683 write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
684 write_phy_ofdm(dev, 0x05, 0xfa); mdelay(1);
685 break;
686 }
687}
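Side note: the switch above always programs the same three OFDM registers (0x17, 0x24 and 0x05), so the per-gain values could equally be kept in a small lookup table. The following is only a sketch of that idea under the stated assumption, not the driver's actual implementation:

#include <stdint.h>
#include <stdio.h>

struct initial_gain_regs {
	uint8_t reg17, reg24, reg05;
};

/* Values copied from the cases of UpdateInitialGain() above. */
static const struct initial_gain_regs gain_tbl[] = {
	[1] = { 0x26, 0x86, 0xfa },
	[2] = { 0x36, 0x86, 0xfa },
	[3] = { 0x36, 0x86, 0xfb },
	[4] = { 0x46, 0x86, 0xfb },
	[5] = { 0x46, 0x96, 0xfb },
	[6] = { 0x56, 0x96, 0xfc },
	[7] = { 0x56, 0xa6, 0xfc },
	[8] = { 0x66, 0xb6, 0xfc },
};

int main(void)
{
	const struct initial_gain_regs *g = &gain_tbl[4];

	printf("gain 4: 0x17<=%02x 0x24<=%02x 0x05<=%02x\n",
	       g->reg17, g->reg24, g->reg05);
	return 0;
}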
688/*
689 * Description:
690 * Tx Power tracking mechanism routine on 87SE.
691 * Created by Roger, 2007.12.11.
692 */
693static void InitTxPwrTracking87SE(struct net_device *dev)
694{
695 u32 u4bRfReg;
696
697 u4bRfReg = RF_ReadReg(dev, 0x02);
698
699 /* Enable Thermal meter indication. */
700 RF_WriteReg(dev, 0x02, u4bRfReg|PWR_METER_EN); mdelay(1);
701}
702
703static void PhyConfig8185(struct net_device *dev)
704{
705 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
706 write_nic_dword(dev, RCR, priv->ReceiveConfig);
707 priv->RFProgType = read_nic_byte(dev, CONFIG4) & 0x03;
708 /* RF config */
709 ZEBRA_Config_85BASIC_HardCode(dev);
710 /* Set default initial gain state to 4, approved by SD3 DZ, by Bruce,
711 * 2007-06-06.
712 */
713 if (priv->bDigMechanism) {
714 if (priv->InitialGain == 0)
715 priv->InitialGain = 4;
716 }
717
718 /*
719 * Enable thermal meter indication to implement TxPower tracking
720 * on 87SE. We initialize thermal meter here to avoid unsuccessful
721 * configuration. Added by Roger, 2007.12.11.
722 */
723 if (priv->bTxPowerTrack)
724 InitTxPwrTracking87SE(dev);
725
726 priv->InitialGainBackUp = priv->InitialGain;
727 UpdateInitialGain(dev);
728
729 return;
730}
731
732static void HwConfigureRTL8185(struct net_device *dev)
733{
734 /*
735 * RTL8185_TODO: Determine Retrylimit, TxAGC,
736 * AutoRateFallback control.
737 */
738 u8 bUNIVERSAL_CONTROL_RL = 0;
739 u8 bUNIVERSAL_CONTROL_AGC = 1;
740 u8 bUNIVERSAL_CONTROL_ANT = 1;
741 u8 bAUTO_RATE_FALLBACK_CTL = 1;
742 u8 val8;
743 write_nic_word(dev, BRSR, 0x0fff);
744 /* Retry limit */
745 val8 = read_nic_byte(dev, CW_CONF);
746
747 if (bUNIVERSAL_CONTROL_RL)
748 val8 = val8 & 0xfd;
749 else
750 val8 = val8 | 0x02;
751
752 write_nic_byte(dev, CW_CONF, val8);
753
754 /* Tx AGC */
755 val8 = read_nic_byte(dev, TXAGC_CTL);
756 if (bUNIVERSAL_CONTROL_AGC) {
757 write_nic_byte(dev, CCK_TXAGC, 128);
758 write_nic_byte(dev, OFDM_TXAGC, 128);
759 val8 = val8 & 0xfe;
760 } else {
761 val8 = val8 | 0x01;
762 }
763
764
765 write_nic_byte(dev, TXAGC_CTL, val8);
766
767 /* Tx Antenna including Feedback control */
768 val8 = read_nic_byte(dev, TXAGC_CTL);
769
770 if (bUNIVERSAL_CONTROL_ANT) {
771 write_nic_byte(dev, ANTSEL, 0x00);
772 val8 = val8 & 0xfd;
773 } else {
774 val8 = val8 & (val8|0x02); /* xiong-2006-11-15 */
775 }
776
777 write_nic_byte(dev, TXAGC_CTL, val8);
778
779 /* Auto Rate fallback control */
780 val8 = read_nic_byte(dev, RATE_FALLBACK);
781 val8 &= 0x7c;
782 if (bAUTO_RATE_FALLBACK_CTL) {
783 val8 |= RATE_FALLBACK_CTL_ENABLE | RATE_FALLBACK_CTL_AUTO_STEP1;
784
785 /* <RJ_TODO_8185B> We shall set up the ARFR according
786 * to the user's setting.
787 */
788 PlatformIOWrite2Byte(dev, ARFR, 0x0fff); /* set 1M ~ 54Mbps. */
789 }
790 write_nic_byte(dev, RATE_FALLBACK, val8);
791}
792
793static void MacConfig_85BASIC_HardCode(struct net_device *dev)
794{
795 /*
796 *======================================================================
797 * MACREG.TXT
798 *======================================================================
799 */
800 int nLinesRead = 0;
801 u32 u4bRegOffset, u4bRegValue, u4bPageIndex = 0;
802 int i;
803
804 nLinesRead = sizeof(MAC_REG_TABLE)/2;
805
806 for (i = 0; i < nLinesRead; i++) { /* nLinesRead=101 */
807 u4bRegOffset = MAC_REG_TABLE[i][0];
808 u4bRegValue = MAC_REG_TABLE[i][1];
809
810 if (u4bRegOffset == 0x5e)
811 u4bPageIndex = u4bRegValue;
812 else
813 u4bRegOffset |= (u4bPageIndex << 8);
814
815 write_nic_byte(dev, u4bRegOffset, (u8)u4bRegValue);
816 }
817 /* ================================================================= */
818}
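For clarity, the loop above implements page-indexed register addressing: a table entry of {0x5e, n} switches to page n, and every following offset is OR'd with (page << 8) to form the absolute register address (so offset 0xC4 on page 1 lands at 0x1C4). A minimal standalone sketch of that addressing, with write_nic_byte() replaced by a printf():

#include <stdint.h>
#include <stdio.h>

static uint32_t page_index;

static void mac_table_write(uint8_t offset, uint8_t value)
{
	uint32_t reg = offset;

	if (offset == 0x5e)
		page_index = value;  /* page switch entry */
	else
		reg |= page_index << 8;

	printf("write 0x%02x to reg 0x%03x\n", value, reg);
}

int main(void)
{
	mac_table_write(0x5e, 0x01); /* switch to page 1 */
	mac_table_write(0xC4, 0x22); /* lands at 0x1C4   */
	return 0;
}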
819
820static void MacConfig_85BASIC(struct net_device *dev)
821{
822
823 u8 u1DA;
824 MacConfig_85BASIC_HardCode(dev);
825
826 /* ================================================================= */
827
828 /* Follow TID_AC_MAP of WMac. */
829 write_nic_word(dev, TID_AC_MAP, 0xfa50);
830
831 /* Interrupt Migration: Jong suggested we set 0x0000 first,
832 * 2005.12.14, by rcnjko.
833 */
834 write_nic_word(dev, IntMig, 0x0000);
835
836 /* Prevent TPC from causing CRC errors. Added by Annie, 2006-06-10. */
837 PlatformIOWrite4Byte(dev, 0x1F0, 0x00000000);
838 PlatformIOWrite4Byte(dev, 0x1F4, 0x00000000);
839 PlatformIOWrite1Byte(dev, 0x1F8, 0x00);
840
841 /* Asked for by SD3 CM Lin, 2006.06.27, by rcnjko. */
842
843 /*
844 * power save parameter based on
845 * "87SE power save parameters 20071127.doc", as follow.
846 */
847
848 /* Enable DA10 TX power saving */
849 u1DA = read_nic_byte(dev, PHYPR);
850 write_nic_byte(dev, PHYPR, (u1DA | BIT2));
851
852 /* POWER: */
853 write_nic_word(dev, 0x360, 0x1000);
854 write_nic_word(dev, 0x362, 0x1000);
855
856 /* AFE. */
857 write_nic_word(dev, 0x370, 0x0560);
858 write_nic_word(dev, 0x372, 0x0560);
859 write_nic_word(dev, 0x374, 0x0DA4);
860 write_nic_word(dev, 0x376, 0x0DA4);
861 write_nic_word(dev, 0x378, 0x0560);
862 write_nic_word(dev, 0x37A, 0x0560);
863 write_nic_word(dev, 0x37C, 0x00EC);
864 write_nic_word(dev, 0x37E, 0x00EC); /* +edward */
865 write_nic_byte(dev, 0x24E, 0x01);
866}
867
868static u8 GetSupportedWirelessMode8185(struct net_device *dev)
869{
870 return WIRELESS_MODE_B | WIRELESS_MODE_G;
871}
872
873static void
874ActUpdateChannelAccessSetting(struct net_device *dev,
875 enum wireless_mode mode,
876 struct chnl_access_setting *chnl_access_setting)
877{
878 AC_CODING eACI;
879
880 /*
881 * <RJ_TODO_8185B>
882 * TODO: We still don't know how to set up these registers,
883 * just follow WMAC to verify the 8185B FPGA.
884 *
885 * <RJ_TODO_8185B>
886 * Jong said the CWmin/CWmax registers are not functional in 8185B,
887 * so we shall write the channel-access related settings into the
888 * AC parameter registers,
889 * even in a non-QBSS.
890 */
891
892 /* Suggested by Jong, 2005.12.08. */
893 chnl_access_setting->sifs_timer = 0x22;
894 chnl_access_setting->difs_timer = 0x1C; /* 2006.06.02, by rcnjko. */
895 chnl_access_setting->slot_time_timer = 9; /* 2006.06.02, by rcnjko. */
896 /*
897 * Suggested by wcchu, it is the default value of EIFS register,
898 * 2005.12.08.
899 */
900 chnl_access_setting->eifs_timer = 0x5B;
901 chnl_access_setting->cwmin_index = 3; /* 2006.06.02, by rcnjko. */
902 chnl_access_setting->cwmax_index = 7; /* 2006.06.02, by rcnjko. */
903
904 write_nic_byte(dev, SIFS, chnl_access_setting->sifs_timer);
905 /*
906 * Rewritten from direct use of PlatformEFIOWrite1Byte(),
907 * by Annie, 2006-03-29.
908 */
909 write_nic_byte(dev, SLOT, chnl_access_setting->slot_time_timer);
910
911 write_nic_byte(dev, EIFS, chnl_access_setting->eifs_timer);
912
913 /*
914 * <RJ_EXPR_QOS> Suggested by wcchu, it is the default value of EIFS
915 * register, 2005.12.08.
916 */
917 write_nic_byte(dev, AckTimeOutReg, 0x5B);
918
919 for (eACI = 0; eACI < AC_MAX; eACI++)
920 write_nic_byte(dev, ACM_CONTROL, 0);
921}
922
923static void ActSetWirelessMode8185(struct net_device *dev, u8 btWirelessMode)
924{
925 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
926 struct ieee80211_device *ieee = priv->ieee80211;
927 u8 btSupportedWirelessMode = GetSupportedWirelessMode8185(dev);
928
929 if ((btWirelessMode & btSupportedWirelessMode) == 0) {
930 /*
931 * Don't switch to an unsupported wireless mode, 2006.02.15,
932 * by rcnjko.
933 */
934 DMESGW("ActSetWirelessMode8185(): WirelessMode(%d) is not supported (%d)!\n",
935 btWirelessMode, btSupportedWirelessMode);
936 return;
937 }
938
939 /* 1. Assign wireless mode to switch if necessary. */
940 if (btWirelessMode == WIRELESS_MODE_AUTO) {
941 if ((btSupportedWirelessMode & WIRELESS_MODE_A)) {
942 btWirelessMode = WIRELESS_MODE_A;
943 } else if (btSupportedWirelessMode & WIRELESS_MODE_G) {
944 btWirelessMode = WIRELESS_MODE_G;
945
946 } else if ((btSupportedWirelessMode & WIRELESS_MODE_B)) {
947 btWirelessMode = WIRELESS_MODE_B;
948 } else {
949 DMESGW("ActSetWirelessMode8185(): No valid wireless mode supported, btSupportedWirelessMode(%x)!!!\n",
950 btSupportedWirelessMode);
951 btWirelessMode = WIRELESS_MODE_B;
952 }
953 }
954
955 /*
956 * 2. Switch band: RF or BB specific actions,
957 * for example, refresh tables in omc8255, or change initial gain if
958 * necessary. Nothing to do for Zebra to switch band. Update current
959 * wireless mode if we switch to specified band successfully.
960 */
961
962 ieee->mode = (enum wireless_mode)btWirelessMode;
963
964 /* 3. Change related setting. */
965 if (ieee->mode == WIRELESS_MODE_A)
966 DMESG("WIRELESS_MODE_A\n");
967 else if (ieee->mode == WIRELESS_MODE_B)
968 DMESG("WIRELESS_MODE_B\n");
969 else if (ieee->mode == WIRELESS_MODE_G)
970 DMESG("WIRELESS_MODE_G\n");
971
972 ActUpdateChannelAccessSetting(dev, ieee->mode,
973 &priv->ChannelAccessSetting);
974}
975
976void rtl8185b_irq_enable(struct net_device *dev)
977{
978 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
979
980 priv->irq_enabled = 1;
981 write_nic_dword(dev, IMR, priv->IntrMask);
982}
983
984static void MgntDisconnectIBSS(struct net_device *dev)
985{
986 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
987 u8 i;
988
989 for (i = 0; i < 6; i++)
990 priv->ieee80211->current_network.bssid[i] = 0x55;
991
992
993
994 priv->ieee80211->state = IEEE80211_NOLINK;
995 /*
996 * Stop Beacon.
997 *
998 * Vista adds an Adhoc profile with the HW radio off until
999 * OID_DOT11_RESET_REQUEST; the driver would set MSR=NO_LINK,
1000 * then turn the HW radio on, and the MgntQueue got stuck
1001 * because the Bcn DMA wasn't complete until the Bcn packet was sent.
1002 *
1003 * Disable the Beacon Queue Own bit, as suggested by jong.
1004 */
1005 ieee80211_stop_send_beacons(priv->ieee80211);
1006
1007 priv->ieee80211->link_change(dev);
1008 notify_wx_assoc_event(priv->ieee80211);
1009}
1010
1011static void MlmeDisassociateRequest(struct net_device *dev, u8 *asSta, u8 asRsn)
1012{
1013 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1014 u8 i;
1015
1016 SendDisassociation(priv->ieee80211, asSta, asRsn);
1017
1018 if (memcmp(priv->ieee80211->current_network.bssid, asSta, 6) == 0) {
1019 /* ShuChen TODO: change media status. */
1020
1021 for (i = 0; i < 6; i++)
1022 priv->ieee80211->current_network.bssid[i] = 0x22;
1023
1024 ieee80211_disassociate(priv->ieee80211);
1025 }
1026}
1027
1028static void MgntDisconnectAP(struct net_device *dev, u8 asRsn)
1029{
1030 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1031
1032 /*
1033 * Commented out by rcnjko, 2005.01.27:
1034 * I move SecClearAllKeys() to MgntActSet_802_11_DISASSOCIATE().
1035 *
1036 * 2004/09/15, kcwu: the key should be cleared, or the new
1037 * handshake will not succeed.
1038 *
1039 * In WPA/WPA2 we need to clear all keys, because a new key will
1040 * be set after the new handshake. 2004.10.11, by rcnjko.
1041 */
1042 MlmeDisassociateRequest(dev, priv->ieee80211->current_network.bssid,
1043 asRsn);
1044
1045 priv->ieee80211->state = IEEE80211_NOLINK;
1046}
1047
1048static bool MgntDisconnect(struct net_device *dev, u8 asRsn)
1049{
1050 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1051 /*
1052 * Schedule a workitem to wake up for ps mode, 070109, by rcnjko.
1053 */
1054
1055 if (IS_DOT11D_ENABLE(priv->ieee80211))
1056 Dot11d_Reset(priv->ieee80211);
1057 /* In adhoc mode, update beacon frame. */
1058 if (priv->ieee80211->state == IEEE80211_LINKED) {
1059 if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
1060 MgntDisconnectIBSS(dev);
1061
1062 if (priv->ieee80211->iw_mode == IW_MODE_INFRA) {
1063 /*
1064 * We clear the key here instead of in MgntDisconnectAP(),
1065 * because MgntActSet_802_11_DISASSOCIATE() is an
1066 * interface called by the OS, e.g.
1067 * OID_802_11_DISASSOCIATE in Windows, whereas
1068 * MgntDisconnectAP() handles the disassociation
1069 * related work towards the AP, e.g. sending the
1070 * Disassoc frame to the AP. 2005.01.27, by rcnjko.
1071 */
1072 MgntDisconnectAP(dev, asRsn);
1073 }
1074 /* Indicate Disconnect, 2005.02.23, by rcnjko. */
1075 }
1076 return true;
1077}
1078/*
1079 * Description:
1080 * Change the RF Power State.
1081 * Note that, only MgntActSet_RF_State() is allowed to set
1082 * HW_VAR_RF_STATE.
1083 *
1084 * Assumption:
1085 * PASSIVE LEVEL.
1086 */
1087static bool SetRFPowerState(struct net_device *dev,
1088 enum rt_rf_power_state eRFPowerState)
1089{
1090 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1091 bool bResult = false;
1092
1093 if (eRFPowerState == priv->eRFPowerState)
1094 return bResult;
1095
1096 bResult = SetZebraRFPowerState8185(dev, eRFPowerState);
1097
1098 return bResult;
1099}
1100
1101bool MgntActSet_RF_State(struct net_device *dev, enum rt_rf_power_state StateToSet,
1102 u32 ChangeSource)
1103{
1104 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1105 bool bActionAllowed = false;
1106 bool bConnectBySSID = false;
1107 enum rt_rf_power_state rtState;
1108 u16 RFWaitCounter = 0;
1109 unsigned long flag;
1110 /*
1111 * Prevent the race condition of RF state change. By Bruce,
1112 * 2007-11-28. Only one thread can change the RF state at one time,
1113 * and others should wait to be executed.
1114 */
1115 while (true) {
1116 spin_lock_irqsave(&priv->rf_ps_lock, flag);
1117 if (priv->RFChangeInProgress) {
1118 spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
1119 /* Set RF after the previous action is done. */
1120 while (priv->RFChangeInProgress) {
1121 RFWaitCounter++;
1122 udelay(1000); /* 1 ms */
1123
1124 /*
1125 * If we wait too long, return false to
1126 * avoid getting stuck here.
1127 */
1128 if (RFWaitCounter > 1000) { /* 1sec */
1129 netdev_info(dev, "MgntActSet_RF_State(): Wait too long to set RF\n");
1130 /* TODO: Reset RF state? */
1131 return false;
1132 }
1133 }
1134 } else {
1135 priv->RFChangeInProgress = true;
1136 spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
1137 break;
1138 }
1139 }
1140 rtState = priv->eRFPowerState;
1141
1142 switch (StateToSet) {
1143 case RF_ON:
1144 /*
1145 * Turn the RF on regardless of the IPS setting, because we need
1146 * to report the RF state to Ndis under Vista; otherwise Windows
1147 * does not allow the driver to perform a site survey any
1148 * more. By Bruce, 2007-10-02.
1149 */
1150 priv->RfOffReason &= (~ChangeSource);
1151
1152 if (!priv->RfOffReason) {
1153 priv->RfOffReason = 0;
1154 bActionAllowed = true;
1155
1156 if (rtState == RF_OFF &&
1157 ChangeSource >= RF_CHANGE_BY_HW)
1158 bConnectBySSID = true;
1159 }
1160 break;
1161
1162 case RF_OFF:
1163 /* 070125, rcnjko: we always keep connected in AP mode. */
1164
1165 if (priv->RfOffReason > RF_CHANGE_BY_IPS) {
1166 /*
1167 * 060808, Annie:
1168 * Disconnect to current BSS when radio off.
1169 * Asked by QuanTa.
1170 *
1171 * Calling MgntDisconnect() instead of
1172 * MgntActSet_802_11_DISASSOCIATE(), because
1173 * we do NOT need to set ssid to dummy ones.
1174 */
1175 MgntDisconnect(dev, disas_lv_ss);
1176 /*
1177 * Clear content of bssDesc[] and bssDesc4Query[]
1178 * to avoid reporting old bss to UI.
1179 */
1180 }
1181
1182 priv->RfOffReason |= ChangeSource;
1183 bActionAllowed = true;
1184 break;
1185 case RF_SLEEP:
1186 priv->RfOffReason |= ChangeSource;
1187 bActionAllowed = true;
1188 break;
1189 default:
1190 break;
1191 }
1192
1193 if (bActionAllowed) {
1194 /* Config HW to the specified mode. */
1195 SetRFPowerState(dev, StateToSet);
1196 }
1197
1198 /* Release RF spinlock */
1199 spin_lock_irqsave(&priv->rf_ps_lock, flag);
1200 priv->RFChangeInProgress = false;
1201 spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
1202 return bActionAllowed;
1203}
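A short note on the RfOffReason bookkeeping the function above relies on: each ChangeSource is a bit in a mask of "who wants the RF off"; turning the RF off adds the caller's bit, and turning it on only proceeds once no reason bit remains set. A minimal standalone sketch of that idea; the bit values below are placeholders, not the driver's constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REASON_SW  0x1 /* placeholder values for illustration */
#define REASON_HW  0x2
#define REASON_IPS 0x4

static uint32_t rf_off_reason;

static bool request_rf_on(uint32_t source)
{
	rf_off_reason &= ~source;
	return rf_off_reason == 0; /* allowed only if nobody else objects */
}

static void request_rf_off(uint32_t source)
{
	rf_off_reason |= source;
}

int main(void)
{
	request_rf_off(REASON_IPS);
	request_rf_off(REASON_HW);
	printf("on after IPS clears: %d\n", request_rf_on(REASON_IPS)); /* 0 */
	printf("on after HW clears:  %d\n", request_rf_on(REASON_HW));  /* 1 */
	return 0;
}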
1204
1205static void InactivePowerSave(struct net_device *dev)
1206{
1207 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1208 /*
1209 * The flag "bSwRfProcessing" indicates the status of the IPS
1210 * procedure and should only be set once the IPS workitem is
1211 * really scheduled. The old code set this flag before scheduling
1212 * the IPS workitem; if the previous IPS workitem had not finished
1213 * yet, scheduling the current one failed, and bSwRfProcessing
1214 * then blocked the RF-switching part of the IPS
1215 * procedure.
1216 */
1217 priv->bSwRfProcessing = true;
1218
1219 MgntActSet_RF_State(dev, priv->eInactivePowerState, RF_CHANGE_BY_IPS);
1220
1221 /*
1222 * To avoid losing CAM values across RF OFF, rewrite the CAM values after
1223 * RF ON. By Bruce, 2007-09-20.
1224 */
1225
1226 priv->bSwRfProcessing = false;
1227}
1228
1229/*
1230 * Description:
1231 * Enter the inactive power save mode. RF will be off
1232 */
1233void IPSEnter(struct net_device *dev)
1234{
1235 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1236 enum rt_rf_power_state rtState;
1237 if (priv->bInactivePs) {
1238 rtState = priv->eRFPowerState;
1239
1240 /*
1241 * Do not enter IPS in the following conditions:
1242 * (1) RF is already OFF or Sleep
1243 * (2) bSwRfProcessing (the IPS procedure is still in progress)
1244 * (3) Connected (only the disconnected state can trigger IPS)
1245 * (4) IBSS (sends Beacons)
1246 * (5) AP mode (sends Beacons)
1247 */
1248 if (rtState == RF_ON && !priv->bSwRfProcessing
1249 && (priv->ieee80211->state != IEEE80211_LINKED)) {
1250 priv->eInactivePowerState = RF_OFF;
1251 InactivePowerSave(dev);
1252 }
1253 }
1254}
1255void IPSLeave(struct net_device *dev)
1256{
1257 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1258 enum rt_rf_power_state rtState;
1259 if (priv->bInactivePs) {
1260 rtState = priv->eRFPowerState;
1261 if ((rtState == RF_OFF || rtState == RF_SLEEP) &&
1262 !priv->bSwRfProcessing
1263 && priv->RfOffReason <= RF_CHANGE_BY_IPS) {
1264 priv->eInactivePowerState = RF_ON;
1265 InactivePowerSave(dev);
1266 }
1267 }
1268}
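To make the enter/leave conditions above easier to scan, here is a hedged sketch of the two predicates as pure functions; enum and variable names are placeholders chosen for this illustration, not the driver's types:

#include <stdbool.h>
#include <stdio.h>

enum rf_state { RF_ON_S, RF_OFF_S, RF_SLEEP_S };

/* Enter IPS only when the RF is on, no IPS switch is in flight and
 * we are not linked. */
static bool may_enter_ips(enum rf_state s, bool busy, bool linked)
{
	return s == RF_ON_S && !busy && !linked;
}

/* Leave IPS only when the RF is off/sleeping, no switch is in flight
 * and the off reason is no stronger than IPS itself. */
static bool may_leave_ips(enum rf_state s, bool busy, bool off_only_by_ips)
{
	return (s == RF_OFF_S || s == RF_SLEEP_S) && !busy && off_only_by_ips;
}

int main(void)
{
	printf("%d %d\n", may_enter_ips(RF_ON_S, false, false),
	       may_leave_ips(RF_OFF_S, false, true));
	return 0;
}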
1269
1270void rtl8185b_adapter_start(struct net_device *dev)
1271{
1272 struct r8180_priv *priv = ieee80211_priv(dev);
1273 struct ieee80211_device *ieee = priv->ieee80211;
1274
1275 u8 SupportedWirelessMode;
1276 u8 InitWirelessMode;
1277 u8 bInvalidWirelessMode = 0;
1278 u8 tmpu8;
1279 u8 btCR9346;
1280 u8 TmpU1b;
1281 u8 btPSR;
1282
1283 write_nic_byte(dev, 0x24e, (BIT5|BIT6|BIT0));
1284 rtl8180_reset(dev);
1285
1286 priv->dma_poll_mask = 0;
1287 priv->dma_poll_stop_mask = 0;
1288
1289 HwConfigureRTL8185(dev);
1290 write_nic_dword(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
1291 write_nic_word(dev, MAC4, ((u32 *)dev->dev_addr)[1] & 0xffff);
1292 /* default network type to 'No Link' */
1293 write_nic_byte(dev, MSR, read_nic_byte(dev, MSR) & 0xf3);
1294 write_nic_word(dev, BcnItv, 100);
1295 write_nic_word(dev, AtimWnd, 2);
1296 PlatformIOWrite2Byte(dev, FEMR, 0xFFFF);
1297 write_nic_byte(dev, WPA_CONFIG, 0);
1298 MacConfig_85BASIC(dev);
1299 /* Override the RFSW_CTRL (MAC offset 0x272-0x273), 2006.06.07,
1300 * by rcnjko.
1301 */
1302 /* BT_DEMO_BOARD type */
1303 PlatformIOWrite2Byte(dev, RFSW_CTRL, 0x569a);
1304
1305 /*
1306 *---------------------------------------------------------------------
1307 * Set up PHY related.
1308 *---------------------------------------------------------------------
1309 */
1310 /* Enable Config3.PARAM_En to revise AnaParm. */
1311 write_nic_byte(dev, CR9346, 0xc0); /* enable config register write */
1312 tmpu8 = read_nic_byte(dev, CONFIG3);
1313 write_nic_byte(dev, CONFIG3, (tmpu8 | CONFIG3_PARM_En));
1314 /* Turn on Analog power. */
1315 /* Asked for by William, otherwise, MAC 3-wire can't work,
1316 * 2006.06.27, by rcnjko.
1317 */
1318 write_nic_dword(dev, ANAPARAM2, ANAPARM2_ASIC_ON);
1319 write_nic_dword(dev, ANAPARAM, ANAPARM_ASIC_ON);
1320 write_nic_word(dev, ANAPARAM3, 0x0010);
1321
1322 write_nic_byte(dev, CONFIG3, tmpu8);
1323 write_nic_byte(dev, CR9346, 0x00);
1324 /* enable EEM0 and EEM1 in 9346CR */
1325 btCR9346 = read_nic_byte(dev, CR9346);
1326 write_nic_byte(dev, CR9346, (btCR9346 | 0xC0));
1327
1328 /* B cut use LED1 to control HW RF on/off */
1329 TmpU1b = read_nic_byte(dev, CONFIG5);
1330 TmpU1b = TmpU1b & ~BIT3;
1331 write_nic_byte(dev, CONFIG5, TmpU1b);
1332
1333 /* disable EEM0 and EEM1 in 9346CR */
1334 btCR9346 &= ~(0xC0);
1335 write_nic_byte(dev, CR9346, btCR9346);
1336
1337 /* Enable Led (suggested by Jong) */
1338 /* B-cut RF Radio on/off 5e[3]=0 */
1339 btPSR = read_nic_byte(dev, PSR);
1340 write_nic_byte(dev, PSR, (btPSR | BIT3));
1341 /* setup initial timing for RFE. */
1342 write_nic_word(dev, RFPinsOutput, 0x0480);
1343 SetOutputEnableOfRfPins(dev);
1344 write_nic_word(dev, RFPinsSelect, 0x2488);
1345
1346 /* PHY config. */
1347 PhyConfig8185(dev);
1348
1349 /*
1350 * We assume RegWirelessMode has already been initialized before;
1351 * however, we have to validate the wireless mode here and provide
1352 * a reasonable initial value if necessary. 2005.01.13,
1353 * by rcnjko.
1354 */
1355 SupportedWirelessMode = GetSupportedWirelessMode8185(dev);
1356 if ((ieee->mode != WIRELESS_MODE_B) &&
1357 (ieee->mode != WIRELESS_MODE_G) &&
1358 (ieee->mode != WIRELESS_MODE_A) &&
1359 (ieee->mode != WIRELESS_MODE_AUTO)) {
1360 /* It should be one of B, G, A, or AUTO. */
1361 bInvalidWirelessMode = 1;
1362 } else {
1363 /* One of B, G, A, or AUTO. */
1364 /* Check if the wireless mode is supported by RF. */
1365 if ((ieee->mode != WIRELESS_MODE_AUTO) &&
1366 (ieee->mode & SupportedWirelessMode) == 0) {
1367 bInvalidWirelessMode = 1;
1368 }
1369 }
1370
1371 if (bInvalidWirelessMode || ieee->mode == WIRELESS_MODE_AUTO) {
1372 /* Auto or other invalid value. */
1373 /* Assign a wireless mode to initialize. */
1374 if ((SupportedWirelessMode & WIRELESS_MODE_A)) {
1375 InitWirelessMode = WIRELESS_MODE_A;
1376 } else if ((SupportedWirelessMode & WIRELESS_MODE_G)) {
1377 InitWirelessMode = WIRELESS_MODE_G;
1378 } else if ((SupportedWirelessMode & WIRELESS_MODE_B)) {
1379 InitWirelessMode = WIRELESS_MODE_B;
1380 } else {
1381 DMESGW("InitializeAdapter8185(): No valid wireless mode supported, SupportedWirelessMode(%x)!!!\n",
1382 SupportedWirelessMode);
1383 InitWirelessMode = WIRELESS_MODE_B;
1384 }
1385
1386 /* Initialize RegWirelessMode if it is not a valid one. */
1387 if (bInvalidWirelessMode)
1388 ieee->mode = (enum wireless_mode)InitWirelessMode;
1389
1390 } else {
1391 /* One of B, G, A. */
1392 InitWirelessMode = ieee->mode;
1393 }
1394 priv->eRFPowerState = RF_OFF;
1395 priv->RfOffReason = 0;
1397 MgntActSet_RF_State(dev, RF_ON, 0);
1399 /*
1400 * If inactive power mode is enabled, disable rf while in
1401 * disconnected state.
1402 */
1403 if (priv->bInactivePs)
1404 MgntActSet_RF_State(dev, RF_OFF, RF_CHANGE_BY_IPS);
1405
1406 ActSetWirelessMode8185(dev, (u8)(InitWirelessMode));
1407
1408 /* ----------------------------------------------------------------- */
1409
1410 rtl8185b_irq_enable(dev);
1411
1412 netif_start_queue(dev);
1413}
1414
1415void rtl8185b_rx_enable(struct net_device *dev)
1416{
1417 u8 cmd;
1418 /* for now we accept data, management & ctl frames */
1419 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1420
1421
1422 if (dev->flags & IFF_PROMISC)
1423 DMESG("NIC in promisc mode");
1424
1425 if (priv->ieee80211->iw_mode == IW_MODE_MONITOR || dev->flags &
1426 IFF_PROMISC) {
1427 priv->ReceiveConfig = priv->ReceiveConfig & (~RCR_APM);
1428 priv->ReceiveConfig = priv->ReceiveConfig | RCR_AAP;
1429 }
1430
1431 if (priv->ieee80211->iw_mode == IW_MODE_MONITOR)
1432 priv->ReceiveConfig = priv->ReceiveConfig | RCR_ACF |
1433 RCR_APWRMGT | RCR_AICV;
1434
1435
1436 if (priv->crcmon == 1 && priv->ieee80211->iw_mode == IW_MODE_MONITOR)
1437 priv->ReceiveConfig = priv->ReceiveConfig | RCR_ACRC32;
1438
1439 write_nic_dword(dev, RCR, priv->ReceiveConfig);
1440
1441 fix_rx_fifo(dev);
1442
1443 cmd = read_nic_byte(dev, CMD);
1444 write_nic_byte(dev, CMD, cmd | (1<<CMD_RX_ENABLE_SHIFT));
1445
1446}
1447
1448void rtl8185b_tx_enable(struct net_device *dev)
1449{
1450 u8 cmd;
1451 u8 byte;
1452 struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
1453
1454 write_nic_dword(dev, TCR, priv->TransmitConfig);
1455 byte = read_nic_byte(dev, MSR);
1456 byte |= MSR_LINK_ENEDCA;
1457 write_nic_byte(dev, MSR, byte);
1458
1459 fix_tx_fifo(dev);
1460
1461 cmd = read_nic_byte(dev, CMD);
1462 write_nic_byte(dev, CMD, cmd | (1<<CMD_TX_ENABLE_SHIFT));
1463}
1464
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 636ec553ae83..e305d43ebd06 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -545,20 +545,18 @@ static struct recv_frame *decryptor(struct adapter *padapter,
 static struct recv_frame *portctrl(struct adapter *adapter,
                                    struct recv_frame *precv_frame)
 {
-        u8 *psta_addr = NULL, *ptr;
+        u8 *psta_addr, *ptr;
         uint auth_alg;
         struct recv_frame *pfhdr;
         struct sta_info *psta;
         struct sta_priv *pstapriv;
         struct recv_frame *prtnframe;
-        u16 ether_type = 0;
+        u16 ether_type;
         u16 eapol_type = 0x888e;/* for Funia BD's WPA issue */
         struct rx_pkt_attrib *pattrib;
-        __be16 be_tmp;
 
 
         pstapriv = &adapter->stapriv;
-        psta = rtw_get_stainfo(pstapriv, psta_addr);
 
         auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
 
@@ -566,24 +564,23 @@ static struct recv_frame *portctrl(struct adapter *adapter,
566 pfhdr = precv_frame; 564 pfhdr = precv_frame;
567 pattrib = &pfhdr->attrib; 565 pattrib = &pfhdr->attrib;
568 psta_addr = pattrib->ta; 566 psta_addr = pattrib->ta;
567 psta = rtw_get_stainfo(pstapriv, psta_addr);
569 568
570 prtnframe = NULL; 569 prtnframe = NULL;
571 570
572 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:adapter->securitypriv.dot11AuthAlgrthm=%d\n", adapter->securitypriv.dot11AuthAlgrthm)); 571 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:adapter->securitypriv.dot11AuthAlgrthm=%d\n", adapter->securitypriv.dot11AuthAlgrthm));
573 572
574 if (auth_alg == 2) { 573 if (auth_alg == 2) {
574 /* get ether_type */
575 ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
576 memcpy(&ether_type, ptr, 2);
577 ether_type = ntohs((unsigned short)ether_type);
578
575 if ((psta != NULL) && (psta->ieee8021x_blocked)) { 579 if ((psta != NULL) && (psta->ieee8021x_blocked)) {
576 /* blocked */ 580 /* blocked */
577 /* only accept EAPOL frame */ 581 /* only accept EAPOL frame */
578 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:psta->ieee8021x_blocked==1\n")); 582 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:psta->ieee8021x_blocked==1\n"));
579 583
580 prtnframe = precv_frame;
581
582 /* get ether_type */
583 ptr = ptr+pfhdr->attrib.hdrlen+pfhdr->attrib.iv_len+LLC_HEADER_SIZE;
584 memcpy(&be_tmp, ptr, 2);
585 ether_type = ntohs(be_tmp);
586
587 if (ether_type == eapol_type) { 584 if (ether_type == eapol_type) {
588 prtnframe = precv_frame; 585 prtnframe = precv_frame;
589 } else { 586 } else {
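
Note on the rtw_recv.c hunk above: portctrl() now looks up the station only after psta_addr has been taken from the frame attributes, and the EtherType is read once, before the 802.1X-blocked check, instead of inside it. A minimal sketch of the byte-order handling involved, assuming only that the two EtherType bytes can sit at an unaligned offset in the receive buffer (the helper name is illustrative, not part of the driver):

	static u16 read_ether_type(const u8 *p)
	{
		__be16 be;

		/* memcpy() avoids an unaligned 16-bit load; ntohs() converts
		 * the big-endian wire value to host order. */
		memcpy(&be, p, sizeof(be));
		return ntohs(be);
	}

With that shape, the EAPOL test in the hunk reduces to comparing the returned value against 0x888e.
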
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 2636e7f3dbb8..cf30a08912d1 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -359,7 +359,7 @@ static char *translate_scan(struct adapter *padapter,
359 if (wpa_len > 0) { 359 if (wpa_len > 0) {
360 p = buf; 360 p = buf;
361 _rtw_memset(buf, 0, MAX_WPA_IE_LEN); 361 _rtw_memset(buf, 0, MAX_WPA_IE_LEN);
362 p += sprintf(p, "wpa_ie ="); 362 p += sprintf(p, "wpa_ie=");
363 for (i = 0; i < wpa_len; i++) 363 for (i = 0; i < wpa_len; i++)
364 p += sprintf(p, "%02x", wpa_ie[i]); 364 p += sprintf(p, "%02x", wpa_ie[i]);
365 365
@@ -376,7 +376,7 @@ static char *translate_scan(struct adapter *padapter,
376 if (rsn_len > 0) { 376 if (rsn_len > 0) {
377 p = buf; 377 p = buf;
378 _rtw_memset(buf, 0, MAX_WPA_IE_LEN); 378 _rtw_memset(buf, 0, MAX_WPA_IE_LEN);
379 p += sprintf(p, "rsn_ie ="); 379 p += sprintf(p, "rsn_ie=");
380 for (i = 0; i < rsn_len; i++) 380 for (i = 0; i < rsn_len; i++)
381 p += sprintf(p, "%02x", rsn_ie[i]); 381 p += sprintf(p, "%02x", rsn_ie[i]);
382 _rtw_memset(&iwe, 0, sizeof(iwe)); 382 _rtw_memset(&iwe, 0, sizeof(iwe));
@@ -2899,7 +2899,7 @@ static int rtw_p2p_get_status(struct net_device *dev,
2899 /* Commented by Albert 2010/10/12 */ 2899 /* Commented by Albert 2010/10/12 */
2900 /* Because of the output size limitation, I had removed the "Role" information. */ 2900 /* Because of the output size limitation, I had removed the "Role" information. */
2901 /* About the "Role" information, we will use the new private IOCTL to get the "Role" information. */ 2901 /* About the "Role" information, we will use the new private IOCTL to get the "Role" information. */
2902 sprintf(extra, "\n\nStatus =%.2d\n", rtw_p2p_state(pwdinfo)); 2902 sprintf(extra, "\n\nStatus=%.2d\n", rtw_p2p_state(pwdinfo));
2903 wrqu->data.length = strlen(extra); 2903 wrqu->data.length = strlen(extra);
2904 2904
2905 return ret; 2905 return ret;
@@ -2918,7 +2918,7 @@ static int rtw_p2p_get_req_cm(struct net_device *dev,
2918 struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev); 2918 struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
2919 struct wifidirect_info *pwdinfo = &(padapter->wdinfo); 2919 struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
2920 2920
2921 sprintf(extra, "\n\nCM =%s\n", pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req); 2921 sprintf(extra, "\n\nCM=%s\n", pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req);
2922 wrqu->data.length = strlen(extra); 2922 wrqu->data.length = strlen(extra);
2923 return ret; 2923 return ret;
2924} 2924}
@@ -2935,7 +2935,7 @@ static int rtw_p2p_get_role(struct net_device *dev,
2935 pwdinfo->p2p_peer_interface_addr[0], pwdinfo->p2p_peer_interface_addr[1], pwdinfo->p2p_peer_interface_addr[2], 2935 pwdinfo->p2p_peer_interface_addr[0], pwdinfo->p2p_peer_interface_addr[1], pwdinfo->p2p_peer_interface_addr[2],
2936 pwdinfo->p2p_peer_interface_addr[3], pwdinfo->p2p_peer_interface_addr[4], pwdinfo->p2p_peer_interface_addr[5]); 2936 pwdinfo->p2p_peer_interface_addr[3], pwdinfo->p2p_peer_interface_addr[4], pwdinfo->p2p_peer_interface_addr[5]);
2937 2937
2938 sprintf(extra, "\n\nRole =%.2d\n", rtw_p2p_role(pwdinfo)); 2938 sprintf(extra, "\n\nRole=%.2d\n", rtw_p2p_role(pwdinfo));
2939 wrqu->data.length = strlen(extra); 2939 wrqu->data.length = strlen(extra);
2940 return ret; 2940 return ret;
2941} 2941}
@@ -3022,7 +3022,7 @@ static int rtw_p2p_get_op_ch(struct net_device *dev,
3022 3022
3023 DBG_88E("[%s] Op_ch = %02x\n", __func__, pwdinfo->operating_channel); 3023 DBG_88E("[%s] Op_ch = %02x\n", __func__, pwdinfo->operating_channel);
3024 3024
3025 sprintf(extra, "\n\nOp_ch =%.2d\n", pwdinfo->operating_channel); 3025 sprintf(extra, "\n\nOp_ch=%.2d\n", pwdinfo->operating_channel);
3026 wrqu->data.length = strlen(extra); 3026 wrqu->data.length = strlen(extra);
3027 return ret; 3027 return ret;
3028} 3028}
@@ -3043,7 +3043,7 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
3043 u8 blnMatch = 0; 3043 u8 blnMatch = 0;
3044 u16 attr_content = 0; 3044 u16 attr_content = 0;
3045 uint attr_contentlen = 0; 3045 uint attr_contentlen = 0;
3046 /* 6 is the string "wpsCM =", 17 is the MAC addr, we have to clear it at wrqu->data.pointer */ 3046 /* 6 is the string "wpsCM=", 17 is the MAC addr, we have to clear it at wrqu->data.pointer */
3047 u8 attr_content_str[6 + 17] = {0x00}; 3047 u8 attr_content_str[6 + 17] = {0x00};
3048 3048
3049 /* Commented by Albert 20110727 */ 3049 /* Commented by Albert 20110727 */
@@ -3079,7 +3079,7 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
3079 rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_CONF_METHOD, (u8 *) &be_tmp, &attr_contentlen); 3079 rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_CONF_METHOD, (u8 *) &be_tmp, &attr_contentlen);
3080 if (attr_contentlen) { 3080 if (attr_contentlen) {
3081 attr_content = be16_to_cpu(be_tmp); 3081 attr_content = be16_to_cpu(be_tmp);
3082 sprintf(attr_content_str, "\n\nM =%.4d", attr_content); 3082 sprintf(attr_content_str, "\n\nM=%.4d", attr_content);
3083 blnMatch = 1; 3083 blnMatch = 1;
3084 } 3084 }
3085 } 3085 }
@@ -3091,7 +3091,7 @@ static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
3091 spin_unlock_bh(&pmlmepriv->scanned_queue.lock); 3091 spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
3092 3092
3093 if (!blnMatch) 3093 if (!blnMatch)
3094 sprintf(attr_content_str, "\n\nM = 0000"); 3094 sprintf(attr_content_str, "\n\nM=0000");
3095 3095
3096 if (copy_to_user(wrqu->data.pointer, attr_content_str, 6 + 17)) 3096 if (copy_to_user(wrqu->data.pointer, attr_content_str, 6 + 17))
3097 return -EFAULT; 3097 return -EFAULT;
@@ -3172,9 +3172,9 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
3172 spin_unlock_bh(&pmlmepriv->scanned_queue.lock); 3172 spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
3173 3173
3174 if (!blnMatch) 3174 if (!blnMatch)
3175 snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add = NULL"); 3175 snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add=NULL");
3176 else 3176 else
3177 snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X", 3177 snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add=%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
3178 attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]); 3178 attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]);
3179 3179
3180 if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str))) 3180 if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str)))
@@ -3198,7 +3198,7 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
3198 u8 blnMatch = 0; 3198 u8 blnMatch = 0;
3199 u8 dev_type[8] = {0x00}; 3199 u8 dev_type[8] = {0x00};
3200 uint dev_type_len = 0; 3200 uint dev_type_len = 0;
3201 u8 dev_type_str[17 + 9] = {0x00}; /* +9 is for the str "dev_type =", we have to clear it at wrqu->data.pointer */ 3201 u8 dev_type_str[17 + 9] = {0x00}; /* +9 is for the str "dev_type=", we have to clear it at wrqu->data.pointer */
3202 3202
3203 /* Commented by Albert 20121209 */ 3203 /* Commented by Albert 20121209 */
3204 /* The input data is the MAC address which the application wants to know its device type. */ 3204 /* The input data is the MAC address which the application wants to know its device type. */
@@ -3239,7 +3239,7 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
3239 3239
3240 memcpy(&be_tmp, dev_type, 2); 3240 memcpy(&be_tmp, dev_type, 2);
3241 type = be16_to_cpu(be_tmp); 3241 type = be16_to_cpu(be_tmp);
3242 sprintf(dev_type_str, "\n\nN =%.2d", type); 3242 sprintf(dev_type_str, "\n\nN=%.2d", type);
3243 blnMatch = 1; 3243 blnMatch = 1;
3244 } 3244 }
3245 } 3245 }
@@ -3252,7 +3252,7 @@ static int rtw_p2p_get_device_type(struct net_device *dev,
3252 spin_unlock_bh(&pmlmepriv->scanned_queue.lock); 3252 spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
3253 3253
3254 if (!blnMatch) 3254 if (!blnMatch)
3255 sprintf(dev_type_str, "\n\nN = 00"); 3255 sprintf(dev_type_str, "\n\nN=00");
3256 3256
3257 if (copy_to_user(wrqu->data.pointer, dev_type_str, 9 + 17)) { 3257 if (copy_to_user(wrqu->data.pointer, dev_type_str, 9 + 17)) {
3258 return -EFAULT; 3258 return -EFAULT;
@@ -3277,7 +3277,7 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
3277 u8 blnMatch = 0; 3277 u8 blnMatch = 0;
3278 u8 dev_name[WPS_MAX_DEVICE_NAME_LEN] = {0x00}; 3278 u8 dev_name[WPS_MAX_DEVICE_NAME_LEN] = {0x00};
3279 uint dev_len = 0; 3279 uint dev_len = 0;
3280 u8 dev_name_str[WPS_MAX_DEVICE_NAME_LEN + 5] = {0x00}; /* +5 is for the str "devN =", we have to clear it at wrqu->data.pointer */ 3280 u8 dev_name_str[WPS_MAX_DEVICE_NAME_LEN + 5] = {0x00}; /* +5 is for the str "devN=", we have to clear it at wrqu->data.pointer */
3281 3281
3282 /* Commented by Albert 20121225 */ 3282 /* Commented by Albert 20121225 */
3283 /* The input data is the MAC address which the application wants to know its device name. */ 3283 /* The input data is the MAC address which the application wants to know its device name. */
@@ -3310,7 +3310,7 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
3310 if (wpsie) { 3310 if (wpsie) {
3311 rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_DEVICE_NAME, dev_name, &dev_len); 3311 rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_DEVICE_NAME, dev_name, &dev_len);
3312 if (dev_len) { 3312 if (dev_len) {
3313 sprintf(dev_name_str, "\n\nN =%s", dev_name); 3313 sprintf(dev_name_str, "\n\nN=%s", dev_name);
3314 blnMatch = 1; 3314 blnMatch = 1;
3315 } 3315 }
3316 } 3316 }
@@ -3323,7 +3323,7 @@ static int rtw_p2p_get_device_name(struct net_device *dev,
3323 spin_unlock_bh(&pmlmepriv->scanned_queue.lock); 3323 spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
3324 3324
3325 if (!blnMatch) 3325 if (!blnMatch)
3326 sprintf(dev_name_str, "\n\nN = 0000"); 3326 sprintf(dev_name_str, "\n\nN=0000");
3327 3327
3328 if (copy_to_user(wrqu->data.pointer, dev_name_str, 5 + ((dev_len > 17) ? dev_len : 17))) 3328 if (copy_to_user(wrqu->data.pointer, dev_name_str, 5 + ((dev_len > 17) ? dev_len : 17)))
3329 return -EFAULT; 3329 return -EFAULT;
@@ -3349,7 +3349,7 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
3349 u8 attr_content[2] = {0x00}; 3349 u8 attr_content[2] = {0x00};
3350 3350
3351 u8 inv_proc_str[17 + 8] = {0x00}; 3351 u8 inv_proc_str[17 + 8] = {0x00};
3352 /* +8 is for the str "InvProc =", we have to clear it at wrqu->data.pointer */ 3352 /* +8 is for the str "InvProc=", we have to clear it at wrqu->data.pointer */
3353 3353
3354 /* Commented by Ouden 20121226 */ 3354 /* Commented by Ouden 20121226 */
3355 /* The application wants to know P2P initiation procedure is supported or not. */ 3355 /* The application wants to know P2P initiation procedure is supported or not. */
@@ -3397,12 +3397,12 @@ static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
3397 spin_unlock_bh(&pmlmepriv->scanned_queue.lock); 3397 spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
3398 3398
3399 if (!blnMatch) { 3399 if (!blnMatch) {
3400 sprintf(inv_proc_str, "\nIP =-1"); 3400 sprintf(inv_proc_str, "\nIP=-1");
3401 } else { 3401 } else {
3402 if (attr_content[0] & 0x20) 3402 if (attr_content[0] & 0x20)
3403 sprintf(inv_proc_str, "\nIP = 1"); 3403 sprintf(inv_proc_str, "\nIP=1");
3404 else 3404 else
3405 sprintf(inv_proc_str, "\nIP = 0"); 3405 sprintf(inv_proc_str, "\nIP=0");
3406 } 3406 }
3407 if (copy_to_user(wrqu->data.pointer, inv_proc_str, 8 + 17)) 3407 if (copy_to_user(wrqu->data.pointer, inv_proc_str, 8 + 17))
3408 return -EFAULT; 3408 return -EFAULT;
@@ -3512,7 +3512,7 @@ static int rtw_p2p_invite_req(struct net_device *dev,
3512 /* The input data contains two informations. */ 3512 /* The input data contains two informations. */
3513 /* 1. First information is the P2P device address which you want to send to. */ 3513 /* 1. First information is the P2P device address which you want to send to. */
3514 /* 2. Second information is the group id which combines with GO's mac address, space and GO's ssid. */ 3514 /* 2. Second information is the group id which combines with GO's mac address, space and GO's ssid. */
3515 /* Command line sample: iwpriv wlan0 p2p_set invite ="00:11:22:33:44:55 00:E0:4C:00:00:05 DIRECT-xy" */ 3515 /* Command line sample: iwpriv wlan0 p2p_set invite="00:11:22:33:44:55 00:E0:4C:00:00:05 DIRECT-xy" */
3516 /* Format: 00:11:22:33:44:55 00:E0:4C:00:00:05 DIRECT-xy */ 3516 /* Format: 00:11:22:33:44:55 00:E0:4C:00:00:05 DIRECT-xy */
3517 3517
3518 DBG_88E("[%s] data = %s\n", __func__, extra); 3518 DBG_88E("[%s] data = %s\n", __func__, extra);
@@ -3805,48 +3805,48 @@ static int rtw_p2p_set(struct net_device *dev,
3805 3805
3806#ifdef CONFIG_88EU_P2P 3806#ifdef CONFIG_88EU_P2P
3807 DBG_88E("[%s] extra = %s\n", __func__, extra); 3807 DBG_88E("[%s] extra = %s\n", __func__, extra);
3808 if (!memcmp(extra, "enable =", 7)) { 3808 if (!memcmp(extra, "enable=", 7)) {
3809 rtw_wext_p2p_enable(dev, info, wrqu, &extra[7]); 3809 rtw_wext_p2p_enable(dev, info, wrqu, &extra[7]);
3810 } else if (!memcmp(extra, "setDN =", 6)) { 3810 } else if (!memcmp(extra, "setDN=", 6)) {
3811 wrqu->data.length -= 6; 3811 wrqu->data.length -= 6;
3812 rtw_p2p_setDN(dev, info, wrqu, &extra[6]); 3812 rtw_p2p_setDN(dev, info, wrqu, &extra[6]);
3813 } else if (!memcmp(extra, "profilefound =", 13)) { 3813 } else if (!memcmp(extra, "profilefound=", 13)) {
3814 wrqu->data.length -= 13; 3814 wrqu->data.length -= 13;
3815 rtw_p2p_profilefound(dev, info, wrqu, &extra[13]); 3815 rtw_p2p_profilefound(dev, info, wrqu, &extra[13]);
3816 } else if (!memcmp(extra, "prov_disc =", 10)) { 3816 } else if (!memcmp(extra, "prov_disc=", 10)) {
3817 wrqu->data.length -= 10; 3817 wrqu->data.length -= 10;
3818 rtw_p2p_prov_disc(dev, info, wrqu, &extra[10]); 3818 rtw_p2p_prov_disc(dev, info, wrqu, &extra[10]);
3819 } else if (!memcmp(extra, "nego =", 5)) { 3819 } else if (!memcmp(extra, "nego=", 5)) {
3820 wrqu->data.length -= 5; 3820 wrqu->data.length -= 5;
3821 rtw_p2p_connect(dev, info, wrqu, &extra[5]); 3821 rtw_p2p_connect(dev, info, wrqu, &extra[5]);
3822 } else if (!memcmp(extra, "intent =", 7)) { 3822 } else if (!memcmp(extra, "intent=", 7)) {
3823 /* Commented by Albert 2011/03/23 */ 3823 /* Commented by Albert 2011/03/23 */
3824 /* The wrqu->data.length will include the null character */ 3824 /* The wrqu->data.length will include the null character */
3825 /* So, we will decrease 7 + 1 */ 3825 /* So, we will decrease 7 + 1 */
3826 wrqu->data.length -= 8; 3826 wrqu->data.length -= 8;
3827 rtw_p2p_set_intent(dev, info, wrqu, &extra[7]); 3827 rtw_p2p_set_intent(dev, info, wrqu, &extra[7]);
3828 } else if (!memcmp(extra, "ssid =", 5)) { 3828 } else if (!memcmp(extra, "ssid=", 5)) {
3829 wrqu->data.length -= 5; 3829 wrqu->data.length -= 5;
3830 rtw_p2p_set_go_nego_ssid(dev, info, wrqu, &extra[5]); 3830 rtw_p2p_set_go_nego_ssid(dev, info, wrqu, &extra[5]);
3831 } else if (!memcmp(extra, "got_wpsinfo =", 12)) { 3831 } else if (!memcmp(extra, "got_wpsinfo=", 12)) {
3832 wrqu->data.length -= 12; 3832 wrqu->data.length -= 12;
3833 rtw_p2p_got_wpsinfo(dev, info, wrqu, &extra[12]); 3833 rtw_p2p_got_wpsinfo(dev, info, wrqu, &extra[12]);
3834 } else if (!memcmp(extra, "listen_ch =", 10)) { 3834 } else if (!memcmp(extra, "listen_ch=", 10)) {
3835 /* Commented by Albert 2011/05/24 */ 3835 /* Commented by Albert 2011/05/24 */
3836 /* The wrqu->data.length will include the null character */ 3836 /* The wrqu->data.length will include the null character */
3837 /* So, we will decrease (10 + 1) */ 3837 /* So, we will decrease (10 + 1) */
3838 wrqu->data.length -= 11; 3838 wrqu->data.length -= 11;
3839 rtw_p2p_set_listen_ch(dev, info, wrqu, &extra[10]); 3839 rtw_p2p_set_listen_ch(dev, info, wrqu, &extra[10]);
3840 } else if (!memcmp(extra, "op_ch =", 6)) { 3840 } else if (!memcmp(extra, "op_ch=", 6)) {
3841 /* Commented by Albert 2011/05/24 */ 3841 /* Commented by Albert 2011/05/24 */
3842 /* The wrqu->data.length will include the null character */ 3842 /* The wrqu->data.length will include the null character */
3843 /* So, we will decrease (6 + 1) */ 3843 /* So, we will decrease (6 + 1) */
3844 wrqu->data.length -= 7; 3844 wrqu->data.length -= 7;
3845 rtw_p2p_set_op_ch(dev, info, wrqu, &extra[6]); 3845 rtw_p2p_set_op_ch(dev, info, wrqu, &extra[6]);
3846 } else if (!memcmp(extra, "invite =", 7)) { 3846 } else if (!memcmp(extra, "invite=", 7)) {
3847 wrqu->data.length -= 8; 3847 wrqu->data.length -= 8;
3848 rtw_p2p_invite_req(dev, info, wrqu, &extra[7]); 3848 rtw_p2p_invite_req(dev, info, wrqu, &extra[7]);
3849 } else if (!memcmp(extra, "persistent =", 11)) { 3849 } else if (!memcmp(extra, "persistent=", 11)) {
3850 wrqu->data.length -= 11; 3850 wrqu->data.length -= 11;
3851 rtw_p2p_set_persistent(dev, info, wrqu, &extra[11]); 3851 rtw_p2p_set_persistent(dev, info, wrqu, &extra[11]);
3852 } 3852 }
@@ -3887,7 +3887,7 @@ static int rtw_p2p_get(struct net_device *dev,
3887 "group_id", 8)) { 3887 "group_id", 8)) {
3888 rtw_p2p_get_groupid(dev, info, wrqu, extra); 3888 rtw_p2p_get_groupid(dev, info, wrqu, extra);
3889 } else if (!memcmp((__force const char *)wrqu->data.pointer, 3889 } else if (!memcmp((__force const char *)wrqu->data.pointer,
3890 "peer_deva_inv", 9)) { 3890 "peer_deva_inv", 13)) {
3891 /* Get the P2P device address when receiving the P2P Invitation request frame. */ 3891 /* Get the P2P device address when receiving the P2P Invitation request frame. */
3892 rtw_p2p_get_peer_devaddr_by_invitation(dev, info, wrqu, extra); 3892 rtw_p2p_get_peer_devaddr_by_invitation(dev, info, wrqu, extra);
3893 } else if (!memcmp((__force const char *)wrqu->data.pointer, 3893 } else if (!memcmp((__force const char *)wrqu->data.pointer,
@@ -6920,7 +6920,7 @@ static int rtw_mp_ctx(struct net_device *dev,
6920 6920
6921 DBG_88E("%s: in =%s\n", __func__, extra); 6921 DBG_88E("%s: in =%s\n", __func__, extra);
6922 6922
6923 countPkTx = strncmp(extra, "count =", 5); /* strncmp true is 0 */ 6923 countPkTx = strncmp(extra, "count=", 6); /* strncmp true is 0 */
6924 cotuTx = strncmp(extra, "background", 20); 6924 cotuTx = strncmp(extra, "background", 20);
6925 CarrSprTx = strncmp(extra, "background, cs", 20); 6925 CarrSprTx = strncmp(extra, "background, cs", 20);
6926 scTx = strncmp(extra, "background, sc", 20); 6926 scTx = strncmp(extra, "background, sc", 20);
@@ -7044,7 +7044,7 @@ static int rtw_mp_arx(struct net_device *dev,
7044 DBG_88E("%s: %s\n", __func__, input); 7044 DBG_88E("%s: %s\n", __func__, input);
7045 7045
7046 bStartRx = (strncmp(input, "start", 5) == 0) ? 1 : 0; /* strncmp true is 0 */ 7046 bStartRx = (strncmp(input, "start", 5) == 0) ? 1 : 0; /* strncmp true is 0 */
7047 bStopRx = (strncmp(input, "stop", 5) == 0) ? 1 : 0; /* strncmp true is 0 */ 7047 bStopRx = (strncmp(input, "stop", 4) == 0) ? 1 : 0; /* strncmp true is 0 */
7048 bQueryPhy = (strncmp(input, "phy", 3) == 0) ? 1 : 0; /* strncmp true is 0 */ 7048 bQueryPhy = (strncmp(input, "phy", 3) == 0) ? 1 : 0; /* strncmp true is 0 */
7049 7049
7050 if (bStartRx) { 7050 if (bStartRx) {
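
Several of the ioctl_linux.c changes above are prefix-length fixes: literals such as "count =" were compared with memcmp()/strncmp() lengths that no longer matched the string (for example "count=" against a hard-coded 5), so the intended command prefixes were not actually being checked in full. A hedged sketch of a helper that keeps the length tied to the literal; has_prefix() is hypothetical, not part of the driver:

	static bool has_prefix(const char *s, const char *prefix)
	{
		/* strlen(prefix) keeps the compared length in sync with the
		 * literal, unlike a hand-counted constant. */
		return strncmp(s, prefix, strlen(prefix)) == 0;
	}

Used as has_prefix(extra, "listen_ch="), the matching wrqu->data.length adjustment could likewise be derived from strlen() of the same literal rather than a separate magic number.
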
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 23ec684b60e1..274c359279ef 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -254,7 +254,7 @@ union recv_frame *r8712_portctrl(struct _adapter *adapter,
254 struct sta_info *psta; 254 struct sta_info *psta;
255 struct sta_priv *pstapriv; 255 struct sta_priv *pstapriv;
256 union recv_frame *prtnframe; 256 union recv_frame *prtnframe;
257 u16 ether_type = 0; 257 u16 ether_type;
258 258
259 pstapriv = &adapter->stapriv; 259 pstapriv = &adapter->stapriv;
260 ptr = get_recvframe_data(precv_frame); 260 ptr = get_recvframe_data(precv_frame);
@@ -263,15 +263,14 @@ union recv_frame *r8712_portctrl(struct _adapter *adapter,
263 psta = r8712_get_stainfo(pstapriv, psta_addr); 263 psta = r8712_get_stainfo(pstapriv, psta_addr);
264 auth_alg = adapter->securitypriv.AuthAlgrthm; 264 auth_alg = adapter->securitypriv.AuthAlgrthm;
265 if (auth_alg == 2) { 265 if (auth_alg == 2) {
266 /* get ether_type */
267 ptr = ptr + pfhdr->attrib.hdrlen + LLC_HEADER_SIZE;
268 memcpy(&ether_type, ptr, 2);
269 ether_type = ntohs((unsigned short)ether_type);
270
266 if ((psta != NULL) && (psta->ieee8021x_blocked)) { 271 if ((psta != NULL) && (psta->ieee8021x_blocked)) {
267 /* blocked 272 /* blocked
268 * only accept EAPOL frame */ 273 * only accept EAPOL frame */
269 prtnframe = precv_frame;
270 /*get ether_type */
271 ptr = ptr + pfhdr->attrib.hdrlen +
272 pfhdr->attrib.iv_len + LLC_HEADER_SIZE;
273 memcpy(&ether_type, ptr, 2);
274 ether_type = ntohs((unsigned short)ether_type);
275 if (ether_type == 0x888e) 274 if (ether_type == 0x888e)
276 prtnframe = precv_frame; 275 prtnframe = precv_frame;
277 else { 276 else {
diff --git a/drivers/staging/rtl8723au/core/rtw_ieee80211.c b/drivers/staging/rtl8723au/core/rtw_ieee80211.c
index 780631fd3b6d..a48ab25a7d8a 100644
--- a/drivers/staging/rtl8723au/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723au/core/rtw_ieee80211.c
@@ -1496,45 +1496,23 @@ void rtw_wlan_bssid_ex_remove_p2p_attr23a(struct wlan_bssid_ex *bss_ex, u8 attr_
1496int rtw_get_wfd_ie(u8 *in_ie, int in_len, u8 *wfd_ie, uint *wfd_ielen) 1496int rtw_get_wfd_ie(u8 *in_ie, int in_len, u8 *wfd_ie, uint *wfd_ielen)
1497{ 1497{
1498 int match; 1498 int match;
1499 uint cnt = 0; 1499 const u8 *ie;
1500 u8 eid, wfd_oui[4] = {0x50, 0x6F, 0x9A, 0x0A};
1501 1500
1502 match = false; 1501 match = 0;
1503 1502
1504 if (in_len < 0) { 1503 if (in_len < 0)
1505 return match; 1504 return match;
1506 }
1507
1508 while (cnt < in_len)
1509 {
1510 eid = in_ie[cnt];
1511 1505
1512 if ((eid == _VENDOR_SPECIFIC_IE_) && 1506 ie = cfg80211_find_vendor_ie(0x506F9A, 0x0A, in_ie, in_len);
1513 !memcmp(&in_ie[cnt+2], wfd_oui, 4)) { 1507 if (ie && (ie[1] <= (MAX_WFD_IE_LEN - 2))) {
1514 if (wfd_ie != NULL) { 1508 if (wfd_ie) {
1515 memcpy(wfd_ie, &in_ie[cnt], in_ie[cnt + 1] + 2); 1509 *wfd_ielen = ie[1] + 2;
1516 1510 memcpy(wfd_ie, ie, ie[1] + 2);
1517 } else { 1511 } else
1518 if (wfd_ielen != NULL) { 1512 if (wfd_ielen)
1519 *wfd_ielen = 0; 1513 *wfd_ielen = 0;
1520 }
1521 }
1522
1523 if (wfd_ielen != NULL) {
1524 *wfd_ielen = in_ie[cnt + 1] + 2;
1525 }
1526
1527 cnt += in_ie[cnt + 1] + 2;
1528
1529 match = true;
1530 break;
1531 } else {
1532 cnt += in_ie[cnt + 1] +2; /* goto next */
1533 }
1534 }
1535 1514
1536 if (match == true) { 1515 match = 1;
1537 match = cnt;
1538 } 1516 }
1539 1517
1540 return match; 1518 return match;
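
The rtw_ieee80211.c hunk above drops the open-coded element walk in rtw_get_wfd_ie() in favour of cfg80211_find_vendor_ie(), matching the WFA OUI 0x50:0x6F:0x9A with WFD subtype 0x0A. The returned pointer includes the two-byte element header, so the usable length is ie[1] + 2, which the new code bounds-checks against MAX_WFD_IE_LEN. A compressed sketch of that shape, reusing the driver's names and omitting the NULL-output handling shown in the hunk:

	const u8 *ie;

	ie = cfg80211_find_vendor_ie(0x506F9A, 0x0A, in_ie, in_len);
	if (ie && ie[1] + 2 <= MAX_WFD_IE_LEN) {
		*wfd_ielen = ie[1] + 2;	/* element ID + length byte + body */
		memcpy(wfd_ie, ie, *wfd_ielen);
	}
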
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
index 4c753639ea5a..1f3e8a0aece4 100644
--- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
@@ -1281,7 +1281,7 @@ unsigned int OnAssocReq23a(struct rtw_adapter *padapter, struct recv_frame *prec
1281 u8 p2p_status_code = P2P_STATUS_SUCCESS; 1281 u8 p2p_status_code = P2P_STATUS_SUCCESS;
1282 u8 *p2pie; 1282 u8 *p2pie;
1283 u32 p2pielen = 0; 1283 u32 p2pielen = 0;
1284 u8 wfd_ie[ 128 ] = { 0x00 }; 1284 u8 wfd_ie[MAX_WFD_IE_LEN] = { 0x00 };
1285 u32 wfd_ielen = 0; 1285 u32 wfd_ielen = 0;
1286#endif /* CONFIG_8723AU_P2P */ 1286#endif /* CONFIG_8723AU_P2P */
1287 1287
diff --git a/drivers/staging/rtl8723au/core/rtw_p2p.c b/drivers/staging/rtl8723au/core/rtw_p2p.c
index 27a6cc76973d..1a961e3f3a55 100644
--- a/drivers/staging/rtl8723au/core/rtw_p2p.c
+++ b/drivers/staging/rtl8723au/core/rtw_p2p.c
@@ -2535,7 +2535,7 @@ u8 process_p2p_group_negotation_req23a(struct wifidirect_info *pwdinfo, u8 *pfra
2535 u16 wps_devicepassword_id = 0x0000; 2535 u16 wps_devicepassword_id = 0x0000;
2536 uint wps_devicepassword_id_len = 0; 2536 uint wps_devicepassword_id_len = 0;
2537#ifdef CONFIG_8723AU_P2P 2537#ifdef CONFIG_8723AU_P2P
2538 u8 wfd_ie[ 128 ] = { 0x00 }; 2538 u8 wfd_ie[MAX_WFD_IE_LEN] = { 0x00 };
2539 u32 wfd_ielen = 0; 2539 u32 wfd_ielen = 0;
2540#endif /* CONFIG_8723AU_P2P */ 2540#endif /* CONFIG_8723AU_P2P */
2541 2541
@@ -2741,7 +2741,7 @@ u8 process_p2p_group_negotation_resp23a(struct wifidirect_info *pwdinfo, u8 *pfr
2741 u32 ies_len; 2741 u32 ies_len;
2742 u8 * p2p_ie; 2742 u8 * p2p_ie;
2743#ifdef CONFIG_8723AU_P2P 2743#ifdef CONFIG_8723AU_P2P
2744 u8 wfd_ie[ 128 ] = { 0x00 }; 2744 u8 wfd_ie[MAX_WFD_IE_LEN] = { 0x00 };
2745 u32 wfd_ielen = 0; 2745 u32 wfd_ielen = 0;
2746#endif /* CONFIG_8723AU_P2P */ 2746#endif /* CONFIG_8723AU_P2P */
2747 2747
diff --git a/drivers/staging/rtl8723au/core/rtw_wlan_util.c b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
index 0dfcfbce3b52..99d81e612e7b 100644
--- a/drivers/staging/rtl8723au/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
@@ -570,7 +570,7 @@ void flush_all_cam_entry23a(struct rtw_adapter *padapter)
570int WFD_info_handler(struct rtw_adapter *padapter, struct ndis_802_11_var_ies * pIE) 570int WFD_info_handler(struct rtw_adapter *padapter, struct ndis_802_11_var_ies * pIE)
571{ 571{
572 struct wifidirect_info *pwdinfo; 572 struct wifidirect_info *pwdinfo;
573 u8 wfd_ie[128] = {0x00}; 573 u8 wfd_ie[MAX_WFD_IE_LEN] = {0x00};
574 u32 wfd_ielen = 0; 574 u32 wfd_ielen = 0;
575 575
576 pwdinfo = &padapter->wdinfo; 576 pwdinfo = &padapter->wdinfo;
@@ -681,7 +681,7 @@ void WMMOnAssocRsp23a(struct rtw_adapter *padapter)
681 inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3; 681 inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
682 682
683 if (pregpriv->wifi_spec == 1) { 683 if (pregpriv->wifi_spec == 1) {
684 u32 j, tmp, change_inx; 684 u32 j, tmp, change_inx = false;
685 685
686 /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */ 686 /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
687 for (i = 0; i < 4; i++) { 687 for (i = 0; i < 4; i++) {
diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c
index 57eca7a45672..4fe751f7c2bf 100644
--- a/drivers/staging/rtl8723au/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c
@@ -953,8 +953,6 @@ static int netdev_close(struct net_device *pnetdev)
953#endif /* CONFIG_8723AU_P2P */ 953#endif /* CONFIG_8723AU_P2P */
954 954
955 rtw_scan_abort23a(padapter); 955 rtw_scan_abort23a(padapter);
956 /* set this at the end */
957 padapter->rtw_wdev->iftype = NL80211_IFTYPE_MONITOR;
958 956
959 RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n")); 957 RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n"));
960 DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup); 958 DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
index c49160e477d8..07e542e5d156 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
@@ -26,7 +26,7 @@ unsigned int ffaddr2pipehdl23a(struct dvobj_priv *pdvobj, u32 addr)
26 if (addr == RECV_BULK_IN_ADDR) { 26 if (addr == RECV_BULK_IN_ADDR) {
27 pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]); 27 pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
28 } else if (addr == RECV_INT_IN_ADDR) { 28 } else if (addr == RECV_INT_IN_ADDR) {
29 pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]); 29 pipe = usb_rcvintpipe(pusbd, pdvobj->RtInPipe[1]);
30 } else if (addr < HW_QUEUE_ENTRY) { 30 } else if (addr < HW_QUEUE_ENTRY) {
31 ep_num = pdvobj->Queue2Pipe[addr]; 31 ep_num = pdvobj->Queue2Pipe[addr];
32 pipe = usb_sndbulkpipe(pusbd, ep_num); 32 pipe = usb_sndbulkpipe(pusbd, ep_num);
diff --git a/drivers/staging/rtl8821ae/base.c b/drivers/staging/rtl8821ae/base.c
index e5073fe24770..a4c9cc437bc6 100644
--- a/drivers/staging/rtl8821ae/base.c
+++ b/drivers/staging/rtl8821ae/base.c
@@ -388,7 +388,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
388 388
389} 389}
390 390
391static void _rtl_init_deferred_work(struct ieee80211_hw *hw) 391static int _rtl_init_deferred_work(struct ieee80211_hw *hw)
392{ 392{
393 struct rtl_priv *rtlpriv = rtl_priv(hw); 393 struct rtl_priv *rtlpriv = rtl_priv(hw);
394 394
@@ -410,6 +410,9 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
410 rtlpriv->works.rtl_wq = create_workqueue(rtlpriv->cfg->name); 410 rtlpriv->works.rtl_wq = create_workqueue(rtlpriv->cfg->name);
411#endif 411#endif
412/*<delete in kernel end>*/ 412/*<delete in kernel end>*/
413 if (!rtlpriv->works.rtl_wq)
414 return -ENOMEM;
415
413 INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, 416 INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
414 (void *)rtl_watchdog_wq_callback); 417 (void *)rtl_watchdog_wq_callback);
415 INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, 418 INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
@@ -421,6 +424,8 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
421 INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, 424 INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq,
422 (void *)rtl_fwevt_wq_callback); 425 (void *)rtl_fwevt_wq_callback);
423 426
427 return 0;
428
424} 429}
425 430
426void rtl_deinit_deferred_work(struct ieee80211_hw *hw) 431void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
@@ -519,7 +524,8 @@ int rtl_init_core(struct ieee80211_hw *hw)
519 INIT_LIST_HEAD(&rtlpriv->entry_list); 524 INIT_LIST_HEAD(&rtlpriv->entry_list);
520 525
521 /* <6> init deferred work */ 526 /* <6> init deferred work */
522 _rtl_init_deferred_work(hw); 527 if (_rtl_init_deferred_work(hw))
528 return 1;
523 529
524 /* <7> */ 530 /* <7> */
525#ifdef VIF_TODO 531#ifdef VIF_TODO
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index ef5933b93590..3b6e5358c723 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -1855,8 +1855,9 @@ static int handle_goto(struct vc_data *vc, u_char type, u_char ch, u_short key)
1855{ 1855{
1856 static u_char goto_buf[8]; 1856 static u_char goto_buf[8];
1857 static int num; 1857 static int num;
1858 int maxlen, go_pos; 1858 int maxlen;
1859 char *cp; 1859 char *cp;
1860
1860 if (type == KT_SPKUP && ch == SPEAKUP_GOTO) 1861 if (type == KT_SPKUP && ch == SPEAKUP_GOTO)
1861 goto do_goto; 1862 goto do_goto;
1862 if (type == KT_LATIN && ch == '\n') 1863 if (type == KT_LATIN && ch == '\n')
@@ -1891,25 +1892,24 @@ oops:
1891 spk_special_handler = NULL; 1892 spk_special_handler = NULL;
1892 return 1; 1893 return 1;
1893 } 1894 }
1894 go_pos = kstrtol(goto_buf, 10, (long *)&cp); 1895
1895 goto_pos = (u_long) go_pos; 1896 goto_pos = simple_strtoul(goto_buf, &cp, 10);
1897
1896 if (*cp == 'x') { 1898 if (*cp == 'x') {
1897 if (*goto_buf < '0') 1899 if (*goto_buf < '0')
1898 goto_pos += spk_x; 1900 goto_pos += spk_x;
1899 else 1901 else if (goto_pos > 0)
1900 goto_pos--; 1902 goto_pos--;
1901 if (goto_pos < 0) 1903
1902 goto_pos = 0;
1903 if (goto_pos >= vc->vc_cols) 1904 if (goto_pos >= vc->vc_cols)
1904 goto_pos = vc->vc_cols - 1; 1905 goto_pos = vc->vc_cols - 1;
1905 goto_x = 1; 1906 goto_x = 1;
1906 } else { 1907 } else {
1907 if (*goto_buf < '0') 1908 if (*goto_buf < '0')
1908 goto_pos += spk_y; 1909 goto_pos += spk_y;
1909 else 1910 else if (goto_pos > 0)
1910 goto_pos--; 1911 goto_pos--;
1911 if (goto_pos < 0) 1912
1912 goto_pos = 0;
1913 if (goto_pos >= vc->vc_rows) 1913 if (goto_pos >= vc->vc_rows)
1914 goto_pos = vc->vc_rows - 1; 1914 goto_pos = vc->vc_rows - 1;
1915 goto_x = 0; 1915 goto_x = 0;
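
In the speakup hunk above, handle_goto() replaces a misused kstrtol() (which has no end-pointer output and was being handed a char ** cast to long *) with simple_strtoul(), whose second argument returns the first unparsed character; a trailing 'x' then selects column rather than row addressing, and the explicit negative clamp disappears because the value is now unsigned. A small sketch of that parse, with illustrative names and without the relative-movement cases the driver also handles:

	/* Parse "<digits>" or "<digits>x"; returns a zero-based position and
	 * sets *is_col when the column form was used. Illustrative only. */
	static unsigned long parse_goto(const char *buf, int *is_col)
	{
		char *endp;
		unsigned long pos = simple_strtoul(buf, &endp, 10);

		*is_col = (*endp == 'x');
		return pos ? pos - 1 : 0;	/* input is 1-based */
	}
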
diff --git a/drivers/staging/unisys/uislib/uislib.c b/drivers/staging/unisys/uislib/uislib.c
index 8ea9c46e56ae..3152a2180c45 100644
--- a/drivers/staging/unisys/uislib/uislib.c
+++ b/drivers/staging/unisys/uislib/uislib.c
@@ -381,17 +381,17 @@ create_bus(CONTROLVM_MESSAGE *msg, char *buf)
381 cmd.add_vbus.busTypeGuid = msg->cmd.createBus.busDataTypeGuid; 381 cmd.add_vbus.busTypeGuid = msg->cmd.createBus.busDataTypeGuid;
382 cmd.add_vbus.busInstGuid = msg->cmd.createBus.busInstGuid; 382 cmd.add_vbus.busInstGuid = msg->cmd.createBus.busInstGuid;
383 if (!VirtControlChanFunc) { 383 if (!VirtControlChanFunc) {
384 kfree(bus);
385 LOGERR("CONTROLVM_BUS_CREATE Failed: virtpci callback not registered."); 384 LOGERR("CONTROLVM_BUS_CREATE Failed: virtpci callback not registered.");
386 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo, 385 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
387 POSTCODE_SEVERITY_ERR); 386 POSTCODE_SEVERITY_ERR);
387 kfree(bus);
388 return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE; 388 return CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_FAILURE;
389 } 389 }
390 if (!VirtControlChanFunc(&cmd)) { 390 if (!VirtControlChanFunc(&cmd)) {
391 kfree(bus);
392 LOGERR("CONTROLVM_BUS_CREATE Failed: virtpci GUEST_ADD_VBUS returned error."); 391 LOGERR("CONTROLVM_BUS_CREATE Failed: virtpci GUEST_ADD_VBUS returned error.");
393 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo, 392 POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus->busNo,
394 POSTCODE_SEVERITY_ERR); 393 POSTCODE_SEVERITY_ERR);
394 kfree(bus);
395 return 395 return
396 CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR; 396 CONTROLVM_RESP_ERROR_VIRTPCI_DRIVER_CALLBACK_ERROR;
397 } 397 }
diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
index d4bf203cdfdf..d95825dc5414 100644
--- a/drivers/staging/unisys/visorchipset/visorchipset.h
+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
@@ -104,9 +104,9 @@ finddevice(struct list_head *list, U32 busNo, U32 devNo)
104 104
105static inline void delbusdevices(struct list_head *list, U32 busNo) 105static inline void delbusdevices(struct list_head *list, U32 busNo)
106{ 106{
107 VISORCHIPSET_DEVICE_INFO *p; 107 VISORCHIPSET_DEVICE_INFO *p, *tmp;
108 108
109 list_for_each_entry(p, list, entry) { 109 list_for_each_entry_safe(p, tmp, list, entry) {
110 if (p->busNo == busNo) { 110 if (p->busNo == busNo) {
111 list_del(&p->entry); 111 list_del(&p->entry);
112 kfree(p); 112 kfree(p);
diff --git a/drivers/staging/unisys/visorchipset/visorchipset_main.c b/drivers/staging/unisys/visorchipset/visorchipset_main.c
index 257c6e59b460..c475e256e34b 100644
--- a/drivers/staging/unisys/visorchipset/visorchipset_main.c
+++ b/drivers/staging/unisys/visorchipset/visorchipset_main.c
@@ -605,16 +605,16 @@ EXPORT_SYMBOL_GPL(visorchipset_register_busdev_client);
605static void 605static void
606cleanup_controlvm_structures(void) 606cleanup_controlvm_structures(void)
607{ 607{
608 VISORCHIPSET_BUS_INFO *bi; 608 VISORCHIPSET_BUS_INFO *bi, *tmp_bi;
609 VISORCHIPSET_DEVICE_INFO *di; 609 VISORCHIPSET_DEVICE_INFO *di, *tmp_di;
610 610
611 list_for_each_entry(bi, &BusInfoList, entry) { 611 list_for_each_entry_safe(bi, tmp_bi, &BusInfoList, entry) {
612 busInfo_clear(bi); 612 busInfo_clear(bi);
613 list_del(&bi->entry); 613 list_del(&bi->entry);
614 kfree(bi); 614 kfree(bi);
615 } 615 }
616 616
617 list_for_each_entry(di, &DevInfoList, entry) { 617 list_for_each_entry_safe(di, tmp_di, &DevInfoList, entry) {
618 devInfo_clear(di); 618 devInfo_clear(di);
619 list_del(&di->entry); 619 list_del(&di->entry);
620 kfree(di); 620 kfree(di);
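
Both visorchipset hunks above switch from list_for_each_entry() to list_for_each_entry_safe() because the loop body deletes and frees the entry it is standing on; the _safe variant caches the next node before the body runs, so the walk survives the kfree(). A minimal sketch of the idiom with placeholder types:

	struct item {
		struct list_head entry;
		/* payload ... */
	};

	static void free_all(struct list_head *head)
	{
		struct item *p, *tmp;

		list_for_each_entry_safe(p, tmp, head, entry) {
			list_del(&p->entry);	/* safe: tmp already points past p */
			kfree(p);
		}
	}
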
diff --git a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c
index c5bf60b135b9..92caef7474c7 100644
--- a/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c
+++ b/drivers/staging/usbip/userspace/libsrc/usbip_host_driver.c
@@ -118,6 +118,7 @@ static int refresh_exported_devices(void)
118 struct udev_list_entry *devices, *dev_list_entry; 118 struct udev_list_entry *devices, *dev_list_entry;
119 struct udev_device *dev; 119 struct udev_device *dev;
120 const char *path; 120 const char *path;
121 const char *driver;
121 122
122 enumerate = udev_enumerate_new(udev_context); 123 enumerate = udev_enumerate_new(udev_context);
123 udev_enumerate_add_match_subsystem(enumerate, "usb"); 124 udev_enumerate_add_match_subsystem(enumerate, "usb");
@@ -128,10 +129,12 @@ static int refresh_exported_devices(void)
128 udev_list_entry_foreach(dev_list_entry, devices) { 129 udev_list_entry_foreach(dev_list_entry, devices) {
129 path = udev_list_entry_get_name(dev_list_entry); 130 path = udev_list_entry_get_name(dev_list_entry);
130 dev = udev_device_new_from_syspath(udev_context, path); 131 dev = udev_device_new_from_syspath(udev_context, path);
132 if (dev == NULL)
133 continue;
131 134
132 /* Check whether device uses usbip-host driver. */ 135 /* Check whether device uses usbip-host driver. */
133 if (!strcmp(udev_device_get_driver(dev), 136 driver = udev_device_get_driver(dev);
134 USBIP_HOST_DRV_NAME)) { 137 if (driver != NULL && !strcmp(driver, USBIP_HOST_DRV_NAME)) {
135 edev = usbip_exported_device_new(path); 138 edev = usbip_exported_device_new(path);
136 if (!edev) { 139 if (!edev) {
137 dbg("usbip_exported_device_new failed"); 140 dbg("usbip_exported_device_new failed");
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index 47bddcdde0a6..211f43f67ea2 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -184,7 +184,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
184 * @devid: unique device identifier in a remote host 184 * @devid: unique device identifier in a remote host
185 * @speed: usb device speed in a remote host 185 * @speed: usb device speed in a remote host
186 */ 186 */
187 if (sscanf(buf, "%u %u %u %u", &rhport, &sockfd, &devid, &speed) != 1) 187 if (sscanf(buf, "%u %u %u %u", &rhport, &sockfd, &devid, &speed) != 4)
188 return -EINVAL; 188 return -EINVAL;
189 189
190 usbip_dbg_vhci_sysfs("rhport(%u) sockfd(%u) devid(%u) speed(%u)\n", 190 usbip_dbg_vhci_sysfs("rhport(%u) sockfd(%u) devid(%u) speed(%u)\n",
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 792792715673..ffb4eeefdddb 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -776,7 +776,8 @@ static int vme_user_probe(struct vme_dev *vdev)
776 image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL); 776 image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
777 if (image[i].kern_buf == NULL) { 777 if (image[i].kern_buf == NULL) {
778 err = -ENOMEM; 778 err = -ENOMEM;
779 goto err_master_buf; 779 vme_master_free(image[i].resource);
780 goto err_master;
780 } 781 }
781 } 782 }
782 783
@@ -819,8 +820,6 @@ static int vme_user_probe(struct vme_dev *vdev)
819 820
820 return 0; 821 return 0;
821 822
822 /* Ensure counter set correcty to destroy all sysfs devices */
823 i = VME_DEVS;
824err_sysfs: 823err_sysfs:
825 while (i > 0) { 824 while (i > 0) {
826 i--; 825 i--;
@@ -830,12 +829,10 @@ err_sysfs:
830 829
831 /* Ensure counter set correcty to unalloc all master windows */ 830 /* Ensure counter set correcty to unalloc all master windows */
832 i = MASTER_MAX + 1; 831 i = MASTER_MAX + 1;
833err_master_buf:
834 for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++)
835 kfree(image[i].kern_buf);
836err_master: 832err_master:
837 while (i > MASTER_MINOR) { 833 while (i > MASTER_MINOR) {
838 i--; 834 i--;
835 kfree(image[i].kern_buf);
839 vme_master_free(image[i].resource); 836 vme_master_free(image[i].resource);
840 } 837 }
841 838
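
The vme_user_probe() error path above folds the separate buffer-free loop into the master-window unwind: when a kmalloc() fails, the current window's resource is released on the spot and the err_master loop then frees each remaining kern_buf alongside its vme_master_free(). A generic sketch of that count-down unwind, with hypothetical names standing in for the driver's arrays:

	#define NWIN 4
	static void *bufs[NWIN];

	static int alloc_bufs(void)
	{
		int i;

		for (i = 0; i < NWIN; i++) {
			bufs[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (!bufs[i])
				goto err_unwind;
		}
		return 0;

	err_unwind:
		while (i > 0) {		/* i is the index that failed */
			i--;
			kfree(bufs[i]);
		}
		return -ENOMEM;
	}
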
diff --git a/drivers/staging/xgifb/vb_def.h b/drivers/staging/xgifb/vb_def.h
index 5c739bebd8a5..949f0e5eed8d 100644
--- a/drivers/staging/xgifb/vb_def.h
+++ b/drivers/staging/xgifb/vb_def.h
@@ -1,6 +1,6 @@
1#ifndef _VB_DEF_ 1#ifndef _VB_DEF_
2#define _VB_DEF_ 2#define _VB_DEF_
3#include "../../video/sis/initdef.h" 3#include "../../video/fbdev/sis/initdef.h"
4 4
5#define VB_XGI301C 0x0020 /* for 301C */ 5#define VB_XGI301C 0x0020 /* for 301C */
6 6
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index c08ff5b2d6ee..0d27594554ca 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -1,6 +1,6 @@
1#ifndef _VB_STRUCT_ 1#ifndef _VB_STRUCT_
2#define _VB_STRUCT_ 2#define _VB_STRUCT_
3#include "../../video/sis/vstruct.h" 3#include "../../video/fbdev/sis/vstruct.h"
4 4
5struct XGI_LVDSCRT1HDataStruct { 5struct XGI_LVDSCRT1HDataStruct {
6 unsigned char Reg[8]; 6 unsigned char Reg[8];
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index ddf7776c295b..264351441f99 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -2,8 +2,8 @@
2#define _VGATYPES_ 2#define _VGATYPES_
3 3
4#include <linux/fb.h> /* for struct fb_var_screeninfo for sis.h */ 4#include <linux/fb.h> /* for struct fb_var_screeninfo for sis.h */
5#include "../../video/sis/vgatypes.h" 5#include "../../video/fbdev/sis/vgatypes.h"
6#include "../../video/sis/sis.h" /* for LCD_TYPE */ 6#include "../../video/fbdev/sis/sis.h" /* for LCD_TYPE */
7 7
8#ifndef XGI_VB_CHIP_TYPE 8#ifndef XGI_VB_CHIP_TYPE
9enum XGI_VB_CHIP_TYPE { 9enum XGI_VB_CHIP_TYPE {
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 78cab13bbb1b..46588c85d39b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1593,7 +1593,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1593 * Initiator is expecting a NopIN ping reply.. 1593 * Initiator is expecting a NopIN ping reply..
1594 */ 1594 */
1595 if (hdr->itt != RESERVED_ITT) { 1595 if (hdr->itt != RESERVED_ITT) {
1596 BUG_ON(!cmd); 1596 if (!cmd)
1597 return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
1598 (unsigned char *)hdr);
1597 1599
1598 spin_lock_bh(&conn->cmd_lock); 1600 spin_lock_bh(&conn->cmd_lock);
1599 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); 1601 list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 6960f22909ae..302eb3b78715 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -775,6 +775,7 @@ struct iscsi_np {
775 int np_ip_proto; 775 int np_ip_proto;
776 int np_sock_type; 776 int np_sock_type;
777 enum np_thread_state_table np_thread_state; 777 enum np_thread_state_table np_thread_state;
778 bool enabled;
778 enum iscsi_timer_flags_table np_login_timer_flags; 779 enum iscsi_timer_flags_table np_login_timer_flags;
779 u32 np_exports; 780 u32 np_exports;
780 enum np_flags_table np_flags; 781 enum np_flags_table np_flags;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 8739b98f6f93..ca31fa1b8a4b 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -436,7 +436,7 @@ static int iscsi_login_zero_tsih_s2(
436 } 436 }
437 off = mrdsl % PAGE_SIZE; 437 off = mrdsl % PAGE_SIZE;
438 if (!off) 438 if (!off)
439 return 0; 439 goto check_prot;
440 440
441 if (mrdsl < PAGE_SIZE) 441 if (mrdsl < PAGE_SIZE)
442 mrdsl = PAGE_SIZE; 442 mrdsl = PAGE_SIZE;
@@ -452,6 +452,31 @@ static int iscsi_login_zero_tsih_s2(
452 ISCSI_LOGIN_STATUS_NO_RESOURCES); 452 ISCSI_LOGIN_STATUS_NO_RESOURCES);
453 return -1; 453 return -1;
454 } 454 }
455 /*
456 * ISER currently requires that ImmediateData + Unsolicited
457 * Data be disabled when protection / signature MRs are enabled.
458 */
459check_prot:
460 if (sess->se_sess->sup_prot_ops &
461 (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
462 TARGET_PROT_DOUT_INSERT)) {
463
464 sprintf(buf, "ImmediateData=No");
465 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
466 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
467 ISCSI_LOGIN_STATUS_NO_RESOURCES);
468 return -1;
469 }
470
471 sprintf(buf, "InitialR2T=Yes");
472 if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
473 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
474 ISCSI_LOGIN_STATUS_NO_RESOURCES);
475 return -1;
476 }
477 pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
478 " T10-PI enabled ISER session\n");
479 }
455 } 480 }
456 481
457 return 0; 482 return 0;
@@ -984,6 +1009,7 @@ int iscsi_target_setup_login_socket(
984 } 1009 }
985 1010
986 np->np_transport = t; 1011 np->np_transport = t;
1012 np->enabled = true;
987 return 0; 1013 return 0;
988} 1014}
989 1015
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index eb96b20dc09e..ca1811858afd 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -184,6 +184,7 @@ static void iscsit_clear_tpg_np_login_thread(
184 return; 184 return;
185 } 185 }
186 186
187 tpg_np->tpg_np->enabled = false;
187 iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); 188 iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
188} 189}
189 190
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 65001e133670..26416c15d65c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -798,10 +798,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
798 pr_err("emulate_write_cache not supported for pSCSI\n"); 798 pr_err("emulate_write_cache not supported for pSCSI\n");
799 return -EINVAL; 799 return -EINVAL;
800 } 800 }
801 if (dev->transport->get_write_cache) { 801 if (flag &&
802 pr_warn("emulate_write_cache cannot be changed when underlying" 802 dev->transport->get_write_cache) {
803 " HW reports WriteCacheEnabled, ignoring request\n"); 803 pr_err("emulate_write_cache not supported for this device\n");
804 return 0; 804 return -EINVAL;
805 } 805 }
806 806
807 dev->dev_attrib.emulate_write_cache = flag; 807 dev->dev_attrib.emulate_write_cache = flag;
@@ -936,6 +936,10 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
936 return 0; 936 return 0;
937 } 937 }
938 if (!dev->transport->init_prot || !dev->transport->free_prot) { 938 if (!dev->transport->init_prot || !dev->transport->free_prot) {
939 /* 0 is only allowed value for non-supporting backends */
940 if (flag == 0)
941 return 0;
942
939 pr_err("DIF protection not supported by backend: %s\n", 943 pr_err("DIF protection not supported by backend: %s\n",
940 dev->transport->name); 944 dev->transport->name);
941 return -ENOSYS; 945 return -ENOSYS;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d4b98690a736..789aa9eb0a1e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1113,6 +1113,7 @@ void transport_init_se_cmd(
1113 init_completion(&cmd->cmd_wait_comp); 1113 init_completion(&cmd->cmd_wait_comp);
1114 init_completion(&cmd->task_stop_comp); 1114 init_completion(&cmd->task_stop_comp);
1115 spin_lock_init(&cmd->t_state_lock); 1115 spin_lock_init(&cmd->t_state_lock);
1116 kref_init(&cmd->cmd_kref);
1116 cmd->transport_state = CMD_T_DEV_ACTIVE; 1117 cmd->transport_state = CMD_T_DEV_ACTIVE;
1117 1118
1118 cmd->se_tfo = tfo; 1119 cmd->se_tfo = tfo;
@@ -2357,7 +2358,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
2357 unsigned long flags; 2358 unsigned long flags;
2358 int ret = 0; 2359 int ret = 0;
2359 2360
2360 kref_init(&se_cmd->cmd_kref);
2361 /* 2361 /*
2362 * Add a second kref if the fabric caller is expecting to handle 2362 * Add a second kref if the fabric caller is expecting to handle
2363 * fabric acknowledgement that requires two target_put_sess_cmd() 2363 * fabric acknowledgement that requires two target_put_sess_cmd()
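
The target_core hunks above move kref_init() from target_get_sess_cmd() into transport_init_se_cmd(), so the command's reference count is set up exactly once at construction instead of being re-initialized on an object that other contexts may already hold references to. A hedged outline of that discipline with placeholder names:

	struct foo {
		struct kref kref;
		/* ... */
	};

	static void foo_release(struct kref *kref)
	{
		kfree(container_of(kref, struct foo, kref));
	}

	static struct foo *foo_create(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (f)
			kref_init(&f->kref);	/* once, at construction */
		return f;
	}

	/* later users pair kref_get(&f->kref) with kref_put(&f->kref, foo_release) */
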
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 01cf37f212c3..f5fd515b2bee 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -90,18 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
90{ 90{
91 struct fc_frame *fp; 91 struct fc_frame *fp;
92 struct fc_lport *lport; 92 struct fc_lport *lport;
93 struct se_session *se_sess; 93 struct ft_sess *sess;
94 94
95 if (!cmd) 95 if (!cmd)
96 return; 96 return;
97 se_sess = cmd->sess->se_sess; 97 sess = cmd->sess;
98 fp = cmd->req_frame; 98 fp = cmd->req_frame;
99 lport = fr_dev(fp); 99 lport = fr_dev(fp);
100 if (fr_seq(fp)) 100 if (fr_seq(fp))
101 lport->tt.seq_release(fr_seq(fp)); 101 lport->tt.seq_release(fr_seq(fp));
102 fc_frame_free(fp); 102 fc_frame_free(fp);
103 percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 103 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
104 ft_sess_put(cmd->sess); /* undo get from lookup at recv */ 104 ft_sess_put(sess); /* undo get from lookup at recv */
105} 105}
106 106
107void ft_release_cmd(struct se_cmd *se_cmd) 107void ft_release_cmd(struct se_cmd *se_cmd)
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 94f9e3a38412..0ff7fda0742f 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -190,7 +190,7 @@ static struct tty_driver *hvc_console_device(struct console *c, int *index)
190 return hvc_driver; 190 return hvc_driver;
191} 191}
192 192
193static int __init hvc_console_setup(struct console *co, char *options) 193static int hvc_console_setup(struct console *co, char *options)
194{ 194{
195 if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES) 195 if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
196 return -ENODEV; 196 return -ENODEV;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 41fe8a047d37..fe9d129c8735 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2353,8 +2353,12 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
2353 if (tty->ops->flush_chars) 2353 if (tty->ops->flush_chars)
2354 tty->ops->flush_chars(tty); 2354 tty->ops->flush_chars(tty);
2355 } else { 2355 } else {
2356 struct n_tty_data *ldata = tty->disc_data;
2357
2356 while (nr > 0) { 2358 while (nr > 0) {
2359 mutex_lock(&ldata->output_lock);
2357 c = tty->ops->write(tty, b, nr); 2360 c = tty->ops->write(tty, b, nr);
2361 mutex_unlock(&ldata->output_lock);
2358 if (c < 0) { 2362 if (c < 0) {
2359 retval = c; 2363 retval = c;
2360 goto break_out; 2364 goto break_out;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 81f909c2101f..2d4bd3929e50 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -555,7 +555,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
555 */ 555 */
556 if ((p->port.type == PORT_XR17V35X) || 556 if ((p->port.type == PORT_XR17V35X) ||
557 (p->port.type == PORT_XR17D15X)) { 557 (p->port.type == PORT_XR17D15X)) {
558 serial_out(p, UART_EXAR_SLEEP, 0xff); 558 serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0);
559 return; 559 return;
560 } 560 }
561 561
@@ -1520,7 +1520,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
1520 status = serial8250_rx_chars(up, status); 1520 status = serial8250_rx_chars(up, status);
1521 } 1521 }
1522 serial8250_modem_status(up); 1522 serial8250_modem_status(up);
1523 if (status & UART_LSR_THRE) 1523 if (!up->dma && (status & UART_LSR_THRE))
1524 serial8250_tx_chars(up); 1524 serial8250_tx_chars(up);
1525 1525
1526 spin_unlock_irqrestore(&port->lock, flags); 1526 spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 7046769608d4..ab9096dc3849 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -20,12 +20,15 @@ static void __dma_tx_complete(void *param)
20 struct uart_8250_port *p = param; 20 struct uart_8250_port *p = param;
21 struct uart_8250_dma *dma = p->dma; 21 struct uart_8250_dma *dma = p->dma;
22 struct circ_buf *xmit = &p->port.state->xmit; 22 struct circ_buf *xmit = &p->port.state->xmit;
23 23 unsigned long flags;
24 dma->tx_running = 0;
25 24
26 dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr, 25 dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
27 UART_XMIT_SIZE, DMA_TO_DEVICE); 26 UART_XMIT_SIZE, DMA_TO_DEVICE);
28 27
28 spin_lock_irqsave(&p->port.lock, flags);
29
30 dma->tx_running = 0;
31
29 xmit->tail += dma->tx_size; 32 xmit->tail += dma->tx_size;
30 xmit->tail &= UART_XMIT_SIZE - 1; 33 xmit->tail &= UART_XMIT_SIZE - 1;
31 p->port.icount.tx += dma->tx_size; 34 p->port.icount.tx += dma->tx_size;
@@ -35,6 +38,8 @@ static void __dma_tx_complete(void *param)
35 38
36 if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port)) 39 if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port))
37 serial8250_tx_dma(p); 40 serial8250_tx_dma(p);
41
42 spin_unlock_irqrestore(&p->port.lock, flags);
38} 43}
39 44
40static void __dma_rx_complete(void *param) 45static void __dma_rx_complete(void *param)
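
In __dma_tx_complete() above, clearing dma->tx_running and advancing the transmit ring now happen under port.lock with interrupts saved, because the DMA completion callback can run concurrently with serial8250_handle_irq() (which, per the 8250_core.c hunk earlier, also skips THRE-driven serial8250_tx_chars() while DMA owns the transmitter). The locking shape, in brief, using the hunk's own variables:

	unsigned long flags;

	spin_lock_irqsave(&p->port.lock, flags);
	dma->tx_running = 0;
	/* advance xmit->tail, account icount.tx, restart DMA if data remains */
	spin_unlock_irqrestore(&p->port.lock, flags);
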
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 2e6d8ddc4425..5d9b01aa54f4 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1226,6 +1226,7 @@ config SERIAL_BFIN_SPORT3_UART_CTSRTS
1226config SERIAL_TIMBERDALE 1226config SERIAL_TIMBERDALE
1227 tristate "Support for timberdale UART" 1227 tristate "Support for timberdale UART"
1228 select SERIAL_CORE 1228 select SERIAL_CORE
1229 depends on X86_32 || COMPILE_TEST
1229 ---help--- 1230 ---help---
1230 Add support for UART controller on timberdale. 1231 Add support for UART controller on timberdale.
1231 1232
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index d4eda24aa68b..dacf0a09ab24 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -318,7 +318,7 @@ static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *
318 .src_addr = uap->port.mapbase + UART01x_DR, 318 .src_addr = uap->port.mapbase + UART01x_DR,
319 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 319 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
320 .direction = DMA_DEV_TO_MEM, 320 .direction = DMA_DEV_TO_MEM,
321 .src_maxburst = uap->fifosize >> 1, 321 .src_maxburst = uap->fifosize >> 2,
322 .device_fc = false, 322 .device_fc = false,
323 }; 323 };
324 324
@@ -2176,6 +2176,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
2176static int pl011_remove(struct amba_device *dev) 2176static int pl011_remove(struct amba_device *dev)
2177{ 2177{
2178 struct uart_amba_port *uap = amba_get_drvdata(dev); 2178 struct uart_amba_port *uap = amba_get_drvdata(dev);
2179 bool busy = false;
2179 int i; 2180 int i;
2180 2181
2181 uart_remove_one_port(&amba_reg, &uap->port); 2182 uart_remove_one_port(&amba_reg, &uap->port);
@@ -2183,9 +2184,12 @@ static int pl011_remove(struct amba_device *dev)
2183 for (i = 0; i < ARRAY_SIZE(amba_ports); i++) 2184 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2184 if (amba_ports[i] == uap) 2185 if (amba_ports[i] == uap)
2185 amba_ports[i] = NULL; 2186 amba_ports[i] = NULL;
2187 else if (amba_ports[i])
2188 busy = true;
2186 2189
2187 pl011_dma_remove(uap); 2190 pl011_dma_remove(uap);
2188 uart_unregister_driver(&amba_reg); 2191 if (!busy)
2192 uart_unregister_driver(&amba_reg);
2189 return 0; 2193 return 0;
2190} 2194}
2191 2195
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index 5e6fdb1ea73b..14aaea0d4131 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -368,16 +368,12 @@ static const struct uart_ops uart_clps711x_ops = {
368static void uart_clps711x_console_putchar(struct uart_port *port, int ch) 368static void uart_clps711x_console_putchar(struct uart_port *port, int ch)
369{ 369{
370 struct clps711x_port *s = dev_get_drvdata(port->dev); 370 struct clps711x_port *s = dev_get_drvdata(port->dev);
371 u32 sysflg = 0;
371 372
372 /* Wait for FIFO is not full */ 373 /* Wait for FIFO is not full */
373 while (1) { 374 do {
374 u32 sysflg = 0;
375
376 regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg); 375 regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
377 if (!(sysflg & SYSFLG_UTXFF)) 376 } while (sysflg & SYSFLG_UTXFF);
378 break;
379 cond_resched();
380 }
381 377
382 writew(ch, port->membase + UARTDR_OFFSET); 378 writew(ch, port->membase + UARTDR_OFFSET);
383} 379}
@@ -387,18 +383,14 @@ static void uart_clps711x_console_write(struct console *co, const char *c,
387{ 383{
388 struct uart_port *port = clps711x_uart.state[co->index].uart_port; 384 struct uart_port *port = clps711x_uart.state[co->index].uart_port;
389 struct clps711x_port *s = dev_get_drvdata(port->dev); 385 struct clps711x_port *s = dev_get_drvdata(port->dev);
386 u32 sysflg = 0;
390 387
391 uart_console_write(port, c, n, uart_clps711x_console_putchar); 388 uart_console_write(port, c, n, uart_clps711x_console_putchar);
392 389
393 /* Wait for transmitter to become empty */ 390 /* Wait for transmitter to become empty */
394 while (1) { 391 do {
395 u32 sysflg = 0;
396
397 regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg); 392 regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg);
398 if (!(sysflg & SYSFLG_UBUSY)) 393 } while (sysflg & SYSFLG_UBUSY);
399 break;
400 cond_resched();
401 }
402} 394}
403 395
404static int uart_clps711x_console_setup(struct console *co, char *options) 396static int uart_clps711x_console_setup(struct console *co, char *options)
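The clps711x hunks above replace the open-coded while (1) { ... break; cond_resched(); } polls with do { regmap_read(...); } while (flag) loops that re-read SYSFLG on every pass. A tiny standalone model of that polling shape; read_sysflg() and TX_FIFO_FULL stand in for the regmap read and SYSFLG_UTXFF and are not real driver symbols:

/* Illustrative model only, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define TX_FIFO_FULL    (1u << 23)

static unsigned int remaining = 3;

static uint32_t read_sysflg(void)
{
        if (remaining) {
                remaining--;
                return TX_FIFO_FULL;    /* FIFO still full for a few polls */
        }
        return 0;                       /* room available */
}

static void putchar_polled(char ch)
{
        uint32_t sysflg;

        /* re-read the status register on every iteration; the read
         * itself is the body of the loop, hence do/while
         */
        do {
                sysflg = read_sysflg();
        } while (sysflg & TX_FIFO_FULL);

        printf("tx: %c\n", ch);         /* stands in for writew(ch, ...UARTDR) */
}

int main(void)
{
        putchar_polled('A');
        return 0;
}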
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 028582e924a5..c167a710dc39 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -798,6 +798,9 @@ static int efm32_uart_remove(struct platform_device *pdev)
798 798
799static const struct of_device_id efm32_uart_dt_ids[] = { 799static const struct of_device_id efm32_uart_dt_ids[] = {
800 { 800 {
801 .compatible = "energymicro,efm32-uart",
802 }, {
803 /* doesn't follow the "vendor,device" scheme, don't use */
801 .compatible = "efm32,uart", 804 .compatible = "efm32,uart",
802 }, { 805 }, {
803 /* sentinel */ 806 /* sentinel */
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index dd8b1a5458ff..08b6b9419f0d 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -225,14 +225,19 @@ static inline void serial_omap_enable_wakeirq(struct uart_omap_port *up,
225 if (enable) 225 if (enable)
226 enable_irq(up->wakeirq); 226 enable_irq(up->wakeirq);
227 else 227 else
228 disable_irq(up->wakeirq); 228 disable_irq_nosync(up->wakeirq);
229} 229}
230 230
231static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable) 231static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
232{ 232{
233 struct omap_uart_port_info *pdata = dev_get_platdata(up->dev); 233 struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);
234 234
235 if (enable == up->wakeups_enabled)
236 return;
237
235 serial_omap_enable_wakeirq(up, enable); 238 serial_omap_enable_wakeirq(up, enable);
239 up->wakeups_enabled = enable;
240
236 if (!pdata || !pdata->enable_wakeup) 241 if (!pdata || !pdata->enable_wakeup)
237 return; 242 return;
238 243
@@ -1495,6 +1500,11 @@ static int serial_omap_suspend(struct device *dev)
1495 uart_suspend_port(&serial_omap_reg, &up->port); 1500 uart_suspend_port(&serial_omap_reg, &up->port);
1496 flush_work(&up->qos_work); 1501 flush_work(&up->qos_work);
1497 1502
1503 if (device_may_wakeup(dev))
1504 serial_omap_enable_wakeup(up, true);
1505 else
1506 serial_omap_enable_wakeup(up, false);
1507
1498 return 0; 1508 return 0;
1499} 1509}
1500 1510
@@ -1502,6 +1512,9 @@ static int serial_omap_resume(struct device *dev)
1502{ 1512{
1503 struct uart_omap_port *up = dev_get_drvdata(dev); 1513 struct uart_omap_port *up = dev_get_drvdata(dev);
1504 1514
1515 if (device_may_wakeup(dev))
1516 serial_omap_enable_wakeup(up, false);
1517
1505 uart_resume_port(&serial_omap_reg, &up->port); 1518 uart_resume_port(&serial_omap_reg, &up->port);
1506 1519
1507 return 0; 1520 return 0;
@@ -1789,6 +1802,7 @@ static int serial_omap_remove(struct platform_device *dev)
1789 pm_runtime_disable(up->dev); 1802 pm_runtime_disable(up->dev);
1790 uart_remove_one_port(&serial_omap_reg, &up->port); 1803 uart_remove_one_port(&serial_omap_reg, &up->port);
1791 pm_qos_remove_request(&up->pm_qos_request); 1804 pm_qos_remove_request(&up->pm_qos_request);
1805 device_init_wakeup(&dev->dev, false);
1792 1806
1793 return 0; 1807 return 0;
1794} 1808}
@@ -1877,17 +1891,7 @@ static int serial_omap_runtime_suspend(struct device *dev)
1877 1891
1878 up->context_loss_cnt = serial_omap_get_context_loss_count(up); 1892 up->context_loss_cnt = serial_omap_get_context_loss_count(up);
1879 1893
1880 if (device_may_wakeup(dev)) { 1894 serial_omap_enable_wakeup(up, true);
1881 if (!up->wakeups_enabled) {
1882 serial_omap_enable_wakeup(up, true);
1883 up->wakeups_enabled = true;
1884 }
1885 } else {
1886 if (up->wakeups_enabled) {
1887 serial_omap_enable_wakeup(up, false);
1888 up->wakeups_enabled = false;
1889 }
1890 }
1891 1895
1892 up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE; 1896 up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
1893 schedule_work(&up->qos_work); 1897 schedule_work(&up->qos_work);
@@ -1901,6 +1905,8 @@ static int serial_omap_runtime_resume(struct device *dev)
1901 1905
1902 int loss_cnt = serial_omap_get_context_loss_count(up); 1906 int loss_cnt = serial_omap_get_context_loss_count(up);
1903 1907
1908 serial_omap_enable_wakeup(up, false);
1909
1904 if (loss_cnt < 0) { 1910 if (loss_cnt < 0) {
1905 dev_dbg(dev, "serial_omap_get_context_loss_count failed : %d\n", 1911 dev_dbg(dev, "serial_omap_get_context_loss_count failed : %d\n",
1906 loss_cnt); 1912 loss_cnt);
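The omap-serial changes hinge on making serial_omap_enable_wakeup() idempotent: the cached wakeups_enabled flag turns repeated calls with the same value into no-ops, so runtime PM and system suspend/resume can both call it without unbalancing the enable_irq()/disable_irq() pair. A compact model of that guard; struct port and hw_set_wakeup() are placeholders, not driver code:

/* Illustrative model only, not driver code. */
#include <stdbool.h>
#include <stdio.h>

struct port {
        bool wakeups_enabled;
};

static void hw_set_wakeup(bool enable)
{
        printf("wakeup irq %s\n", enable ? "enabled" : "disabled");
}

static void port_enable_wakeup(struct port *up, bool enable)
{
        if (enable == up->wakeups_enabled)
                return;                 /* already in the requested state */

        hw_set_wakeup(enable);
        up->wakeups_enabled = enable;
}

int main(void)
{
        struct port up = { .wakeups_enabled = false };

        port_enable_wakeup(&up, true);  /* enables once */
        port_enable_wakeup(&up, true);  /* no-op */
        port_enable_wakeup(&up, false); /* disables once */
        return 0;
}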
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 23f459600738..1f5505e7f90d 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1446,8 +1446,8 @@ static int s3c24xx_serial_get_poll_char(struct uart_port *port)
1446static void s3c24xx_serial_put_poll_char(struct uart_port *port, 1446static void s3c24xx_serial_put_poll_char(struct uart_port *port,
1447 unsigned char c) 1447 unsigned char c)
1448{ 1448{
1449 unsigned int ufcon = rd_regl(cons_uart, S3C2410_UFCON); 1449 unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
1450 unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON); 1450 unsigned int ucon = rd_regl(port, S3C2410_UCON);
1451 1451
1452 /* not possible to xmit on unconfigured port */ 1452 /* not possible to xmit on unconfigured port */
1453 if (!s3c24xx_port_configured(ucon)) 1453 if (!s3c24xx_port_configured(ucon))
@@ -1455,7 +1455,7 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
1455 1455
1456 while (!s3c24xx_serial_console_txrdy(port, ufcon)) 1456 while (!s3c24xx_serial_console_txrdy(port, ufcon))
1457 cpu_relax(); 1457 cpu_relax();
1458 wr_regb(cons_uart, S3C2410_UTXH, c); 1458 wr_regb(port, S3C2410_UTXH, c);
1459} 1459}
1460 1460
1461#endif /* CONFIG_CONSOLE_POLL */ 1461#endif /* CONFIG_CONSOLE_POLL */
@@ -1463,22 +1463,23 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
1463static void 1463static void
1464s3c24xx_serial_console_putchar(struct uart_port *port, int ch) 1464s3c24xx_serial_console_putchar(struct uart_port *port, int ch)
1465{ 1465{
1466 unsigned int ufcon = rd_regl(cons_uart, S3C2410_UFCON); 1466 unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
1467 unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
1468
1469 /* not possible to xmit on unconfigured port */
1470 if (!s3c24xx_port_configured(ucon))
1471 return;
1472 1467
1473 while (!s3c24xx_serial_console_txrdy(port, ufcon)) 1468 while (!s3c24xx_serial_console_txrdy(port, ufcon))
1474 barrier(); 1469 cpu_relax();
1475 wr_regb(cons_uart, S3C2410_UTXH, ch); 1470 wr_regb(port, S3C2410_UTXH, ch);
1476} 1471}
1477 1472
1478static void 1473static void
1479s3c24xx_serial_console_write(struct console *co, const char *s, 1474s3c24xx_serial_console_write(struct console *co, const char *s,
1480 unsigned int count) 1475 unsigned int count)
1481{ 1476{
1477 unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
1478
1479 /* not possible to xmit on unconfigured port */
1480 if (!s3c24xx_port_configured(ucon))
1481 return;
1482
1482 uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar); 1483 uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
1483} 1484}
1484 1485
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 2cf5649a6dc0..b68550d95a40 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -89,8 +89,7 @@ static void __uart_start(struct tty_struct *tty)
89 struct uart_state *state = tty->driver_data; 89 struct uart_state *state = tty->driver_data;
90 struct uart_port *port = state->uart_port; 90 struct uart_port *port = state->uart_port;
91 91
92 if (!uart_circ_empty(&state->xmit) && state->xmit.buf && 92 if (!tty->stopped && !tty->hw_stopped)
93 !tty->stopped && !tty->hw_stopped)
94 port->ops->start_tx(port); 93 port->ops->start_tx(port);
95} 94}
96 95
@@ -138,6 +137,11 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
138 return 1; 137 return 1;
139 138
140 /* 139 /*
140 * Make sure the device is in D0 state.
141 */
142 uart_change_pm(state, UART_PM_STATE_ON);
143
144 /*
141 * Initialise and allocate the transmit and temporary 145 * Initialise and allocate the transmit and temporary
142 * buffer. 146 * buffer.
143 */ 147 */
@@ -826,25 +830,29 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
826 * If we fail to request resources for the 830 * If we fail to request resources for the
827 * new port, try to restore the old settings. 831 * new port, try to restore the old settings.
828 */ 832 */
829 if (retval && old_type != PORT_UNKNOWN) { 833 if (retval) {
830 uport->iobase = old_iobase; 834 uport->iobase = old_iobase;
831 uport->type = old_type; 835 uport->type = old_type;
832 uport->hub6 = old_hub6; 836 uport->hub6 = old_hub6;
833 uport->iotype = old_iotype; 837 uport->iotype = old_iotype;
834 uport->regshift = old_shift; 838 uport->regshift = old_shift;
835 uport->mapbase = old_mapbase; 839 uport->mapbase = old_mapbase;
836 retval = uport->ops->request_port(uport);
837 /*
838 * If we failed to restore the old settings,
839 * we fail like this.
840 */
841 if (retval)
842 uport->type = PORT_UNKNOWN;
843 840
844 /* 841 if (old_type != PORT_UNKNOWN) {
845 * We failed anyway. 842 retval = uport->ops->request_port(uport);
846 */ 843 /*
847 retval = -EBUSY; 844 * If we failed to restore the old settings,
845 * we fail like this.
846 */
847 if (retval)
848 uport->type = PORT_UNKNOWN;
849
850 /*
851 * We failed anyway.
852 */
853 retval = -EBUSY;
854 }
855
848 /* Added to return the correct error -Ram Gupta */ 856 /* Added to return the correct error -Ram Gupta */
849 goto exit; 857 goto exit;
850 } 858 }
@@ -1452,6 +1460,8 @@ static void uart_hangup(struct tty_struct *tty)
1452 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags); 1460 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
1453 spin_unlock_irqrestore(&port->lock, flags); 1461 spin_unlock_irqrestore(&port->lock, flags);
1454 tty_port_tty_set(port, NULL); 1462 tty_port_tty_set(port, NULL);
1463 if (!uart_console(state->uart_port))
1464 uart_change_pm(state, UART_PM_STATE_OFF);
1455 wake_up_interruptible(&port->open_wait); 1465 wake_up_interruptible(&port->open_wait);
1456 wake_up_interruptible(&port->delta_msr_wait); 1466 wake_up_interruptible(&port->delta_msr_wait);
1457 } 1467 }
@@ -1570,12 +1580,6 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
1570 } 1580 }
1571 1581
1572 /* 1582 /*
1573 * Make sure the device is in D0 state.
1574 */
1575 if (port->count == 1)
1576 uart_change_pm(state, UART_PM_STATE_ON);
1577
1578 /*
1579 * Start up the serial port. 1583 * Start up the serial port.
1580 */ 1584 */
1581 retval = uart_startup(tty, state, 0); 1585 retval = uart_startup(tty, state, 0);
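The serial_core hunks above centralise the power-state handling: uart_port_startup() now always moves the port to UART_PM_STATE_ON before allocating buffers, uart_open() drops its own D0 check, and uart_hangup() powers non-console ports back off. A small userspace sketch of that shape; the enum, struct port and function names below are invented for the model:

/* Illustrative model only, not serial_core code. */
#include <stdbool.h>
#include <stdio.h>

enum pm_state { PM_OFF, PM_ON };

struct port {
        enum pm_state   power_state;
        bool            is_console;
};

static void change_pm(struct port *p, enum pm_state st)
{
        if (p->power_state != st) {
                p->power_state = st;
                printf("port now %s\n", st == PM_ON ? "on" : "off");
        }
}

static int port_startup(struct port *p)
{
        change_pm(p, PM_ON);    /* make sure the device is powered first */
        /* ... allocate xmit buffer, call ops->startup(), etc. ... */
        return 0;
}

static void port_hangup(struct port *p)
{
        /* ... shutdown, flush buffers ... */
        if (!p->is_console)
                change_pm(p, PM_OFF);
}

int main(void)
{
        struct port p = { .power_state = PM_OFF, .is_console = false };

        port_startup(&p);
        port_hangup(&p);
        return 0;
}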
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index 21e6e84c0df8..dd3a96e07026 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -295,7 +295,7 @@ static void asc_receive_chars(struct uart_port *port)
295 status & ASC_STA_OE) { 295 status & ASC_STA_OE) {
296 296
297 if (c & ASC_RXBUF_FE) { 297 if (c & ASC_RXBUF_FE) {
298 if (c == ASC_RXBUF_FE) { 298 if (c == (ASC_RXBUF_FE | ASC_RXBUF_DUMMY_RX)) {
299 port->icount.brk++; 299 port->icount.brk++;
300 if (uart_handle_break(port)) 300 if (uart_handle_break(port))
301 continue; 301 continue;
@@ -325,7 +325,7 @@ static void asc_receive_chars(struct uart_port *port)
325 flag = TTY_FRAME; 325 flag = TTY_FRAME;
326 } 326 }
327 327
328 if (uart_handle_sysrq_char(port, c)) 328 if (uart_handle_sysrq_char(port, c & 0xff))
329 continue; 329 continue;
330 330
331 uart_insert_char(port, c, ASC_RXBUF_DUMMY_OE, c & 0xff, flag); 331 uart_insert_char(port, c, ASC_RXBUF_DUMMY_OE, c & 0xff, flag);
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 8ebd9f88a6f6..cf78d1985cd8 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -258,7 +258,11 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
258 n->flags = flags; 258 n->flags = flags;
259 buf->tail = n; 259 buf->tail = n;
260 b->commit = b->used; 260 b->commit = b->used;
261 smp_mb(); 261 /* paired w/ barrier in flush_to_ldisc(); ensures the
262 * latest commit value can be read before the head is
263 * advanced to the next buffer
264 */
265 smp_wmb();
262 b->next = n; 266 b->next = n;
263 } else if (change) 267 } else if (change)
264 size = 0; 268 size = 0;
@@ -444,17 +448,24 @@ static void flush_to_ldisc(struct work_struct *work)
444 448
445 while (1) { 449 while (1) {
446 struct tty_buffer *head = buf->head; 450 struct tty_buffer *head = buf->head;
451 struct tty_buffer *next;
447 int count; 452 int count;
448 453
449 /* Ldisc or user is trying to gain exclusive access */ 454 /* Ldisc or user is trying to gain exclusive access */
450 if (atomic_read(&buf->priority)) 455 if (atomic_read(&buf->priority))
451 break; 456 break;
452 457
458 next = head->next;
459 /* paired w/ barrier in __tty_buffer_request_room();
460 * ensures commit value read is not stale if the head
461 * is advancing to the next buffer
462 */
463 smp_rmb();
453 count = head->commit - head->read; 464 count = head->commit - head->read;
454 if (!count) { 465 if (!count) {
455 if (head->next == NULL) 466 if (next == NULL)
456 break; 467 break;
457 buf->head = head->next; 468 buf->head = next;
458 tty_buffer_free(port, head); 469 tty_buffer_free(port, head);
459 continue; 470 continue;
460 } 471 }
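The tty_buffer.c change pairs an smp_wmb() in __tty_buffer_request_room() (order the commit store before publishing b->next) with an smp_rmb() in flush_to_ldisc() (read next before commit), so a consumer that sees the new buffer cannot read a stale commit value. A userspace analogue using C11 fences, which are somewhat stronger than the kernel's pure write/read barriers; struct buf, producer/consumer and the value 42 are all invented for the sketch:

/* Illustrative model only, not tty_buffer code. */
#include <stdatomic.h>
#include <pthread.h>
#include <assert.h>
#include <stddef.h>

struct buf {
        atomic_int              commit;
        _Atomic(struct buf *)   next;
};

static struct buf head, tail;

static void *producer(void *arg)
{
        (void)arg;
        atomic_store_explicit(&head.commit, 42, memory_order_relaxed);
        /* analogue of smp_wmb(): commit must be visible before next */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&head.next, &tail, memory_order_relaxed);
        return NULL;
}

static void *consumer(void *arg)
{
        struct buf *next;

        (void)arg;
        do {
                next = atomic_load_explicit(&head.next, memory_order_relaxed);
        } while (!next);
        /* analogue of smp_rmb(): the commit read below cannot be older
         * than the value stored before the producer's barrier
         */
        atomic_thread_fence(memory_order_acquire);
        assert(atomic_load_explicit(&head.commit, memory_order_relaxed) == 42);
        return NULL;
}

int main(void)
{
        pthread_t p, c;

        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
}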
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index d3448a90f0f9..34110719fe03 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -878,9 +878,8 @@ void disassociate_ctty(int on_exit)
878 spin_lock_irq(&current->sighand->siglock); 878 spin_lock_irq(&current->sighand->siglock);
879 put_pid(current->signal->tty_old_pgrp); 879 put_pid(current->signal->tty_old_pgrp);
880 current->signal->tty_old_pgrp = NULL; 880 current->signal->tty_old_pgrp = NULL;
881 spin_unlock_irq(&current->sighand->siglock);
882 881
883 tty = get_current_tty(); 882 tty = tty_kref_get(current->signal->tty);
884 if (tty) { 883 if (tty) {
885 unsigned long flags; 884 unsigned long flags;
886 spin_lock_irqsave(&tty->ctrl_lock, flags); 885 spin_lock_irqsave(&tty->ctrl_lock, flags);
@@ -897,6 +896,7 @@ void disassociate_ctty(int on_exit)
897#endif 896#endif
898 } 897 }
899 898
899 spin_unlock_irq(&current->sighand->siglock);
900 /* Now clear signal->tty under the lock */ 900 /* Now clear signal->tty under the lock */
901 read_lock(&tasklist_lock); 901 read_lock(&tasklist_lock);
902 session_clear_tty(task_session(current)); 902 session_clear_tty(task_session(current));
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index ca6831c5b763..1cd5d0ba587c 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -277,6 +277,39 @@ static void hw_phymode_configure(struct ci_hdrc *ci)
277} 277}
278 278
279/** 279/**
280 * ci_usb_phy_init: initialize phy according to different phy type
281 * @ci: the controller
282 *
283 * This function returns an error code if usb_phy_init has failed
284 */
285static int ci_usb_phy_init(struct ci_hdrc *ci)
286{
287 int ret;
288
289 switch (ci->platdata->phy_mode) {
290 case USBPHY_INTERFACE_MODE_UTMI:
291 case USBPHY_INTERFACE_MODE_UTMIW:
292 case USBPHY_INTERFACE_MODE_HSIC:
293 ret = usb_phy_init(ci->transceiver);
294 if (ret)
295 return ret;
296 hw_phymode_configure(ci);
297 break;
298 case USBPHY_INTERFACE_MODE_ULPI:
299 case USBPHY_INTERFACE_MODE_SERIAL:
300 hw_phymode_configure(ci);
301 ret = usb_phy_init(ci->transceiver);
302 if (ret)
303 return ret;
304 break;
305 default:
306 ret = usb_phy_init(ci->transceiver);
307 }
308
309 return ret;
310}
311
312/**
280 * hw_device_reset: resets chip (execute without interruption) 313 * hw_device_reset: resets chip (execute without interruption)
281 * @ci: the controller 314 * @ci: the controller
282 * 315 *
@@ -543,8 +576,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
543 return -ENODEV; 576 return -ENODEV;
544 } 577 }
545 578
546 hw_phymode_configure(ci);
547
548 if (ci->platdata->phy) 579 if (ci->platdata->phy)
549 ci->transceiver = ci->platdata->phy; 580 ci->transceiver = ci->platdata->phy;
550 else 581 else
@@ -564,7 +595,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
564 return -EPROBE_DEFER; 595 return -EPROBE_DEFER;
565 } 596 }
566 597
567 ret = usb_phy_init(ci->transceiver); 598 ret = ci_usb_phy_init(ci);
568 if (ret) { 599 if (ret) {
569 dev_err(dev, "unable to init phy: %d\n", ret); 600 dev_err(dev, "unable to init phy: %d\n", ret);
570 return ret; 601 return ret;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 900f7ff805ee..904efb6035b0 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -518,13 +518,16 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
518 if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) { 518 if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
519 dev_err(&acm->control->dev, 519 dev_err(&acm->control->dev,
520 "%s - usb_submit_urb(ctrl irq) failed\n", __func__); 520 "%s - usb_submit_urb(ctrl irq) failed\n", __func__);
521 usb_autopm_put_interface(acm->control);
521 goto error_submit_urb; 522 goto error_submit_urb;
522 } 523 }
523 524
524 acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS; 525 acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
525 if (acm_set_control(acm, acm->ctrlout) < 0 && 526 if (acm_set_control(acm, acm->ctrlout) < 0 &&
526 (acm->ctrl_caps & USB_CDC_CAP_LINE)) 527 (acm->ctrl_caps & USB_CDC_CAP_LINE)) {
528 usb_autopm_put_interface(acm->control);
527 goto error_set_control; 529 goto error_set_control;
530 }
528 531
529 usb_autopm_put_interface(acm->control); 532 usb_autopm_put_interface(acm->control);
530 533
@@ -549,7 +552,6 @@ error_submit_read_urbs:
549error_set_control: 552error_set_control:
550 usb_kill_urb(acm->ctrlurb); 553 usb_kill_urb(acm->ctrlurb);
551error_submit_urb: 554error_submit_urb:
552 usb_autopm_put_interface(acm->control);
553error_get_interface: 555error_get_interface:
554disconnected: 556disconnected:
555 mutex_unlock(&acm->mutex); 557 mutex_unlock(&acm->mutex);
@@ -1652,13 +1654,27 @@ static const struct usb_device_id acm_ids[] = {
1652 }, 1654 },
1653 /* Motorola H24 HSPA module: */ 1655 /* Motorola H24 HSPA module: */
1654 { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */ 1656 { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
1655 { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */ 1657 { USB_DEVICE(0x22b8, 0x2d92), /* modem + diagnostics */
1656 { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */ 1658 .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1657 { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */ 1659 },
1658 { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */ 1660 { USB_DEVICE(0x22b8, 0x2d93), /* modem + AT port */
1659 { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */ 1661 .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1660 { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */ 1662 },
1661 { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */ 1663 { USB_DEVICE(0x22b8, 0x2d95), /* modem + AT port + diagnostics */
1664 .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1665 },
1666 { USB_DEVICE(0x22b8, 0x2d96), /* modem + NMEA */
1667 .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1668 },
1669 { USB_DEVICE(0x22b8, 0x2d97), /* modem + diagnostics + NMEA */
1670 .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1671 },
1672 { USB_DEVICE(0x22b8, 0x2d99), /* modem + AT port + NMEA */
1673 .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1674 },
1675 { USB_DEVICE(0x22b8, 0x2d9a), /* modem + AT port + diagnostics + NMEA */
1676 .driver_info = NO_UNION_NORMAL, /* handle only modem interface */
1677 },
1662 1678
1663 { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */ 1679 { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
1664 .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on 1680 .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
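In acm_port_activate() the usb_autopm_put_interface() call moves out of the shared error_submit_urb label and into the two early-failure sites: later error paths jump to the same labels after the success path has already dropped the PM reference, so keeping the put in the label would drop it twice. A generic model of keeping exactly one put per path around goto-style cleanup; pm_get()/pm_put(), step_a()..step_c() and the labels are invented names:

/* Illustrative model only, not cdc-acm code. */
#include <stdio.h>

static int pm_refs;

static void pm_get(void) { pm_refs++; }
static void pm_put(void) { pm_refs--; }

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -19; } /* force a late failure */

static int activate(void)
{
        int ret;

        pm_get();

        ret = step_a();
        if (ret) {
                pm_put();               /* drop the ref where we bail out */
                goto err_a;
        }

        ret = step_b();
        if (ret) {
                pm_put();
                goto err_b;
        }

        pm_put();                       /* success path drops it here */

        ret = step_c();                 /* runs without the reference */
        if (ret)
                goto err_b;             /* no put: already dropped above */

        return 0;

err_b:
        /* undo step_a's side effects */
err_a:
        return ret;
}

int main(void)
{
        activate();
        printf("pm_refs = %d (must be 0)\n", pm_refs);
        return 0;
}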
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index d59d99347d54..1f02e65fe305 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -75,7 +75,7 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
75 PCI_SLOT(companion->devfn) != slot) 75 PCI_SLOT(companion->devfn) != slot)
76 continue; 76 continue;
77 companion_hcd = pci_get_drvdata(companion); 77 companion_hcd = pci_get_drvdata(companion);
78 if (!companion_hcd) 78 if (!companion_hcd || !companion_hcd->self.root_hub)
79 continue; 79 continue;
80 fn(pdev, hcd, companion, companion_hcd); 80 fn(pdev, hcd, companion, companion_hcd);
81 } 81 }
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index d001417e8e37..10aaaae9af25 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -821,6 +821,7 @@ static void dwc3_complete(struct device *dev)
821 821
822 spin_lock_irqsave(&dwc->lock, flags); 822 spin_lock_irqsave(&dwc->lock, flags);
823 823
824 dwc3_event_buffers_setup(dwc);
824 switch (dwc->dr_mode) { 825 switch (dwc->dr_mode) {
825 case USB_DR_MODE_PERIPHERAL: 826 case USB_DR_MODE_PERIPHERAL:
826 case USB_DR_MODE_OTG: 827 case USB_DR_MODE_OTG:
@@ -828,7 +829,6 @@ static void dwc3_complete(struct device *dev)
828 /* FALLTHROUGH */ 829 /* FALLTHROUGH */
829 case USB_DR_MODE_HOST: 830 case USB_DR_MODE_HOST:
830 default: 831 default:
831 dwc3_event_buffers_setup(dwc);
832 break; 832 break;
833 } 833 }
834 834
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index a740eac74d56..70715eeededd 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -187,15 +187,12 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
187 * improve this algorithm so that we better use the internal 187 * improve this algorithm so that we better use the internal
188 * FIFO space 188 * FIFO space
189 */ 189 */
190 for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) { 190 for (num = 0; num < dwc->num_in_eps; num++) {
191 struct dwc3_ep *dep = dwc->eps[num]; 191 /* bit0 indicates direction; 1 means IN ep */
192 int fifo_number = dep->number >> 1; 192 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
193 int mult = 1; 193 int mult = 1;
194 int tmp; 194 int tmp;
195 195
196 if (!(dep->number & 1))
197 continue;
198
199 if (!(dep->flags & DWC3_EP_ENABLED)) 196 if (!(dep->flags & DWC3_EP_ENABLED))
200 continue; 197 continue;
201 198
@@ -224,8 +221,7 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
224 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n", 221 dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
225 dep->name, last_fifo_depth, fifo_size & 0xffff); 222 dep->name, last_fifo_depth, fifo_size & 0xffff);
226 223
227 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number), 224 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
228 fifo_size);
229 225
230 last_fifo_depth += (fifo_size & 0xffff); 226 last_fifo_depth += (fifo_size & 0xffff);
231 } 227 }
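The dwc3_gadget_resize_tx_fifos() rewrite walks only the IN endpoints by indexing dwc->eps[] directly: the array interleaves OUT/IN endpoints and, as the added comment notes, bit 0 of the endpoint number is the direction, so the n-th IN endpoint sits at index (n << 1) | 1. A quick standalone check of that arithmetic (num_in_eps is just a sample value here):

/* Illustrative check only, not driver code. */
#include <stdio.h>

int main(void)
{
        int num_in_eps = 4;

        for (int num = 0; num < num_in_eps; num++)
                printf("IN ep %d -> eps[%d]\n", num, (num << 1) | 1);
        /* prints eps[1], eps[3], eps[5], eps[7] */
        return 0;
}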
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index f605ad8c1902..cfd18bcca723 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1709,16 +1709,6 @@ static int at91udc_probe(struct platform_device *pdev)
1709 return -ENODEV; 1709 return -ENODEV;
1710 } 1710 }
1711 1711
1712 if (pdev->num_resources != 2) {
1713 DBG("invalid num_resources\n");
1714 return -ENODEV;
1715 }
1716 if ((pdev->resource[0].flags != IORESOURCE_MEM)
1717 || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
1718 DBG("invalid resource type\n");
1719 return -ENODEV;
1720 }
1721
1722 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1712 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1723 if (!res) 1713 if (!res)
1724 return -ENXIO; 1714 return -ENXIO;
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 2e164dca08e8..1e12b3ee56fd 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -745,6 +745,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
745 */ 745 */
746 struct usb_gadget *gadget = epfile->ffs->gadget; 746 struct usb_gadget *gadget = epfile->ffs->gadget;
747 747
748 spin_lock_irq(&epfile->ffs->eps_lock);
749 /* In the meantime, endpoint got disabled or changed. */
750 if (epfile->ep != ep) {
751 spin_unlock_irq(&epfile->ffs->eps_lock);
752 return -ESHUTDOWN;
753 }
748 /* 754 /*
749 * Controller may require buffer size to be aligned to 755 * Controller may require buffer size to be aligned to
750 * maxpacketsize of an out endpoint. 756 * maxpacketsize of an out endpoint.
@@ -752,6 +758,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
752 data_len = io_data->read ? 758 data_len = io_data->read ?
753 usb_ep_align_maybe(gadget, ep->ep, io_data->len) : 759 usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
754 io_data->len; 760 io_data->len;
761 spin_unlock_irq(&epfile->ffs->eps_lock);
755 762
756 data = kmalloc(data_len, GFP_KERNEL); 763 data = kmalloc(data_len, GFP_KERNEL);
757 if (unlikely(!data)) 764 if (unlikely(!data))
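The f_fs.c hunk re-validates the endpoint under eps_lock before sizing the buffer: the ep pointer captured earlier may have been disabled or swapped while the caller slept, so the I/O is aborted with -ESHUTDOWN if epfile->ep no longer matches. A reduced model of that check-again-under-the-lock pattern; struct epfile, do_io() and the 512-byte alignment below are invented for the illustration:

/* Illustrative model only, not f_fs code. */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct epfile {
        pthread_mutex_t lock;
        void            *ep;    /* current endpoint, may be swapped/NULLed */
};

static int do_io(struct epfile *f, void *ep_snapshot, size_t len)
{
        size_t aligned;

        pthread_mutex_lock(&f->lock);
        if (f->ep != ep_snapshot) {             /* endpoint went away meanwhile */
                pthread_mutex_unlock(&f->lock);
                return -ESHUTDOWN;
        }
        aligned = (len + 511) & ~(size_t)511;   /* size things up under the lock */
        pthread_mutex_unlock(&f->lock);

        printf("submitting %zu bytes\n", aligned);
        return 0;
}

int main(void)
{
        int ep_obj;
        struct epfile f = { .lock = PTHREAD_MUTEX_INITIALIZER, .ep = &ep_obj };
        void *snap = f.ep;

        do_io(&f, snap, 100);                   /* ok */
        f.ep = NULL;                            /* simulate endpoint teardown */
        printf("ret = %d\n", do_io(&f, snap, 100));     /* -ESHUTDOWN */
        return 0;
}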
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index c11761ce5113..9a4f49dc6ac4 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -377,7 +377,7 @@ static struct sk_buff *rndis_add_header(struct gether *port,
377 if (skb2) 377 if (skb2)
378 rndis_add_hdr(skb2); 378 rndis_add_hdr(skb2);
379 379
380 dev_kfree_skb_any(skb); 380 dev_kfree_skb(skb);
381 return skb2; 381 return skb2;
382} 382}
383 383
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 15960af0f67e..a2f26cdb56fe 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -1219,6 +1219,10 @@ static int fsl_pullup(struct usb_gadget *gadget, int is_on)
1219 struct fsl_udc *udc; 1219 struct fsl_udc *udc;
1220 1220
1221 udc = container_of(gadget, struct fsl_udc, gadget); 1221 udc = container_of(gadget, struct fsl_udc, gadget);
1222
1223 if (!udc->vbus_active)
1224 return -EOPNOTSUPP;
1225
1222 udc->softconnect = (is_on != 0); 1226 udc->softconnect = (is_on != 0);
1223 if (can_pullup(udc)) 1227 if (can_pullup(udc))
1224 fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP), 1228 fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
@@ -2532,8 +2536,8 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
2532 if (!udc_controller) 2536 if (!udc_controller)
2533 return -ENODEV; 2537 return -ENODEV;
2534 2538
2535 usb_del_gadget_udc(&udc_controller->gadget);
2536 udc_controller->done = &done; 2539 udc_controller->done = &done;
2540 usb_del_gadget_udc(&udc_controller->gadget);
2537 2541
2538 fsl_udc_clk_release(); 2542 fsl_udc_clk_release();
2539 2543
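fsl_udc_remove() now assigns udc_controller->done before calling usb_del_gadget_udc(), presumably so that anything the deletion triggers already sees the completion pointer it is expected to signal. A toy single-threaded model of why that ordering matters; all names are invented:

/* Illustrative model only, not fsl_udc code. */
#include <stdbool.h>
#include <stdio.h>

struct udc {
        bool *done;             /* completion flag to signal, if set */
};

static void release(struct udc *u)      /* runs as a side effect of deletion */
{
        if (u->done)
                *u->done = true;
        else
                printf("completion missed\n");
}

static void del_gadget(struct udc *u)
{
        release(u);             /* deletion ends up invoking the release path */
}

static void remove_udc(struct udc *u)
{
        bool done = false;

        u->done = &done;        /* must be set before deletion starts */
        del_gadget(u);

        printf("removal %s\n", done ? "completed" : "incomplete");
}

int main(void)
{
        struct udc u = { .done = NULL };

        remove_udc(&u);
        return 0;
}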
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index b5be6f0308c2..a925d0cbcd41 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -2043,6 +2043,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
2043 return -ESRCH; 2043 return -ESRCH;
2044 2044
2045 /* fake probe to determine $CHIP */ 2045 /* fake probe to determine $CHIP */
2046 CHIP = NULL;
2046 usb_gadget_probe_driver(&probe_driver); 2047 usb_gadget_probe_driver(&probe_driver);
2047 if (!CHIP) 2048 if (!CHIP)
2048 return -ENODEV; 2049 return -ENODEV;
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index d822d822efb3..7ed452d90f4d 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -35,6 +35,7 @@
35#include <asm/byteorder.h> 35#include <asm/byteorder.h>
36#include <asm/unaligned.h> 36#include <asm/unaligned.h>
37 37
38#include "u_rndis.h"
38 39
39#undef VERBOSE_DEBUG 40#undef VERBOSE_DEBUG
40 41
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 50d09c289137..b7d4f82872b7 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -48,8 +48,6 @@
48 48
49#define UETH__VERSION "29-May-2008" 49#define UETH__VERSION "29-May-2008"
50 50
51#define GETHER_NAPI_WEIGHT 32
52
53struct eth_dev { 51struct eth_dev {
54 /* lock is held while accessing port_usb 52 /* lock is held while accessing port_usb
55 */ 53 */
@@ -74,7 +72,6 @@ struct eth_dev {
74 struct sk_buff_head *list); 72 struct sk_buff_head *list);
75 73
76 struct work_struct work; 74 struct work_struct work;
77 struct napi_struct rx_napi;
78 75
79 unsigned long todo; 76 unsigned long todo;
80#define WORK_RX_MEMORY 0 77#define WORK_RX_MEMORY 0
@@ -256,16 +253,18 @@ enomem:
256 DBG(dev, "rx submit --> %d\n", retval); 253 DBG(dev, "rx submit --> %d\n", retval);
257 if (skb) 254 if (skb)
258 dev_kfree_skb_any(skb); 255 dev_kfree_skb_any(skb);
256 spin_lock_irqsave(&dev->req_lock, flags);
257 list_add(&req->list, &dev->rx_reqs);
258 spin_unlock_irqrestore(&dev->req_lock, flags);
259 } 259 }
260 return retval; 260 return retval;
261} 261}
262 262
263static void rx_complete(struct usb_ep *ep, struct usb_request *req) 263static void rx_complete(struct usb_ep *ep, struct usb_request *req)
264{ 264{
265 struct sk_buff *skb = req->context; 265 struct sk_buff *skb = req->context, *skb2;
266 struct eth_dev *dev = ep->driver_data; 266 struct eth_dev *dev = ep->driver_data;
267 int status = req->status; 267 int status = req->status;
268 bool rx_queue = 0;
269 268
270 switch (status) { 269 switch (status) {
271 270
@@ -289,8 +288,30 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
289 } else { 288 } else {
290 skb_queue_tail(&dev->rx_frames, skb); 289 skb_queue_tail(&dev->rx_frames, skb);
291 } 290 }
292 if (!status) 291 skb = NULL;
293 rx_queue = 1; 292
293 skb2 = skb_dequeue(&dev->rx_frames);
294 while (skb2) {
295 if (status < 0
296 || ETH_HLEN > skb2->len
297 || skb2->len > VLAN_ETH_FRAME_LEN) {
298 dev->net->stats.rx_errors++;
299 dev->net->stats.rx_length_errors++;
300 DBG(dev, "rx length %d\n", skb2->len);
301 dev_kfree_skb_any(skb2);
302 goto next_frame;
303 }
304 skb2->protocol = eth_type_trans(skb2, dev->net);
305 dev->net->stats.rx_packets++;
306 dev->net->stats.rx_bytes += skb2->len;
307
308 /* no buffer copies needed, unless hardware can't
309 * use skb buffers.
310 */
311 status = netif_rx(skb2);
312next_frame:
313 skb2 = skb_dequeue(&dev->rx_frames);
314 }
294 break; 315 break;
295 316
296 /* software-driven interface shutdown */ 317 /* software-driven interface shutdown */
@@ -313,20 +334,22 @@ quiesce:
313 /* FALLTHROUGH */ 334 /* FALLTHROUGH */
314 335
315 default: 336 default:
316 rx_queue = 1;
317 dev_kfree_skb_any(skb);
318 dev->net->stats.rx_errors++; 337 dev->net->stats.rx_errors++;
319 DBG(dev, "rx status %d\n", status); 338 DBG(dev, "rx status %d\n", status);
320 break; 339 break;
321 } 340 }
322 341
342 if (skb)
343 dev_kfree_skb_any(skb);
344 if (!netif_running(dev->net)) {
323clean: 345clean:
324 spin_lock(&dev->req_lock); 346 spin_lock(&dev->req_lock);
325 list_add(&req->list, &dev->rx_reqs); 347 list_add(&req->list, &dev->rx_reqs);
326 spin_unlock(&dev->req_lock); 348 spin_unlock(&dev->req_lock);
327 349 req = NULL;
328 if (rx_queue && likely(napi_schedule_prep(&dev->rx_napi))) 350 }
329 __napi_schedule(&dev->rx_napi); 351 if (req)
352 rx_submit(dev, req, GFP_ATOMIC);
330} 353}
331 354
332static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n) 355static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -391,24 +414,16 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
391{ 414{
392 struct usb_request *req; 415 struct usb_request *req;
393 unsigned long flags; 416 unsigned long flags;
394 int rx_counts = 0;
395 417
396 /* fill unused rxq slots with some skb */ 418 /* fill unused rxq slots with some skb */
397 spin_lock_irqsave(&dev->req_lock, flags); 419 spin_lock_irqsave(&dev->req_lock, flags);
398 while (!list_empty(&dev->rx_reqs)) { 420 while (!list_empty(&dev->rx_reqs)) {
399
400 if (++rx_counts > qlen(dev->gadget, dev->qmult))
401 break;
402
403 req = container_of(dev->rx_reqs.next, 421 req = container_of(dev->rx_reqs.next,
404 struct usb_request, list); 422 struct usb_request, list);
405 list_del_init(&req->list); 423 list_del_init(&req->list);
406 spin_unlock_irqrestore(&dev->req_lock, flags); 424 spin_unlock_irqrestore(&dev->req_lock, flags);
407 425
408 if (rx_submit(dev, req, gfp_flags) < 0) { 426 if (rx_submit(dev, req, gfp_flags) < 0) {
409 spin_lock_irqsave(&dev->req_lock, flags);
410 list_add(&req->list, &dev->rx_reqs);
411 spin_unlock_irqrestore(&dev->req_lock, flags);
412 defer_kevent(dev, WORK_RX_MEMORY); 427 defer_kevent(dev, WORK_RX_MEMORY);
413 return; 428 return;
414 } 429 }
@@ -418,41 +433,6 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
418 spin_unlock_irqrestore(&dev->req_lock, flags); 433 spin_unlock_irqrestore(&dev->req_lock, flags);
419} 434}
420 435
421static int gether_poll(struct napi_struct *napi, int budget)
422{
423 struct eth_dev *dev = container_of(napi, struct eth_dev, rx_napi);
424 struct sk_buff *skb;
425 unsigned int work_done = 0;
426 int status = 0;
427
428 while ((skb = skb_dequeue(&dev->rx_frames))) {
429 if (status < 0
430 || ETH_HLEN > skb->len
431 || skb->len > VLAN_ETH_FRAME_LEN) {
432 dev->net->stats.rx_errors++;
433 dev->net->stats.rx_length_errors++;
434 DBG(dev, "rx length %d\n", skb->len);
435 dev_kfree_skb_any(skb);
436 continue;
437 }
438 skb->protocol = eth_type_trans(skb, dev->net);
439 dev->net->stats.rx_packets++;
440 dev->net->stats.rx_bytes += skb->len;
441
442 status = netif_rx_ni(skb);
443 }
444
445 if (netif_running(dev->net)) {
446 rx_fill(dev, GFP_KERNEL);
447 work_done++;
448 }
449
450 if (work_done < budget)
451 napi_complete(&dev->rx_napi);
452
453 return work_done;
454}
455
456static void eth_work(struct work_struct *work) 436static void eth_work(struct work_struct *work)
457{ 437{
458 struct eth_dev *dev = container_of(work, struct eth_dev, work); 438 struct eth_dev *dev = container_of(work, struct eth_dev, work);
@@ -645,7 +625,6 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
645 /* and open the tx floodgates */ 625 /* and open the tx floodgates */
646 atomic_set(&dev->tx_qlen, 0); 626 atomic_set(&dev->tx_qlen, 0);
647 netif_wake_queue(dev->net); 627 netif_wake_queue(dev->net);
648 napi_enable(&dev->rx_napi);
649} 628}
650 629
651static int eth_open(struct net_device *net) 630static int eth_open(struct net_device *net)
@@ -672,7 +651,6 @@ static int eth_stop(struct net_device *net)
672 unsigned long flags; 651 unsigned long flags;
673 652
674 VDBG(dev, "%s\n", __func__); 653 VDBG(dev, "%s\n", __func__);
675 napi_disable(&dev->rx_napi);
676 netif_stop_queue(net); 654 netif_stop_queue(net);
677 655
678 DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n", 656 DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
@@ -790,7 +768,6 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
790 return ERR_PTR(-ENOMEM); 768 return ERR_PTR(-ENOMEM);
791 769
792 dev = netdev_priv(net); 770 dev = netdev_priv(net);
793 netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
794 spin_lock_init(&dev->lock); 771 spin_lock_init(&dev->lock);
795 spin_lock_init(&dev->req_lock); 772 spin_lock_init(&dev->req_lock);
796 INIT_WORK(&dev->work, eth_work); 773 INIT_WORK(&dev->work, eth_work);
@@ -853,7 +830,6 @@ struct net_device *gether_setup_name_default(const char *netname)
853 return ERR_PTR(-ENOMEM); 830 return ERR_PTR(-ENOMEM);
854 831
855 dev = netdev_priv(net); 832 dev = netdev_priv(net);
856 netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
857 spin_lock_init(&dev->lock); 833 spin_lock_init(&dev->lock);
858 spin_lock_init(&dev->req_lock); 834 spin_lock_init(&dev->req_lock);
859 INIT_WORK(&dev->work, eth_work); 835 INIT_WORK(&dev->work, eth_work);
@@ -1137,7 +1113,6 @@ void gether_disconnect(struct gether *link)
1137{ 1113{
1138 struct eth_dev *dev = link->ioport; 1114 struct eth_dev *dev = link->ioport;
1139 struct usb_request *req; 1115 struct usb_request *req;
1140 struct sk_buff *skb;
1141 1116
1142 WARN_ON(!dev); 1117 WARN_ON(!dev);
1143 if (!dev) 1118 if (!dev)
@@ -1164,12 +1139,6 @@ void gether_disconnect(struct gether *link)
1164 spin_lock(&dev->req_lock); 1139 spin_lock(&dev->req_lock);
1165 } 1140 }
1166 spin_unlock(&dev->req_lock); 1141 spin_unlock(&dev->req_lock);
1167
1168 spin_lock(&dev->rx_frames.lock);
1169 while ((skb = __skb_dequeue(&dev->rx_frames)))
1170 dev_kfree_skb_any(skb);
1171 spin_unlock(&dev->rx_frames.lock);
1172
1173 link->in_ep->driver_data = NULL; 1142 link->in_ep->driver_data = NULL;
1174 link->in_ep->desc = NULL; 1143 link->in_ep->desc = NULL;
1175 1144
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 9f170c53e3d9..134f354ede62 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -300,7 +300,7 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
300 ss_opts->isoc_interval = gzero_options.isoc_interval; 300 ss_opts->isoc_interval = gzero_options.isoc_interval;
301 ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket; 301 ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
302 ss_opts->isoc_mult = gzero_options.isoc_mult; 302 ss_opts->isoc_mult = gzero_options.isoc_mult;
303 ss_opts->isoc_maxburst = gzero_options.isoc_maxpacket; 303 ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
304 ss_opts->bulk_buflen = gzero_options.bulk_buflen; 304 ss_opts->bulk_buflen = gzero_options.bulk_buflen;
305 305
306 func_ss = usb_get_function(func_inst_ss); 306 func_ss = usb_get_function(func_inst_ss);
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index d1d8c47777c5..7f425acd9be5 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -212,6 +212,8 @@ static int exynos_ehci_suspend(struct device *dev)
212 int rc; 212 int rc;
213 213
214 rc = ehci_suspend(hcd, do_wakeup); 214 rc = ehci_suspend(hcd, do_wakeup);
215 if (rc)
216 return rc;
215 217
216 if (exynos_ehci->otg) 218 if (exynos_ehci->otg)
217 exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self); 219 exynos_ehci->otg->set_host(exynos_ehci->otg, &hcd->self);
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 6f2c8d3899d2..cf2734b532a7 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -248,7 +248,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
248 break; 248 break;
249 } 249 }
250 250
251 if (pdata->have_sysif_regs && pdata->controller_ver && 251 if (pdata->have_sysif_regs &&
252 pdata->controller_ver > FSL_USB_VER_1_6 &&
252 (phy_mode == FSL_USB2_PHY_ULPI)) { 253 (phy_mode == FSL_USB2_PHY_ULPI)) {
253 /* check PHY_CLK_VALID to get phy clk valid */ 254 /* check PHY_CLK_VALID to get phy clk valid */
254 if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) & 255 if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index b3a0e11073aa..c7dd93aad20c 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -303,6 +303,8 @@ static int ehci_platform_suspend(struct device *dev)
303 int ret; 303 int ret;
304 304
305 ret = ehci_suspend(hcd, do_wakeup); 305 ret = ehci_suspend(hcd, do_wakeup);
306 if (ret)
307 return ret;
306 308
307 if (pdata->power_suspend) 309 if (pdata->power_suspend)
308 pdata->power_suspend(pdev); 310 pdata->power_suspend(pdev);
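This hunk, like the matching one in ehci-exynos above, stops ignoring the return value of ehci_suspend(): if the core suspend fails, the error is propagated before the platform power-off hook runs. Schematically, with invented names:

/* Illustrative sketch only, not EHCI code. */
#include <stdio.h>

static int core_suspend(void)           { return -16; /* e.g. -EBUSY */ }
static void platform_power_off(void)    { printf("power off\n"); }

static int bus_suspend(void)
{
        int ret = core_suspend();

        if (ret)
                return ret;     /* bail out before powering down */

        platform_power_off();
        return 0;
}

int main(void)
{
        printf("suspend -> %d\n", bus_suspend());
        return 0;
}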
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 27ac6ad53c3d..7ef00ecb0da1 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -509,8 +509,31 @@ static struct platform_driver tegra_ehci_driver = {
509 } 509 }
510}; 510};
511 511
512static int tegra_ehci_reset(struct usb_hcd *hcd)
513{
514 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
515 int retval;
516 int txfifothresh;
517
518 retval = ehci_setup(hcd);
519 if (retval)
520 return retval;
521
522 /*
523 * We should really pull this value out of tegra_ehci_soc_config, but
524 * to avoid needing access to it, make use of the fact that Tegra20 is
525 * the only one so far that needs a value of 10, and Tegra20 is the
526 * only one which doesn't set has_hostpc.
527 */
528 txfifothresh = ehci->has_hostpc ? 0x10 : 10;
529 ehci_writel(ehci, txfifothresh << 16, &ehci->regs->txfill_tuning);
530
531 return 0;
532}
533
512static const struct ehci_driver_overrides tegra_overrides __initconst = { 534static const struct ehci_driver_overrides tegra_overrides __initconst = {
513 .extra_priv_size = sizeof(struct tegra_ehci_hcd), 535 .extra_priv_size = sizeof(struct tegra_ehci_hcd),
536 .reset = tegra_ehci_reset,
514}; 537};
515 538
516static int __init ehci_tegra_init(void) 539static int __init ehci_tegra_init(void)
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index c81c8721cc5a..cd871b895013 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -90,6 +90,24 @@ __acquires(ohci->lock)
90 dl_done_list (ohci); 90 dl_done_list (ohci);
91 finish_unlinks (ohci, ohci_frame_no(ohci)); 91 finish_unlinks (ohci, ohci_frame_no(ohci));
92 92
93 /*
94 * Some controllers don't handle "global" suspend properly if
95 * there are unsuspended ports. For these controllers, put all
96 * the enabled ports into suspend before suspending the root hub.
97 */
98 if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) {
99 __hc32 __iomem *portstat = ohci->regs->roothub.portstatus;
100 int i;
101 unsigned temp;
102
103 for (i = 0; i < ohci->num_ports; (++i, ++portstat)) {
104 temp = ohci_readl(ohci, portstat);
105 if ((temp & (RH_PS_PES | RH_PS_PSS)) ==
106 RH_PS_PES)
107 ohci_writel(ohci, RH_PS_PSS, portstat);
108 }
109 }
110
93 /* maybe resume can wake root hub */ 111 /* maybe resume can wake root hub */
94 if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) { 112 if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) {
95 ohci->hc_control |= OHCI_CTRL_RWE; 113 ohci->hc_control |= OHCI_CTRL_RWE;
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
index af8dc1b92d75..c2c221a332eb 100644
--- a/drivers/usb/host/ohci-jz4740.c
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -82,14 +82,14 @@ static int ohci_jz4740_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
82 u16 wIndex, char *buf, u16 wLength) 82 u16 wIndex, char *buf, u16 wLength)
83{ 83{
84 struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd); 84 struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd);
85 int ret; 85 int ret = 0;
86 86
87 switch (typeReq) { 87 switch (typeReq) {
88 case SetHubFeature: 88 case SetPortFeature:
89 if (wValue == USB_PORT_FEAT_POWER) 89 if (wValue == USB_PORT_FEAT_POWER)
90 ret = ohci_jz4740_set_vbus_power(jz4740_ohci, true); 90 ret = ohci_jz4740_set_vbus_power(jz4740_ohci, true);
91 break; 91 break;
92 case ClearHubFeature: 92 case ClearPortFeature:
93 if (wValue == USB_PORT_FEAT_POWER) 93 if (wValue == USB_PORT_FEAT_POWER)
94 ret = ohci_jz4740_set_vbus_power(jz4740_ohci, false); 94 ret = ohci_jz4740_set_vbus_power(jz4740_ohci, false);
95 break; 95 break;
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 90879e9ccbec..bb1509675727 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -160,6 +160,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
160 ohci_dbg(ohci, "enabled AMD prefetch quirk\n"); 160 ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
161 } 161 }
162 162
163 ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND;
163 return 0; 164 return 0;
164} 165}
165 166
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 9250cada13f0..4550ce05af7f 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -405,6 +405,8 @@ struct ohci_hcd {
405#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ 405#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
406#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ 406#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
407#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ 407#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
408#define OHCI_QUIRK_GLOBAL_SUSPEND 0x800 /* must suspend ports */
409
408 // there are also chip quirks/bugs in init logic 410 // there are also chip quirks/bugs in init logic
409 411
410 struct work_struct nec_work; /* Worker for NEC quirk */ 412 struct work_struct nec_work; /* Worker for NEC quirk */
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 47390e369cd4..35d447780707 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -134,6 +134,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
134 */ 134 */
135 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) 135 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
136 xhci->quirks |= XHCI_SPURIOUS_WAKEUP; 136 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
137
138 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
137 } 139 }
138 if (pdev->vendor == PCI_VENDOR_ID_ETRON && 140 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
139 pdev->device == PCI_DEVICE_ID_ASROCK_P67) { 141 pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
@@ -143,9 +145,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
143 xhci->quirks |= XHCI_TRUST_TX_LENGTH; 145 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
144 } 146 }
145 if (pdev->vendor == PCI_VENDOR_ID_RENESAS && 147 if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
146 pdev->device == 0x0015 && 148 pdev->device == 0x0015)
147 pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
148 pdev->subsystem_device == 0xc0cd)
149 xhci->quirks |= XHCI_RESET_ON_RESUME; 149 xhci->quirks |= XHCI_RESET_ON_RESUME;
150 if (pdev->vendor == PCI_VENDOR_ID_VIA) 150 if (pdev->vendor == PCI_VENDOR_ID_VIA)
151 xhci->quirks |= XHCI_RESET_ON_RESUME; 151 xhci->quirks |= XHCI_RESET_ON_RESUME;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5f926bea5ab1..7a0e3c720c00 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -550,6 +550,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
550 struct xhci_ring *ep_ring; 550 struct xhci_ring *ep_ring;
551 struct xhci_generic_trb *trb; 551 struct xhci_generic_trb *trb;
552 dma_addr_t addr; 552 dma_addr_t addr;
553 u64 hw_dequeue;
553 554
554 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, 555 ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
555 ep_index, stream_id); 556 ep_index, stream_id);
@@ -559,16 +560,6 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
559 stream_id); 560 stream_id);
560 return; 561 return;
561 } 562 }
562 state->new_cycle_state = 0;
563 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
564 "Finding segment containing stopped TRB.");
565 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
566 dev->eps[ep_index].stopped_trb,
567 &state->new_cycle_state);
568 if (!state->new_deq_seg) {
569 WARN_ON(1);
570 return;
571 }
572 563
573 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ 564 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
574 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 565 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -577,46 +568,57 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
577 if (ep->ep_state & EP_HAS_STREAMS) { 568 if (ep->ep_state & EP_HAS_STREAMS) {
578 struct xhci_stream_ctx *ctx = 569 struct xhci_stream_ctx *ctx =
579 &ep->stream_info->stream_ctx_array[stream_id]; 570 &ep->stream_info->stream_ctx_array[stream_id];
580 state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring); 571 hw_dequeue = le64_to_cpu(ctx->stream_ring);
581 } else { 572 } else {
582 struct xhci_ep_ctx *ep_ctx 573 struct xhci_ep_ctx *ep_ctx
583 = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 574 = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
584 state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq); 575 hw_dequeue = le64_to_cpu(ep_ctx->deq);
585 } 576 }
586 577
578 /* Find virtual address and segment of hardware dequeue pointer */
579 state->new_deq_seg = ep_ring->deq_seg;
580 state->new_deq_ptr = ep_ring->dequeue;
581 while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
582 != (dma_addr_t)(hw_dequeue & ~0xf)) {
583 next_trb(xhci, ep_ring, &state->new_deq_seg,
584 &state->new_deq_ptr);
585 if (state->new_deq_ptr == ep_ring->dequeue) {
586 WARN_ON(1);
587 return;
588 }
589 }
590 /*
591 * Find cycle state for last_trb, starting at old cycle state of
592 * hw_dequeue. If there is only one segment ring, find_trb_seg() will
593 * return immediately and cannot toggle the cycle state if this search
594 * wraps around, so add one more toggle manually in that case.
595 */
596 state->new_cycle_state = hw_dequeue & 0x1;
597 if (ep_ring->first_seg == ep_ring->first_seg->next &&
598 cur_td->last_trb < state->new_deq_ptr)
599 state->new_cycle_state ^= 0x1;
600
587 state->new_deq_ptr = cur_td->last_trb; 601 state->new_deq_ptr = cur_td->last_trb;
588 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 602 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
589 "Finding segment containing last TRB in TD."); 603 "Finding segment containing last TRB in TD.");
590 state->new_deq_seg = find_trb_seg(state->new_deq_seg, 604 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
591 state->new_deq_ptr, 605 state->new_deq_ptr, &state->new_cycle_state);
592 &state->new_cycle_state);
593 if (!state->new_deq_seg) { 606 if (!state->new_deq_seg) {
594 WARN_ON(1); 607 WARN_ON(1);
595 return; 608 return;
596 } 609 }
597 610
611 /* Increment to find next TRB after last_trb. Cycle if appropriate. */
598 trb = &state->new_deq_ptr->generic; 612 trb = &state->new_deq_ptr->generic;
599 if (TRB_TYPE_LINK_LE32(trb->field[3]) && 613 if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
600 (trb->field[3] & cpu_to_le32(LINK_TOGGLE))) 614 (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
601 state->new_cycle_state ^= 0x1; 615 state->new_cycle_state ^= 0x1;
602 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); 616 next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
603 617
604 /* 618 /* Don't update the ring cycle state for the producer (us). */
605 * If there is only one segment in a ring, find_trb_seg()'s while loop
606 * will not run, and it will return before it has a chance to see if it
607 * needs to toggle the cycle bit. It can't tell if the stalled transfer
608 * ended just before the link TRB on a one-segment ring, or if the TD
609 * wrapped around the top of the ring, because it doesn't have the TD in
610 * question. Look for the one-segment case where stalled TRB's address
611 * is greater than the new dequeue pointer address.
612 */
613 if (ep_ring->first_seg == ep_ring->first_seg->next &&
614 state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
615 state->new_cycle_state ^= 0x1;
616 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 619 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
617 "Cycle state = 0x%x", state->new_cycle_state); 620 "Cycle state = 0x%x", state->new_cycle_state);
618 621
619 /* Don't update the ring cycle state for the producer (us). */
620 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 622 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
621 "New dequeue segment = %p (virtual)", 623 "New dequeue segment = %p (virtual)",
622 state->new_deq_seg); 624 state->new_deq_seg);
@@ -799,7 +801,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
799 if (list_empty(&ep->cancelled_td_list)) { 801 if (list_empty(&ep->cancelled_td_list)) {
800 xhci_stop_watchdog_timer_in_irq(xhci, ep); 802 xhci_stop_watchdog_timer_in_irq(xhci, ep);
801 ep->stopped_td = NULL; 803 ep->stopped_td = NULL;
802 ep->stopped_trb = NULL;
803 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 804 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
804 return; 805 return;
805 } 806 }
@@ -867,11 +868,9 @@ remove_finished_td:
867 ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 868 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
868 } 869 }
869 870
870 /* Clear stopped_td and stopped_trb if endpoint is not halted */ 871 /* Clear stopped_td if endpoint is not halted */
871 if (!(ep->ep_state & EP_HALTED)) { 872 if (!(ep->ep_state & EP_HALTED))
872 ep->stopped_td = NULL; 873 ep->stopped_td = NULL;
873 ep->stopped_trb = NULL;
874 }
875 874
876 /* 875 /*
877 * Drop the lock and complete the URBs in the cancelled TD list. 876 * Drop the lock and complete the URBs in the cancelled TD list.
@@ -1941,14 +1940,12 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1941 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 1940 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1942 ep->ep_state |= EP_HALTED; 1941 ep->ep_state |= EP_HALTED;
1943 ep->stopped_td = td; 1942 ep->stopped_td = td;
1944 ep->stopped_trb = event_trb;
1945 ep->stopped_stream = stream_id; 1943 ep->stopped_stream = stream_id;
1946 1944
1947 xhci_queue_reset_ep(xhci, slot_id, ep_index); 1945 xhci_queue_reset_ep(xhci, slot_id, ep_index);
1948 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); 1946 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1949 1947
1950 ep->stopped_td = NULL; 1948 ep->stopped_td = NULL;
1951 ep->stopped_trb = NULL;
1952 ep->stopped_stream = 0; 1949 ep->stopped_stream = 0;
1953 1950
1954 xhci_ring_cmd_db(xhci); 1951 xhci_ring_cmd_db(xhci);
@@ -2030,7 +2027,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
2030 * the ring dequeue pointer or take this TD off any lists yet. 2027 * the ring dequeue pointer or take this TD off any lists yet.
2031 */ 2028 */
2032 ep->stopped_td = td; 2029 ep->stopped_td = td;
2033 ep->stopped_trb = event_trb;
2034 return 0; 2030 return 0;
2035 } else { 2031 } else {
2036 if (trb_comp_code == COMP_STALL) { 2032 if (trb_comp_code == COMP_STALL) {
@@ -2042,7 +2038,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
2042 * USB class driver clear the stall later. 2038 * USB class driver clear the stall later.
2043 */ 2039 */
2044 ep->stopped_td = td; 2040 ep->stopped_td = td;
2045 ep->stopped_trb = event_trb;
2046 ep->stopped_stream = ep_ring->stream_id; 2041 ep->stopped_stream = ep_ring->stream_id;
2047 } else if (xhci_requires_manual_halt_cleanup(xhci, 2042 } else if (xhci_requires_manual_halt_cleanup(xhci,
2048 ep_ctx, trb_comp_code)) { 2043 ep_ctx, trb_comp_code)) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8fe4e124ddd4..300836972faa 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -408,16 +408,16 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
408 408
409#else 409#else
410 410
411static int xhci_try_enable_msi(struct usb_hcd *hcd) 411static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
412{ 412{
413 return 0; 413 return 0;
414} 414}
415 415
416static void xhci_cleanup_msix(struct xhci_hcd *xhci) 416static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
417{ 417{
418} 418}
419 419
420static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) 420static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
421{ 421{
422} 422}
423 423
@@ -2954,7 +2954,6 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
2954 xhci_ring_cmd_db(xhci); 2954 xhci_ring_cmd_db(xhci);
2955 } 2955 }
2956 virt_ep->stopped_td = NULL; 2956 virt_ep->stopped_td = NULL;
2957 virt_ep->stopped_trb = NULL;
2958 virt_ep->stopped_stream = 0; 2957 virt_ep->stopped_stream = 0;
2959 spin_unlock_irqrestore(&xhci->lock, flags); 2958 spin_unlock_irqrestore(&xhci->lock, flags);
2960 2959
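The xhci.c hunk turns the !MSI fallback stubs into static inline functions. Plain static functions that end up uncalled in some configurations typically trigger "defined but not used" warnings, whereas static inline stubs are quietly discarded and cost nothing. A reduced example of the idiom; HAVE_FEATURE and feature_enable() are invented for the sketch:

/* Illustrative sketch only, not xhci code. */
#include <stdio.h>

#ifdef HAVE_FEATURE
static int feature_enable(void) { /* real implementation */ return 1; }
#else
static inline int feature_enable(void) { return 0; }   /* harmless no-op stub */
#endif

int main(void)
{
        printf("feature: %d\n", feature_enable());
        return 0;
}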
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index d280e9213d08..4746816aed3e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -865,8 +865,6 @@ struct xhci_virt_ep {
865#define EP_GETTING_NO_STREAMS (1 << 5) 865#define EP_GETTING_NO_STREAMS (1 << 5)
866 /* ---- Related to URB cancellation ---- */ 866 /* ---- Related to URB cancellation ---- */
867 struct list_head cancelled_td_list; 867 struct list_head cancelled_td_list;
868 /* The TRB that was last reported in a stopped endpoint ring */
869 union xhci_trb *stopped_trb;
870 struct xhci_td *stopped_td; 868 struct xhci_td *stopped_td;
871 unsigned int stopped_stream; 869 unsigned int stopped_stream;
872 /* Watchdog timer for stop endpoint command to cancel URBs */ 870 /* Watchdog timer for stop endpoint command to cancel URBs */
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 3372ded5def7..e2fd263585de 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -470,8 +470,9 @@ static int dsps_musb_exit(struct musb *musb)
470 struct dsps_glue *glue = dev_get_drvdata(dev->parent); 470 struct dsps_glue *glue = dev_get_drvdata(dev->parent);
471 471
472 del_timer_sync(&glue->timer); 472 del_timer_sync(&glue->timer);
473
474 usb_phy_shutdown(musb->xceiv); 473 usb_phy_shutdown(musb->xceiv);
474 debugfs_remove_recursive(glue->dbgfs_root);
475
475 return 0; 476 return 0;
476} 477}
477 478
@@ -708,8 +709,6 @@ static int dsps_remove(struct platform_device *pdev)
708 pm_runtime_put(&pdev->dev); 709 pm_runtime_put(&pdev->dev);
709 pm_runtime_disable(&pdev->dev); 710 pm_runtime_disable(&pdev->dev);
710 711
711 debugfs_remove_recursive(glue->dbgfs_root);
712
713 return 0; 712 return 0;
714} 713}
715 714
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index d341c149a2f9..d369bf1f3936 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -316,7 +316,13 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
316{ 316{
317 struct omap2430_glue *glue = container_of(mailbox_work, 317 struct omap2430_glue *glue = container_of(mailbox_work,
318 struct omap2430_glue, omap_musb_mailbox_work); 318 struct omap2430_glue, omap_musb_mailbox_work);
319 struct musb *musb = glue_to_musb(glue);
320 struct device *dev = musb->controller;
321
322 pm_runtime_get_sync(dev);
319 omap_musb_set_mailbox(glue); 323 omap_musb_set_mailbox(glue);
324 pm_runtime_mark_last_busy(dev);
325 pm_runtime_put_autosuspend(dev);
320} 326}
321 327
322static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci) 328static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
@@ -416,6 +422,7 @@ static int omap2430_musb_init(struct musb *musb)
416 omap_musb_set_mailbox(glue); 422 omap_musb_set_mailbox(glue);
417 423
418 phy_init(musb->phy); 424 phy_init(musb->phy);
425 phy_power_on(musb->phy);
419 426
420 pm_runtime_put_noidle(musb->controller); 427 pm_runtime_put_noidle(musb->controller);
421 return 0; 428 return 0;
@@ -478,6 +485,7 @@ static int omap2430_musb_exit(struct musb *musb)
478 del_timer_sync(&musb_idle_timer); 485 del_timer_sync(&musb_idle_timer);
479 486
480 omap2430_low_level_exit(musb); 487 omap2430_low_level_exit(musb);
488 phy_power_off(musb->phy);
481 phy_exit(musb->phy); 489 phy_exit(musb->phy);
482 490
483 return 0; 491 return 0;
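
Editorial aside on the omap2430.c hunks above: the mailbox worker now brackets its register access with a runtime-PM reference, and the glue pairs phy_power_on()/phy_power_off() with the existing phy_init()/phy_exit() calls. A minimal sketch of the get_sync/put_autosuspend pattern, using hypothetical example_* names rather than the driver's own types:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>

struct example_glue {
	struct device		*dev;	/* the controller device */
	struct work_struct	work;
};

static void example_program_mailbox(struct example_glue *glue)
{
	/* register writes would go here */
}

static void example_mailbox_work(struct work_struct *work)
{
	struct example_glue *glue = container_of(work, struct example_glue, work);
	struct device *dev = glue->dev;

	pm_runtime_get_sync(dev);		/* make sure the device is powered */
	example_program_mailbox(glue);		/* registers are now safe to touch */
	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference, suspend later */
}
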
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index d75196ad5f2f..35b6083b7999 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -3,6 +3,7 @@
3#include <linux/err.h> 3#include <linux/err.h>
4#include <linux/of.h> 4#include <linux/of.h>
5#include <linux/io.h> 5#include <linux/io.h>
6#include <linux/delay.h>
6#include "am35x-phy-control.h" 7#include "am35x-phy-control.h"
7 8
8struct am335x_control_usb { 9struct am335x_control_usb {
@@ -86,6 +87,14 @@ static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id, bool on)
86 } 87 }
87 88
88 writel(val, usb_ctrl->phy_reg + reg); 89 writel(val, usb_ctrl->phy_reg + reg);
90
91 /*
92 * Give the PHY ~1ms to complete the power up operation.
93 * Tests have shown unstable behaviour if other USB PHY related
94 * registers are written too shortly after such a transition.
95 */
96 if (on)
97 mdelay(1);
89} 98}
90 99
91static const struct phy_control ctrl_am335x = { 100static const struct phy_control ctrl_am335x = {
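
The comment added in the hunk above explains that the AM335x PHY needs roughly 1 ms to stabilise after its power bits are written before further PHY-related register accesses are reliable. As an aside not taken from the patch: mdelay() busy-waits and is therefore safe even if the caller is atomic, while a sleeping delay is normally preferred when the caller is known to be able to sleep. A minimal sketch with a hypothetical helper name:

#include <linux/delay.h>

/* Hypothetical helper, illustration only: wait out the PHY power-up
 * settle time in a way that matches the calling context. */
static void example_phy_settle(bool can_sleep)
{
	if (can_sleep)
		usleep_range(1000, 2000);	/* ~1-2 ms, lets the CPU sleep */
	else
		mdelay(1);			/* busy-wait, atomic-context safe */
}
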
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index c47e5a6edde2..d03fadd2629f 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -303,17 +303,18 @@ int otg_statemachine(struct otg_fsm *fsm)
303 otg_set_state(fsm, OTG_STATE_A_WAIT_VRISE); 303 otg_set_state(fsm, OTG_STATE_A_WAIT_VRISE);
304 break; 304 break;
305 case OTG_STATE_A_WAIT_VRISE: 305 case OTG_STATE_A_WAIT_VRISE:
306 if (fsm->id || fsm->a_bus_drop || fsm->a_vbus_vld || 306 if (fsm->a_vbus_vld)
307 fsm->a_wait_vrise_tmout) {
308 otg_set_state(fsm, OTG_STATE_A_WAIT_BCON); 307 otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
309 } 308 else if (fsm->id || fsm->a_bus_drop ||
309 fsm->a_wait_vrise_tmout)
310 otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
310 break; 311 break;
311 case OTG_STATE_A_WAIT_BCON: 312 case OTG_STATE_A_WAIT_BCON:
312 if (!fsm->a_vbus_vld) 313 if (!fsm->a_vbus_vld)
313 otg_set_state(fsm, OTG_STATE_A_VBUS_ERR); 314 otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
314 else if (fsm->b_conn) 315 else if (fsm->b_conn)
315 otg_set_state(fsm, OTG_STATE_A_HOST); 316 otg_set_state(fsm, OTG_STATE_A_HOST);
316 else if (fsm->id | fsm->a_bus_drop | fsm->a_wait_bcon_tmout) 317 else if (fsm->id || fsm->a_bus_drop || fsm->a_wait_bcon_tmout)
317 otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL); 318 otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
318 break; 319 break;
319 case OTG_STATE_A_HOST: 320 case OTG_STATE_A_HOST:
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 8afa813d690b..36b6bce33b20 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -132,6 +132,9 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type)
132 if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { 132 if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
133 pr_debug("PHY: unable to find transceiver of type %s\n", 133 pr_debug("PHY: unable to find transceiver of type %s\n",
134 usb_phy_type_string(type)); 134 usb_phy_type_string(type));
135 if (!IS_ERR(phy))
136 phy = ERR_PTR(-ENODEV);
137
135 goto err0; 138 goto err0;
136 } 139 }
137 140
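
The phy.c hunk above makes sure usb_get_phy() never hands back a valid-looking pointer when try_module_get() fails: callers only test the result with IS_ERR(), so the failure is folded into ERR_PTR(-ENODEV). A minimal sketch of that idiom, with a hypothetical example_get_phy() wrapper:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/usb/phy.h>

/* Illustration only: return either an ERR_PTR() or a PHY whose owning
 * module has been pinned, never a live pointer the caller cannot use. */
static struct usb_phy *example_get_phy(struct usb_phy *phy)
{
	if (IS_ERR(phy))
		return phy;			/* propagate the original error */

	if (!try_module_get(phy->dev->driver->owner))
		return ERR_PTR(-ENODEV);	/* owner is going away */

	return phy;
}

A caller then needs only the usual single check: if (IS_ERR(phy)) return PTR_ERR(phy);
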
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 95fa1217afdd..762e4a5f5ae9 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -104,6 +104,7 @@ static const struct usb_device_id id_table[] = {
104 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ 104 { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
105 { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */ 105 { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
106 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ 106 { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
107 { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
107 { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ 108 { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
108 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ 109 { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
109 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ 110 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 44ab12986805..7c6e1dedeb06 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -909,6 +909,39 @@ static const struct usb_device_id id_table_combined[] = {
909 { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) }, 909 { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
910 /* Cressi Devices */ 910 /* Cressi Devices */
911 { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) }, 911 { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
912 /* Brainboxes Devices */
913 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_001_PID) },
914 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_012_PID) },
915 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) },
916 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) },
917 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) },
918 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) },
919 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) },
920 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) },
921 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_4_PID) },
922 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_5_PID) },
923 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) },
924 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) },
925 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) },
926 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) },
927 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) },
928 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) },
929 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) },
930 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) },
931 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) },
932 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) },
933 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) },
934 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) },
935 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_357_PID) },
936 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_1_PID) },
937 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_2_PID) },
938 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_606_3_PID) },
939 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_1_PID) },
940 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_701_2_PID) },
941 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_1_PID) },
942 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
943 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
944 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
912 { } /* Terminating entry */ 945 { } /* Terminating entry */
913}; 946};
914 947
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index e599fbfcde5f..993c93df6874 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1326,3 +1326,40 @@
1326 * Manufacturer: Cressi 1326 * Manufacturer: Cressi
1327 */ 1327 */
1328#define FTDI_CRESSI_PID 0x87d0 1328#define FTDI_CRESSI_PID 0x87d0
1329
1330/*
1331 * Brainboxes devices
1332 */
1333#define BRAINBOXES_VID 0x05d1
1334#define BRAINBOXES_VX_001_PID 0x1001 /* VX-001 ExpressCard 1 Port RS232 */
1335#define BRAINBOXES_VX_012_PID 0x1002 /* VX-012 ExpressCard 2 Port RS232 */
1336#define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */
1337#define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */
1338#define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */
1339#define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */
1340#define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */
1341#define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */
1342#define BRAINBOXES_US_606_3_PID 0x2003 /* US-606 6 Port RS232 Serial Port 4 and 6 */
1343#define BRAINBOXES_US_701_1_PID 0x2011 /* US-701 4xRS232 1Mbaud Port 1 and 2 */
1344#define BRAINBOXES_US_701_2_PID 0x2012 /* US-701 4xRS422 1Mbaud Port 3 and 4 */
1345#define BRAINBOXES_US_279_1_PID 0x2021 /* US-279 8xRS422 1Mbaud Port 1 and 2 */
1346#define BRAINBOXES_US_279_2_PID 0x2022 /* US-279 8xRS422 1Mbaud Port 3 and 4 */
1347#define BRAINBOXES_US_279_3_PID 0x2023 /* US-279 8xRS422 1Mbaud Port 5 and 6 */
1348#define BRAINBOXES_US_279_4_PID 0x2024 /* US-279 8xRS422 1Mbaud Port 7 and 8 */
1349#define BRAINBOXES_US_346_1_PID 0x3011 /* US-346 4xRS422/485 1Mbaud Port 1 and 2 */
1350#define BRAINBOXES_US_346_2_PID 0x3012 /* US-346 4xRS422/485 1Mbaud Port 3 and 4 */
1351#define BRAINBOXES_US_257_PID 0x5001 /* US-257 2xRS232 1Mbaud */
1352#define BRAINBOXES_US_313_PID 0x6001 /* US-313 2xRS422/485 1Mbaud */
1353#define BRAINBOXES_US_357_PID 0x7001 /* US_357 1xRS232/422/485 */
1354#define BRAINBOXES_US_842_1_PID 0x8001 /* US-842 8xRS422/485 1Mbaud Port 1 and 2 */
1355#define BRAINBOXES_US_842_2_PID 0x8002 /* US-842 8xRS422/485 1Mbaud Port 3 and 4 */
1356#define BRAINBOXES_US_842_3_PID 0x8003 /* US-842 8xRS422/485 1Mbaud Port 5 and 6 */
1357#define BRAINBOXES_US_842_4_PID 0x8004 /* US-842 8xRS422/485 1Mbaud Port 7 and 8 */
1358#define BRAINBOXES_US_160_1_PID 0x9001 /* US-160 16xRS232 1Mbaud Port 1 and 2 */
1359#define BRAINBOXES_US_160_2_PID 0x9002 /* US-160 16xRS232 1Mbaud Port 3 and 4 */
1360#define BRAINBOXES_US_160_3_PID 0x9003 /* US-160 16xRS232 1Mbaud Port 5 and 6 */
1361#define BRAINBOXES_US_160_4_PID 0x9004 /* US-160 16xRS232 1Mbaud Port 7 and 8 */
1362#define BRAINBOXES_US_160_5_PID 0x9005 /* US-160 16xRS232 1Mbaud Port 9 and 10 */
1363#define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
1364#define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
1365#define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index a2db5be9c305..df90dae53eb9 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -28,6 +28,7 @@
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/serial.h> 30#include <linux/serial.h>
31#include <linux/swab.h>
31#include <linux/kfifo.h> 32#include <linux/kfifo.h>
32#include <linux/ioctl.h> 33#include <linux/ioctl.h>
33#include <linux/firmware.h> 34#include <linux/firmware.h>
@@ -280,7 +281,7 @@ static int read_download_mem(struct usb_device *dev, int start_address,
280{ 281{
281 int status = 0; 282 int status = 0;
282 __u8 read_length; 283 __u8 read_length;
283 __be16 be_start_address; 284 u16 be_start_address;
284 285
285 dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length); 286 dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);
286 287
@@ -296,10 +297,14 @@ static int read_download_mem(struct usb_device *dev, int start_address,
296 if (read_length > 1) { 297 if (read_length > 1) {
297 dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length); 298 dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length);
298 } 299 }
299 be_start_address = cpu_to_be16(start_address); 300 /*
301 * NOTE: Must use swab as wIndex is sent in little-endian
302 * byte order regardless of host byte order.
303 */
304 be_start_address = swab16((u16)start_address);
300 status = ti_vread_sync(dev, UMPC_MEMORY_READ, 305 status = ti_vread_sync(dev, UMPC_MEMORY_READ,
301 (__u16)address_type, 306 (__u16)address_type,
302 (__force __u16)be_start_address, 307 be_start_address,
303 buffer, read_length); 308 buffer, read_length);
304 309
305 if (status) { 310 if (status) {
@@ -394,7 +399,7 @@ static int write_i2c_mem(struct edgeport_serial *serial,
394 struct device *dev = &serial->serial->dev->dev; 399 struct device *dev = &serial->serial->dev->dev;
395 int status = 0; 400 int status = 0;
396 int write_length; 401 int write_length;
397 __be16 be_start_address; 402 u16 be_start_address;
398 403
399 /* We can only send a maximum of 1 aligned byte page at a time */ 404 /* We can only send a maximum of 1 aligned byte page at a time */
400 405
@@ -409,11 +414,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
409 __func__, start_address, write_length); 414 __func__, start_address, write_length);
410 usb_serial_debug_data(dev, __func__, write_length, buffer); 415 usb_serial_debug_data(dev, __func__, write_length, buffer);
411 416
412 /* Write first page */ 417 /*
413 be_start_address = cpu_to_be16(start_address); 418 * Write first page.
419 *
420 * NOTE: Must use swab as wIndex is sent in little-endian byte order
421 * regardless of host byte order.
422 */
423 be_start_address = swab16((u16)start_address);
414 status = ti_vsend_sync(serial->serial->dev, 424 status = ti_vsend_sync(serial->serial->dev,
415 UMPC_MEMORY_WRITE, (__u16)address_type, 425 UMPC_MEMORY_WRITE, (__u16)address_type,
416 (__force __u16)be_start_address, 426 be_start_address,
417 buffer, write_length); 427 buffer, write_length);
418 if (status) { 428 if (status) {
419 dev_dbg(dev, "%s - ERROR %d\n", __func__, status); 429 dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
@@ -436,11 +446,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
436 __func__, start_address, write_length); 446 __func__, start_address, write_length);
437 usb_serial_debug_data(dev, __func__, write_length, buffer); 447 usb_serial_debug_data(dev, __func__, write_length, buffer);
438 448
439 /* Write next page */ 449 /*
440 be_start_address = cpu_to_be16(start_address); 450 * Write next page.
451 *
452 * NOTE: Must use swab as wIndex is sent in little-endian byte
453 * order regardless of host byte order.
454 */
455 be_start_address = swab16((u16)start_address);
441 status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE, 456 status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
442 (__u16)address_type, 457 (__u16)address_type,
443 (__force __u16)be_start_address, 458 be_start_address,
444 buffer, write_length); 459 buffer, write_length);
445 if (status) { 460 if (status) {
446 dev_err(dev, "%s - ERROR %d\n", __func__, status); 461 dev_err(dev, "%s - ERROR %d\n", __func__, status);
@@ -585,8 +600,8 @@ static int get_descriptor_addr(struct edgeport_serial *serial,
585 if (rom_desc->Type == desc_type) 600 if (rom_desc->Type == desc_type)
586 return start_address; 601 return start_address;
587 602
588 start_address = start_address + sizeof(struct ti_i2c_desc) 603 start_address = start_address + sizeof(struct ti_i2c_desc) +
589 + rom_desc->Size; 604 le16_to_cpu(rom_desc->Size);
590 605
591 } while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type); 606 } while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
592 607
@@ -599,7 +614,7 @@ static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer)
599 __u16 i; 614 __u16 i;
600 __u8 cs = 0; 615 __u8 cs = 0;
601 616
602 for (i = 0; i < rom_desc->Size; i++) 617 for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
603 cs = (__u8)(cs + buffer[i]); 618 cs = (__u8)(cs + buffer[i]);
604 619
605 if (cs != rom_desc->CheckSum) { 620 if (cs != rom_desc->CheckSum) {
@@ -650,7 +665,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
650 break; 665 break;
651 666
652 if ((start_address + sizeof(struct ti_i2c_desc) + 667 if ((start_address + sizeof(struct ti_i2c_desc) +
653 rom_desc->Size) > TI_MAX_I2C_SIZE) { 668 le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
654 status = -ENODEV; 669 status = -ENODEV;
655 dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__); 670 dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
656 break; 671 break;
@@ -665,7 +680,8 @@ static int check_i2c_image(struct edgeport_serial *serial)
665 /* Read the descriptor data */ 680 /* Read the descriptor data */
666 status = read_rom(serial, start_address + 681 status = read_rom(serial, start_address +
667 sizeof(struct ti_i2c_desc), 682 sizeof(struct ti_i2c_desc),
668 rom_desc->Size, buffer); 683 le16_to_cpu(rom_desc->Size),
684 buffer);
669 if (status) 685 if (status)
670 break; 686 break;
671 687
@@ -674,7 +690,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
674 break; 690 break;
675 } 691 }
676 start_address = start_address + sizeof(struct ti_i2c_desc) + 692 start_address = start_address + sizeof(struct ti_i2c_desc) +
677 rom_desc->Size; 693 le16_to_cpu(rom_desc->Size);
678 694
679 } while ((rom_desc->Type != I2C_DESC_TYPE_ION) && 695 } while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
680 (start_address < TI_MAX_I2C_SIZE)); 696 (start_address < TI_MAX_I2C_SIZE));
@@ -712,7 +728,7 @@ static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
712 728
713 /* Read the descriptor data */ 729 /* Read the descriptor data */
714 status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc), 730 status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
715 rom_desc->Size, buffer); 731 le16_to_cpu(rom_desc->Size), buffer);
716 if (status) 732 if (status)
717 goto exit; 733 goto exit;
718 734
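
The NOTE comments added to io_ti.c above capture the endianness reasoning: usb_control_msg()-style helpers take wIndex as a CPU-order u16 and the USB core serialises it little-endian into the SETUP packet, so a device that wants the address big-endian on the wire must be handed swab16(addr); the old cpu_to_be16() only produced that byte order on little-endian hosts. A minimal sketch (hypothetical wrapper; the driver's real request code is UMPC_MEMORY_READ, and the buffer must be kmalloc'd rather than on the stack):

#include <linux/swab.h>
#include <linux/usb.h>

/* Illustration only: read device memory whose start address must appear
 * big-endian in wIndex.  swab16() of the CPU-order value, later serialised
 * little-endian by the USB core, puts the high byte of 'addr' first on the
 * wire on both little- and big-endian hosts. */
static int example_read_mem(struct usb_device *udev, u8 request,
			    u16 addr_type, u16 addr, void *buf, u16 len)
{
	return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       addr_type,	/* wValue: address type */
			       swab16(addr),	/* wIndex: big-endian on the wire */
			       buf, len, 1000);	/* 1000 ms timeout */
}
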
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 68fc9fe65936..f213ee978516 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -234,8 +234,31 @@ static void option_instat_callback(struct urb *urb);
234#define QUALCOMM_VENDOR_ID 0x05C6 234#define QUALCOMM_VENDOR_ID 0x05C6
235 235
236#define CMOTECH_VENDOR_ID 0x16d8 236#define CMOTECH_VENDOR_ID 0x16d8
237#define CMOTECH_PRODUCT_6008 0x6008 237#define CMOTECH_PRODUCT_6001 0x6001
238#define CMOTECH_PRODUCT_6280 0x6280 238#define CMOTECH_PRODUCT_CMU_300 0x6002
239#define CMOTECH_PRODUCT_6003 0x6003
240#define CMOTECH_PRODUCT_6004 0x6004
241#define CMOTECH_PRODUCT_6005 0x6005
242#define CMOTECH_PRODUCT_CGU_628A 0x6006
243#define CMOTECH_PRODUCT_CHE_628S 0x6007
244#define CMOTECH_PRODUCT_CMU_301 0x6008
245#define CMOTECH_PRODUCT_CHU_628 0x6280
246#define CMOTECH_PRODUCT_CHU_628S 0x6281
247#define CMOTECH_PRODUCT_CDU_680 0x6803
248#define CMOTECH_PRODUCT_CDU_685A 0x6804
249#define CMOTECH_PRODUCT_CHU_720S 0x7001
250#define CMOTECH_PRODUCT_7002 0x7002
251#define CMOTECH_PRODUCT_CHU_629K 0x7003
252#define CMOTECH_PRODUCT_7004 0x7004
253#define CMOTECH_PRODUCT_7005 0x7005
254#define CMOTECH_PRODUCT_CGU_629 0x7006
255#define CMOTECH_PRODUCT_CHU_629S 0x700a
256#define CMOTECH_PRODUCT_CHU_720I 0x7211
257#define CMOTECH_PRODUCT_7212 0x7212
258#define CMOTECH_PRODUCT_7213 0x7213
259#define CMOTECH_PRODUCT_7251 0x7251
260#define CMOTECH_PRODUCT_7252 0x7252
261#define CMOTECH_PRODUCT_7253 0x7253
239 262
240#define TELIT_VENDOR_ID 0x1bc7 263#define TELIT_VENDOR_ID 0x1bc7
241#define TELIT_PRODUCT_UC864E 0x1003 264#define TELIT_PRODUCT_UC864E 0x1003
@@ -243,6 +266,7 @@ static void option_instat_callback(struct urb *urb);
243#define TELIT_PRODUCT_CC864_DUAL 0x1005 266#define TELIT_PRODUCT_CC864_DUAL 0x1005
244#define TELIT_PRODUCT_CC864_SINGLE 0x1006 267#define TELIT_PRODUCT_CC864_SINGLE 0x1006
245#define TELIT_PRODUCT_DE910_DUAL 0x1010 268#define TELIT_PRODUCT_DE910_DUAL 0x1010
269#define TELIT_PRODUCT_UE910_V2 0x1012
246#define TELIT_PRODUCT_LE920 0x1200 270#define TELIT_PRODUCT_LE920 0x1200
247 271
248/* ZTE PRODUCTS */ 272/* ZTE PRODUCTS */
@@ -286,6 +310,7 @@ static void option_instat_callback(struct urb *urb);
286#define ALCATEL_PRODUCT_X060S_X200 0x0000 310#define ALCATEL_PRODUCT_X060S_X200 0x0000
287#define ALCATEL_PRODUCT_X220_X500D 0x0017 311#define ALCATEL_PRODUCT_X220_X500D 0x0017
288#define ALCATEL_PRODUCT_L100V 0x011e 312#define ALCATEL_PRODUCT_L100V 0x011e
313#define ALCATEL_PRODUCT_L800MA 0x0203
289 314
290#define PIRELLI_VENDOR_ID 0x1266 315#define PIRELLI_VENDOR_ID 0x1266
291#define PIRELLI_PRODUCT_C100_1 0x1002 316#define PIRELLI_PRODUCT_C100_1 0x1002
@@ -348,6 +373,7 @@ static void option_instat_callback(struct urb *urb);
348#define OLIVETTI_PRODUCT_OLICARD100 0xc000 373#define OLIVETTI_PRODUCT_OLICARD100 0xc000
349#define OLIVETTI_PRODUCT_OLICARD145 0xc003 374#define OLIVETTI_PRODUCT_OLICARD145 0xc003
350#define OLIVETTI_PRODUCT_OLICARD200 0xc005 375#define OLIVETTI_PRODUCT_OLICARD200 0xc005
376#define OLIVETTI_PRODUCT_OLICARD500 0xc00b
351 377
352/* Celot products */ 378/* Celot products */
353#define CELOT_VENDOR_ID 0x211f 379#define CELOT_VENDOR_ID 0x211f
@@ -501,6 +527,10 @@ static const struct option_blacklist_info huawei_cdc12_blacklist = {
501 .reserved = BIT(1) | BIT(2), 527 .reserved = BIT(1) | BIT(2),
502}; 528};
503 529
530static const struct option_blacklist_info net_intf0_blacklist = {
531 .reserved = BIT(0),
532};
533
504static const struct option_blacklist_info net_intf1_blacklist = { 534static const struct option_blacklist_info net_intf1_blacklist = {
505 .reserved = BIT(1), 535 .reserved = BIT(1),
506}; 536};
@@ -1034,13 +1064,53 @@ static const struct usb_device_id option_ids[] = {
1034 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1064 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1035 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1065 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1036 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1066 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1037 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ 1067 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1038 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, 1068 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1069 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
1070 .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1071 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
1072 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
1073 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
1074 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
1075 .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1076 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
1077 .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1078 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
1079 .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1080 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
1081 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
1082 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
1083 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
1084 .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1085 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
1086 .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1087 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
1088 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1089 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
1090 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1091 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
1092 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
1093 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
1094 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
1095 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1096 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
1097 .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1098 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
1099 .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1100 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
1101 .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
1102 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
1103 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1104 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
1105 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1106 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
1107 .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
1039 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, 1108 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
1040 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, 1109 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
1041 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, 1110 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
1042 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, 1111 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
1043 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, 1112 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
1113 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
1044 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), 1114 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
1045 .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, 1115 .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
1046 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ 1116 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
@@ -1498,6 +1568,8 @@ static const struct usb_device_id option_ids[] = {
1498 .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, 1568 .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
1499 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V), 1569 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
1500 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 1570 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1571 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
1572 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1501 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, 1573 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
1502 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, 1574 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
1503 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), 1575 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
@@ -1543,6 +1615,9 @@ static const struct usb_device_id option_ids[] = {
1543 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200), 1615 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
1544 .driver_info = (kernel_ulong_t)&net_intf6_blacklist 1616 .driver_info = (kernel_ulong_t)&net_intf6_blacklist
1545 }, 1617 },
1618 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
1619 .driver_info = (kernel_ulong_t)&net_intf4_blacklist
1620 },
1546 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 1621 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1547 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ 1622 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1548 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, 1623 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 2e22fc22c382..b3d5a35c0d4b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -83,6 +83,9 @@ static const struct usb_device_id id_table[] = {
83 { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, 83 { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
84 { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, 84 { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
85 { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, 85 { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
86 { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
87 { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
88 { USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
86 { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, 89 { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
87 { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) }, 90 { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
88 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, 91 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index c38b8c00c06f..42bc082896ac 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -121,8 +121,11 @@
121#define SUPERIAL_VENDOR_ID 0x5372 121#define SUPERIAL_VENDOR_ID 0x5372
122#define SUPERIAL_PRODUCT_ID 0x2303 122#define SUPERIAL_PRODUCT_ID 0x2303
123 123
124/* Hewlett-Packard LD220-HP POS Pole Display */ 124/* Hewlett-Packard POS Pole Displays */
125#define HP_VENDOR_ID 0x03f0 125#define HP_VENDOR_ID 0x03f0
126#define HP_LD960_PRODUCT_ID 0x0b39
127#define HP_LCM220_PRODUCT_ID 0x3139
128#define HP_LCM960_PRODUCT_ID 0x3239
126#define HP_LD220_PRODUCT_ID 0x3524 129#define HP_LD220_PRODUCT_ID 0x3524
127 130
128/* Cressi Edy (diving computer) PC interface */ 131/* Cressi Edy (diving computer) PC interface */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 968a40201e5f..6c0a542e8ec1 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -136,12 +136,36 @@ static const struct usb_device_id id_table[] = {
136 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 0)}, /* Sierra Wireless MC7710 Device Management */ 136 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 0)}, /* Sierra Wireless MC7710 Device Management */
137 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 2)}, /* Sierra Wireless MC7710 NMEA */ 137 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 2)}, /* Sierra Wireless MC7710 NMEA */
138 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 3)}, /* Sierra Wireless MC7710 Modem */ 138 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 3)}, /* Sierra Wireless MC7710 Modem */
139 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 0)}, /* Sierra Wireless MC73xx Device Management */
140 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 2)}, /* Sierra Wireless MC73xx NMEA */
141 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 3)}, /* Sierra Wireless MC73xx Modem */
139 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */ 142 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
140 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */ 143 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
141 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */ 144 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
145 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */
146 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */
147 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */
148 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */
149 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless MC7305/MC7355 NMEA */
150 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */
142 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */ 151 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
143 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */ 152 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
144 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */ 153 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
154 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
155 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
156 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
157 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 0)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Device Management */
158 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 2)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card NMEA */
159 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Modem */
160 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 0)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Device Management */
161 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 2)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card NMEA */
162 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 3)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Modem */
163 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 0)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
164 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 2)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
165 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 3)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
166 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 0)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
167 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 2)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
168 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 3)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Modem */
145 169
146 { } /* Terminating entry */ 170 { } /* Terminating entry */
147}; 171};
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index a9eb6221a815..6b192e602ce0 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -291,7 +291,6 @@ static const struct usb_device_id id_table[] = {
291 { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ 291 { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
292 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 292 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
293 }, 293 },
294 { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
295 294
296 { } 295 { }
297}; 296};
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 81fc0dfcfdcf..6d40d56378d7 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1347,10 +1347,12 @@ static int usb_serial_register(struct usb_serial_driver *driver)
1347static void usb_serial_deregister(struct usb_serial_driver *device) 1347static void usb_serial_deregister(struct usb_serial_driver *device)
1348{ 1348{
1349 pr_info("USB Serial deregistering driver %s\n", device->description); 1349 pr_info("USB Serial deregistering driver %s\n", device->description);
1350
1350 mutex_lock(&table_lock); 1351 mutex_lock(&table_lock);
1351 list_del(&device->driver_list); 1352 list_del(&device->driver_list);
1352 usb_serial_bus_deregister(device);
1353 mutex_unlock(&table_lock); 1353 mutex_unlock(&table_lock);
1354
1355 usb_serial_bus_deregister(device);
1354} 1356}
1355 1357
1356/** 1358/**
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 640fe0173236..b078440e822f 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -466,6 +466,9 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
466 int err; 466 int err;
467 int i; 467 int i;
468 468
469 if (!port->bulk_in_size || !port->bulk_out_size)
470 return -ENODEV;
471
469 portdata = kzalloc(sizeof(*portdata), GFP_KERNEL); 472 portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
470 if (!portdata) 473 if (!portdata)
471 return -ENOMEM; 474 return -ENOMEM;
@@ -473,9 +476,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
473 init_usb_anchor(&portdata->delayed); 476 init_usb_anchor(&portdata->delayed);
474 477
475 for (i = 0; i < N_IN_URB; i++) { 478 for (i = 0; i < N_IN_URB; i++) {
476 if (!port->bulk_in_size)
477 break;
478
479 buffer = (u8 *)__get_free_page(GFP_KERNEL); 479 buffer = (u8 *)__get_free_page(GFP_KERNEL);
480 if (!buffer) 480 if (!buffer)
481 goto bail_out_error; 481 goto bail_out_error;
@@ -489,9 +489,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
489 } 489 }
490 490
491 for (i = 0; i < N_OUT_URB; i++) { 491 for (i = 0; i < N_OUT_URB; i++) {
492 if (!port->bulk_out_size)
493 break;
494
495 buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL); 492 buffer = kmalloc(OUT_BUFLEN, GFP_KERNEL);
496 if (!buffer) 493 if (!buffer)
497 goto bail_out_error2; 494 goto bail_out_error2;
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 4ef2a80728f7..008d805c3d21 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1851,7 +1851,7 @@ static int usbat_probe(struct usb_interface *intf,
1851 us->transport_name = "Shuttle USBAT"; 1851 us->transport_name = "Shuttle USBAT";
1852 us->transport = usbat_flash_transport; 1852 us->transport = usbat_flash_transport;
1853 us->transport_reset = usb_stor_CB_reset; 1853 us->transport_reset = usb_stor_CB_reset;
1854 us->max_lun = 1; 1854 us->max_lun = 0;
1855 1855
1856 result = usb_stor_probe2(us); 1856 result = usb_stor_probe2(us);
1857 return result; 1857 return result;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index a7ac97cc5949..511b22953167 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -137,7 +137,7 @@ static void uas_do_work(struct work_struct *work)
137 if (!(cmdinfo->state & IS_IN_WORK_LIST)) 137 if (!(cmdinfo->state & IS_IN_WORK_LIST))
138 continue; 138 continue;
139 139
140 err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_NOIO); 140 err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
141 if (!err) 141 if (!err)
142 cmdinfo->state &= ~IS_IN_WORK_LIST; 142 cmdinfo->state &= ~IS_IN_WORK_LIST;
143 else 143 else
@@ -803,7 +803,7 @@ static int uas_eh_task_mgmt(struct scsi_cmnd *cmnd,
803 803
804 devinfo->running_task = 1; 804 devinfo->running_task = 1;
805 memset(&devinfo->response, 0, sizeof(devinfo->response)); 805 memset(&devinfo->response, 0, sizeof(devinfo->response));
806 sense_urb = uas_submit_sense_urb(cmnd, GFP_NOIO, 806 sense_urb = uas_submit_sense_urb(cmnd, GFP_ATOMIC,
807 devinfo->use_streams ? tag : 0); 807 devinfo->use_streams ? tag : 0);
808 if (!sense_urb) { 808 if (!sense_urb) {
809 shost_printk(KERN_INFO, shost, 809 shost_printk(KERN_INFO, shost,
@@ -813,7 +813,7 @@ static int uas_eh_task_mgmt(struct scsi_cmnd *cmnd,
813 spin_unlock_irqrestore(&devinfo->lock, flags); 813 spin_unlock_irqrestore(&devinfo->lock, flags);
814 return FAILED; 814 return FAILED;
815 } 815 }
816 if (uas_submit_task_urb(cmnd, GFP_NOIO, function, tag)) { 816 if (uas_submit_task_urb(cmnd, GFP_ATOMIC, function, tag)) {
817 shost_printk(KERN_INFO, shost, 817 shost_printk(KERN_INFO, shost,
818 "%s: %s: submit task mgmt urb failed\n", 818 "%s: %s: submit task mgmt urb failed\n",
819 __func__, fname); 819 __func__, fname);
@@ -1030,7 +1030,7 @@ static int uas_configure_endpoints(struct uas_dev_info *devinfo)
1030 devinfo->use_streams = 0; 1030 devinfo->use_streams = 0;
1031 } else { 1031 } else {
1032 devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1, 1032 devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1,
1033 3, 256, GFP_KERNEL); 1033 3, 256, GFP_NOIO);
1034 if (devinfo->qdepth < 0) 1034 if (devinfo->qdepth < 0)
1035 return devinfo->qdepth; 1035 return devinfo->qdepth;
1036 devinfo->use_streams = 1; 1036 devinfo->use_streams = 1;
@@ -1047,7 +1047,7 @@ static void uas_free_streams(struct uas_dev_info *devinfo)
1047 eps[0] = usb_pipe_endpoint(udev, devinfo->status_pipe); 1047 eps[0] = usb_pipe_endpoint(udev, devinfo->status_pipe);
1048 eps[1] = usb_pipe_endpoint(udev, devinfo->data_in_pipe); 1048 eps[1] = usb_pipe_endpoint(udev, devinfo->data_in_pipe);
1049 eps[2] = usb_pipe_endpoint(udev, devinfo->data_out_pipe); 1049 eps[2] = usb_pipe_endpoint(udev, devinfo->data_out_pipe);
1050 usb_free_streams(devinfo->intf, eps, 3, GFP_KERNEL); 1050 usb_free_streams(devinfo->intf, eps, 3, GFP_NOIO);
1051} 1051}
1052 1052
1053static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) 1053static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -1096,16 +1096,17 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
1096 if (result) 1096 if (result)
1097 goto free_streams; 1097 goto free_streams;
1098 1098
1099 usb_set_intfdata(intf, shost);
1099 result = scsi_add_host(shost, &intf->dev); 1100 result = scsi_add_host(shost, &intf->dev);
1100 if (result) 1101 if (result)
1101 goto free_streams; 1102 goto free_streams;
1102 1103
1103 scsi_scan_host(shost); 1104 scsi_scan_host(shost);
1104 usb_set_intfdata(intf, shost);
1105 return result; 1105 return result;
1106 1106
1107free_streams: 1107free_streams:
1108 uas_free_streams(devinfo); 1108 uas_free_streams(devinfo);
1109 usb_set_intfdata(intf, NULL);
1109set_alt0: 1110set_alt0:
1110 usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0); 1111 usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
1111 if (shost) 1112 if (shost)
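
The uas.c hunks above adjust allocation masks to their calling context: submissions made from the work/error-handling paths, which may run under the driver's spinlock, move to GFP_ATOMIC, while stream allocation during device setup moves from GFP_KERNEL to GFP_NOIO so memory reclaim cannot recurse into I/O while the storage device is still being configured. A minimal sketch of the distinction, with hypothetical names:

#include <linux/gfp.h>
#include <linux/slab.h>

struct example_cmd { int tag; };

/* Completion handler or spinlock held: sleeping is not allowed. */
static struct example_cmd *example_alloc_atomic(void)
{
	return kzalloc(sizeof(struct example_cmd), GFP_ATOMIC);
}

/* Probing or resetting a storage device: sleeping is fine, but reclaim
 * must not issue I/O (possibly to this very device), so GFP_NOIO is used
 * instead of GFP_KERNEL. */
static struct example_cmd *example_alloc_during_setup(void)
{
	return kzalloc(sizeof(struct example_cmd), GFP_NOIO);
}
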
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index f4a82291894a..174a447868cd 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -234,6 +234,20 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
234 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 234 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
235 US_FL_MAX_SECTORS_64 ), 235 US_FL_MAX_SECTORS_64 ),
236 236
237/* Reported by Daniele Forsi <dforsi@gmail.com> */
238UNUSUAL_DEV( 0x0421, 0x04b9, 0x0350, 0x0350,
239 "Nokia",
240 "5300",
241 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
242 US_FL_MAX_SECTORS_64 ),
243
244/* Patch submitted by Victor A. Santos <victoraur.santos@gmail.com> */
245UNUSUAL_DEV( 0x0421, 0x05af, 0x0742, 0x0742,
246 "Nokia",
247 "305",
248 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
249 US_FL_MAX_SECTORS_64),
250
237/* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */ 251/* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
238UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110, 252UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
239 "Nokia", 253 "Nokia",
diff --git a/drivers/usb/usb-common.c b/drivers/usb/usb-common.c
index d771870a819e..6dfd30a863c7 100644
--- a/drivers/usb/usb-common.c
+++ b/drivers/usb/usb-common.c
@@ -69,7 +69,7 @@ const char *usb_state_string(enum usb_device_state state)
69 [USB_STATE_RECONNECTING] = "reconnecting", 69 [USB_STATE_RECONNECTING] = "reconnecting",
70 [USB_STATE_UNAUTHENTICATED] = "unauthenticated", 70 [USB_STATE_UNAUTHENTICATED] = "unauthenticated",
71 [USB_STATE_DEFAULT] = "default", 71 [USB_STATE_DEFAULT] = "default",
72 [USB_STATE_ADDRESS] = "addresssed", 72 [USB_STATE_ADDRESS] = "addressed",
73 [USB_STATE_CONFIGURED] = "configured", 73 [USB_STATE_CONFIGURED] = "configured",
74 [USB_STATE_SUSPENDED] = "suspended", 74 [USB_STATE_SUSPENDED] = "suspended",
75 }; 75 };
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index 44741267c917..3f485df96226 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -301,7 +301,7 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
301 301
302 if (chid) 302 if (chid)
303 result = uwb_radio_start(&wusbhc->pal); 303 result = uwb_radio_start(&wusbhc->pal);
304 else 304 else if (wusbhc->uwb_rc)
305 uwb_radio_stop(&wusbhc->pal); 305 uwb_radio_stop(&wusbhc->pal);
306 306
307 return result; 307 return result;
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index c8e2a47d62a7..3e2e4ed20157 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -2390,10 +2390,10 @@ error_complete:
2390 done) { 2390 done) {
2391 2391
2392 dev_info(dev, "Control EP stall. Queue delayed work.\n"); 2392 dev_info(dev, "Control EP stall. Queue delayed work.\n");
2393 spin_lock_irq(&wa->xfer_list_lock); 2393 spin_lock(&wa->xfer_list_lock);
2394 /* move xfer from xfer_list to xfer_errored_list. */ 2394 /* move xfer from xfer_list to xfer_errored_list. */
2395 list_move_tail(&xfer->list_node, &wa->xfer_errored_list); 2395 list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
2396 spin_unlock_irq(&wa->xfer_list_lock); 2396 spin_unlock(&wa->xfer_list_lock);
2397 spin_unlock_irqrestore(&xfer->lock, flags); 2397 spin_unlock_irqrestore(&xfer->lock, flags);
2398 queue_work(wusbd, &wa->xfer_error_work); 2398 queue_work(wusbd, &wa->xfer_error_work);
2399 } else { 2399 } else {
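
In the wa-xfer.c hunk above the list lock is nested inside xfer->lock, which is already held with spin_lock_irqsave() and therefore with interrupts disabled; the old spin_unlock_irq() on the inner lock would have unconditionally re-enabled interrupts while the outer lock was still held. Plain spin_lock()/spin_unlock() leaves the IRQ state alone. A minimal sketch with hypothetical types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_xfer {
	spinlock_t		lock;
	struct list_head	node;
};

struct example_wa {
	spinlock_t		list_lock;
	struct list_head	errored;
};

static void example_move_to_errored(struct example_xfer *xfer,
				    struct example_wa *wa)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->lock, flags);	/* IRQs disabled from here */

	spin_lock(&wa->list_lock);		/* inner lock: plain variant */
	list_move_tail(&xfer->node, &wa->errored);
	spin_unlock(&wa->list_lock);		/* IRQ state left untouched */

	spin_unlock_irqrestore(&xfer->lock, flags);
}
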
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
index 16ada8341c46..468c89fb6a16 100644
--- a/drivers/uwb/drp.c
+++ b/drivers/uwb/drp.c
@@ -59,6 +59,7 @@ static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
59 struct uwb_rceb *reply, ssize_t reply_size) 59 struct uwb_rceb *reply, ssize_t reply_size)
60{ 60{
61 struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply; 61 struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
62 unsigned long flags;
62 63
63 if (r != NULL) { 64 if (r != NULL) {
64 if (r->bResultCode != UWB_RC_RES_SUCCESS) 65 if (r->bResultCode != UWB_RC_RES_SUCCESS)
@@ -67,14 +68,14 @@ static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
67 } else 68 } else
68 dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n"); 69 dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
69 70
70 spin_lock_irq(&rc->rsvs_lock); 71 spin_lock_irqsave(&rc->rsvs_lock, flags);
71 if (rc->set_drp_ie_pending > 1) { 72 if (rc->set_drp_ie_pending > 1) {
72 rc->set_drp_ie_pending = 0; 73 rc->set_drp_ie_pending = 0;
73 uwb_rsv_queue_update(rc); 74 uwb_rsv_queue_update(rc);
74 } else { 75 } else {
75 rc->set_drp_ie_pending = 0; 76 rc->set_drp_ie_pending = 0;
76 } 77 }
77 spin_unlock_irq(&rc->rsvs_lock); 78 spin_unlock_irqrestore(&rc->rsvs_lock, flags);
78} 79}
79 80
80/** 81/**
@@ -599,8 +600,11 @@ static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_i
599 600
600 /* alloc and initialize new uwb_cnflt_alien */ 601 /* alloc and initialize new uwb_cnflt_alien */
601 cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL); 602 cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
602 if (!cnflt) 603 if (!cnflt) {
603 dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n"); 604 dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
605 return;
606 }
607
604 INIT_LIST_HEAD(&cnflt->rc_node); 608 INIT_LIST_HEAD(&cnflt->rc_node);
605 init_timer(&cnflt->timer); 609 init_timer(&cnflt->timer);
606 cnflt->timer.function = uwb_cnflt_timer; 610 cnflt->timer.function = uwb_cnflt_timer;
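
The drp.c hunks above make two independent fixes: the SET-DRP-IE completion callback switches from spin_lock_irq() to spin_lock_irqsave() because it cannot assume interrupts were enabled on entry (spin_unlock_irq() would re-enable them unconditionally), and the kzalloc() failure path now returns early instead of continuing with a NULL pointer after printing the error. A minimal sketch of the callback pattern, hypothetical names:

#include <linux/spinlock.h>

struct example_rc {
	spinlock_t	lock;
	int		pending;
};

/* Illustration only: a callback that may be invoked with interrupts
 * already disabled preserves the caller's IRQ state with _irqsave. */
static void example_cmd_done(struct example_rc *rc)
{
	unsigned long flags;

	spin_lock_irqsave(&rc->lock, flags);
	rc->pending = 0;
	spin_unlock_irqrestore(&rc->lock, flags);
}
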
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6c793bc683d9..c7b4f0f927b1 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -21,7 +21,15 @@ source "drivers/gpu/vga/Kconfig"
21 21
22source "drivers/gpu/host1x/Kconfig" 22source "drivers/gpu/host1x/Kconfig"
23 23
24menu "Direct Rendering Manager"
24source "drivers/gpu/drm/Kconfig" 25source "drivers/gpu/drm/Kconfig"
26endmenu
27
28menu "Frame buffer Devices"
29source "drivers/video/fbdev/Kconfig"
30endmenu
31
32source "drivers/video/backlight/Kconfig"
25 33
26config VGASTATE 34config VGASTATE
27 tristate 35 tristate
@@ -33,2482 +41,14 @@ config VIDEOMODE_HELPERS
33config HDMI 41config HDMI
34 bool 42 bool
35 43
36menuconfig FB
37 tristate "Support for frame buffer devices"
38 ---help---
39 The frame buffer device provides an abstraction for the graphics
40 hardware. It represents the frame buffer of some video hardware and
41 allows application software to access the graphics hardware through
42 a well-defined interface, so the software doesn't need to know
43 anything about the low-level (hardware register) stuff.
44
45 Frame buffer devices work identically across the different
46 architectures supported by Linux and make the implementation of
47 application programs easier and more portable; at this point, an X
48 server exists which uses the frame buffer device exclusively.
49 On several non-X86 architectures, the frame buffer device is the
50 only way to use the graphics hardware.
51
52 The device is accessed through special device nodes, usually located
53 in the /dev directory, i.e. /dev/fb*.
54
55 You need an utility program called fbset to make full use of frame
56 buffer devices. Please read <file:Documentation/fb/framebuffer.txt>
57 and the Framebuffer-HOWTO at
58 <http://www.munted.org.uk/programming/Framebuffer-HOWTO-1.3.html> for more
59 information.
60
61 Say Y here and to the driver for your graphics board below if you
62 are compiling a kernel for a non-x86 architecture.
63
64 If you are compiling for the x86 architecture, you can say Y if you
65 want to play with it, but it is not essential. Please note that
66 running graphical applications that directly touch the hardware
67 (e.g. an accelerated X server) and that are not frame buffer
68 device-aware may cause unexpected results. If unsure, say N.
69
70config FIRMWARE_EDID
71 bool "Enable firmware EDID"
72 depends on FB
73 default n
74 ---help---
75 This enables access to the EDID transferred from the firmware.
76 On the i386, this is from the Video BIOS. Enable this if DDC/I2C
77 transfers do not work for your driver and if you are using
78 nvidiafb, i810fb or savagefb.
79
80 In general, choosing Y for this option is safe. If you
81 experience extremely long delays while booting before you get
82 something on your display, try setting this to N. Matrox cards in
83 combination with certain motherboards and monitors are known to
84 suffer from this problem.
85
86config FB_DDC
87 tristate
88 depends on FB
89 select I2C_ALGOBIT
90 select I2C
91 default n
92
93config FB_BOOT_VESA_SUPPORT
94 bool
95 depends on FB
96 default n
97 ---help---
98 If true, at least one selected framebuffer driver can take advantage
99 of VESA video modes set at an early boot stage via the vga= parameter.
100
101config FB_CFB_FILLRECT
102 tristate
103 depends on FB
104 default n
105 ---help---
106 Include the cfb_fillrect function for generic software rectangle
107 filling. This is used by drivers that don't provide their own
108 (accelerated) version.
109
110config FB_CFB_COPYAREA
111 tristate
112 depends on FB
113 default n
114 ---help---
115 Include the cfb_copyarea function for generic software area copying.
116 This is used by drivers that don't provide their own (accelerated)
117 version.
118
119config FB_CFB_IMAGEBLIT
120 tristate
121 depends on FB
122 default n
123 ---help---
124 Include the cfb_imageblit function for generic software image
125 blitting. This is used by drivers that don't provide their own
126 (accelerated) version.
127
128config FB_CFB_REV_PIXELS_IN_BYTE
129 bool
130 depends on FB
131 default n
132 ---help---
133 Allow generic frame-buffer functions to work on displays with 1, 2
134 and 4 bits per pixel depths which has opposite order of pixels in
135 byte order to bytes in long order.
136
137config FB_SYS_FILLRECT
138 tristate
139 depends on FB
140 default n
141 ---help---
142 Include the sys_fillrect function for generic software rectangle
143 filling. This is used by drivers that don't provide their own
144 (accelerated) version and the framebuffer is in system RAM.
145
146config FB_SYS_COPYAREA
147 tristate
148 depends on FB
149 default n
150 ---help---
151 Include the sys_copyarea function for generic software area copying.
152 This is used by drivers that don't provide their own (accelerated)
153 version and the framebuffer is in system RAM.
154
155config FB_SYS_IMAGEBLIT
156 tristate
157 depends on FB
158 default n
159 ---help---
160 Include the sys_imageblit function for generic software image
161 blitting. This is used by drivers that don't provide their own
162 (accelerated) version and the framebuffer is in system RAM.
163
164menuconfig FB_FOREIGN_ENDIAN
165 bool "Framebuffer foreign endianness support"
166 depends on FB
167 ---help---
168 This menu will let you enable support for the framebuffers with
169 non-native endianness (e.g. Little-Endian framebuffer on a
170 Big-Endian machine). Most probably you don't have such hardware,
171 so it's safe to say "n" here.
172
173choice
174 prompt "Choice endianness support"
175 depends on FB_FOREIGN_ENDIAN
176
177config FB_BOTH_ENDIAN
178 bool "Support for Big- and Little-Endian framebuffers"
179
180config FB_BIG_ENDIAN
181 bool "Support for Big-Endian framebuffers only"
182
183config FB_LITTLE_ENDIAN
184 bool "Support for Little-Endian framebuffers only"
185
186endchoice
187
188config FB_SYS_FOPS
189 tristate
190 depends on FB
191 default n
192
193config FB_DEFERRED_IO
194 bool
195 depends on FB
196
197config FB_HECUBA
198 tristate
199 depends on FB
200 depends on FB_DEFERRED_IO
201
202config FB_SVGALIB
203 tristate
204 depends on FB
205 default n
206 ---help---
207 Common utility functions useful to fbdev drivers of VGA-based
208 cards.
209
210config FB_MACMODES
211 tristate
212 depends on FB
213 default n
214
215config FB_BACKLIGHT
216 bool
217 depends on FB
218 select BACKLIGHT_LCD_SUPPORT
219 select BACKLIGHT_CLASS_DEVICE
220 default n
221
222config FB_MODE_HELPERS
223 bool "Enable Video Mode Handling Helpers"
224 depends on FB
225 default n
226 ---help---
227 This enables functions for handling video modes using the
228 Generalized Timing Formula and the EDID parser. A few drivers rely
229 on this feature such as the radeonfb, rivafb, and the i810fb. If
230 your driver does not take advantage of this feature, choosing Y will
231 just increase the kernel size by about 5K.
232
233config FB_TILEBLITTING
234 bool "Enable Tile Blitting Support"
235 depends on FB
236 default n
237 ---help---
238 This enables tile blitting. Tile blitting is a drawing technique
239 where the screen is divided into rectangular sections (tiles), whereas
240 the standard blitting divides the screen into pixels. Because the
241 default drawing element is a tile, drawing functions will be passed
242 parameters in terms of number of tiles instead of number of pixels.
243 For example, to draw a single character, instead of using bitmaps,
244 an index to an array of bitmaps will be used. To clear or move a
245 rectangular section of a screen, the rectangle will be described in
246 terms of the number of tiles along the x- and y-axes.
247
248 This is particularly important to one driver, matroxfb. If
249 unsure, say N.
250
251comment "Frame buffer hardware drivers"
252 depends on FB
253
254config FB_GRVGA
255 tristate "Aeroflex Gaisler framebuffer support"
256 depends on FB && SPARC
257 select FB_CFB_FILLRECT
258 select FB_CFB_COPYAREA
259 select FB_CFB_IMAGEBLIT
260 ---help---
261 This enables support for the SVGACTRL framebuffer in the GRLIB IP library from Aeroflex Gaisler.
262
263config FB_CIRRUS
264 tristate "Cirrus Logic support"
265 depends on FB && (ZORRO || PCI)
266 select FB_CFB_FILLRECT
267 select FB_CFB_COPYAREA
268 select FB_CFB_IMAGEBLIT
269 ---help---
270 This enables support for Cirrus Logic GD542x/543x based boards on
271 Amiga: SD64, Piccolo, Picasso II/II+, Picasso IV, or EGS Spectrum.
272
273 If you have a PCI-based system, this enables support for these
274 chips: GD-543x, GD-544x, GD-5480.
275
276 Please read the file <file:Documentation/fb/cirrusfb.txt>.
277
278 Say N unless you have such a graphics board or plan to get one
279 before you next recompile the kernel.
280
281config FB_PM2
282 tristate "Permedia2 support"
283 depends on FB && ((AMIGA && BROKEN) || PCI)
284 select FB_CFB_FILLRECT
285 select FB_CFB_COPYAREA
286 select FB_CFB_IMAGEBLIT
287 help
288 This is the frame buffer device driver for cards based on
289 the 3D Labs Permedia, Permedia 2 and Permedia 2V chips.
290 The driver was tested on the following cards:
291 Diamond FireGL 1000 PRO AGP
292 ELSA Gloria Synergy PCI
293 Appian Jeronimo PRO (both heads) PCI
294 3DLabs Oxygen ACX aka EONtronics Picasso P2 PCI
295 Techsource Raptor GFX-8P (aka Sun PGX-32) on SPARC
296 ASK Graphic Blaster Exxtreme AGP
297
298 To compile this driver as a module, choose M here: the
299 module will be called pm2fb.
300
301config FB_PM2_FIFO_DISCONNECT
302 bool "enable FIFO disconnect feature"
303 depends on FB_PM2 && PCI
304 help
305 Support the Permedia2 FIFO disconnect feature.
306
307config FB_ARMCLCD
308 tristate "ARM PrimeCell PL110 support"
309 depends on ARM || ARM64 || COMPILE_TEST
310 depends on FB && ARM_AMBA
311 select FB_CFB_FILLRECT
312 select FB_CFB_COPYAREA
313 select FB_CFB_IMAGEBLIT
314 help
315 This framebuffer device driver is for the ARM PrimeCell PL110
316 Colour LCD controller. ARM PrimeCells provide the building
317 blocks for System on a Chip devices.
318
319 If you want to compile this as a module (=code which can be
320 inserted into and removed from the running kernel), say M
321 here and read <file:Documentation/kbuild/modules.txt>. The module
322 will be called amba-clcd.
323
324config FB_ACORN
325 bool "Acorn VIDC support"
326 depends on (FB = y) && ARM && ARCH_ACORN
327 select FB_CFB_FILLRECT
328 select FB_CFB_COPYAREA
329 select FB_CFB_IMAGEBLIT
330 help
331 This is the frame buffer device driver for the Acorn VIDC graphics
332 hardware found in Acorn RISC PCs and other ARM-based machines. If
333 unsure, say N.
334
335config FB_CLPS711X
336 bool "CLPS711X LCD support"
337 depends on (FB = y) && ARM && ARCH_CLPS711X
338 select FB_CFB_FILLRECT
339 select FB_CFB_COPYAREA
340 select FB_CFB_IMAGEBLIT
341 help
342 Say Y to enable the Framebuffer driver for the CLPS7111 and
343 EP7212 processors.
344
345config FB_SA1100
346 bool "SA-1100 LCD support"
347 depends on (FB = y) && ARM && ARCH_SA1100
348 select FB_CFB_FILLRECT
349 select FB_CFB_COPYAREA
350 select FB_CFB_IMAGEBLIT
351 help
352 This is a framebuffer device for the SA-1100 LCD Controller.
353 See <http://www.linux-fbdev.org/> for information on framebuffer
354 devices.
355
356 If you plan to use the LCD display with your SA-1100 system, say
357 Y here.
358
359config FB_IMX
360 tristate "Freescale i.MX1/21/25/27 LCD support"
361 depends on FB && ARCH_MXC
362 select FB_CFB_FILLRECT
363 select FB_CFB_COPYAREA
364 select FB_CFB_IMAGEBLIT
365 select FB_MODE_HELPERS
366 select VIDEOMODE_HELPERS
367
368config FB_CYBER2000
369 tristate "CyberPro 2000/2010/5000 support"
370 depends on FB && PCI && (BROKEN || !SPARC64)
371 select FB_CFB_FILLRECT
372 select FB_CFB_COPYAREA
373 select FB_CFB_IMAGEBLIT
374 help
375 This enables support for the Integraphics CyberPro 20x0 and 5000
376 VGA chips used in the Rebel.com Netwinder and other machines.
377 Say Y if you have a NetWinder or a graphics card containing this
378 device, otherwise say N.
379
380config FB_CYBER2000_DDC
381 bool "DDC for CyberPro support"
382 depends on FB_CYBER2000
383 select FB_DDC
384 default y
385 help
386 Say Y here if you want DDC support for your CyberPro graphics
387 card. This is only I2C bus support; the driver does not use EDID.
388
389config FB_CYBER2000_I2C
390 bool "CyberPro 2000/2010/5000 I2C support"
391 depends on FB_CYBER2000 && I2C && ARCH_NETWINDER
392 select I2C_ALGOBIT
393 help
394 Enable support for the I2C video decoder interface on the
395 Integraphics CyberPro 20x0 and 5000 VGA chips. This is used
396 on the Netwinder machines for the SAA7111 video capture.
397
398config FB_APOLLO
399 bool
400 depends on (FB = y) && APOLLO
401 default y
402 select FB_CFB_FILLRECT
403 select FB_CFB_IMAGEBLIT
404
405config FB_Q40
406 bool
407 depends on (FB = y) && Q40
408 default y
409 select FB_CFB_FILLRECT
410 select FB_CFB_COPYAREA
411 select FB_CFB_IMAGEBLIT
412
413config FB_AMIGA
414 tristate "Amiga native chipset support"
415 depends on FB && AMIGA
416 help
417 This is the frame buffer device driver for the builtin graphics
418 chipset found in Amigas.
419
420 To compile this driver as a module, choose M here: the
421 module will be called amifb.
422
423config FB_AMIGA_OCS
424 bool "Amiga OCS chipset support"
425 depends on FB_AMIGA
426 help
427 This enables support for the original Agnus and Denise video chips,
428 found in the Amiga 1000 and most A500's and A2000's. If you intend
429 to run Linux on any of these systems, say Y; otherwise say N.
430
431config FB_AMIGA_ECS
432 bool "Amiga ECS chipset support"
433 depends on FB_AMIGA
434 help
435 This enables support for the Enhanced Chip Set, found in later
436 A500's, later A2000's, the A600, the A3000, the A3000T and CDTV. If
437 you intend to run Linux on any of these systems, say Y; otherwise
438 say N.
439
440config FB_AMIGA_AGA
441 bool "Amiga AGA chipset support"
442 depends on FB_AMIGA
443 help
444 This enables support for the Advanced Graphics Architecture (also
445 known as the AGA or AA) Chip Set, found in the A1200, A4000, A4000T
446 and CD32. If you intend to run Linux on any of these systems, say Y;
447 otherwise say N.
448
449config FB_FM2
450 bool "Amiga FrameMaster II/Rainbow II support"
451 depends on (FB = y) && ZORRO
452 select FB_CFB_FILLRECT
453 select FB_CFB_COPYAREA
454 select FB_CFB_IMAGEBLIT
455 help
456 This is the frame buffer device driver for the Amiga FrameMaster
457 card from BSC (exhibited 1992 but not shipped as a CBM product).
458
459config FB_ARC
460 tristate "Arc Monochrome LCD board support"
461 depends on FB && X86
462 select FB_SYS_FILLRECT
463 select FB_SYS_COPYAREA
464 select FB_SYS_IMAGEBLIT
465 select FB_SYS_FOPS
466 help
467 This enables support for the Arc Monochrome LCD board. The board
468 is based on the KS-108 LCD controller and is typically a matrix
469 of 2*n chips. This driver was tested with a 128x64 panel. The
470 driver supports these panels on x86 SBCs through a 16-bit GPIO
471 interface (8-bit data, 8-bit control). If you anticipate using
472 this driver, say Y or M; otherwise say N. You must specify the
473 GPIO I/O address to be used for setting control and data.
474
475config FB_ATARI
476 bool "Atari native chipset support"
477 depends on (FB = y) && ATARI
478 select FB_CFB_FILLRECT
479 select FB_CFB_COPYAREA
480 select FB_CFB_IMAGEBLIT
481 help
482 This is the frame buffer device driver for the builtin graphics
483 chipset found in Ataris.
484
485config FB_OF
486 bool "Open Firmware frame buffer device support"
487 depends on (FB = y) && (PPC64 || PPC_OF) && (!PPC_PSERIES || PCI)
488 select FB_CFB_FILLRECT
489 select FB_CFB_COPYAREA
490 select FB_CFB_IMAGEBLIT
491 select FB_MACMODES
492 help
493 Say Y if you want Open Firmware support for your graphics
494 board.
495
496config FB_CONTROL
497 bool "Apple \"control\" display support"
498 depends on (FB = y) && PPC_PMAC && PPC32
499 select FB_CFB_FILLRECT
500 select FB_CFB_COPYAREA
501 select FB_CFB_IMAGEBLIT
502 select FB_MACMODES
503 help
504 This driver supports a frame buffer for the graphics adapter in the
505 Power Macintosh 7300 and others.
506
507config FB_PLATINUM
508 bool "Apple \"platinum\" display support"
509 depends on (FB = y) && PPC_PMAC && PPC32
510 select FB_CFB_FILLRECT
511 select FB_CFB_COPYAREA
512 select FB_CFB_IMAGEBLIT
513 select FB_MACMODES
514 help
515 This driver supports a frame buffer for the "platinum" graphics
516 adapter in some Power Macintoshes.
517
518config FB_VALKYRIE
519 bool "Apple \"valkyrie\" display support"
520 depends on (FB = y) && (MAC || (PPC_PMAC && PPC32))
521 select FB_CFB_FILLRECT
522 select FB_CFB_COPYAREA
523 select FB_CFB_IMAGEBLIT
524 select FB_MACMODES
525 help
526 This driver supports a frame buffer for the "valkyrie" graphics
527 adapter in some Power Macintoshes.
528
529config FB_CT65550
530 bool "Chips 65550 display support"
531 depends on (FB = y) && PPC32 && PCI
532 select FB_CFB_FILLRECT
533 select FB_CFB_COPYAREA
534 select FB_CFB_IMAGEBLIT
535 help
536 This is the frame buffer device driver for the Chips & Technologies
537 65550 graphics chip in PowerBooks.
538
539config FB_ASILIANT
540 bool "Asiliant (Chips) 69000 display support"
541 depends on (FB = y) && PCI
542 select FB_CFB_FILLRECT
543 select FB_CFB_COPYAREA
544 select FB_CFB_IMAGEBLIT
545 help
546 This is the frame buffer device driver for the Asiliant 69030 chipset.
547
548config FB_IMSTT
549 bool "IMS Twin Turbo display support"
550 depends on (FB = y) && PCI
551 select FB_CFB_IMAGEBLIT
552 select FB_MACMODES if PPC
553 help
554 The IMS Twin Turbo is a PCI-based frame buffer card bundled with
555 many Macintosh and compatible computers.
556
557config FB_VGA16
558 tristate "VGA 16-color graphics support"
559 depends on FB && (X86 || PPC)
560 select FB_CFB_FILLRECT
561 select FB_CFB_COPYAREA
562 select FB_CFB_IMAGEBLIT
563 select VGASTATE
564 select FONT_8x16 if FRAMEBUFFER_CONSOLE
565 help
566 This is the frame buffer device driver for VGA 16 color graphic
567 cards. Say Y if you have such a card.
568
569 To compile this driver as a module, choose M here: the
570 module will be called vga16fb.
571
572config FB_BF54X_LQ043
573 tristate "SHARP LQ043 TFT LCD (BF548 EZKIT)"
574 depends on FB && (BF54x) && !BF542
575 select FB_CFB_FILLRECT
576 select FB_CFB_COPYAREA
577 select FB_CFB_IMAGEBLIT
578 help
579 This is the framebuffer device driver for a SHARP LQ043T1DG01 TFT LCD.
580
581config FB_BFIN_T350MCQB
582 tristate "Varitronix COG-T350MCQB TFT LCD display (BF527 EZKIT)"
583 depends on FB && BLACKFIN
584 select BFIN_GPTIMERS
585 select FB_CFB_FILLRECT
586 select FB_CFB_COPYAREA
587 select FB_CFB_IMAGEBLIT
588 help
589 This is the framebuffer device driver for the Varitronix VL-PS-COG-T350MCQB-01 TFT LCD display.
590 This display is a QVGA 320x240 24-bit RGB display interfaced by an 8-bit wide PPI.
591 It uses PPI[0..7], PPI_FS1, PPI_FS2 and PPI_CLK.
592
593config FB_BFIN_LQ035Q1
594 tristate "SHARP LQ035Q1DH02 TFT LCD"
595 depends on FB && BLACKFIN && SPI
596 select FB_CFB_FILLRECT
597 select FB_CFB_COPYAREA
598 select FB_CFB_IMAGEBLIT
599 select BFIN_GPTIMERS
600 help
601 This is the framebuffer device driver for a SHARP LQ035Q1DH02 TFT display found on
602 the Blackfin Landscape LCD EZ-Extender Card.
603 This display is a QVGA 320x240 18-bit RGB display interfaced by a 16-bit wide PPI.
604 It uses PPI[0..15], PPI_FS1, PPI_FS2 and PPI_CLK.
605
606 To compile this driver as a module, choose M here: the
607 module will be called bfin-lq035q1-fb.
608
609config FB_BF537_LQ035
610 tristate "SHARP LQ035 TFT LCD (BF537 STAMP)"
611 depends on FB && (BF534 || BF536 || BF537) && I2C_BLACKFIN_TWI
612 select FB_CFB_FILLRECT
613 select FB_CFB_COPYAREA
614 select FB_CFB_IMAGEBLIT
615 select BFIN_GPTIMERS
616 help
617 This is the framebuffer device for a SHARP LQ035Q7DB03 TFT LCD
618 attached to a BF537.
619
620 To compile this driver as a module, choose M here: the
621 module will be called bf537-lq035.
622
623config FB_BFIN_7393
624 tristate "Blackfin ADV7393 Video encoder"
625 depends on FB && BLACKFIN
626 select I2C
627 select FB_CFB_FILLRECT
628 select FB_CFB_COPYAREA
629 select FB_CFB_IMAGEBLIT
630 help
631 This is the framebuffer device for an ADV7393 video encoder
632 attached to a Blackfin on the PPI port.
633 If your Blackfin board has an ADV7393, say Y.
634
635 To compile this driver as a module, choose M here: the
636 module will be called bfin_adv7393fb.
637
638choice
639 prompt "Video mode support"
640 depends on FB_BFIN_7393
641 default NTSC
642
643config NTSC
644 bool 'NTSC 720x480'
645
646config PAL
647 bool 'PAL 720x576'
648
649config NTSC_640x480
650 bool 'NTSC 640x480 (Experimental)'
651
652config PAL_640x480
653 bool 'PAL 640x480 (Experimental)'
654
655config NTSC_YCBCR
656 bool 'NTSC 720x480 YCbCr input'
657
658config PAL_YCBCR
659 bool 'PAL 720x576 YCbCr input'
660
661endchoice
662
663choice
664 prompt "Size of ADV7393 frame buffer memory (Single/Double)"
665 depends on (FB_BFIN_7393)
666 default ADV7393_1XMEM
667
668config ADV7393_1XMEM
669 bool 'Single'
670
671config ADV7393_2XMEM
672 bool 'Double'
673endchoice
674
675config FB_STI
676 tristate "HP STI frame buffer device support"
677 depends on FB && PARISC
678 select FB_CFB_FILLRECT
679 select FB_CFB_COPYAREA
680 select FB_CFB_IMAGEBLIT
681 select STI_CONSOLE
682 select VT
683 default y
684 ---help---
685 STI refers to the HP "Standard Text Interface" which is a set of
686 BIOS routines contained in a ROM chip in HP PA-RISC based machines.
687 Enabling this option will implement the Linux framebuffer device
688 using calls to the STI BIOS routines for initialisation.
689
690 If you enable this option, you will get a planar framebuffer device
691 /dev/fb which will work on the most common HP graphic cards of the
692 NGLE family, including the artist chips (in the 7xx and Bxxx series),
693 HCRX, HCRX24, CRX, CRX24 and VisEG series.
694
695 It is safe to enable this option, so you should probably say "Y".
696
697config FB_MAC
698 bool "Generic Macintosh display support"
699 depends on (FB = y) && MAC
700 select FB_CFB_FILLRECT
701 select FB_CFB_COPYAREA
702 select FB_CFB_IMAGEBLIT
703 select FB_MACMODES
704
705config FB_HP300
706 bool
707 depends on (FB = y) && DIO
708 select FB_CFB_IMAGEBLIT
709 default y
710
711config FB_TGA
712 tristate "TGA/SFB+ framebuffer support"
713 depends on FB && (ALPHA || TC)
714 select FB_CFB_FILLRECT
715 select FB_CFB_COPYAREA
716 select FB_CFB_IMAGEBLIT
717 select BITREVERSE
718 ---help---
719 This is the frame buffer device driver for generic TGA and SFB+
720 graphic cards. These include DEC ZLXp-E1, -E2 and -E3 PCI cards,
721 also known as PBXGA-A, -B and -C, and DEC ZLX-E1, -E2 and -E3
722 TURBOchannel cards, also known as PMAGD-A, -B and -C.
723
724 Due to hardware limitations ZLX-E2 and E3 cards are not supported
725 for DECstation 5000/200 systems. Additionally due to firmware
726 limitations these cards may cause trouble when booting DECstation
727 5000/240 and /260 systems, but are fully supported under Linux if
728 you manage to get it going. ;-)
729
730 Say Y if you have one of those.
731
732config FB_UVESA
733 tristate "Userspace VESA VGA graphics support"
734 depends on FB && CONNECTOR
735 select FB_CFB_FILLRECT
736 select FB_CFB_COPYAREA
737 select FB_CFB_IMAGEBLIT
738 select FB_MODE_HELPERS
739 help
740 This is the frame buffer driver for generic VBE 2.0 compliant
741 graphic cards. It can also take advantage of VBE 3.0 features,
742 such as refresh rate adjustment.
743
744 This driver generally provides more features than vesafb but
745 requires a userspace helper application called 'v86d'. See
746 <file:Documentation/fb/uvesafb.txt> for more information.
747
748 If unsure, say N.
749
750config FB_VESA
751 bool "VESA VGA graphics support"
752 depends on (FB = y) && X86
753 select FB_CFB_FILLRECT
754 select FB_CFB_COPYAREA
755 select FB_CFB_IMAGEBLIT
756 select FB_BOOT_VESA_SUPPORT
757 help
758 This is the frame buffer device driver for generic VESA 2.0
759 compliant graphic cards. The older VESA 1.2 cards are not supported.
760 You will get a boot time penguin logo at no additional cost. Please
761 read <file:Documentation/fb/vesafb.txt>. If unsure, say Y.
762
763config FB_EFI
764 bool "EFI-based Framebuffer Support"
765 depends on (FB = y) && X86 && EFI
766 select FB_CFB_FILLRECT
767 select FB_CFB_COPYAREA
768 select FB_CFB_IMAGEBLIT
769 help
770 This is the EFI frame buffer device driver. If the firmware on
771 your platform is EFI 1.10 or UEFI 2.0, select Y to add support for
772 using the EFI framebuffer as your console.
773
774config FB_N411
775 tristate "N411 Apollo/Hecuba devkit support"
776 depends on FB && X86 && MMU
777 select FB_SYS_FILLRECT
778 select FB_SYS_COPYAREA
779 select FB_SYS_IMAGEBLIT
780 select FB_SYS_FOPS
781 select FB_DEFERRED_IO
782 select FB_HECUBA
783 help
784 This enables support for the Apollo display controller in its
785 Hecuba form using the n411 devkit.
786
787config FB_HGA
788 tristate "Hercules mono graphics support"
789 depends on FB && X86
790 help
791 Say Y here if you have a Hercules mono graphics card.
792
793 To compile this driver as a module, choose M here: the
794 module will be called hgafb.
795
796 As this card technology is at least 25 years old,
797 most people will answer N here.
798
799config FB_GBE
800 bool "SGI Graphics Backend frame buffer support"
801 depends on (FB = y) && SGI_IP32
802 select FB_CFB_FILLRECT
803 select FB_CFB_COPYAREA
804 select FB_CFB_IMAGEBLIT
805 help
806 This is the frame buffer device driver for the SGI Graphics Backend.
807 This chip is used in SGI O2 and Visual Workstation 320/540.
808
809config FB_GBE_MEM
810 int "Video memory size in MB"
811 depends on FB_GBE
812 default 4
813 help
814 This is the amount of memory reserved for the framebuffer,
815 which can be any value between 1MB and 8MB.
816
817config FB_SBUS
818 bool "SBUS and UPA framebuffers"
819 depends on (FB = y) && SPARC
820 help
821 Say Y if you want support for SBUS or UPA based frame buffer devices.
822
823config FB_BW2
824 bool "BWtwo support"
825 depends on (FB = y) && (SPARC && FB_SBUS)
826 select FB_CFB_FILLRECT
827 select FB_CFB_COPYAREA
828 select FB_CFB_IMAGEBLIT
829 help
830 This is the frame buffer device driver for the BWtwo frame buffer.
831
832config FB_CG3
833 bool "CGthree support"
834 depends on (FB = y) && (SPARC && FB_SBUS)
835 select FB_CFB_FILLRECT
836 select FB_CFB_COPYAREA
837 select FB_CFB_IMAGEBLIT
838 help
839 This is the frame buffer device driver for the CGthree frame buffer.
840
841config FB_CG6
842 bool "CGsix (GX,TurboGX) support"
843 depends on (FB = y) && (SPARC && FB_SBUS)
844 select FB_CFB_COPYAREA
845 select FB_CFB_IMAGEBLIT
846 help
847 This is the frame buffer device driver for the CGsix (GX, TurboGX)
848 frame buffer.
849
850config FB_FFB
851 bool "Creator/Creator3D/Elite3D support"
852 depends on FB_SBUS && SPARC64
853 select FB_CFB_COPYAREA
854 select FB_CFB_IMAGEBLIT
855 help
856 This is the frame buffer device driver for the Creator, Creator3D,
857 and Elite3D graphics boards.
858
859config FB_TCX
860 bool "TCX (SS4/SS5 only) support"
861 depends on FB_SBUS
862 select FB_CFB_FILLRECT
863 select FB_CFB_COPYAREA
864 select FB_CFB_IMAGEBLIT
865 help
866 This is the frame buffer device driver for the TCX 24/8bit frame
867 buffer.
868
869config FB_CG14
870 bool "CGfourteen (SX) support"
871 depends on FB_SBUS
872 select FB_CFB_FILLRECT
873 select FB_CFB_COPYAREA
874 select FB_CFB_IMAGEBLIT
875 help
876 This is the frame buffer device driver for the CGfourteen frame
877 buffer on Desktop SPARCsystems with the SX graphics option.
878
879config FB_P9100
880 bool "P9100 (Sparcbook 3 only) support"
881 depends on FB_SBUS
882 select FB_CFB_FILLRECT
883 select FB_CFB_COPYAREA
884 select FB_CFB_IMAGEBLIT
885 help
886 This is the frame buffer device driver for the P9100 card
887 supported on Sparcbook 3 machines.
888
889config FB_LEO
890 bool "Leo (ZX) support"
891 depends on FB_SBUS
892 select FB_CFB_FILLRECT
893 select FB_CFB_COPYAREA
894 select FB_CFB_IMAGEBLIT
895 help
896 This is the frame buffer device driver for the SBUS-based Sun ZX
897 (leo) frame buffer cards.
898
899config FB_IGA
900 bool "IGA 168x display support"
901 depends on (FB = y) && SPARC32
902 select FB_CFB_FILLRECT
903 select FB_CFB_COPYAREA
904 select FB_CFB_IMAGEBLIT
905 help
906 This is the framebuffer device for the INTERGRAPHICS 1680 and
907 successor frame buffer cards.
908
909config FB_XVR500
910 bool "Sun XVR-500 3DLABS Wildcat support"
911 depends on (FB = y) && PCI && SPARC64
912 select FB_CFB_FILLRECT
913 select FB_CFB_COPYAREA
914 select FB_CFB_IMAGEBLIT
915 help
916 This is the framebuffer device for the Sun XVR-500 and similar
917 graphics cards based upon the 3DLABS Wildcat chipset. The driver
918 only works on sparc64 systems where the system firmware has
919 mostly initialized the card already. It is treated as a
920 completely dumb framebuffer device.
921
922config FB_XVR2500
923 bool "Sun XVR-2500 3DLABS Wildcat support"
924 depends on (FB = y) && PCI && SPARC64
925 select FB_CFB_FILLRECT
926 select FB_CFB_COPYAREA
927 select FB_CFB_IMAGEBLIT
928 help
929 This is the framebuffer device for the Sun XVR-2500 and similar
930 graphics cards based upon the 3DLABS Wildcat chipset. The driver
931 only works on sparc64 systems where the system firmware has
932 mostly initialized the card already. It is treated as a
933 completely dumb framebuffer device.
934
935config FB_XVR1000
936 bool "Sun XVR-1000 support"
937 depends on (FB = y) && SPARC64
938 select FB_CFB_FILLRECT
939 select FB_CFB_COPYAREA
940 select FB_CFB_IMAGEBLIT
941 help
942 This is the framebuffer device for the Sun XVR-1000 and similar
943 graphics cards. The driver only works on sparc64 systems where
944 the system firmware has mostly initialized the card already. It
945 is treated as a completely dumb framebuffer device.
946
947config FB_PVR2
948 tristate "NEC PowerVR 2 display support"
949 depends on FB && SH_DREAMCAST
950 select FB_CFB_FILLRECT
951 select FB_CFB_COPYAREA
952 select FB_CFB_IMAGEBLIT
953 ---help---
954 Say Y here if you have a PowerVR 2 card in your box. If you plan to
955 run Linux on your Dreamcast, you will have to say Y here.
956 This driver may or may not work on other PowerVR 2 cards, but is
957 totally untested. Use at your own risk. If unsure, say N.
958
959 To compile this driver as a module, choose M here: the
960 module will be called pvr2fb.
961
962 You can pass several parameters to the driver at boot time or at
963 module load time. The parameters look like "video=pvr2:XXX", where
964 the meaning of XXX can be found at the end of the main source file
965 (<file:drivers/video/pvr2fb.c>). Please see the file
966 <file:Documentation/fb/pvr2fb.txt>.
967
968config FB_OPENCORES
969 tristate "OpenCores VGA/LCD core 2.0 framebuffer support"
970 depends on FB && HAS_DMA
971 select FB_CFB_FILLRECT
972 select FB_CFB_COPYAREA
973 select FB_CFB_IMAGEBLIT
974 help
975 This enables support for the OpenCores VGA/LCD core.
976
977 The OpenCores VGA/LCD core is typically used together with
978 softcore CPUs (e.g. OpenRISC or Microblaze) or hard processor
979 systems (e.g. Altera socfpga or Xilinx Zynq) on FPGAs.
980
981 The source code and specification for the core is available at
982 <http://opencores.org/project,vga_lcd>
983
984config FB_S1D13XXX
985 tristate "Epson S1D13XXX framebuffer support"
986 depends on FB
987 select FB_CFB_FILLRECT
988 select FB_CFB_COPYAREA
989 select FB_CFB_IMAGEBLIT
990 help
991 Support for the S1D13XXX framebuffer device family (currently only
992 working with the S1D13806). Product specs at
993 <http://vdc.epson.com/>.
994
995config FB_ATMEL
996 tristate "AT91/AT32 LCD Controller support"
997 depends on FB && HAVE_FB_ATMEL
998 select FB_CFB_FILLRECT
999 select FB_CFB_COPYAREA
1000 select FB_CFB_IMAGEBLIT
1001 select FB_MODE_HELPERS
1002 select VIDEOMODE_HELPERS
1003 help
1004 This enables support for the AT91/AT32 LCD Controller.
1005
1006config FB_INTSRAM
1007 bool "Frame Buffer in internal SRAM"
1008 depends on FB_ATMEL && ARCH_AT91SAM9261
1009 help
1010 Say Y if you want to map the frame buffer into internal SRAM. Say N
1011 if you want to keep the frame buffer in external SDRAM.
1012
1013config FB_ATMEL_STN
1014 bool "Use a STN display with AT91/AT32 LCD Controller"
1015 depends on FB_ATMEL && (MACH_AT91SAM9261EK || MACH_AT91SAM9G10EK)
1016 default n
1017 help
1018 Say Y if you want to connect an STN LCD display to the AT91/AT32 LCD
1019 Controller. Say N if you want to connect a TFT.
1020
1021 If unsure, say N.
1022
1023config FB_NVIDIA
1024 tristate "nVidia Framebuffer Support"
1025 depends on FB && PCI
1026 select FB_BACKLIGHT if FB_NVIDIA_BACKLIGHT
1027 select FB_MODE_HELPERS
1028 select FB_CFB_FILLRECT
1029 select FB_CFB_COPYAREA
1030 select FB_CFB_IMAGEBLIT
1031 select BITREVERSE
1032 select VGASTATE
1033 help
1034 This driver supports graphics boards with nVidia chips, TNT
1035 and newer. For very old chipsets, such as the RIVA128, use
1036 rivafb instead.
1037 Say Y if you have such a graphics board.
1038
1039 To compile this driver as a module, choose M here: the
1040 module will be called nvidiafb.
1041
1042config FB_NVIDIA_I2C
1043 bool "Enable DDC Support"
1044 depends on FB_NVIDIA
1045 select FB_DDC
1046 help
1047 This enables I2C support for nVidia Chipsets. This is used
1048 only for getting EDID information from the attached display
1049 allowing for robust video mode handling and switching.
1050
1051 Because fbdev-2.6 requires that drivers be able to
1052 independently validate video mode parameters, you should say Y
1053 here.
1054
1055config FB_NVIDIA_DEBUG
1056 bool "Lots of debug output"
1057 depends on FB_NVIDIA
1058 default n
1059 help
1060 Say Y here if you want the nVidia driver to output all sorts
1061 of debugging information to provide to the maintainer when
1062 something goes wrong.
1063
1064config FB_NVIDIA_BACKLIGHT
1065 bool "Support for backlight control"
1066 depends on FB_NVIDIA
1067 default y
1068 help
1069 Say Y here if you want to control the backlight of your display.
1070
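# A hypothetical .config fragment (sketch only) showing the options above
# combined: nvidiafb built as a module with DDC/I2C and backlight control
# enabled.
#
#	CONFIG_FB=y
#	CONFIG_FB_NVIDIA=m
#	CONFIG_FB_NVIDIA_I2C=y
#	CONFIG_FB_NVIDIA_BACKLIGHT=y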
1071config FB_RIVA
1072 tristate "nVidia Riva support"
1073 depends on FB && PCI
1074 select FB_BACKLIGHT if FB_RIVA_BACKLIGHT
1075 select FB_MODE_HELPERS
1076 select FB_CFB_FILLRECT
1077 select FB_CFB_COPYAREA
1078 select FB_CFB_IMAGEBLIT
1079 select BITREVERSE
1080 select VGASTATE
1081 help
1082 This driver supports graphics boards with the nVidia Riva/Geforce
1083 chips.
1084 Say Y if you have such a graphics board.
1085
1086 To compile this driver as a module, choose M here: the
1087 module will be called rivafb.
1088
1089config FB_RIVA_I2C
1090 bool "Enable DDC Support"
1091 depends on FB_RIVA
1092 select FB_DDC
1093 help
1094 This enables I2C support for nVidia Chipsets. This is used
1095 only for getting EDID information from the attached display
1096 allowing for robust video mode handling and switching.
1097
1098 Because fbdev-2.6 requires that drivers be able to
1099 independently validate video mode parameters, you should say Y
1100 here.
1101
1102config FB_RIVA_DEBUG
1103 bool "Lots of debug output"
1104 depends on FB_RIVA
1105 default n
1106 help
1107 Say Y here if you want the Riva driver to output all sorts
1108 of debugging information to provide to the maintainer when
1109 something goes wrong.
1110
1111config FB_RIVA_BACKLIGHT
1112 bool "Support for backlight control"
1113 depends on FB_RIVA
1114 default y
1115 help
1116 Say Y here if you want to control the backlight of your display.
1117
1118config FB_I740
1119 tristate "Intel740 support"
1120 depends on FB && PCI
1121 select FB_MODE_HELPERS
1122 select FB_CFB_FILLRECT
1123 select FB_CFB_COPYAREA
1124 select FB_CFB_IMAGEBLIT
1125 select VGASTATE
1126 select FB_DDC
1127 help
1128 This driver supports graphics cards based on the Intel740 chip.
1129
1130config FB_I810
1131 tristate "Intel 810/815 support"
1132 depends on FB && PCI && X86_32 && AGP_INTEL
1133 select FB_MODE_HELPERS
1134 select FB_CFB_FILLRECT
1135 select FB_CFB_COPYAREA
1136 select FB_CFB_IMAGEBLIT
1137 select VGASTATE
1138 help
1139 This driver supports the on-board graphics built in to the Intel 810
1140 and 815 chipsets. Say Y if you have and plan to use such a board.
1141
1142 To compile this driver as a module, choose M here: the
1143 module will be called i810fb.
1144
1145 For more information, please read
1146 <file:Documentation/fb/intel810.txt>
1147
1148config FB_I810_GTF
1149 bool "use VESA Generalized Timing Formula"
1150 depends on FB_I810
1151 help
1152 If you say Y, then the VESA standard, Generalized Timing Formula
1153 or GTF, will be used to calculate the required video timing values
1154 per video mode. Since the GTF allows nondiscrete timings
1155 (nondiscrete being a range of values as opposed to discrete being a
1156 set of values), you'll be able to use any combination of horizontal
1157 and vertical resolutions, and vertical refresh rates without having
1158 to specify your own timing parameters. This is especially useful
1159 to maximize the performance of an aging display, or if you just
1160 have a display with nonstandard dimensions. A VESA compliant
1161 monitor is recommended, but can still work with non-compliant ones.
1162 If you need or want this, then select this option. The timings may
1163 not be compliant with Intel's recommended values. Use at your own
1164 risk.
1165
1166 If you say N, the driver will revert to discrete video timings
1167 using a set recommended by Intel in their documentation.
1168
1169 If unsure, say N.
1170
1171config FB_I810_I2C
1172 bool "Enable DDC Support"
1173 depends on FB_I810 && FB_I810_GTF
1174 select FB_DDC
1175 help
1176 This enables DDC support for the Intel 810/815 framebuffer driver, used to read EDID information from the attached display.
1177config FB_LE80578
1178 tristate "Intel LE80578 (Vermilion) support"
1179 depends on FB && PCI && X86
1180 select FB_MODE_HELPERS
1181 select FB_CFB_FILLRECT
1182 select FB_CFB_COPYAREA
1183 select FB_CFB_IMAGEBLIT
1184 help
1185 This driver supports the LE80578 (Vermilion Range) chipset.
1186
1187config FB_CARILLO_RANCH
1188 tristate "Intel Carillo Ranch support"
1189 depends on FB_LE80578 && FB && PCI && X86
1190 help
1191 This driver supports the LE80578 (Carillo Ranch) board.
1192
1193config FB_INTEL
1194 tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support"
1195 depends on FB && PCI && X86 && AGP_INTEL && EXPERT
1196 select FB_MODE_HELPERS
1197 select FB_CFB_FILLRECT
1198 select FB_CFB_COPYAREA
1199 select FB_CFB_IMAGEBLIT
1200 select FB_BOOT_VESA_SUPPORT if FB_INTEL = y
1201 depends on !DRM_I915
1202 help
1203 This driver supports the on-board graphics built in to the Intel
1204 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets.
1205 Say Y if you have and plan to use such a board.
1206
1207 To make FB_INTEL=y work you need to say AGP_INTEL=y too.
1208
1209 To compile this driver as a module, choose M here: the
1210 module will be called intelfb.
1211
1212 For more information, please read <file:Documentation/fb/intelfb.txt>
1213
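# A hypothetical .config fragment (sketch only) matching the note above:
# intelfb requires the Intel AGP driver to be built in and cannot coexist
# with DRM_I915, which must therefore be left disabled.
#
#	CONFIG_AGP_INTEL=y
#	CONFIG_FB_INTEL=m
#	CONFIG_FB_INTEL_I2C=y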
1214config FB_INTEL_DEBUG
1215 bool "Intel driver Debug Messages"
1216 depends on FB_INTEL
1217 ---help---
1218 Say Y here if you want the Intel driver to output all sorts
1219 of debugging information to provide to the maintainer when
1220 something goes wrong.
1221
1222config FB_INTEL_I2C
1223 bool "DDC/I2C for Intel framebuffer support"
1224 depends on FB_INTEL
1225 select FB_DDC
1226 default y
1227 help
1228 Say Y here if you want DDC/I2C support for your on-board Intel graphics.
1229
1230config FB_MATROX
1231 tristate "Matrox acceleration"
1232 depends on FB && PCI
1233 select FB_CFB_FILLRECT
1234 select FB_CFB_COPYAREA
1235 select FB_CFB_IMAGEBLIT
1236 select FB_TILEBLITTING
1237 select FB_MACMODES if PPC_PMAC
1238 ---help---
1239 Say Y here if you have a Matrox Millennium, Matrox Millennium II,
1240 Matrox Mystique, Matrox Mystique 220, Matrox Productiva G100, Matrox
1241 Mystique G200, Matrox Millennium G200, Matrox Marvel G200 video,
1242 Matrox G400, G450 or G550 card in your box.
1243
1244 To compile this driver as a module, choose M here: the
1245 module will be called matroxfb.
1246
1247 You can pass several parameters to the driver at boot time or at
1248 module load time. The parameters look like "video=matroxfb:XXX", and
1249 are described in <file:Documentation/fb/matroxfb.txt>.
1250
1251config FB_MATROX_MILLENIUM
1252 bool "Millennium I/II support"
1253 depends on FB_MATROX
1254 help
1255 Say Y here if you have a Matrox Millennium or Matrox Millennium II
1256 video card. If you select "Advanced lowlevel driver options" below,
1257 you should check 4 bpp packed pixel, 8 bpp packed pixel, 16 bpp
1258 packed pixel, 24 bpp packed pixel and 32 bpp packed pixel. You can
1259 also use font widths different from 8.
1260
1261config FB_MATROX_MYSTIQUE
1262 bool "Mystique support"
1263 depends on FB_MATROX
1264 help
1265 Say Y here if you have a Matrox Mystique or Matrox Mystique 220
1266 video card. If you select "Advanced lowlevel driver options" below,
1267 you should check 8 bpp packed pixel, 16 bpp packed pixel, 24 bpp
1268 packed pixel and 32 bpp packed pixel. You can also use font widths
1269 different from 8.
1270
1271config FB_MATROX_G
1272 bool "G100/G200/G400/G450/G550 support"
1273 depends on FB_MATROX
1274 ---help---
1275 Say Y here if you have a Matrox G100, G200, G400, G450 or G550 based
1276 video card. If you select "Advanced lowlevel driver options", you
1277 should check 8 bpp packed pixel, 16 bpp packed pixel, 24 bpp packed
1278 pixel and 32 bpp packed pixel. You can also use font widths
1279 different from 8.
1280
1281 If you need support for G400 secondary head, you must say Y to
1282 "Matrox I2C support" and "G400 second head support" right below.
1283 G450/G550 secondary head and digital output are supported without
1284 additional modules.
1285
1286 The driver starts in monitor mode. You must use the matroxset tool
1287 (available at <ftp://platan.vc.cvut.cz/pub/linux/matrox-latest/>) to
1288 swap primary and secondary head outputs, or to change output mode.
1289 The secondary head driver always starts in 640x480 resolution and
1290 you must use fbset to change it.
1291
1292 Do not forget that second head supports only 16 and 32 bpp
1293 packed pixels, so it is a good idea to compile them into the kernel
1294 too. You can use only some font widths, as the driver uses generic
1295 painting procedures (the secondary head does not use acceleration
1296 engine).
1297
1298 G450/G550 hardware can display TV picture only from secondary CRTC,
1299 and it performs no scaling, so picture must have 525 or 625 lines.
1300
1301config FB_MATROX_I2C
1302 tristate "Matrox I2C support"
1303 depends on FB_MATROX
1304 select FB_DDC
1305 ---help---
1306 This driver creates I2C buses which are needed for accessing the
1307 DDC (I2C) bus present on all Matroxes, an I2C bus which
1308 interconnects Matrox optional devices, like MGA-TVO on G200 and
1309 G400, and the secondary head DDC bus, present on G400 only.
1310
1311 You can say Y or M here if you want to experiment with monitor
1312 detection code. You must say Y or M here if you want to use either
1313 second head of G400 or MGA-TVO on G200 or G400.
1314
1315 If you compile it as module, it will create a module named
1316 i2c-matroxfb.
1317
1318config FB_MATROX_MAVEN
1319 tristate "G400 second head support"
1320 depends on FB_MATROX_G && FB_MATROX_I2C
1321 ---help---
1322 WARNING !!! This support does not work with G450 !!!
1323
1324 Say Y or M here if you want to use a secondary head (meaning two
1325 monitors in parallel) on G400 or MGA-TVO add-on on G200. Secondary
1326 head is not compatible with accelerated XFree 3.3.x SVGA servers -
1327 secondary head output is blanked while you are in X. With XFree
1328 3.9.17 preview you can use both heads if you use SVGA over fbdev or
1329 the fbdev driver on first head and the fbdev driver on second head.
1330
1331 If you compile it as module, two modules are created,
1332 matroxfb_crtc2 and matroxfb_maven. Matroxfb_maven is needed for
1333 both G200 and G400, matroxfb_crtc2 is needed only by G400. You must
1334 also load i2c-matroxfb to get it to run.
1335
1336 The driver starts in monitor mode and you must use the matroxset
1337 tool (available at
1338 <ftp://platan.vc.cvut.cz/pub/linux/matrox-latest/>) to switch it to
1339 PAL or NTSC or to swap primary and secondary head outputs.
1340 The secondary head driver also always starts in 640x480 resolution;
1341 you must use fbset to change it.
1342
1343 Also do not forget that second head supports only 16 and 32 bpp
1344 packed pixels, so it is a good idea to compile them into the kernel
1345 too. You can use only some font widths, as the driver uses generic
1346 painting procedures (the secondary head does not use acceleration
1347 engine).
1348
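# A hypothetical .config fragment (sketch only) for driving both heads of a
# G400, combining the Matrox options described above; the dual-head and I2C
# pieces are built as modules here (matroxfb_crtc2, matroxfb_maven and
# i2c-matroxfb).
#
#	CONFIG_FB_MATROX=y
#	CONFIG_FB_MATROX_G=y
#	CONFIG_FB_MATROX_I2C=m
#	CONFIG_FB_MATROX_MAVEN=m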
1349config FB_RADEON
1350 tristate "ATI Radeon display support"
1351 depends on FB && PCI
1352 select FB_BACKLIGHT if FB_RADEON_BACKLIGHT
1353 select FB_MODE_HELPERS
1354 select FB_CFB_FILLRECT
1355 select FB_CFB_COPYAREA
1356 select FB_CFB_IMAGEBLIT
1357 select FB_MACMODES if PPC_OF
1358 help
1359 Choose this option if you want to use an ATI Radeon graphics card as
1360 a framebuffer device. There are both PCI and AGP versions. You
1361 don't need to choose this to run the Radeon in plain VGA mode.
1362
1363 There is a product page at
1364 http://products.amd.com/en-us/GraphicCardResult.aspx
1365
1366config FB_RADEON_I2C
1367 bool "DDC/I2C for ATI Radeon support"
1368 depends on FB_RADEON
1369 select FB_DDC
1370 default y
1371 help
1372 Say Y here if you want DDC/I2C support for your Radeon board.
1373
1374config FB_RADEON_BACKLIGHT
1375 bool "Support for backlight control"
1376 depends on FB_RADEON
1377 default y
1378 help
1379 Say Y here if you want to control the backlight of your display.
1380
1381config FB_RADEON_DEBUG
1382 bool "Lots of debug output from Radeon driver"
1383 depends on FB_RADEON
1384 default n
1385 help
1386 Say Y here if you want the Radeon driver to output all sorts
1387 of debugging information to provide to the maintainer when
1388 something goes wrong.
1389
1390config FB_ATY128
1391 tristate "ATI Rage128 display support"
1392 depends on FB && PCI
1393 select FB_CFB_FILLRECT
1394 select FB_CFB_COPYAREA
1395 select FB_CFB_IMAGEBLIT
1396 select FB_BACKLIGHT if FB_ATY128_BACKLIGHT
1397 select FB_MACMODES if PPC_PMAC
1398 help
1399 This driver supports graphics boards with the ATI Rage128 chips.
1400 Say Y if you have such a graphics board and read
1401 <file:Documentation/fb/aty128fb.txt>.
1402
1403 To compile this driver as a module, choose M here: the
1404 module will be called aty128fb.
1405
1406config FB_ATY128_BACKLIGHT
1407 bool "Support for backlight control"
1408 depends on FB_ATY128
1409 default y
1410 help
1411 Say Y here if you want to control the backlight of your display.
1412
1413config FB_ATY
1414 tristate "ATI Mach64 display support" if PCI || ATARI
1415 depends on FB && !SPARC32
1416 select FB_CFB_FILLRECT
1417 select FB_CFB_COPYAREA
1418 select FB_CFB_IMAGEBLIT
1419 select FB_BACKLIGHT if FB_ATY_BACKLIGHT
1420 select FB_MACMODES if PPC
1421 help
1422 This driver supports graphics boards with the ATI Mach64 chips.
1423 Say Y if you have such a graphics board.
1424
1425 To compile this driver as a module, choose M here: the
1426 module will be called atyfb.
1427
1428config FB_ATY_CT
1429 bool "Mach64 CT/VT/GT/LT (incl. 3D RAGE) support"
1430 depends on PCI && FB_ATY
1431 default y if SPARC64 && PCI
1432 help
1433 Say Y here to support use of ATI's 64-bit Rage boards (or other
1434 boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
1435 framebuffer device. The ATI product support page for these boards
1436 is at <http://support.ati.com/products/pc/mach64/mach64.html>.
1437
1438config FB_ATY_GENERIC_LCD
1439 bool "Mach64 generic LCD support"
1440 depends on FB_ATY_CT
1441 help
1442 Say Y if you have a laptop with an ATI Rage LT PRO, Rage Mobility,
1443 Rage XC, or Rage XL chipset.
1444
1445config FB_ATY_GX
1446 bool "Mach64 GX support" if PCI
1447 depends on FB_ATY
1448 default y if ATARI
1449 help
1450 Say Y here to support use of the ATI Mach64 Graphics Expression
1451 board (or other boards based on the Mach64 GX chipset) as a
1452 framebuffer device. The ATI product support page for these boards
1453 is at
1454 <http://support.ati.com/products/pc/mach64/graphics_xpression.html>.
1455
1456config FB_ATY_BACKLIGHT
1457 bool "Support for backlight control"
1458 depends on FB_ATY
1459 default y
1460 help
1461 Say Y here if you want to control the backlight of your display.
1462
1463config FB_S3
1464 tristate "S3 Trio/Virge support"
1465 depends on FB && PCI
1466 select FB_CFB_FILLRECT
1467 select FB_CFB_COPYAREA
1468 select FB_CFB_IMAGEBLIT
1469 select FB_TILEBLITTING
1470 select FB_SVGALIB
1471 select VGASTATE
1472 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1473 ---help---
1474 Driver for graphics boards with S3 Trio / S3 Virge chip.
1475
1476config FB_S3_DDC
1477 bool "DDC for S3 support"
1478 depends on FB_S3
1479 select FB_DDC
1480 default y
1481 help
1482 Say Y here if you want DDC support for your S3 graphics card.
1483
1484config FB_SAVAGE
1485 tristate "S3 Savage support"
1486 depends on FB && PCI
1487 select FB_MODE_HELPERS
1488 select FB_CFB_FILLRECT
1489 select FB_CFB_COPYAREA
1490 select FB_CFB_IMAGEBLIT
1491 select VGASTATE
1492 help
1493 This driver supports notebooks and computers with S3 Savage PCI/AGP
1494 chips.
1495
1496 Say Y if you have such a graphics card.
1497
1498 To compile this driver as a module, choose M here; the module
1499 will be called savagefb.
1500
1501config FB_SAVAGE_I2C
1502 bool "Enable DDC2 Support"
1503 depends on FB_SAVAGE
1504 select FB_DDC
1505 help
1506 This enables I2C support for S3 Savage Chipsets. This is used
1507 only for getting EDID information from the attached display
1508 allowing for robust video mode handling and switching.
1509
1510 Because fbdev-2.6 requires that drivers be able to
1511 independently validate video mode parameters, you should say Y
1512 here.
1513
1514config FB_SAVAGE_ACCEL
1515 bool "Enable Console Acceleration"
1516 depends on FB_SAVAGE
1517 default n
1518 help
1519 This option will compile in console acceleration support. If
1520 the resulting framebuffer console has bothersome glitches, then
1521 choose N here.
1522
1523config FB_SIS
1524 tristate "SiS/XGI display support"
1525 depends on FB && PCI
1526 select FB_CFB_FILLRECT
1527 select FB_CFB_COPYAREA
1528 select FB_CFB_IMAGEBLIT
1529 select FB_BOOT_VESA_SUPPORT if FB_SIS = y
1530 help
1531 This is the frame buffer device driver for the SiS 300, 315, 330
1532 and 340 series as well as XGI V3XT, V5, V8, Z7 graphics chipsets.
1533 Specs available at <http://www.sis.com> and <http://www.xgitech.com>.
1534
1535 To compile this driver as a module, choose M here; the module
1536 will be called sisfb.
1537
1538config FB_SIS_300
1539 bool "SiS 300 series support"
1540 depends on FB_SIS
1541 help
1542 Say Y here to support use of the SiS 300/305, 540, 630 and 730.
1543
1544config FB_SIS_315
1545 bool "SiS 315/330/340 series and XGI support"
1546 depends on FB_SIS
1547 help
1548 Say Y here to support use of the SiS 315, 330 and 340 series
1549 (315/H/PRO, 55x, 650, 651, 740, 330, 661, 741, 760, 761) as well
1550 as XGI V3XT, V5, V8 and Z7.
1551
1552config FB_VIA
1553 tristate "VIA UniChrome (Pro) and Chrome9 display support"
1554 depends on FB && PCI && X86
1555 select FB_CFB_FILLRECT
1556 select FB_CFB_COPYAREA
1557 select FB_CFB_IMAGEBLIT
1558 select I2C_ALGOBIT
1559 select I2C
1560 select GPIOLIB
1561 help
1562 This is the frame buffer device driver for Graphics chips of VIA
1563 UniChrome (Pro) Family (CLE266, PM800/CN400, P4M800CE/P4M800Pro/
1564 CN700/VN800, CX700/VX700, P4M890) and Chrome9 Family (K8M890,
1565 CN896/P4M900, VX800).
1566 Say Y if you have a VIA UniChrome graphics board.
1567
1568 To compile this driver as a module, choose M here: the
1569 module will be called viafb.
1570
1571if FB_VIA
1572
1573config FB_VIA_DIRECT_PROCFS
1574 bool "direct hardware access via procfs (DEPRECATED)(DANGEROUS)"
1575 depends on FB_VIA
1576 default n
1577 help
1578 Allow direct hardware access to some output registers via procfs.
1579 This is dangerous but may provide the only chance to get the
1580 correct output device configuration.
1581 Its use is strongly discouraged.
1582
1583config FB_VIA_X_COMPATIBILITY
1584 bool "X server compatibility"
1585 depends on FB_VIA
1586 default n
1587 help
1588 This option reduces the functionality (power saving, ...) of the
1589 framebuffer to avoid negative impact on the OpenChrome X server.
1590 If you use any X server other than fbdev you should enable this;
1591 otherwise it should be safe to disable it and allow use of all
1592 features.
1593
1594endif
1595
1596config FB_NEOMAGIC
1597 tristate "NeoMagic display support"
1598 depends on FB && PCI
1599 select FB_MODE_HELPERS
1600 select FB_CFB_FILLRECT
1601 select FB_CFB_COPYAREA
1602 select FB_CFB_IMAGEBLIT
1603 select VGASTATE
1604 help
1605 This driver supports notebooks with NeoMagic PCI chips.
1606 Say Y if you have such a graphics card.
1607
1608 To compile this driver as a module, choose M here: the
1609 module will be called neofb.
1610
1611config FB_KYRO
1612 tristate "IMG Kyro support"
1613 depends on FB && PCI
1614 select FB_CFB_FILLRECT
1615 select FB_CFB_COPYAREA
1616 select FB_CFB_IMAGEBLIT
1617 help
1618 Say Y here if you have an STG4000 / Kyro / PowerVR 3 based
1619 graphics board.
1620
1621 To compile this driver as a module, choose M here: the
1622 module will be called kyrofb.
1623
1624config FB_3DFX
1625 tristate "3Dfx Banshee/Voodoo3/Voodoo5 display support"
1626 depends on FB && PCI
1627 select FB_CFB_IMAGEBLIT
1628 select FB_CFB_FILLRECT
1629 select FB_CFB_COPYAREA
1630 select FB_MODE_HELPERS
1631 help
1632 This driver supports graphics boards with the 3Dfx Banshee,
1633 Voodoo3 or VSA-100 (aka Voodoo4/5) chips. Say Y if you have
1634 such a graphics board.
1635
1636 To compile this driver as a module, choose M here: the
1637 module will be called tdfxfb.
1638
1639config FB_3DFX_ACCEL
1640 bool "3Dfx Acceleration functions"
1641 depends on FB_3DFX
1642 ---help---
1643 This will compile the 3Dfx Banshee/Voodoo3/VSA-100 frame buffer
1644 device driver with acceleration functions.
1645
1646config FB_3DFX_I2C
1647 bool "Enable DDC/I2C support"
1648 depends on FB_3DFX
1649 select FB_DDC
1650 default y
1651 help
1652 Say Y here if you want DDC/I2C support for your 3dfx Voodoo3.
1653
1654config FB_VOODOO1
1655 tristate "3Dfx Voodoo Graphics (sst1) support"
1656 depends on FB && PCI
1657 select FB_CFB_FILLRECT
1658 select FB_CFB_COPYAREA
1659 select FB_CFB_IMAGEBLIT
1660 ---help---
1661 Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
1662 Voodoo2 (cvg) based graphics card.
1663
1664 To compile this driver as a module, choose M here: the
1665 module will be called sstfb.
1666
1667 WARNING: Do not use any application that uses the 3D engine
1668 (namely glide) while using this driver.
1669 Please read <file:Documentation/fb/sstfb.txt> for supported
1670 options and other important information.
1671
1672config FB_VT8623
1673 tristate "VIA VT8623 support"
1674 depends on FB && PCI
1675 select FB_CFB_FILLRECT
1676 select FB_CFB_COPYAREA
1677 select FB_CFB_IMAGEBLIT
1678 select FB_TILEBLITTING
1679 select FB_SVGALIB
1680 select VGASTATE
1681 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1682 ---help---
1683 Driver for CastleRock integrated graphics core in the
1684 VIA VT8623 [Apollo CLE266] chipset.
1685
1686config FB_TRIDENT
1687 tristate "Trident/CyberXXX/CyberBlade support"
1688 depends on FB && PCI
1689 select FB_CFB_FILLRECT
1690 select FB_CFB_COPYAREA
1691 select FB_CFB_IMAGEBLIT
1692 ---help---
1693 This is the frame buffer device driver for Trident PCI/AGP chipsets.
1694 Supported chipset families are TGUI 9440/96XX, 3DImage, Blade3D
1695 and Blade XP.
1696 There are also integrated versions of these chips called CyberXXXX,
1697 CyberImage or CyberBlade. These chips are mostly found in laptops
1698 but also on some motherboards including early VIA EPIA motherboards.
1699 For more information, read <file:Documentation/fb/tridentfb.txt>
1700
1701 Say Y if you have such a graphics board.
1702
1703 To compile this driver as a module, choose M here: the
1704 module will be called tridentfb.
1705
1706config FB_ARK
1707 tristate "ARK 2000PV support"
1708 depends on FB && PCI
1709 select FB_CFB_FILLRECT
1710 select FB_CFB_COPYAREA
1711 select FB_CFB_IMAGEBLIT
1712 select FB_TILEBLITTING
1713 select FB_SVGALIB
1714 select VGASTATE
1715 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1716 ---help---
1717 Driver for PCI graphics boards with ARK 2000PV chip
1718 and ICS 5342 RAMDAC.
1719
1720config FB_PM3
1721 tristate "Permedia3 support"
1722 depends on FB && PCI
1723 select FB_CFB_FILLRECT
1724 select FB_CFB_COPYAREA
1725 select FB_CFB_IMAGEBLIT
1726 help
1727 This is the frame buffer device driver for the 3DLabs Permedia3
1728 chipset, used in Formac ProFormance III, 3DLabs Oxygen VX1 &
1729 similar boards, 3DLabs Permedia3 Create!, Appian Jeronimo 2000
1730 and maybe other boards.
1731
1732config FB_CARMINE
1733 tristate "Fujitsu Carmine frame buffer support"
1734 depends on FB && PCI
1735 select FB_CFB_FILLRECT
1736 select FB_CFB_COPYAREA
1737 select FB_CFB_IMAGEBLIT
1738 help
1739 This is the frame buffer device driver for the Fujitsu Carmine chip.
1740 The driver provides two independent frame buffer devices.
1741
1742choice
1743 depends on FB_CARMINE
1744 prompt "DRAM timing"
1745 default FB_CARMINE_DRAM_EVAL
1746
1747config FB_CARMINE_DRAM_EVAL
1748 bool "Eval board timings"
1749 help
1750 Use timings which work on the eval card.
1751
1752config CARMINE_DRAM_CUSTOM
1753 bool "Custom board timings"
1754 help
1755 Use custom board timings.
1756endchoice
1757
1758config FB_AU1100
1759 bool "Au1100 LCD Driver"
1760 depends on (FB = y) && MIPS_ALCHEMY
1761 select FB_CFB_FILLRECT
1762 select FB_CFB_COPYAREA
1763 select FB_CFB_IMAGEBLIT
1764 help
1765 This is the framebuffer driver for the AMD Au1100 SoC. It can drive
1766 various panels and CRTs by passing the kernel command line option
1767 au1100fb:panel=<name>.
1768
1769config FB_AU1200
1770 bool "Au1200/Au1300 LCD Driver"
1771 depends on (FB = y) && MIPS_ALCHEMY
1772 select FB_SYS_FILLRECT
1773 select FB_SYS_COPYAREA
1774 select FB_SYS_IMAGEBLIT
1775 select FB_SYS_FOPS
1776 help
1777 This is the framebuffer driver for the Au1200/Au1300 SoCs.
1778 It can drive various panels and CRTs by passing the kernel command
1779 line option au1200fb:panel=<name>.
1780
1781config FB_VT8500
1782 bool "VIA VT8500 framebuffer support"
1783 depends on (FB = y) && ARM && ARCH_VT8500
1784 select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
1785 select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
1786 select FB_SYS_IMAGEBLIT
1787 select FB_MODE_HELPERS
1788 select VIDEOMODE_HELPERS
1789 help
1790 This is the framebuffer driver for the VIA VT8500 integrated LCD
1791 controller.
1792
1793config FB_WM8505
1794 bool "Wondermedia WM8xxx-series frame buffer support"
1795 depends on (FB = y) && ARM && ARCH_VT8500
1796 select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
1797 select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
1798 select FB_SYS_IMAGEBLIT
1799 select FB_MODE_HELPERS
1800 select VIDEOMODE_HELPERS
1801 help
1802 This is the framebuffer driver for the WonderMedia WM8xxx-series
1803 integrated LCD controller. This driver covers the WM8505, WM8650
1804 and WM8850 SoCs.
1805
1806config FB_WMT_GE_ROPS
1807 bool "VT8500/WM8xxx accelerated raster ops support"
1808 depends on (FB = y) && (FB_VT8500 || FB_WM8505)
1809 default n
1810 help
1811 This adds support for accelerated raster operations on the
1812 VIA VT8500 and Wondermedia 85xx series SoCs.
1813
1814source "drivers/video/geode/Kconfig"
1815
1816config FB_HIT
1817 tristate "HD64461 Frame Buffer support"
1818 depends on FB && HD64461
1819 select FB_CFB_FILLRECT
1820 select FB_CFB_COPYAREA
1821 select FB_CFB_IMAGEBLIT
1822 help
1823 This is the frame buffer device driver for the Hitachi HD64461 LCD
1824 frame buffer card.
1825
1826config FB_PMAG_AA
1827 bool "PMAG-AA TURBOchannel framebuffer support"
1828 depends on (FB = y) && TC
1829 select FB_CFB_FILLRECT
1830 select FB_CFB_COPYAREA
1831 select FB_CFB_IMAGEBLIT
1832 help
1833 Support for the PMAG-AA TURBOchannel framebuffer card (1280x1024x1)
1834 used mainly in the MIPS-based DECstation series.
1835
1836config FB_PMAG_BA
1837 tristate "PMAG-BA TURBOchannel framebuffer support"
1838 depends on FB && TC
1839 select FB_CFB_FILLRECT
1840 select FB_CFB_COPYAREA
1841 select FB_CFB_IMAGEBLIT
1842 help
1843 Support for the PMAG-BA TURBOchannel framebuffer card (1024x864x8)
1844 used mainly in the MIPS-based DECstation series.
1845
1846config FB_PMAGB_B
1847 tristate "PMAGB-B TURBOchannel framebuffer support"
1848 depends on FB && TC
1849 select FB_CFB_FILLRECT
1850 select FB_CFB_COPYAREA
1851 select FB_CFB_IMAGEBLIT
1852 help
1853 Support for the PMAGB-B TURBOchannel framebuffer card used mainly
1854 in the MIPS-based DECstation series. The card is currently only
1855 supported in 1280x1024x8 mode.
1856
1857config FB_MAXINE
1858 bool "Maxine (Personal DECstation) onboard framebuffer support"
1859 depends on (FB = y) && MACH_DECSTATION
1860 select FB_CFB_FILLRECT
1861 select FB_CFB_COPYAREA
1862 select FB_CFB_IMAGEBLIT
1863 help
1864 Support for the onboard framebuffer (1024x768x8) in the Personal
1865 DECstation series (Personal DECstation 5000/20, /25, /33, /50,
1866 Codename "Maxine").
1867
1868config FB_G364
1869 bool "G364 frame buffer support"
1870 depends on (FB = y) && (MIPS_MAGNUM_4000 || OLIVETTI_M700)
1871 select FB_CFB_FILLRECT
1872 select FB_CFB_COPYAREA
1873 select FB_CFB_IMAGEBLIT
1874 help
1875 This is the driver for the G364 framebuffer used in MIPS Magnum
1876 4000 and Olivetti M700-10 systems.
1877
1878config FB_68328
1879 bool "Motorola 68328 native frame buffer support"
1880 depends on (FB = y) && (M68328 || M68EZ328 || M68VZ328)
1881 select FB_CFB_FILLRECT
1882 select FB_CFB_COPYAREA
1883 select FB_CFB_IMAGEBLIT
1884 help
1885 Say Y here if you want to support the built-in frame buffer of
1886 the Motorola 68328 CPU family.
1887
1888config FB_PXA168
1889 tristate "PXA168/910 LCD framebuffer support"
1890 depends on FB && (CPU_PXA168 || CPU_PXA910)
1891 select FB_CFB_FILLRECT
1892 select FB_CFB_COPYAREA
1893 select FB_CFB_IMAGEBLIT
1894 ---help---
1895 Frame buffer driver for the built-in LCD controller in the Marvell
1896 MMP processor.
1897
1898config FB_PXA
1899 tristate "PXA LCD framebuffer support"
1900 depends on FB && ARCH_PXA
1901 select FB_CFB_FILLRECT
1902 select FB_CFB_COPYAREA
1903 select FB_CFB_IMAGEBLIT
1904 ---help---
1905 Frame buffer driver for the built-in LCD controller in the Intel
1906 PXA2x0 processor.
1907
1908 This driver is also available as a module ( = code which can be
1909 inserted and removed from the running kernel whenever you want). The
1910 module will be called pxafb. If you want to compile it as a module,
1911 say M here and read <file:Documentation/kbuild/modules.txt>.
1912
1913 If unsure, say N.
1914
1915config FB_PXA_OVERLAY
1916 bool "Support PXA27x/PXA3xx Overlay(s) as framebuffer"
1917 default n
1918 depends on FB_PXA && (PXA27x || PXA3xx)
1919
1920config FB_PXA_SMARTPANEL
1921 bool "PXA Smartpanel LCD support"
1922 default n
1923 depends on FB_PXA
1924
1925config FB_PXA_PARAMETERS
1926 bool "PXA LCD command line parameters"
1927 default n
1928 depends on FB_PXA
1929 ---help---
1930 Enable the use of kernel command line or module parameters
1931 to configure the physical properties of the LCD panel when
1932 using the PXA LCD driver.
1933
1934 This option allows you to override the panel parameters
1935 supplied by the platform in order to support multiple
1936 different models of flatpanel. If you will only be using a
1937 single model of flatpanel then you can safely leave this
1938 option disabled.
1939
1940 <file:Documentation/fb/pxafb.txt> describes the available parameters.
1941
1942config PXA3XX_GCU
1943 tristate "PXA3xx 2D graphics accelerator driver"
1944 depends on FB_PXA
1945 help
1946 Kernelspace driver for the 2D graphics controller unit (GCU)
1947 found on PXA3xx processors. There is a counterpart driver in the
1948 DirectFB suite, see http://www.directfb.org/
1949
1950 If you compile this as a module, it will be called pxa3xx_gcu.
1951
1952config FB_MBX
1953 tristate "2700G LCD framebuffer support"
1954 depends on FB && ARCH_PXA
1955 select FB_CFB_FILLRECT
1956 select FB_CFB_COPYAREA
1957 select FB_CFB_IMAGEBLIT
1958 ---help---
1959 Framebuffer driver for the Intel 2700G (Marathon) Graphics
1960 Accelerator
1961
1962config FB_MBX_DEBUG
1963 bool "Enable debugging info via debugfs"
1964 depends on FB_MBX && DEBUG_FS
1965 default n
1966 ---help---
1967 Enable this if you want debugging information using the debug
1968 filesystem (debugfs)
1969
1970 If unsure, say N.
1971
1972config FB_FSL_DIU
1973 tristate "Freescale DIU framebuffer support"
1974 depends on FB && FSL_SOC
1975 select FB_MODE_HELPERS
1976 select FB_CFB_FILLRECT
1977 select FB_CFB_COPYAREA
1978 select FB_CFB_IMAGEBLIT
1979 select PPC_LIB_RHEAP
1980 ---help---
1981 Framebuffer driver for the Freescale SoC DIU
1982
1983config FB_W100
1984 tristate "W100 frame buffer support"
1985 depends on FB && ARCH_PXA
1986 select FB_CFB_FILLRECT
1987 select FB_CFB_COPYAREA
1988 select FB_CFB_IMAGEBLIT
1989 ---help---
1990 Frame buffer driver for the w100 as found on the Sharp SL-Cxx series.
1991 It can also drive the w3220 chip found on iPAQ hx4700.
1992
1993 This driver is also available as a module ( = code which can be
1994 inserted and removed from the running kernel whenever you want). The
1995 module will be called w100fb. If you want to compile it as a module,
1996 say M here and read <file:Documentation/kbuild/modules.txt>.
1997
1998 If unsure, say N.
1999
2000config FB_SH_MOBILE_LCDC
2001 tristate "SuperH Mobile LCDC framebuffer support"
2002 depends on FB && (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
2003 select FB_SYS_FILLRECT
2004 select FB_SYS_COPYAREA
2005 select FB_SYS_IMAGEBLIT
2006 select FB_SYS_FOPS
2007 select FB_DEFERRED_IO
2008 select FB_BACKLIGHT
2009 select SH_MIPI_DSI if SH_LCD_MIPI_DSI
2010 ---help---
2011 Frame buffer driver for the on-chip SH-Mobile LCD controller.
2012
2013config FB_SH_MOBILE_HDMI
2014 tristate "SuperH Mobile HDMI controller support"
2015 depends on FB_SH_MOBILE_LCDC
2016 select FB_MODE_HELPERS
2017 select SOUND
2018 select SND
2019 select SND_SOC
2020 ---help---
2021 Driver for the on-chip SH-Mobile HDMI controller.
2022
2023config FB_TMIO
2024 tristate "Toshiba Mobile IO FrameBuffer support"
2025 depends on FB && MFD_CORE
2026 select FB_CFB_FILLRECT
2027 select FB_CFB_COPYAREA
2028 select FB_CFB_IMAGEBLIT
2029 ---help---
2030 Frame buffer driver for the Toshiba Mobile IO integrated as found
2031 on the Sharp SL-6000 series
2032
2033 This driver is also available as a module ( = code which can be
2034 inserted and removed from the running kernel whenever you want). The
2035 module will be called tmiofb. If you want to compile it as a module,
2036 say M here and read <file:Documentation/kbuild/modules.txt>.
2037
2038 If unsure, say N.
2039
2040config FB_TMIO_ACCELL
2041 bool "tmiofb acceleration"
2042 depends on FB_TMIO
2043 default y
2044
2045config FB_S3C
2046 tristate "Samsung S3C framebuffer support"
2047 depends on FB && (CPU_S3C2416 || ARCH_S3C64XX || ARCH_S5P64X0 || \
2048 ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
2049 select FB_CFB_FILLRECT
2050 select FB_CFB_COPYAREA
2051 select FB_CFB_IMAGEBLIT
2052 ---help---
2053 Frame buffer driver for the built-in FB controller in the Samsung
2054 SoC line from the S3C2443 onwards, including the S3C2416, S3C2450,
2055 and the S3C64XX series such as the S3C6400 and S3C6410.
2056
2057 These chips all have the same basic framebuffer design with the
2058 actual capabilities depending on the chip. For instance the S3C6400
2059 and S3C6410 support 4 hardware windows whereas the S3C24XX series
2060 currently only has two.
2061
2062 Currently the support is only for the S3C6400 and S3C6410 SoCs.
2063
2064config FB_S3C_DEBUG_REGWRITE
2065 bool "Debug register writes"
2066 depends on FB_S3C
2067 ---help---
2068 Show all register writes via pr_debug()
2069
2070config FB_S3C2410
2071 tristate "S3C2410 LCD framebuffer support"
2072 depends on FB && ARCH_S3C24XX
2073 select FB_CFB_FILLRECT
2074 select FB_CFB_COPYAREA
2075 select FB_CFB_IMAGEBLIT
2076 ---help---
2077 Frame buffer driver for the built-in LCD controller in the Samsung
2078 S3C2410 processor.
2079
2080 This driver is also available as a module ( = code which can be
2081 inserted and removed from the running kernel whenever you want). The
2082 module will be called s3c2410fb. If you want to compile it as a module,
2083 say M here and read <file:Documentation/kbuild/modules.txt>.
2084
2085 If unsure, say N.
2086config FB_S3C2410_DEBUG
2087 bool "S3C2410 lcd debug messages"
2088 depends on FB_S3C2410
2089 help
2090 Turn on debugging messages. Note that you can enable or disable them
2091 at run time through sysfs.
2092
2093config FB_NUC900
2094 bool "NUC900 LCD framebuffer support"
2095 depends on FB && ARCH_W90X900
2096 select FB_CFB_FILLRECT
2097 select FB_CFB_COPYAREA
2098 select FB_CFB_IMAGEBLIT
2099 ---help---
2100 Frame buffer driver for the built-in LCD controller in the Nuvoton
2101 NUC900 processor
2102
2103config GPM1040A0_320X240
2104 bool "Giantplus Technology GPM1040A0 320x240 Color TFT LCD"
2105 depends on FB_NUC900
2106
2107config FB_SM501
2108 tristate "Silicon Motion SM501 framebuffer support"
2109 depends on FB && MFD_SM501
2110 select FB_CFB_FILLRECT
2111 select FB_CFB_COPYAREA
2112 select FB_CFB_IMAGEBLIT
2113 ---help---
2114 Frame buffer driver for the CRT and LCD controllers in the Silicon
2115 Motion SM501.
2116
2117 This driver is also available as a module ( = code which can be
2118 inserted and removed from the running kernel whenever you want). The
2119 module will be called sm501fb. If you want to compile it as a module,
2120 say M here and read <file:Documentation/kbuild/modules.txt>.
2121
2122 If unsure, say N.
2123
2124config FB_SMSCUFX
2125 tristate "SMSC UFX6000/7000 USB Framebuffer support"
2126 depends on FB && USB
2127 select FB_MODE_HELPERS
2128 select FB_SYS_FILLRECT
2129 select FB_SYS_COPYAREA
2130 select FB_SYS_IMAGEBLIT
2131 select FB_SYS_FOPS
2132 select FB_DEFERRED_IO
2133 ---help---
2134 This is a kernel framebuffer driver for SMSC UFX USB devices.
2135 Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
2136 mplayer -vo fbdev. Supports both UFX6000 (USB 2.0) and UFX7000
2137 (USB 3.0) devices.
2138 To compile as a module, choose M here: the module name is smscufx.
2139
2140config FB_UDL
2141 tristate "Displaylink USB Framebuffer support"
2142 depends on FB && USB
2143 select FB_MODE_HELPERS
2144 select FB_SYS_FILLRECT
2145 select FB_SYS_COPYAREA
2146 select FB_SYS_IMAGEBLIT
2147 select FB_SYS_FOPS
2148 select FB_DEFERRED_IO
2149 ---help---
2150 This is a kernel framebuffer driver for DisplayLink USB devices.
2151 Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
2152 mplayer -vo fbdev. Supports all USB 2.0 era DisplayLink devices.
2153 To compile as a module, choose M here: the module name is udlfb.
2154
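A short usage sketch covering the two USB framebuffer drivers above (the module name comes from the help text; the media file name is just a placeholder, and selecting which /dev/fb* node the client draws to is left to the client's own options):

    sudo modprobe udlfb
    mplayer -vo fbdev movie.avi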
2155config FB_IBM_GXT4500
2156 tristate "Framebuffer support for IBM GXT4000P/4500P/6000P/6500P adaptors"
2157 depends on FB && PPC
2158 select FB_CFB_FILLRECT
2159 select FB_CFB_COPYAREA
2160 select FB_CFB_IMAGEBLIT
2161 ---help---
2162 Say Y here to enable support for the IBM GXT4000P/6000P and
2163 GXT4500P/6500P display adaptor based on Raster Engine RC1000,
2164 found on some IBM System P (pSeries) machines. This driver
2165 doesn't use Geometry Engine GT1000.
2166
2167config FB_PS3
2168 tristate "PS3 GPU framebuffer driver"
2169 depends on FB && PS3_PS3AV
2170 select FB_SYS_FILLRECT
2171 select FB_SYS_COPYAREA
2172 select FB_SYS_IMAGEBLIT
2173 select FB_SYS_FOPS
2174 select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
2175 ---help---
2176 Include support for the virtual frame buffer in the PS3 platform.
2177
2178config FB_PS3_DEFAULT_SIZE_M
2179 int "PS3 default frame buffer size (in MiB)"
2180 depends on FB_PS3
2181 default 9
2182 ---help---
2183 This is the default size (in MiB) of the virtual frame buffer in
2184 the PS3.
2185 The default value can be overridden on the kernel command line
2186 using the "ps3fb" option (e.g. "ps3fb=9M");
2187
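For illustration, the override is simply appended to whatever boot parameters are already in use (everything before it here is a placeholder):

    <existing boot parameters> ps3fb=18M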
2188config FB_XILINX
2189 tristate "Xilinx frame buffer support"
2190 depends on FB && (XILINX_VIRTEX || MICROBLAZE || ARCH_ZYNQ)
2191 select FB_CFB_FILLRECT
2192 select FB_CFB_COPYAREA
2193 select FB_CFB_IMAGEBLIT
2194 ---help---
2195 Include support for the Xilinx ML300/ML403 reference design
2196 framebuffer. ML300 carries a 640*480 LCD display on the board,
2197 ML403 uses a standard DB15 VGA connector.
2198
2199config FB_GOLDFISH
2200 tristate "Goldfish Framebuffer"
2201 depends on FB && HAS_DMA
2202 select FB_CFB_FILLRECT
2203 select FB_CFB_COPYAREA
2204 select FB_CFB_IMAGEBLIT
2205 ---help---
2206 Framebuffer driver for Goldfish Virtual Platform
2207
2208config FB_COBALT
2209 tristate "Cobalt server LCD frame buffer support"
2210 depends on FB && (MIPS_COBALT || MIPS_SEAD3)
2211
2212config FB_SH7760
2213 bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
2214 depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
2215 || CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
2216 select FB_CFB_FILLRECT
2217 select FB_CFB_COPYAREA
2218 select FB_CFB_IMAGEBLIT
2219 ---help---
2220 Support for the SH7760/SH7763/SH7720/SH7721 integrated
2221 (D)STN/TFT LCD Controller.
2222 Supports display resolutions up to 1024x1024 pixels, grayscale and
2223 color operation with depths from 1 to 8 bpp monochrome and 8, 15 or
2224 16 bpp color, and 90 degrees clockwise display rotation for panels
2225 with a horizontal resolution of 320 pixels or less.
2226
2227config FB_DA8XX
2228 tristate "DA8xx/OMAP-L1xx/AM335x Framebuffer support"
2229 depends on FB && (ARCH_DAVINCI_DA8XX || SOC_AM33XX)
2230 select FB_CFB_FILLRECT
2231 select FB_CFB_COPYAREA
2232 select FB_CFB_IMAGEBLIT
2233 select FB_CFB_REV_PIXELS_IN_BYTE
2234 select FB_MODE_HELPERS
2235 select VIDEOMODE_HELPERS
2236 ---help---
2237 This is the frame buffer device driver for the TI LCD controller
2238 found on DA8xx/OMAP-L1xx/AM335x SoCs.
2239 If unsure, say N.
2240
2241config FB_VIRTUAL
2242 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
2243 depends on FB
2244 select FB_SYS_FILLRECT
2245 select FB_SYS_COPYAREA
2246 select FB_SYS_IMAGEBLIT
2247 select FB_SYS_FOPS
2248 ---help---
2249 This is a `virtual' frame buffer device. It operates on a chunk of
2250 unswappable kernel memory instead of on the memory of a graphics
2251 board. This means you cannot see any output sent to this frame
2252 buffer device, while it does consume precious memory. The main use
2253 of this frame buffer device is testing and debugging the frame
2254 buffer subsystem. Do NOT enable it for normal systems! To protect
2255 the innocent, it has to be enabled explicitly at boot time using the
2256 kernel option `video=vfb:'.
2257
2258 To compile this driver as a module, choose M here: the
2259 module will be called vfb. In order to load it, you must use
2260 the vfb_enable=1 option.
2261
2262 If unsure, say N.
2263
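Putting the two activation mechanisms from the help text together (a sketch):

    # built-in driver: enable the virtual frame buffer on the kernel command line
    video=vfb:
    # modular driver: it stays inert unless explicitly enabled at load time
    sudo modprobe vfb vfb_enable=1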
2264config XEN_FBDEV_FRONTEND
2265 tristate "Xen virtual frame buffer support"
2266 depends on FB && XEN
2267 select FB_SYS_FILLRECT
2268 select FB_SYS_COPYAREA
2269 select FB_SYS_IMAGEBLIT
2270 select FB_SYS_FOPS
2271 select FB_DEFERRED_IO
2272 select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
2273 select XEN_XENBUS_FRONTEND
2274 default y
2275 help
2276 This driver implements the front-end of the Xen virtual
2277 frame buffer driver. It communicates with a back-end
2278 in another domain.
2279
2280config FB_METRONOME
2281 tristate "E-Ink Metronome/8track controller support"
2282 depends on FB
2283 select FB_SYS_FILLRECT
2284 select FB_SYS_COPYAREA
2285 select FB_SYS_IMAGEBLIT
2286 select FB_SYS_FOPS
2287 select FB_DEFERRED_IO
2288 help
2289 This driver implements support for the E-Ink Metronome
2290 controller. The pre-release name for this device was 8track,
2291 and some vendors may also have referred to it as PVI-nnnn.
2292
2293config FB_MB862XX
2294 tristate "Fujitsu MB862xx GDC support"
2295 depends on FB
2296 depends on PCI || (OF && PPC)
2297 select FB_CFB_FILLRECT
2298 select FB_CFB_COPYAREA
2299 select FB_CFB_IMAGEBLIT
2300 ---help---
2301 Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers.
2302
2303choice
2304 prompt "GDC variant"
2305 depends on FB_MB862XX
2306
2307config FB_MB862XX_PCI_GDC
2308 bool "Carmine/Coral-P(A) GDC"
2309 depends on PCI
2310 ---help---
2311 This enables framebuffer support for Fujitsu Carmine/Coral-P(A)
2312 PCI graphics controller devices.
2313
2314config FB_MB862XX_LIME
2315 bool "Lime GDC"
2316 depends on OF && PPC
2317 select FB_FOREIGN_ENDIAN
2318 select FB_LITTLE_ENDIAN
2319 ---help---
2320 Framebuffer support for Fujitsu Lime GDC on host CPU bus.
2321
2322endchoice
2323
2324config FB_MB862XX_I2C
2325 bool "Support I2C bus on MB862XX GDC"
2326 depends on FB_MB862XX && I2C
2327 default y
2328 help
2329 Selecting this option adds Coral-P(A)/Lime GDC I2C bus adapter
2330 driver to support accessing I2C devices on controller's I2C bus.
2331 These are usually some video decoder chips.
2332
2333config FB_EP93XX
2334 tristate "EP93XX frame buffer support"
2335 depends on FB && ARCH_EP93XX
2336 select FB_CFB_FILLRECT
2337 select FB_CFB_COPYAREA
2338 select FB_CFB_IMAGEBLIT
2339 ---help---
2340 Framebuffer driver for the Cirrus Logic EP93XX series of processors.
2341 This driver is also available as a module. The module will be called
2342 ep93xx-fb.
2343
2344config FB_PRE_INIT_FB
2345 bool "Don't reinitialize, use bootloader's GDC/Display configuration"
2346 depends on FB && FB_MB862XX_LIME
2347 ---help---
2348 Select this option if display contents should be inherited as set by
2349 the bootloader.
2350
2351config FB_MSM
2352 tristate "MSM Framebuffer support"
2353 depends on FB && ARCH_MSM
2354 select FB_CFB_FILLRECT
2355 select FB_CFB_COPYAREA
2356 select FB_CFB_IMAGEBLIT
2357
2358config FB_MX3
2359 tristate "MX3 Framebuffer support"
2360 depends on FB && MX3_IPU
2361 select FB_CFB_FILLRECT
2362 select FB_CFB_COPYAREA
2363 select FB_CFB_IMAGEBLIT
2364 default y
2365 help
2366 This is a framebuffer device for the i.MX31 LCD Controller. So
2367 far only synchronous displays are supported. If you plan to use
2368 an LCD display with your i.MX31 system, say Y here.
2369
2370config FB_BROADSHEET
2371 tristate "E-Ink Broadsheet/Epson S1D13521 controller support"
2372 depends on FB
2373 select FB_SYS_FILLRECT
2374 select FB_SYS_COPYAREA
2375 select FB_SYS_IMAGEBLIT
2376 select FB_SYS_FOPS
2377 select FB_DEFERRED_IO
2378 help
2379 This driver implements support for the E-Ink Broadsheet
2380 controller. The release name for this device was Epson S1D13521,
2381 and it may also have been sold under other names when coupled with
2382 a bridge adapter.
2383
2384config FB_AUO_K190X
2385 tristate "AUO-K190X EPD controller support"
2386 depends on FB
2387 select FB_SYS_FILLRECT
2388 select FB_SYS_COPYAREA
2389 select FB_SYS_IMAGEBLIT
2390 select FB_SYS_FOPS
2391 select FB_DEFERRED_IO
2392 help
2393 Provides support for epaper controllers from the K190X series
2394 of AUO. These controllers can be used to drive epaper displays
2395 from Sipix.
2396
2397 This option enables the common support, shared by the individual
2398 controller drivers. You will also have to enable the driver
2399 for the controller type used in your device.
2400
2401config FB_AUO_K1900
2402 tristate "AUO-K1900 EPD controller support"
2403 depends on FB && FB_AUO_K190X
2404 help
2405 This driver implements support for the AUO K1900 epd-controller.
2406 This controller can drive Sipix epaper displays but can only do
2407 serial updates, reducing the number of possible frames per second.
2408
2409config FB_AUO_K1901
2410 tristate "AUO-K1901 EPD controller support"
2411 depends on FB && FB_AUO_K190X
2412 help
2413 This driver implements support for the AUO K1901 epd-controller.
2414 This controller can drive Sipix epaper displays and supports
2415 concurrent updates, making higher frames per second possible.
2416
2417config FB_JZ4740
2418 tristate "JZ4740 LCD framebuffer support"
2419 depends on FB && MACH_JZ4740
2420 select FB_SYS_FILLRECT
2421 select FB_SYS_COPYAREA
2422 select FB_SYS_IMAGEBLIT
2423 help
2424 Framebuffer support for the JZ4740 SoC.
2425
2426config FB_MXS
2427 tristate "MXS LCD framebuffer support"
2428 depends on FB && ARCH_MXS
2429 select FB_CFB_FILLRECT
2430 select FB_CFB_COPYAREA
2431 select FB_CFB_IMAGEBLIT
2432 select FB_MODE_HELPERS
2433 select VIDEOMODE_HELPERS
2434 help
2435 Framebuffer support for the MXS SoC.
2436
2437config FB_PUV3_UNIGFX
2438 tristate "PKUnity v3 Unigfx framebuffer support"
2439 depends on FB && UNICORE32 && ARCH_PUV3
2440 select FB_SYS_FILLRECT
2441 select FB_SYS_COPYAREA
2442 select FB_SYS_IMAGEBLIT
2443 select FB_SYS_FOPS
2444 help
2445 Choose this option if you want to use the Unigfx device as a
2446 framebuffer device, without PCI or AGP support.
2447
2448config FB_HYPERV
2449 tristate "Microsoft Hyper-V Synthetic Video support"
2450 depends on FB && HYPERV
2451 select FB_CFB_FILLRECT
2452 select FB_CFB_COPYAREA
2453 select FB_CFB_IMAGEBLIT
2454 help
2455 This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
2456
2457config FB_SIMPLE
2458 bool "Simple framebuffer support"
2459 depends on (FB = y)
2460 select FB_CFB_FILLRECT
2461 select FB_CFB_COPYAREA
2462 select FB_CFB_IMAGEBLIT
2463 help
2464 Say Y if you want support for a simple frame-buffer.
2465
2466 This driver assumes that the display hardware has been initialized
2467 before the kernel boots, and the kernel will simply render to the
2468 pre-allocated frame buffer surface.
2469
2470 Configuration re: surface address, size, and format must be provided
2471 through device tree, or plain old platform data.
2472
2473source "drivers/video/omap/Kconfig"
2474source "drivers/video/omap2/Kconfig"
2475source "drivers/video/exynos/Kconfig"
2476source "drivers/video/mmp/Kconfig"
2477source "drivers/video/backlight/Kconfig"
2478
2479 44 if VT
2480 45 source "drivers/video/console/Kconfig"
2481 46 endif
2482 47
2483 48 if FB || SGI_NEWPORT_CONSOLE
2484 49 source "drivers/video/logo/Kconfig"
2485endif
2486 50
2487config FB_SH_MOBILE_MERAM
51endif
2488 tristate "SuperH Mobile MERAM read ahead support"
2489 depends on (SUPERH || ARCH_SHMOBILE)
2490 select GENERIC_ALLOCATOR
2491 ---help---
2492 Enable MERAM support for the SuperH controller.
2493
2494 This will allow for caching of the framebuffer to provide more
2495 reliable access under heavy main memory bus traffic situations.
2496 Up to 4 memory channels can be configured, allowing 4 RGB or
2497 2 YCbCr framebuffers to be configured.
2498 52
2499config FB_SSD1307
2500 tristate "Solomon SSD1307 framebuffer support"
2501 depends on FB && I2C
2502 depends on OF
2503 depends on GPIOLIB
2504 select FB_SYS_FOPS
2505 select FB_SYS_FILLRECT
2506 select FB_SYS_COPYAREA
2507 select FB_SYS_IMAGEBLIT
2508 select FB_DEFERRED_IO
2509 select PWM
2510 help
2511 This driver implements support for the Solomon SSD1307
2512 OLED controller over I2C.
2513 53
2514 54 endmenu
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 1be26fe10592..9ad3c17d6456 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -1,175 +1,11 @@
1# Makefile for the Linux video drivers.
2# 5 Aug 1999, James Simmons, <mailto:jsimmons@users.sf.net>
3# Rewritten to use lists instead of if-statements.
4
5# Each configuration option enables a list of files.
6
7 1 obj-$(CONFIG_VGASTATE) += vgastate.o
8 2 obj-$(CONFIG_HDMI) += hdmi.o
9obj-y += fb_notify.o
10obj-$(CONFIG_FB) += fb.o
11fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
12 modedb.o fbcvt.o
13fb-objs := $(fb-y)
14 3
15 4 obj-$(CONFIG_VT) += console/
16 5 obj-$(CONFIG_LOGO) += logo/
17 6 obj-y += backlight/
18 7
19obj-$(CONFIG_EXYNOS_VIDEO) += exynos/
8obj-y += fbdev/
20
21obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
22obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
23obj-$(CONFIG_FB_CFB_IMAGEBLIT) += cfbimgblt.o
24obj-$(CONFIG_FB_SYS_FILLRECT) += sysfillrect.o
25obj-$(CONFIG_FB_SYS_COPYAREA) += syscopyarea.o
26obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o
27obj-$(CONFIG_FB_SYS_FOPS) += fb_sys_fops.o
28obj-$(CONFIG_FB_SVGALIB) += svgalib.o
29obj-$(CONFIG_FB_MACMODES) += macmodes.o
30obj-$(CONFIG_FB_DDC) += fb_ddc.o
31obj-$(CONFIG_FB_DEFERRED_IO) += fb_defio.o
32obj-$(CONFIG_FB_WMT_GE_ROPS) += wmt_ge_rops.o
33
34# Hardware specific drivers go first
35obj-$(CONFIG_FB_AMIGA) += amifb.o c2p_planar.o
36obj-$(CONFIG_FB_ARC) += arcfb.o
37obj-$(CONFIG_FB_CLPS711X) += clps711xfb.o
38obj-$(CONFIG_FB_CYBER2000) += cyber2000fb.o
39obj-$(CONFIG_FB_GRVGA) += grvga.o
40obj-$(CONFIG_FB_PM2) += pm2fb.o
41obj-$(CONFIG_FB_PM3) += pm3fb.o
42
43obj-$(CONFIG_FB_I740) += i740fb.o
44obj-$(CONFIG_FB_MATROX) += matrox/
45obj-$(CONFIG_FB_RIVA) += riva/
46obj-$(CONFIG_FB_NVIDIA) += nvidia/
47obj-$(CONFIG_FB_ATY) += aty/ macmodes.o
48obj-$(CONFIG_FB_ATY128) += aty/ macmodes.o
49obj-$(CONFIG_FB_RADEON) += aty/
50obj-$(CONFIG_FB_SIS) += sis/
51obj-$(CONFIG_FB_VIA) += via/
52obj-$(CONFIG_FB_KYRO) += kyro/
53obj-$(CONFIG_FB_SAVAGE) += savage/
54obj-$(CONFIG_FB_GEODE) += geode/
55obj-$(CONFIG_FB_MBX) += mbx/
56obj-$(CONFIG_FB_NEOMAGIC) += neofb.o
57obj-$(CONFIG_FB_3DFX) += tdfxfb.o
58obj-$(CONFIG_FB_CONTROL) += controlfb.o
59obj-$(CONFIG_FB_PLATINUM) += platinumfb.o
60obj-$(CONFIG_FB_VALKYRIE) += valkyriefb.o
61obj-$(CONFIG_FB_CT65550) += chipsfb.o
62obj-$(CONFIG_FB_IMSTT) += imsttfb.o
63obj-$(CONFIG_FB_FM2) += fm2fb.o
64obj-$(CONFIG_FB_VT8623) += vt8623fb.o
65obj-$(CONFIG_FB_TRIDENT) += tridentfb.o
66obj-$(CONFIG_FB_LE80578) += vermilion/
67obj-$(CONFIG_FB_S3) += s3fb.o
68obj-$(CONFIG_FB_ARK) += arkfb.o
69obj-$(CONFIG_FB_STI) += stifb.o
70obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o
71obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o
72obj-$(CONFIG_FB_CG3) += cg3.o sbuslib.o
73obj-$(CONFIG_FB_BW2) += bw2.o sbuslib.o
74obj-$(CONFIG_FB_CG14) += cg14.o sbuslib.o
75obj-$(CONFIG_FB_P9100) += p9100.o sbuslib.o
76obj-$(CONFIG_FB_TCX) += tcx.o sbuslib.o
77obj-$(CONFIG_FB_LEO) += leo.o sbuslib.o
78obj-$(CONFIG_FB_ACORN) += acornfb.o
79obj-$(CONFIG_FB_ATARI) += atafb.o c2p_iplan2.o atafb_mfb.o \
80 atafb_iplan2p2.o atafb_iplan2p4.o atafb_iplan2p8.o
81obj-$(CONFIG_FB_MAC) += macfb.o
82obj-$(CONFIG_FB_HECUBA) += hecubafb.o
83obj-$(CONFIG_FB_N411) += n411.o
84obj-$(CONFIG_FB_HGA) += hgafb.o
85obj-$(CONFIG_FB_XVR500) += sunxvr500.o
86obj-$(CONFIG_FB_XVR2500) += sunxvr2500.o
87obj-$(CONFIG_FB_XVR1000) += sunxvr1000.o
88obj-$(CONFIG_FB_IGA) += igafb.o
89obj-$(CONFIG_FB_APOLLO) += dnfb.o
90obj-$(CONFIG_FB_Q40) += q40fb.o
91obj-$(CONFIG_FB_TGA) += tgafb.o
92obj-$(CONFIG_FB_HP300) += hpfb.o
93obj-$(CONFIG_FB_G364) += g364fb.o
94obj-$(CONFIG_FB_EP93XX) += ep93xx-fb.o
95obj-$(CONFIG_FB_SA1100) += sa1100fb.o
96obj-$(CONFIG_FB_HIT) += hitfb.o
97obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o
98obj-$(CONFIG_FB_PVR2) += pvr2fb.o
99obj-$(CONFIG_FB_VOODOO1) += sstfb.o
100obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
101obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o
102obj-$(CONFIG_FB_68328) += 68328fb.o
103obj-$(CONFIG_FB_GBE) += gbefb.o
104obj-$(CONFIG_FB_CIRRUS) += cirrusfb.o
105obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o
106obj-$(CONFIG_FB_PXA) += pxafb.o
107obj-$(CONFIG_FB_PXA168) += pxa168fb.o
108obj-$(CONFIG_PXA3XX_GCU) += pxa3xx-gcu.o
109obj-$(CONFIG_MMP_DISP) += mmp/
110obj-$(CONFIG_FB_W100) += w100fb.o
111obj-$(CONFIG_FB_TMIO) += tmiofb.o
112obj-$(CONFIG_FB_AU1100) += au1100fb.o
113obj-$(CONFIG_FB_AU1200) += au1200fb.o
114obj-$(CONFIG_FB_VT8500) += vt8500lcdfb.o
115obj-$(CONFIG_FB_WM8505) += wm8505fb.o
116obj-$(CONFIG_FB_PMAG_AA) += pmag-aa-fb.o
117obj-$(CONFIG_FB_PMAG_BA) += pmag-ba-fb.o
118obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o
119obj-$(CONFIG_FB_MAXINE) += maxinefb.o
120obj-$(CONFIG_FB_METRONOME) += metronomefb.o
121obj-$(CONFIG_FB_BROADSHEET) += broadsheetfb.o
122obj-$(CONFIG_FB_AUO_K190X) += auo_k190x.o
123obj-$(CONFIG_FB_AUO_K1900) += auo_k1900fb.o
124obj-$(CONFIG_FB_AUO_K1901) += auo_k1901fb.o
125obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o
126obj-$(CONFIG_FB_SH7760) += sh7760fb.o
127obj-$(CONFIG_FB_IMX) += imxfb.o
128obj-$(CONFIG_FB_S3C) += s3c-fb.o
129obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
130obj-$(CONFIG_FB_FSL_DIU) += fsl-diu-fb.o
131obj-$(CONFIG_FB_COBALT) += cobalt_lcdfb.o
132obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
133obj-$(CONFIG_FB_PS3) += ps3fb.o
134obj-$(CONFIG_FB_SM501) += sm501fb.o
135obj-$(CONFIG_FB_UDL) += udlfb.o
136obj-$(CONFIG_FB_SMSCUFX) += smscufx.o
137obj-$(CONFIG_FB_XILINX) += xilinxfb.o
138obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o
139obj-$(CONFIG_FB_SH_MOBILE_HDMI) += sh_mobile_hdmi.o
140obj-$(CONFIG_FB_SH_MOBILE_MERAM) += sh_mobile_meram.o
141obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
142obj-$(CONFIG_FB_OMAP) += omap/
143obj-y += omap2/
144obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
145obj-$(CONFIG_FB_CARMINE) += carminefb.o
146obj-$(CONFIG_FB_MB862XX) += mb862xx/
147obj-$(CONFIG_FB_MSM) += msm/
148obj-$(CONFIG_FB_NUC900) += nuc900fb.o
149obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
150obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
151obj-$(CONFIG_FB_HYPERV) += hyperv_fb.o
152obj-$(CONFIG_FB_OPENCORES) += ocfb.o
153
154# Platform or fallback drivers go here
155obj-$(CONFIG_FB_UVESA) += uvesafb.o
156obj-$(CONFIG_FB_VESA) += vesafb.o
157obj-$(CONFIG_FB_EFI) += efifb.o
158obj-$(CONFIG_FB_VGA16) += vga16fb.o
159obj-$(CONFIG_FB_OF) += offb.o
160obj-$(CONFIG_FB_BF537_LQ035) += bf537-lq035.o
161obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o
162obj-$(CONFIG_FB_BFIN_LQ035Q1) += bfin-lq035q1-fb.o
163obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
164obj-$(CONFIG_FB_BFIN_7393) += bfin_adv7393fb.o
165obj-$(CONFIG_FB_MX3) += mx3fb.o
166obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
167obj-$(CONFIG_FB_MXS) += mxsfb.o
168obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o
169obj-$(CONFIG_FB_SIMPLE) += simplefb.o
170
171# the test framebuffer is last
172obj-$(CONFIG_FB_VIRTUAL) += vfb.o
173 9
174 10 obj-$(CONFIG_VIDEOMODE_HELPERS) += display_timing.o videomode.o
175 11 ifeq ($(CONFIG_OF),y)
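The net effect of the Makefile change above is that the per-driver build rules now live under drivers/video/fbdev/, with the top-level video Makefile merely descending into that directory through "obj-y += fbdev/". A configured tree can rebuild just the relocated objects with kbuild's directory target (a sketch):

    make drivers/video/fbdev/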
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 5f65ca3d8564..026fd1215933 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -46,7 +46,7 @@
46 46
47 47 #include <asm/io.h>
48 48
49#include "../sticore.h"
49#include "../fbdev/sticore.h"
50 50
51 51 /* switching to graphics mode */
52 52 #define BLANK 0
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index cecd3de01c24..7da1ad03acb5 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -28,7 +28,7 @@
28 28 #include <asm/cacheflush.h>
29 29 #include <asm/grfioctl.h>
30 30
31#include "../sticore.h"
31#include "../fbdev/sticore.h"
32 32
33 33 #define STI_DRIVERVERSION "Version 0.9b"
34 34
diff --git a/drivers/video/68328fb.c b/drivers/video/fbdev/68328fb.c
index 552258c8f99d..552258c8f99d 100644
--- a/drivers/video/68328fb.c
+++ b/drivers/video/fbdev/68328fb.c
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
new file mode 100644
index 000000000000..e1f47272fdea
--- /dev/null
+++ b/drivers/video/fbdev/Kconfig
@@ -0,0 +1,2474 @@
1#
2# fbdev configuration
3#
4
5menuconfig FB
6 tristate "Support for frame buffer devices"
7 ---help---
8 The frame buffer device provides an abstraction for the graphics
9 hardware. It represents the frame buffer of some video hardware and
10 allows application software to access the graphics hardware through
11 a well-defined interface, so the software doesn't need to know
12 anything about the low-level (hardware register) stuff.
13
14 Frame buffer devices work identically across the different
15 architectures supported by Linux and make the implementation of
16 application programs easier and more portable; at this point, an X
17 server exists which uses the frame buffer device exclusively.
18 On several non-X86 architectures, the frame buffer device is the
19 only way to use the graphics hardware.
20
21 The device is accessed through special device nodes, usually located
22 in the /dev directory, i.e. /dev/fb*.
23
24 You need a utility program called fbset to make full use of frame
25 buffer devices. Please read <file:Documentation/fb/framebuffer.txt>
26 and the Framebuffer-HOWTO at
27 <http://www.munted.org.uk/programming/Framebuffer-HOWTO-1.3.html> for more
28 information.
29
30 Say Y here and to the driver for your graphics board below if you
31 are compiling a kernel for a non-x86 architecture.
32
33 If you are compiling for the x86 architecture, you can say Y if you
34 want to play with it, but it is not essential. Please note that
35 running graphical applications that directly touch the hardware
36 (e.g. an accelerated X server) and that are not frame buffer
37 device-aware may cause unexpected results. If unsure, say N.
38
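As a hedged example of the fbset utility mentioned above (it assumes fbset is installed, /dev/fb0 exists, and the named mode is defined in /etc/fb.modes):

    fbset -i
    fbset 640x480-60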
39config FIRMWARE_EDID
40 bool "Enable firmware EDID"
41 depends on FB
42 default n
43 ---help---
44 This enables access to the EDID transferred from the firmware.
45 On the i386, this is from the Video BIOS. Enable this if DDC/I2C
46 transfers do not work for your driver and if you are using
47 nvidiafb, i810fb or savagefb.
48
49 In general, choosing Y for this option is safe. If you
50 experience extremely long delays while booting before you get
51 something on your display, try setting this to N. Matrox cards in
52 combination with certain motherboards and monitors are known to
53 suffer from this problem.
54
55config FB_DDC
56 tristate
57 depends on FB
58 select I2C_ALGOBIT
59 select I2C
60 default n
61
62config FB_BOOT_VESA_SUPPORT
63 bool
64 depends on FB
65 default n
66 ---help---
67 If true, at least one selected framebuffer driver can take advantage
68 of VESA video modes set at an early boot stage via the vga= parameter.
69
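For illustration, the vga= parameter mentioned above takes a VESA mode number on the kernel command line; one commonly used value (not guaranteed to be accepted by every BIOS) is

    vga=0x317

which requests the 1024x768, 16 bpp VESA mode. "vga=ask" can be used instead to choose a mode interactively at boot.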
70config FB_CFB_FILLRECT
71 tristate
72 depends on FB
73 default n
74 ---help---
75 Include the cfb_fillrect function for generic software rectangle
76 filling. This is used by drivers that don't provide their own
77 (accelerated) version.
78
79config FB_CFB_COPYAREA
80 tristate
81 depends on FB
82 default n
83 ---help---
84 Include the cfb_copyarea function for generic software area copying.
85 This is used by drivers that don't provide their own (accelerated)
86 version.
87
88config FB_CFB_IMAGEBLIT
89 tristate
90 depends on FB
91 default n
92 ---help---
93 Include the cfb_imageblit function for generic software image
94 blitting. This is used by drivers that don't provide their own
95 (accelerated) version.
96
97config FB_CFB_REV_PIXELS_IN_BYTE
98 bool
99 depends on FB
100 default n
101 ---help---
102 Allow generic frame-buffer functions to work on displays with 1, 2
103 and 4 bits per pixel depths whose pixel order within a byte is the
104 reverse of the byte order within a long.
105
106config FB_SYS_FILLRECT
107 tristate
108 depends on FB
109 default n
110 ---help---
111 Include the sys_fillrect function for generic software rectangle
112 filling. This is used by drivers that don't provide their own
113 (accelerated) version and the framebuffer is in system RAM.
114
115config FB_SYS_COPYAREA
116 tristate
117 depends on FB
118 default n
119 ---help---
120 Include the sys_copyarea function for generic software area copying.
121 This is used by drivers that don't provide their own (accelerated)
122 version and the framebuffer is in system RAM.
123
124config FB_SYS_IMAGEBLIT
125 tristate
126 depends on FB
127 default n
128 ---help---
129 Include the sys_imageblit function for generic software image
130 blitting. This is used by drivers that don't provide their own
131 (accelerated) version and the framebuffer is in system RAM.
132
133menuconfig FB_FOREIGN_ENDIAN
134 bool "Framebuffer foreign endianness support"
135 depends on FB
136 ---help---
137 This menu will let you enable support for the framebuffers with
138 non-native endianness (e.g. Little-Endian framebuffer on a
139 Big-Endian machine). Most probably you don't have such hardware,
140 so it's safe to say "n" here.
141
142choice
143 prompt "Choice endianness support"
144 depends on FB_FOREIGN_ENDIAN
145
146config FB_BOTH_ENDIAN
147 bool "Support for Big- and Little-Endian framebuffers"
148
149config FB_BIG_ENDIAN
150 bool "Support for Big-Endian framebuffers only"
151
152config FB_LITTLE_ENDIAN
153 bool "Support for Little-Endian framebuffers only"
154
155endchoice
156
157config FB_SYS_FOPS
158 tristate
159 depends on FB
160 default n
161
162config FB_DEFERRED_IO
163 bool
164 depends on FB
165
166config FB_HECUBA
167 tristate
168 depends on FB
169 depends on FB_DEFERRED_IO
170
171config FB_SVGALIB
172 tristate
173 depends on FB
174 default n
175 ---help---
176 Common utility functions useful to fbdev drivers of VGA-based
177 cards.
178
179config FB_MACMODES
180 tristate
181 depends on FB
182 default n
183
184config FB_BACKLIGHT
185 bool
186 depends on FB
187 select BACKLIGHT_LCD_SUPPORT
188 select BACKLIGHT_CLASS_DEVICE
189 default n
190
191config FB_MODE_HELPERS
192 bool "Enable Video Mode Handling Helpers"
193 depends on FB
194 default n
195 ---help---
196 This enables functions for handling video modes using the
197 Generalized Timing Formula and the EDID parser. A few drivers rely
198 on this feature such as the radeonfb, rivafb, and the i810fb. If
199 your driver does not take advantage of this feature, choosing Y will
200 just increase the kernel size by about 5K.
201
202config FB_TILEBLITTING
203 bool "Enable Tile Blitting Support"
204 depends on FB
205 default n
206 ---help---
207 This enables tile blitting. Tile blitting is a drawing technique
208 where the screen is divided into rectangular sections (tiles), whereas
209 the standard blitting divides the screen into pixels. Because the
210 default drawing element is a tile, drawing functions will be passed
211 parameters in terms of number of tiles instead of number of pixels.
212 For example, to draw a single character, instead of using bitmaps,
213 an index to an array of bitmaps will be used. To clear or move a
214 rectangular section of a screen, the rectangle will be described in
215 terms of number of tiles in the x- and y-axis.
216
217 This is particularly important to one driver, matroxfb. If
218 unsure, say N.
219
220comment "Frame buffer hardware drivers"
221 depends on FB
222
223config FB_GRVGA
224 tristate "Aeroflex Gaisler framebuffer support"
225 depends on FB && SPARC
226 select FB_CFB_FILLRECT
227 select FB_CFB_COPYAREA
228 select FB_CFB_IMAGEBLIT
229 ---help---
230 This enables support for the SVGACTRL framebuffer in the GRLIB IP library from Aeroflex Gaisler.
231
232config FB_CIRRUS
233 tristate "Cirrus Logic support"
234 depends on FB && (ZORRO || PCI)
235 select FB_CFB_FILLRECT
236 select FB_CFB_COPYAREA
237 select FB_CFB_IMAGEBLIT
238 ---help---
239 This enables support for Cirrus Logic GD542x/543x based boards on
240 Amiga: SD64, Piccolo, Picasso II/II+, Picasso IV, or EGS Spectrum.
241
242 If you have a PCI-based system, this enables support for these
243 chips: GD-543x, GD-544x, GD-5480.
244
245 Please read the file <file:Documentation/fb/cirrusfb.txt>.
246
247 Say N unless you have such a graphics board or plan to get one
248 before you next recompile the kernel.
249
250config FB_PM2
251 tristate "Permedia2 support"
252 depends on FB && ((AMIGA && BROKEN) || PCI)
253 select FB_CFB_FILLRECT
254 select FB_CFB_COPYAREA
255 select FB_CFB_IMAGEBLIT
256 help
257 This is the frame buffer device driver for cards based on
258 the 3D Labs Permedia, Permedia 2 and Permedia 2V chips.
259 The driver was tested on the following cards:
260 Diamond FireGL 1000 PRO AGP
261 ELSA Gloria Synergy PCI
262 Appian Jeronimo PRO (both heads) PCI
263 3DLabs Oxygen ACX aka EONtronics Picasso P2 PCI
264 Techsource Raptor GFX-8P (aka Sun PGX-32) on SPARC
265 ASK Graphic Blaster Exxtreme AGP
266
267 To compile this driver as a module, choose M here: the
268 module will be called pm2fb.
269
270config FB_PM2_FIFO_DISCONNECT
271 bool "enable FIFO disconnect feature"
272 depends on FB_PM2 && PCI
273 help
274 Support the Permedia2 FIFO disconnect feature.
275
276config FB_ARMCLCD
277 tristate "ARM PrimeCell PL110 support"
278 depends on ARM || ARM64 || COMPILE_TEST
279 depends on FB && ARM_AMBA
280 select FB_CFB_FILLRECT
281 select FB_CFB_COPYAREA
282 select FB_CFB_IMAGEBLIT
283 help
284 This framebuffer device driver is for the ARM PrimeCell PL110
285 Colour LCD controller. ARM PrimeCells provide the building
286 blocks for System on a Chip devices.
287
288 If you want to compile this as a module (=code which can be
289 inserted into and removed from the running kernel), say M
290 here and read <file:Documentation/kbuild/modules.txt>. The module
291 will be called amba-clcd.
292
293config FB_ACORN
294 bool "Acorn VIDC support"
295 depends on (FB = y) && ARM && ARCH_ACORN
296 select FB_CFB_FILLRECT
297 select FB_CFB_COPYAREA
298 select FB_CFB_IMAGEBLIT
299 help
300 This is the frame buffer device driver for the Acorn VIDC graphics
301 hardware found in Acorn RISC PCs and other ARM-based machines. If
302 unsure, say N.
303
304config FB_CLPS711X
305 bool "CLPS711X LCD support"
306 depends on (FB = y) && ARM && ARCH_CLPS711X
307 select FB_CFB_FILLRECT
308 select FB_CFB_COPYAREA
309 select FB_CFB_IMAGEBLIT
310 help
311 Say Y to enable the Framebuffer driver for the CLPS7111 and
312 EP7212 processors.
313
314config FB_SA1100
315 bool "SA-1100 LCD support"
316 depends on (FB = y) && ARM && ARCH_SA1100
317 select FB_CFB_FILLRECT
318 select FB_CFB_COPYAREA
319 select FB_CFB_IMAGEBLIT
320 help
321 This is a framebuffer device for the SA-1100 LCD Controller.
322 See <http://www.linux-fbdev.org/> for information on framebuffer
323 devices.
324
325 If you plan to use the LCD display with your SA-1100 system, say
326 Y here.
327
328config FB_IMX
329 tristate "Freescale i.MX1/21/25/27 LCD support"
330 depends on FB && ARCH_MXC
331 select BACKLIGHT_LCD_SUPPORT
332 select LCD_CLASS_DEVICE
333 select FB_CFB_FILLRECT
334 select FB_CFB_COPYAREA
335 select FB_CFB_IMAGEBLIT
336 select FB_MODE_HELPERS
337 select VIDEOMODE_HELPERS
338
339config FB_CYBER2000
340 tristate "CyberPro 2000/2010/5000 support"
341 depends on FB && PCI && (BROKEN || !SPARC64)
342 select FB_CFB_FILLRECT
343 select FB_CFB_COPYAREA
344 select FB_CFB_IMAGEBLIT
345 help
346 This enables support for the Integraphics CyberPro 20x0 and 5000
347 VGA chips used in the Rebel.com Netwinder and other machines.
348 Say Y if you have a NetWinder or a graphics card containing this
349 device, otherwise say N.
350
351config FB_CYBER2000_DDC
352 bool "DDC for CyberPro support"
353 depends on FB_CYBER2000
354 select FB_DDC
355 default y
356 help
357 Say Y here if you want DDC support for your CyberPro graphics
358 card. This is only I2C bus support; the driver does not use EDID.
359
360config FB_CYBER2000_I2C
361 bool "CyberPro 2000/2010/5000 I2C support"
362 depends on FB_CYBER2000 && I2C && ARCH_NETWINDER
363 select I2C_ALGOBIT
364 help
365 Enable support for the I2C video decoder interface on the
366 Integraphics CyberPro 20x0 and 5000 VGA chips. This is used
367 on the Netwinder machines for the SAA7111 video capture.
368
369config FB_APOLLO
370 bool
371 depends on (FB = y) && APOLLO
372 default y
373 select FB_CFB_FILLRECT
374 select FB_CFB_IMAGEBLIT
375
376config FB_Q40
377 bool
378 depends on (FB = y) && Q40
379 default y
380 select FB_CFB_FILLRECT
381 select FB_CFB_COPYAREA
382 select FB_CFB_IMAGEBLIT
383
384config FB_AMIGA
385 tristate "Amiga native chipset support"
386 depends on FB && AMIGA
387 help
388 This is the frame buffer device driver for the builtin graphics
389 chipset found in Amigas.
390
391 To compile this driver as a module, choose M here: the
392 module will be called amifb.
393
394config FB_AMIGA_OCS
395 bool "Amiga OCS chipset support"
396 depends on FB_AMIGA
397 help
398 This enables support for the original Agnus and Denise video chips,
399 found in the Amiga 1000 and most A500's and A2000's. If you intend
400 to run Linux on any of these systems, say Y; otherwise say N.
401
402config FB_AMIGA_ECS
403 bool "Amiga ECS chipset support"
404 depends on FB_AMIGA
405 help
406 This enables support for the Enhanced Chip Set, found in later
407 A500's, later A2000's, the A600, the A3000, the A3000T and CDTV. If
408 you intend to run Linux on any of these systems, say Y; otherwise
409 say N.
410
411config FB_AMIGA_AGA
412 bool "Amiga AGA chipset support"
413 depends on FB_AMIGA
414 help
415 This enables support for the Advanced Graphics Architecture (also
416 known as the AGA or AA) Chip Set, found in the A1200, A4000, A4000T
417 and CD32. If you intend to run Linux on any of these systems, say Y;
418 otherwise say N.
419
420config FB_FM2
421 bool "Amiga FrameMaster II/Rainbow II support"
422 depends on (FB = y) && ZORRO
423 select FB_CFB_FILLRECT
424 select FB_CFB_COPYAREA
425 select FB_CFB_IMAGEBLIT
426 help
427 This is the frame buffer device driver for the Amiga FrameMaster
428 card from BSC (exhibited 1992 but not shipped as a CBM product).
429
430config FB_ARC
431 tristate "Arc Monochrome LCD board support"
432 depends on FB && X86
433 select FB_SYS_FILLRECT
434 select FB_SYS_COPYAREA
435 select FB_SYS_IMAGEBLIT
436 select FB_SYS_FOPS
437 help
438 This enables support for the Arc Monochrome LCD board. The board
439 is based on the KS-108 lcd controller and is typically a matrix
440 of 2*n chips. This driver was tested with a 128x64 panel. This
441 driver supports it for use with x86 SBCs through a 16 bit GPIO
442 interface (8 bit data, 8 bit control). If you anticipate using
443 this driver, say Y or M; otherwise say N. You must specify the
444 GPIO IO address to be used for setting control and data.
445
446config FB_ATARI
447 bool "Atari native chipset support"
448 depends on (FB = y) && ATARI
449 select FB_CFB_FILLRECT
450 select FB_CFB_COPYAREA
451 select FB_CFB_IMAGEBLIT
452 help
453 This is the frame buffer device driver for the builtin graphics
454 chipset found in Ataris.
455
456config FB_OF
457 bool "Open Firmware frame buffer device support"
458 depends on (FB = y) && (PPC64 || PPC_OF) && (!PPC_PSERIES || PCI)
459 select FB_CFB_FILLRECT
460 select FB_CFB_COPYAREA
461 select FB_CFB_IMAGEBLIT
462 select FB_MACMODES
463 help
464 Say Y if you want Open Firmware support for your graphics
465 board.
466
467config FB_CONTROL
468 bool "Apple \"control\" display support"
469 depends on (FB = y) && PPC_PMAC && PPC32
470 select FB_CFB_FILLRECT
471 select FB_CFB_COPYAREA
472 select FB_CFB_IMAGEBLIT
473 select FB_MACMODES
474 help
475 This driver supports a frame buffer for the graphics adapter in the
476 Power Macintosh 7300 and others.
477
478config FB_PLATINUM
479 bool "Apple \"platinum\" display support"
480 depends on (FB = y) && PPC_PMAC && PPC32
481 select FB_CFB_FILLRECT
482 select FB_CFB_COPYAREA
483 select FB_CFB_IMAGEBLIT
484 select FB_MACMODES
485 help
486 This driver supports a frame buffer for the "platinum" graphics
487 adapter in some Power Macintoshes.
488
489config FB_VALKYRIE
490 bool "Apple \"valkyrie\" display support"
491 depends on (FB = y) && (MAC || (PPC_PMAC && PPC32))
492 select FB_CFB_FILLRECT
493 select FB_CFB_COPYAREA
494 select FB_CFB_IMAGEBLIT
495 select FB_MACMODES
496 help
497 This driver supports a frame buffer for the "valkyrie" graphics
498 adapter in some Power Macintoshes.
499
500config FB_CT65550
501 bool "Chips 65550 display support"
502 depends on (FB = y) && PPC32 && PCI
503 select FB_CFB_FILLRECT
504 select FB_CFB_COPYAREA
505 select FB_CFB_IMAGEBLIT
506 help
507 This is the frame buffer device driver for the Chips & Technologies
508 65550 graphics chip in PowerBooks.
509
510config FB_ASILIANT
511 bool "Asiliant (Chips) 69000 display support"
512 depends on (FB = y) && PCI
513 select FB_CFB_FILLRECT
514 select FB_CFB_COPYAREA
515 select FB_CFB_IMAGEBLIT
516 help
517 This is the frame buffer device driver for the Asiliant 69030 chipset
518
519config FB_IMSTT
520 bool "IMS Twin Turbo display support"
521 depends on (FB = y) && PCI
522 select FB_CFB_IMAGEBLIT
523 select FB_MACMODES if PPC
524 help
525 The IMS Twin Turbo is a PCI-based frame buffer card bundled with
526 many Macintosh and compatible computers.
527
528config FB_VGA16
529 tristate "VGA 16-color graphics support"
530 depends on FB && (X86 || PPC)
531 select FB_CFB_FILLRECT
532 select FB_CFB_COPYAREA
533 select FB_CFB_IMAGEBLIT
534 select VGASTATE
535 select FONT_8x16 if FRAMEBUFFER_CONSOLE
536 help
537 This is the frame buffer device driver for VGA 16 color graphic
538 cards. Say Y if you have such a card.
539
540 To compile this driver as a module, choose M here: the
541 module will be called vga16fb.
542
543config FB_BF54X_LQ043
544 tristate "SHARP LQ043 TFT LCD (BF548 EZKIT)"
545 depends on FB && (BF54x) && !BF542
546 select FB_CFB_FILLRECT
547 select FB_CFB_COPYAREA
548 select FB_CFB_IMAGEBLIT
549 help
550 This is the framebuffer device driver for a SHARP LQ043T1DG01 TFT LCD
551
552config FB_BFIN_T350MCQB
553 tristate "Varitronix COG-T350MCQB TFT LCD display (BF527 EZKIT)"
554 depends on FB && BLACKFIN
555 select BFIN_GPTIMERS
556 select FB_CFB_FILLRECT
557 select FB_CFB_COPYAREA
558 select FB_CFB_IMAGEBLIT
559 help
560 This is the framebuffer device driver for a Varitronix VL-PS-COG-T350MCQB-01 TFT LCD display.
561 This display is a QVGA 320x240 24-bit RGB display interfaced by an 8-bit wide PPI.
562 It uses PPI[0..7], PPI_FS1, PPI_FS2 and PPI_CLK.
563
564config FB_BFIN_LQ035Q1
565 tristate "SHARP LQ035Q1DH02 TFT LCD"
566 depends on FB && BLACKFIN && SPI
567 select FB_CFB_FILLRECT
568 select FB_CFB_COPYAREA
569 select FB_CFB_IMAGEBLIT
570 select BFIN_GPTIMERS
571 help
572 This is the framebuffer device driver for a SHARP LQ035Q1DH02 TFT display found on
573 the Blackfin Landscape LCD EZ-Extender Card.
574 This display is a QVGA 320x240 18-bit RGB display interfaced by a 16-bit wide PPI.
575 It uses PPI[0..15], PPI_FS1, PPI_FS2 and PPI_CLK.
576
577 To compile this driver as a module, choose M here: the
578 module will be called bfin-lq035q1-fb.
579
580config FB_BF537_LQ035
581 tristate "SHARP LQ035 TFT LCD (BF537 STAMP)"
582 depends on FB && (BF534 || BF536 || BF537) && I2C_BLACKFIN_TWI
583 select FB_CFB_FILLRECT
584 select FB_CFB_COPYAREA
585 select FB_CFB_IMAGEBLIT
586 select BFIN_GPTIMERS
587 help
588 This is the framebuffer device for a SHARP LQ035Q7DB03 TFT LCD
589 attached to a BF537.
590
591 To compile this driver as a module, choose M here: the
592 module will be called bf537-lq035.
593
594config FB_BFIN_7393
595 tristate "Blackfin ADV7393 Video encoder"
596 depends on FB && BLACKFIN
597 select I2C
598 select FB_CFB_FILLRECT
599 select FB_CFB_COPYAREA
600 select FB_CFB_IMAGEBLIT
601 help
602 This is the framebuffer device for an ADV7393 video encoder
603 attached to a Blackfin on the PPI port.
604 If your Blackfin board has an ADV7393, select Y.
605
606 To compile this driver as a module, choose M here: the
607 module will be called bfin_adv7393fb.
608
609choice
610 prompt "Video mode support"
611 depends on FB_BFIN_7393
612 default NTSC
613
614config NTSC
615 bool 'NTSC 720x480'
616
617config PAL
618 bool 'PAL 720x576'
619
620config NTSC_640x480
621 bool 'NTSC 640x480 (Experimental)'
622
623config PAL_640x480
624 bool 'PAL 640x480 (Experimental)'
625
626config NTSC_YCBCR
627 bool 'NTSC 720x480 YCbCR input'
628
629config PAL_YCBCR
630 bool 'PAL 720x576 YCbCR input'
631
632endchoice
633
634choice
635 prompt "Size of ADV7393 frame buffer memory Single/Double Size"
636 depends on (FB_BFIN_7393)
637 default ADV7393_1XMEM
638
639config ADV7393_1XMEM
640 bool 'Single'
641
642config ADV7393_2XMEM
643 bool 'Double'
644endchoice
645
646config FB_STI
647 tristate "HP STI frame buffer device support"
648 depends on FB && PARISC
649 select FB_CFB_FILLRECT
650 select FB_CFB_COPYAREA
651 select FB_CFB_IMAGEBLIT
652 select STI_CONSOLE
653 select VT
654 default y
655 ---help---
656 STI refers to the HP "Standard Text Interface" which is a set of
657 BIOS routines contained in a ROM chip in HP PA-RISC based machines.
658 Enabling this option will implement the linux framebuffer device
659 using calls to the STI BIOS routines for initialisation.
660
661 If you enable this option, you will get a planar framebuffer device
662 /dev/fb which will work on the most common HP graphic cards of the
663 NGLE family, including the artist chips (in the 7xx and Bxxx series),
664 HCRX, HCRX24, CRX, CRX24 and VisEG series.
665
666 It is safe to enable this option, so you should probably say "Y".
667
668config FB_MAC
669 bool "Generic Macintosh display support"
670 depends on (FB = y) && MAC
671 select FB_CFB_FILLRECT
672 select FB_CFB_COPYAREA
673 select FB_CFB_IMAGEBLIT
674 select FB_MACMODES
675
676config FB_HP300
677 bool
678 depends on (FB = y) && DIO
679 select FB_CFB_IMAGEBLIT
680 default y
681
682config FB_TGA
683 tristate "TGA/SFB+ framebuffer support"
684 depends on FB && (ALPHA || TC)
685 select FB_CFB_FILLRECT
686 select FB_CFB_COPYAREA
687 select FB_CFB_IMAGEBLIT
688 select BITREVERSE
689 ---help---
690 This is the frame buffer device driver for generic TGA and SFB+
691 graphic cards. These include DEC ZLXp-E1, -E2 and -E3 PCI cards,
692 also known as PBXGA-A, -B and -C, and DEC ZLX-E1, -E2 and -E3
693 TURBOchannel cards, also known as PMAGD-A, -B and -C.
694
695 Due to hardware limitations ZLX-E2 and E3 cards are not supported
696 for DECstation 5000/200 systems. Additionally due to firmware
697 limitations these cards may cause troubles with booting DECstation
698 5000/240 and /260 systems, but are fully supported under Linux if
699 you manage to get it going. ;-)
700
701 Say Y if you have one of those.
702
703config FB_UVESA
704 tristate "Userspace VESA VGA graphics support"
705 depends on FB && CONNECTOR
706 select FB_CFB_FILLRECT
707 select FB_CFB_COPYAREA
708 select FB_CFB_IMAGEBLIT
709 select FB_MODE_HELPERS
710 help
711 This is the frame buffer driver for generic VBE 2.0 compliant
712 graphic cards. It can also take advantage of VBE 3.0 features,
713 such as refresh rate adjustment.
714
715 This driver generally provides more features than vesafb but
716 requires a userspace helper application called 'v86d'. See
717 <file:Documentation/fb/uvesafb.txt> for more information.
718
719 If unsure, say N.
720
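A minimal usage sketch for the option above (mode_option is the parameter name used in the uvesafb documentation referenced above; the v86d helper must already be present on the system for the driver to do anything):

    sudo modprobe uvesafb mode_option=1024x768-32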
721config FB_VESA
722 bool "VESA VGA graphics support"
723 depends on (FB = y) && X86
724 select FB_CFB_FILLRECT
725 select FB_CFB_COPYAREA
726 select FB_CFB_IMAGEBLIT
727 select FB_BOOT_VESA_SUPPORT
728 help
729 This is the frame buffer device driver for generic VESA 2.0
730 compliant graphic cards. The older VESA 1.2 cards are not supported.
731 You will get a boot time penguin logo at no additional cost. Please
732 read <file:Documentation/fb/vesafb.txt>. If unsure, say Y.
733
734config FB_EFI
735 bool "EFI-based Framebuffer Support"
736 depends on (FB = y) && X86 && EFI
737 select FB_CFB_FILLRECT
738 select FB_CFB_COPYAREA
739 select FB_CFB_IMAGEBLIT
740 help
741 This is the EFI frame buffer device driver. If the firmware on
742 your platform is EFI 1.10 or UEFI 2.0, select Y to add support for
743 using the EFI framebuffer as your console.
744
745config FB_N411
746 tristate "N411 Apollo/Hecuba devkit support"
747 depends on FB && X86 && MMU
748 select FB_SYS_FILLRECT
749 select FB_SYS_COPYAREA
750 select FB_SYS_IMAGEBLIT
751 select FB_SYS_FOPS
752 select FB_DEFERRED_IO
753 select FB_HECUBA
754 help
755 This enables support for the Apollo display controller in its
756 Hecuba form using the n411 devkit.
757
758config FB_HGA
759 tristate "Hercules mono graphics support"
760 depends on FB && X86
761 help
762 Say Y here if you have a Hercules mono graphics card.
763
764 To compile this driver as a module, choose M here: the
765 module will be called hgafb.
766
767 As this card technology is at least 25 years old,
768 most people will answer N here.
769
770config FB_GBE
771 bool "SGI Graphics Backend frame buffer support"
772 depends on (FB = y) && SGI_IP32
773 select FB_CFB_FILLRECT
774 select FB_CFB_COPYAREA
775 select FB_CFB_IMAGEBLIT
776 help
777 This is the frame buffer device driver for SGI Graphics Backend.
778 This chip is used in SGI O2 and Visual Workstation 320/540.
779
780config FB_GBE_MEM
781 int "Video memory size in MB"
782 depends on FB_GBE
783 default 4
784 help
785 This is the amount of memory reserved for the framebuffer,
786 which can be any value between 1MB and 8MB.
787
788config FB_SBUS
789 bool "SBUS and UPA framebuffers"
790 depends on (FB = y) && SPARC
791 help
792 Say Y if you want support for SBUS or UPA based frame buffer device.
793
794config FB_BW2
795 bool "BWtwo support"
796 depends on (FB = y) && (SPARC && FB_SBUS)
797 select FB_CFB_FILLRECT
798 select FB_CFB_COPYAREA
799 select FB_CFB_IMAGEBLIT
800 help
801 This is the frame buffer device driver for the BWtwo frame buffer.
802
803config FB_CG3
804 bool "CGthree support"
805 depends on (FB = y) && (SPARC && FB_SBUS)
806 select FB_CFB_FILLRECT
807 select FB_CFB_COPYAREA
808 select FB_CFB_IMAGEBLIT
809 help
810 This is the frame buffer device driver for the CGthree frame buffer.
811
812config FB_CG6
813 bool "CGsix (GX,TurboGX) support"
814 depends on (FB = y) && (SPARC && FB_SBUS)
815 select FB_CFB_COPYAREA
816 select FB_CFB_IMAGEBLIT
817 help
818 This is the frame buffer device driver for the CGsix (GX, TurboGX)
819 frame buffer.
820
821config FB_FFB
822 bool "Creator/Creator3D/Elite3D support"
823 depends on FB_SBUS && SPARC64
824 select FB_CFB_COPYAREA
825 select FB_CFB_IMAGEBLIT
826 help
827 This is the frame buffer device driver for the Creator, Creator3D,
828 and Elite3D graphics boards.
829
830config FB_TCX
831 bool "TCX (SS4/SS5 only) support"
832 depends on FB_SBUS
833 select FB_CFB_FILLRECT
834 select FB_CFB_COPYAREA
835 select FB_CFB_IMAGEBLIT
836 help
837 This is the frame buffer device driver for the TCX 24/8bit frame
838 buffer.
839
840config FB_CG14
841 bool "CGfourteen (SX) support"
842 depends on FB_SBUS
843 select FB_CFB_FILLRECT
844 select FB_CFB_COPYAREA
845 select FB_CFB_IMAGEBLIT
846 help
847 This is the frame buffer device driver for the CGfourteen frame
848 buffer on Desktop SPARCsystems with the SX graphics option.
849
850config FB_P9100
851 bool "P9100 (Sparcbook 3 only) support"
852 depends on FB_SBUS
853 select FB_CFB_FILLRECT
854 select FB_CFB_COPYAREA
855 select FB_CFB_IMAGEBLIT
856 help
857 This is the frame buffer device driver for the P9100 card
858 supported on Sparcbook 3 machines.
859
860config FB_LEO
861 bool "Leo (ZX) support"
862 depends on FB_SBUS
863 select FB_CFB_FILLRECT
864 select FB_CFB_COPYAREA
865 select FB_CFB_IMAGEBLIT
866 help
867 This is the frame buffer device driver for the SBUS-based Sun ZX
868 (leo) frame buffer cards.
869
870config FB_IGA
871 bool "IGA 168x display support"
872 depends on (FB = y) && SPARC32
873 select FB_CFB_FILLRECT
874 select FB_CFB_COPYAREA
875 select FB_CFB_IMAGEBLIT
876 help
877 This is the framebuffer device for the INTERGRAPHICS 1680 and
878 successor frame buffer cards.
879
880config FB_XVR500
881 bool "Sun XVR-500 3DLABS Wildcat support"
882 depends on (FB = y) && PCI && SPARC64
883 select FB_CFB_FILLRECT
884 select FB_CFB_COPYAREA
885 select FB_CFB_IMAGEBLIT
886 help
887 This is the framebuffer device for the Sun XVR-500 and similar
888 graphics cards based upon the 3DLABS Wildcat chipset. The driver
889 only works on sparc64 systems where the system firmware has
890 mostly initialized the card already. It is treated as a
891 completely dumb framebuffer device.
892
893config FB_XVR2500
894 bool "Sun XVR-2500 3DLABS Wildcat support"
895 depends on (FB = y) && PCI && SPARC64
896 select FB_CFB_FILLRECT
897 select FB_CFB_COPYAREA
898 select FB_CFB_IMAGEBLIT
899 help
900 This is the framebuffer device for the Sun XVR-2500 and similar
901 graphics cards based upon the 3DLABS Wildcat chipset. The driver
902 only works on sparc64 systems where the system firmware has
903 mostly initialized the card already. It is treated as a
904 completely dumb framebuffer device.
905
906config FB_XVR1000
907 bool "Sun XVR-1000 support"
908 depends on (FB = y) && SPARC64
909 select FB_CFB_FILLRECT
910 select FB_CFB_COPYAREA
911 select FB_CFB_IMAGEBLIT
912 help
913 This is the framebuffer device for the Sun XVR-1000 and similar
914 graphics cards. The driver only works on sparc64 systems where
915 the system firmware has mostly initialized the card already. It
916 is treated as a completely dumb framebuffer device.
917
918config FB_PVR2
919 tristate "NEC PowerVR 2 display support"
920 depends on FB && SH_DREAMCAST
921 select FB_CFB_FILLRECT
922 select FB_CFB_COPYAREA
923 select FB_CFB_IMAGEBLIT
924 ---help---
925 Say Y here if you have a PowerVR 2 card in your box. If you plan to
926	 run Linux on your Dreamcast, you will have to say Y here.
927 This driver may or may not work on other PowerVR 2 cards, but is
928 totally untested. Use at your own risk. If unsure, say N.
929
930 To compile this driver as a module, choose M here: the
931 module will be called pvr2fb.
932
933 You can pass several parameters to the driver at boot time or at
934 module load time. The parameters look like "video=pvr2:XXX", where
935 the meaning of XXX can be found at the end of the main source file
936 (<file:drivers/video/pvr2fb.c>). Please see the file
937 <file:Documentation/fb/pvr2fb.txt>.
938
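# Illustrative sketch only: a "video=pvr2:XXX" boot parameter of the form
# described above might look something like
#   video=pvr2:640x480-16
# The actual option names are those listed at the end of the pvr2fb source
# and in <file:Documentation/fb/pvr2fb.txt>; the mode string here is a
# placeholder, not a verified value.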
939config FB_OPENCORES
940 tristate "OpenCores VGA/LCD core 2.0 framebuffer support"
941 depends on FB && HAS_DMA
942 select FB_CFB_FILLRECT
943 select FB_CFB_COPYAREA
944 select FB_CFB_IMAGEBLIT
945 help
946 This enables support for the OpenCores VGA/LCD core.
947
948 The OpenCores VGA/LCD core is typically used together with
949 softcore CPUs (e.g. OpenRISC or Microblaze) or hard processor
950 systems (e.g. Altera socfpga or Xilinx Zynq) on FPGAs.
951
952 The source code and specification for the core is available at
953 <http://opencores.org/project,vga_lcd>
954
955config FB_S1D13XXX
956 tristate "Epson S1D13XXX framebuffer support"
957 depends on FB
958 select FB_CFB_FILLRECT
959 select FB_CFB_COPYAREA
960 select FB_CFB_IMAGEBLIT
961 help
962 Support for S1D13XXX framebuffer device family (currently only
963 working with S1D13806). Product specs at
964 <http://vdc.epson.com/>
965
966config FB_ATMEL
967 tristate "AT91/AT32 LCD Controller support"
968 depends on FB && HAVE_FB_ATMEL
969 select FB_CFB_FILLRECT
970 select FB_CFB_COPYAREA
971 select FB_CFB_IMAGEBLIT
972 select FB_MODE_HELPERS
973 select VIDEOMODE_HELPERS
974 help
975 This enables support for the AT91/AT32 LCD Controller.
976
977config FB_INTSRAM
978 bool "Frame Buffer in internal SRAM"
979 depends on FB_ATMEL && ARCH_AT91SAM9261
980 help
981	 Say Y if you want to map the frame buffer in internal SRAM. Say N if you want
982	 to keep the frame buffer in external SDRAM.
983
984config FB_ATMEL_STN
985 bool "Use a STN display with AT91/AT32 LCD Controller"
986 depends on FB_ATMEL && (MACH_AT91SAM9261EK || MACH_AT91SAM9G10EK)
987 default n
988 help
989 Say Y if you want to connect a STN LCD display to the AT91/AT32 LCD
990 Controller. Say N if you want to connect a TFT.
991
992 If unsure, say N.
993
994config FB_NVIDIA
995 tristate "nVidia Framebuffer Support"
996 depends on FB && PCI
997 select FB_BACKLIGHT if FB_NVIDIA_BACKLIGHT
998 select FB_MODE_HELPERS
999 select FB_CFB_FILLRECT
1000 select FB_CFB_COPYAREA
1001 select FB_CFB_IMAGEBLIT
1002 select BITREVERSE
1003 select VGASTATE
1004 help
1005	 This driver supports graphics boards with nVidia chips, TNT
1006	 and newer. For very old chipsets, such as the RIVA128, use
1007	 rivafb instead.
1008 Say Y if you have such a graphics board.
1009
1010 To compile this driver as a module, choose M here: the
1011 module will be called nvidiafb.
1012
1013config FB_NVIDIA_I2C
1014 bool "Enable DDC Support"
1015 depends on FB_NVIDIA
1016 select FB_DDC
1017 help
1018 This enables I2C support for nVidia Chipsets. This is used
1019 only for getting EDID information from the attached display
1020 allowing for robust video mode handling and switching.
1021
1022 Because fbdev-2.6 requires that drivers must be able to
1023 independently validate video mode parameters, you should say Y
1024 here.
1025
1026config FB_NVIDIA_DEBUG
1027 bool "Lots of debug output"
1028 depends on FB_NVIDIA
1029 default n
1030 help
1031 Say Y here if you want the nVidia driver to output all sorts
1032 of debugging information to provide to the maintainer when
1033 something goes wrong.
1034
1035config FB_NVIDIA_BACKLIGHT
1036 bool "Support for backlight control"
1037 depends on FB_NVIDIA
1038 default y
1039 help
1040 Say Y here if you want to control the backlight of your display.
1041
1042config FB_RIVA
1043 tristate "nVidia Riva support"
1044 depends on FB && PCI
1045 select FB_BACKLIGHT if FB_RIVA_BACKLIGHT
1046 select FB_MODE_HELPERS
1047 select FB_CFB_FILLRECT
1048 select FB_CFB_COPYAREA
1049 select FB_CFB_IMAGEBLIT
1050 select BITREVERSE
1051 select VGASTATE
1052 help
1053 This driver supports graphics boards with the nVidia Riva/Geforce
1054 chips.
1055 Say Y if you have such a graphics board.
1056
1057 To compile this driver as a module, choose M here: the
1058 module will be called rivafb.
1059
1060config FB_RIVA_I2C
1061 bool "Enable DDC Support"
1062 depends on FB_RIVA
1063 select FB_DDC
1064 help
1065 This enables I2C support for nVidia Chipsets. This is used
1066 only for getting EDID information from the attached display
1067 allowing for robust video mode handling and switching.
1068
1069 Because fbdev-2.6 requires that drivers must be able to
1070 independently validate video mode parameters, you should say Y
1071 here.
1072
1073config FB_RIVA_DEBUG
1074 bool "Lots of debug output"
1075 depends on FB_RIVA
1076 default n
1077 help
1078 Say Y here if you want the Riva driver to output all sorts
1079 of debugging information to provide to the maintainer when
1080 something goes wrong.
1081
1082config FB_RIVA_BACKLIGHT
1083 bool "Support for backlight control"
1084 depends on FB_RIVA
1085 default y
1086 help
1087 Say Y here if you want to control the backlight of your display.
1088
1089config FB_I740
1090 tristate "Intel740 support"
1091 depends on FB && PCI
1092 select FB_MODE_HELPERS
1093 select FB_CFB_FILLRECT
1094 select FB_CFB_COPYAREA
1095 select FB_CFB_IMAGEBLIT
1096 select VGASTATE
1097 select FB_DDC
1098 help
1099 This driver supports graphics cards based on Intel740 chip.
1100
1101config FB_I810
1102 tristate "Intel 810/815 support"
1103 depends on FB && PCI && X86_32 && AGP_INTEL
1104 select FB_MODE_HELPERS
1105 select FB_CFB_FILLRECT
1106 select FB_CFB_COPYAREA
1107 select FB_CFB_IMAGEBLIT
1108 select VGASTATE
1109 help
1110 This driver supports the on-board graphics built in to the Intel 810
1111 and 815 chipsets. Say Y if you have and plan to use such a board.
1112
1113 To compile this driver as a module, choose M here: the
1114 module will be called i810fb.
1115
1116 For more information, please read
1117 <file:Documentation/fb/intel810.txt>
1118
1119config FB_I810_GTF
1120 bool "use VESA Generalized Timing Formula"
1121 depends on FB_I810
1122 help
1123 If you say Y, then the VESA standard, Generalized Timing Formula
1124 or GTF, will be used to calculate the required video timing values
1125 per video mode. Since the GTF allows nondiscrete timings
1126 (nondiscrete being a range of values as opposed to discrete being a
1127 set of values), you'll be able to use any combination of horizontal
1128 and vertical resolutions, and vertical refresh rates without having
1129 to specify your own timing parameters. This is especially useful
1130 to maximize the performance of an aging display, or if you just
1131 have a display with nonstandard dimensions. A VESA compliant
1132	 monitor is recommended, but the driver can still work with non-compliant ones.
1133 If you need or want this, then select this option. The timings may
1134 not be compliant with Intel's recommended values. Use at your own
1135 risk.
1136
1137 If you say N, the driver will revert to discrete video timings
1138 using a set recommended by Intel in their documentation.
1139
1140 If unsure, say N.
1141
1142config FB_I810_I2C
1143 bool "Enable DDC Support"
1144 depends on FB_I810 && FB_I810_GTF
1145 select FB_DDC
1146 help
1147	 This enables DDC support for the Intel 810/815 framebuffer driver.
1148config FB_LE80578
1149 tristate "Intel LE80578 (Vermilion) support"
1150 depends on FB && PCI && X86
1151 select FB_MODE_HELPERS
1152 select FB_CFB_FILLRECT
1153 select FB_CFB_COPYAREA
1154 select FB_CFB_IMAGEBLIT
1155 help
1156 This driver supports the LE80578 (Vermilion Range) chipset
1157
1158config FB_CARILLO_RANCH
1159 tristate "Intel Carillo Ranch support"
1160 depends on FB_LE80578 && FB && PCI && X86
1161 help
1162 This driver supports the LE80578 (Carillo Ranch) board
1163
1164config FB_INTEL
1165 tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support"
1166 depends on FB && PCI && X86 && AGP_INTEL && EXPERT
1167 select FB_MODE_HELPERS
1168 select FB_CFB_FILLRECT
1169 select FB_CFB_COPYAREA
1170 select FB_CFB_IMAGEBLIT
1171 select FB_BOOT_VESA_SUPPORT if FB_INTEL = y
1172 depends on !DRM_I915
1173 help
1174 This driver supports the on-board graphics built in to the Intel
1175 830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets.
1176 Say Y if you have and plan to use such a board.
1177
1178	 To make FB_INTEL=y work you need to say AGP_INTEL=y too.
1179
1180 To compile this driver as a module, choose M here: the
1181 module will be called intelfb.
1182
1183 For more information, please read <file:Documentation/fb/intelfb.txt>
1184
1185config FB_INTEL_DEBUG
1186 bool "Intel driver Debug Messages"
1187 depends on FB_INTEL
1188 ---help---
1189 Say Y here if you want the Intel driver to output all sorts
1190 of debugging information to provide to the maintainer when
1191 something goes wrong.
1192
1193config FB_INTEL_I2C
1194 bool "DDC/I2C for Intel framebuffer support"
1195 depends on FB_INTEL
1196 select FB_DDC
1197 default y
1198 help
1199 Say Y here if you want DDC/I2C support for your on-board Intel graphics.
1200
1201config FB_MATROX
1202 tristate "Matrox acceleration"
1203 depends on FB && PCI
1204 select FB_CFB_FILLRECT
1205 select FB_CFB_COPYAREA
1206 select FB_CFB_IMAGEBLIT
1207 select FB_TILEBLITTING
1208 select FB_MACMODES if PPC_PMAC
1209 ---help---
1210 Say Y here if you have a Matrox Millennium, Matrox Millennium II,
1211 Matrox Mystique, Matrox Mystique 220, Matrox Productiva G100, Matrox
1212 Mystique G200, Matrox Millennium G200, Matrox Marvel G200 video,
1213 Matrox G400, G450 or G550 card in your box.
1214
1215 To compile this driver as a module, choose M here: the
1216 module will be called matroxfb.
1217
1218 You can pass several parameters to the driver at boot time or at
1219 module load time. The parameters look like "video=matroxfb:XXX", and
1220 are described in <file:Documentation/fb/matroxfb.txt>.
1221
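# Illustrative sketch only: a "video=matroxfb:XXX" parameter of the form
# described above might look something like
#   video=matroxfb:vesa:0x1BB
# The real option names and accepted values are those documented in
# <file:Documentation/fb/matroxfb.txt>; treat this line as a placeholder.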
1222config FB_MATROX_MILLENIUM
1223 bool "Millennium I/II support"
1224 depends on FB_MATROX
1225 help
1226 Say Y here if you have a Matrox Millennium or Matrox Millennium II
1227 video card. If you select "Advanced lowlevel driver options" below,
1228 you should check 4 bpp packed pixel, 8 bpp packed pixel, 16 bpp
1229 packed pixel, 24 bpp packed pixel and 32 bpp packed pixel. You can
1230 also use font widths different from 8.
1231
1232config FB_MATROX_MYSTIQUE
1233 bool "Mystique support"
1234 depends on FB_MATROX
1235 help
1236 Say Y here if you have a Matrox Mystique or Matrox Mystique 220
1237 video card. If you select "Advanced lowlevel driver options" below,
1238 you should check 8 bpp packed pixel, 16 bpp packed pixel, 24 bpp
1239 packed pixel and 32 bpp packed pixel. You can also use font widths
1240 different from 8.
1241
1242config FB_MATROX_G
1243 bool "G100/G200/G400/G450/G550 support"
1244 depends on FB_MATROX
1245 ---help---
1246 Say Y here if you have a Matrox G100, G200, G400, G450 or G550 based
1247 video card. If you select "Advanced lowlevel driver options", you
1248 should check 8 bpp packed pixel, 16 bpp packed pixel, 24 bpp packed
1249 pixel and 32 bpp packed pixel. You can also use font widths
1250 different from 8.
1251
1252 If you need support for G400 secondary head, you must say Y to
1253 "Matrox I2C support" and "G400 second head support" right below.
1254 G450/G550 secondary head and digital output are supported without
1255 additional modules.
1256
1257 The driver starts in monitor mode. You must use the matroxset tool
1258 (available at <ftp://platan.vc.cvut.cz/pub/linux/matrox-latest/>) to
1259 swap primary and secondary head outputs, or to change output mode.
1260	 The secondary head driver always starts in 640x480 resolution and you
1261	 must use fbset to change it.
1262
1263	 Do not forget that the second head supports only 16 and 32 bpp
1264	 packed pixels, so it is a good idea to compile them into the kernel
1265	 too. You can use only some font widths, as the driver uses generic
1266	 painting procedures (the secondary head does not use the acceleration
1267	 engine).
1268
1269	 G450/G550 hardware can display a TV picture only from the secondary CRTC,
1270	 and it performs no scaling, so the picture must have 525 or 625 lines.
1271
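# Illustrative sketch only: once the secondary head described above is up,
# changing its mode with fbset might look something like
#   fbset -fb /dev/fb1 800x600-60
# where the framebuffer device node and mode name are placeholders; matroxset
# (from the URL above) is what swaps or reroutes the head outputs.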
1272config FB_MATROX_I2C
1273 tristate "Matrox I2C support"
1274 depends on FB_MATROX
1275 select FB_DDC
1276 ---help---
1277	 This driver creates I2C buses which are needed for accessing the
1278 DDC (I2C) bus present on all Matroxes, an I2C bus which
1279 interconnects Matrox optional devices, like MGA-TVO on G200 and
1280 G400, and the secondary head DDC bus, present on G400 only.
1281
1282 You can say Y or M here if you want to experiment with monitor
1283 detection code. You must say Y or M here if you want to use either
1284 second head of G400 or MGA-TVO on G200 or G400.
1285
1286 If you compile it as module, it will create a module named
1287 i2c-matroxfb.
1288
1289config FB_MATROX_MAVEN
1290 tristate "G400 second head support"
1291 depends on FB_MATROX_G && FB_MATROX_I2C
1292 ---help---
1293 WARNING !!! This support does not work with G450 !!!
1294
1295	 Say Y or M here if you want to use a secondary head (meaning two
1296	 monitors in parallel) on G400 or the MGA-TVO add-on on G200. The
1297	 secondary head is not compatible with accelerated XFree 3.3.x SVGA
1298	 servers - its output is blanked while you are in X. With the XFree
1299	 3.9.17 preview you can use both heads if you use SVGA over fbdev, or
1300	 the fbdev driver on the first head and the fbdev driver on the second head.
1301
1302 If you compile it as module, two modules are created,
1303 matroxfb_crtc2 and matroxfb_maven. Matroxfb_maven is needed for
1304 both G200 and G400, matroxfb_crtc2 is needed only by G400. You must
1305 also load i2c-matroxfb to get it to run.
1306
1307 The driver starts in monitor mode and you must use the matroxset
1308 tool (available at
1309 <ftp://platan.vc.cvut.cz/pub/linux/matrox-latest/>) to switch it to
1310 PAL or NTSC or to swap primary and secondary head outputs.
1311	 The secondary head driver also always starts in 640x480 resolution;
1312	 you must use fbset to change it.
1313
1314	 Also do not forget that the second head supports only 16 and 32 bpp
1315	 packed pixels, so it is a good idea to compile them into the kernel
1316	 too. You can use only some font widths, as the driver uses generic
1317	 painting procedures (the secondary head does not use the acceleration
1318	 engine).
1319
1320config FB_RADEON
1321 tristate "ATI Radeon display support"
1322 depends on FB && PCI
1323 select FB_BACKLIGHT if FB_RADEON_BACKLIGHT
1324 select FB_MODE_HELPERS
1325 select FB_CFB_FILLRECT
1326 select FB_CFB_COPYAREA
1327 select FB_CFB_IMAGEBLIT
1328 select FB_MACMODES if PPC_OF
1329 help
1330 Choose this option if you want to use an ATI Radeon graphics card as
1331 a framebuffer device. There are both PCI and AGP versions. You
1332 don't need to choose this to run the Radeon in plain VGA mode.
1333
1334 There is a product page at
1335 http://products.amd.com/en-us/GraphicCardResult.aspx
1336
1337config FB_RADEON_I2C
1338 bool "DDC/I2C for ATI Radeon support"
1339 depends on FB_RADEON
1340 select FB_DDC
1341 default y
1342 help
1343 Say Y here if you want DDC/I2C support for your Radeon board.
1344
1345config FB_RADEON_BACKLIGHT
1346 bool "Support for backlight control"
1347 depends on FB_RADEON
1348 default y
1349 help
1350 Say Y here if you want to control the backlight of your display.
1351
1352config FB_RADEON_DEBUG
1353 bool "Lots of debug output from Radeon driver"
1354 depends on FB_RADEON
1355 default n
1356 help
1357 Say Y here if you want the Radeon driver to output all sorts
1358 of debugging information to provide to the maintainer when
1359 something goes wrong.
1360
1361config FB_ATY128
1362 tristate "ATI Rage128 display support"
1363 depends on FB && PCI
1364 select FB_CFB_FILLRECT
1365 select FB_CFB_COPYAREA
1366 select FB_CFB_IMAGEBLIT
1367 select FB_BACKLIGHT if FB_ATY128_BACKLIGHT
1368 select FB_MACMODES if PPC_PMAC
1369 help
1370 This driver supports graphics boards with the ATI Rage128 chips.
1371 Say Y if you have such a graphics board and read
1372 <file:Documentation/fb/aty128fb.txt>.
1373
1374 To compile this driver as a module, choose M here: the
1375 module will be called aty128fb.
1376
1377config FB_ATY128_BACKLIGHT
1378 bool "Support for backlight control"
1379 depends on FB_ATY128
1380 default y
1381 help
1382 Say Y here if you want to control the backlight of your display.
1383
1384config FB_ATY
1385 tristate "ATI Mach64 display support" if PCI || ATARI
1386 depends on FB && !SPARC32
1387 select FB_CFB_FILLRECT
1388 select FB_CFB_COPYAREA
1389 select FB_CFB_IMAGEBLIT
1390 select FB_BACKLIGHT if FB_ATY_BACKLIGHT
1391 select FB_MACMODES if PPC
1392 help
1393 This driver supports graphics boards with the ATI Mach64 chips.
1394 Say Y if you have such a graphics board.
1395
1396 To compile this driver as a module, choose M here: the
1397 module will be called atyfb.
1398
1399config FB_ATY_CT
1400 bool "Mach64 CT/VT/GT/LT (incl. 3D RAGE) support"
1401 depends on PCI && FB_ATY
1402 default y if SPARC64 && PCI
1403 help
1404 Say Y here to support use of ATI's 64-bit Rage boards (or other
1405 boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
1406 framebuffer device. The ATI product support page for these boards
1407 is at <http://support.ati.com/products/pc/mach64/mach64.html>.
1408
1409config FB_ATY_GENERIC_LCD
1410 bool "Mach64 generic LCD support"
1411 depends on FB_ATY_CT
1412 help
1413 Say Y if you have a laptop with an ATI Rage LT PRO, Rage Mobility,
1414 Rage XC, or Rage XL chipset.
1415
1416config FB_ATY_GX
1417 bool "Mach64 GX support" if PCI
1418 depends on FB_ATY
1419 default y if ATARI
1420 help
1421 Say Y here to support use of the ATI Mach64 Graphics Expression
1422 board (or other boards based on the Mach64 GX chipset) as a
1423 framebuffer device. The ATI product support page for these boards
1424 is at
1425 <http://support.ati.com/products/pc/mach64/graphics_xpression.html>.
1426
1427config FB_ATY_BACKLIGHT
1428 bool "Support for backlight control"
1429 depends on FB_ATY
1430 default y
1431 help
1432 Say Y here if you want to control the backlight of your display.
1433
1434config FB_S3
1435 tristate "S3 Trio/Virge support"
1436 depends on FB && PCI
1437 select FB_CFB_FILLRECT
1438 select FB_CFB_COPYAREA
1439 select FB_CFB_IMAGEBLIT
1440 select FB_TILEBLITTING
1441 select FB_SVGALIB
1442 select VGASTATE
1443 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1444 ---help---
1445 Driver for graphics boards with S3 Trio / S3 Virge chip.
1446
1447config FB_S3_DDC
1448 bool "DDC for S3 support"
1449 depends on FB_S3
1450 select FB_DDC
1451 default y
1452 help
1453 Say Y here if you want DDC support for your S3 graphics card.
1454
1455config FB_SAVAGE
1456 tristate "S3 Savage support"
1457 depends on FB && PCI
1458 select FB_MODE_HELPERS
1459 select FB_CFB_FILLRECT
1460 select FB_CFB_COPYAREA
1461 select FB_CFB_IMAGEBLIT
1462 select VGASTATE
1463 help
1464 This driver supports notebooks and computers with S3 Savage PCI/AGP
1465 chips.
1466
1467 Say Y if you have such a graphics card.
1468
1469 To compile this driver as a module, choose M here; the module
1470 will be called savagefb.
1471
1472config FB_SAVAGE_I2C
1473 bool "Enable DDC2 Support"
1474 depends on FB_SAVAGE
1475 select FB_DDC
1476 help
1477 This enables I2C support for S3 Savage Chipsets. This is used
1478 only for getting EDID information from the attached display
1479 allowing for robust video mode handling and switching.
1480
1481 Because fbdev-2.6 requires that drivers must be able to
1482 independently validate video mode parameters, you should say Y
1483 here.
1484
1485config FB_SAVAGE_ACCEL
1486 bool "Enable Console Acceleration"
1487 depends on FB_SAVAGE
1488 default n
1489 help
1490 This option will compile in console acceleration support. If
1491 the resulting framebuffer console has bothersome glitches, then
1492 choose N here.
1493
1494config FB_SIS
1495 tristate "SiS/XGI display support"
1496 depends on FB && PCI
1497 select FB_CFB_FILLRECT
1498 select FB_CFB_COPYAREA
1499 select FB_CFB_IMAGEBLIT
1500 select FB_BOOT_VESA_SUPPORT if FB_SIS = y
1501 help
1502 This is the frame buffer device driver for the SiS 300, 315, 330
1503 and 340 series as well as XGI V3XT, V5, V8, Z7 graphics chipsets.
1504 Specs available at <http://www.sis.com> and <http://www.xgitech.com>.
1505
1506 To compile this driver as a module, choose M here; the module
1507 will be called sisfb.
1508
1509config FB_SIS_300
1510 bool "SiS 300 series support"
1511 depends on FB_SIS
1512 help
1513 Say Y here to support use of the SiS 300/305, 540, 630 and 730.
1514
1515config FB_SIS_315
1516 bool "SiS 315/330/340 series and XGI support"
1517 depends on FB_SIS
1518 help
1519 Say Y here to support use of the SiS 315, 330 and 340 series
1520 (315/H/PRO, 55x, 650, 651, 740, 330, 661, 741, 760, 761) as well
1521 as XGI V3XT, V5, V8 and Z7.
1522
1523config FB_VIA
1524 tristate "VIA UniChrome (Pro) and Chrome9 display support"
1525 depends on FB && PCI && X86
1526 select FB_CFB_FILLRECT
1527 select FB_CFB_COPYAREA
1528 select FB_CFB_IMAGEBLIT
1529 select I2C_ALGOBIT
1530 select I2C
1531 select GPIOLIB
1532 help
1533	 This is the frame buffer device driver for graphics chips of the VIA
1534	 UniChrome (Pro) Family (CLE266,PM800/CN400,P4M800CE/P4M800Pro/
1535	 CN700/VN800,CX700/VX700,P4M890) and Chrome9 Family (K8M890,CN896
1536	 /P4M900,VX800).
1537 Say Y if you have a VIA UniChrome graphics board.
1538
1539 To compile this driver as a module, choose M here: the
1540 module will be called viafb.
1541
1542if FB_VIA
1543
1544config FB_VIA_DIRECT_PROCFS
1545 bool "direct hardware access via procfs (DEPRECATED)(DANGEROUS)"
1546 depends on FB_VIA
1547 default n
1548 help
1549 Allow direct hardware access to some output registers via procfs.
1550 This is dangerous but may provide the only chance to get the
1551 correct output device configuration.
1552 Its use is strongly discouraged.
1553
1554config FB_VIA_X_COMPATIBILITY
1555 bool "X server compatibility"
1556 depends on FB_VIA
1557 default n
1558 help
1559 This option reduces the functionality (power saving, ...) of the
1560 framebuffer to avoid negative impact on the OpenChrome X server.
1561	 If you use any X server other than fbdev you should enable this;
1562 otherwise it should be safe to disable it and allow using all
1563 features.
1564
1565endif
1566
1567config FB_NEOMAGIC
1568 tristate "NeoMagic display support"
1569 depends on FB && PCI
1570 select FB_MODE_HELPERS
1571 select FB_CFB_FILLRECT
1572 select FB_CFB_COPYAREA
1573 select FB_CFB_IMAGEBLIT
1574 select VGASTATE
1575 help
1576 This driver supports notebooks with NeoMagic PCI chips.
1577 Say Y if you have such a graphics card.
1578
1579 To compile this driver as a module, choose M here: the
1580 module will be called neofb.
1581
1582config FB_KYRO
1583 tristate "IMG Kyro support"
1584 depends on FB && PCI
1585 select FB_CFB_FILLRECT
1586 select FB_CFB_COPYAREA
1587 select FB_CFB_IMAGEBLIT
1588 help
1589 Say Y here if you have a STG4000 / Kyro / PowerVR 3 based
1590 graphics board.
1591
1592 To compile this driver as a module, choose M here: the
1593 module will be called kyrofb.
1594
1595config FB_3DFX
1596 tristate "3Dfx Banshee/Voodoo3/Voodoo5 display support"
1597 depends on FB && PCI
1598 select FB_CFB_IMAGEBLIT
1599 select FB_CFB_FILLRECT
1600 select FB_CFB_COPYAREA
1601 select FB_MODE_HELPERS
1602 help
1603 This driver supports graphics boards with the 3Dfx Banshee,
1604 Voodoo3 or VSA-100 (aka Voodoo4/5) chips. Say Y if you have
1605 such a graphics board.
1606
1607 To compile this driver as a module, choose M here: the
1608 module will be called tdfxfb.
1609
1610config FB_3DFX_ACCEL
1611 bool "3Dfx Acceleration functions"
1612 depends on FB_3DFX
1613 ---help---
1614 This will compile the 3Dfx Banshee/Voodoo3/VSA-100 frame buffer
1615 device driver with acceleration functions.
1616
1617config FB_3DFX_I2C
1618 bool "Enable DDC/I2C support"
1619 depends on FB_3DFX
1620 select FB_DDC
1621 default y
1622 help
1623 Say Y here if you want DDC/I2C support for your 3dfx Voodoo3.
1624
1625config FB_VOODOO1
1626 tristate "3Dfx Voodoo Graphics (sst1) support"
1627 depends on FB && PCI
1628 select FB_CFB_FILLRECT
1629 select FB_CFB_COPYAREA
1630 select FB_CFB_IMAGEBLIT
1631 ---help---
1632 Say Y here if you have a 3Dfx Voodoo Graphics (Voodoo1/sst1) or
1633 Voodoo2 (cvg) based graphics card.
1634
1635 To compile this driver as a module, choose M here: the
1636 module will be called sstfb.
1637
1638 WARNING: Do not use any application that uses the 3D engine
1639 (namely glide) while using this driver.
1640	 Please read <file:Documentation/fb/sstfb.txt> for supported
1641	 options and other important information.
1642
1643config FB_VT8623
1644 tristate "VIA VT8623 support"
1645 depends on FB && PCI
1646 select FB_CFB_FILLRECT
1647 select FB_CFB_COPYAREA
1648 select FB_CFB_IMAGEBLIT
1649 select FB_TILEBLITTING
1650 select FB_SVGALIB
1651 select VGASTATE
1652 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1653 ---help---
1654 Driver for CastleRock integrated graphics core in the
1655 VIA VT8623 [Apollo CLE266] chipset.
1656
1657config FB_TRIDENT
1658 tristate "Trident/CyberXXX/CyberBlade support"
1659 depends on FB && PCI
1660 select FB_CFB_FILLRECT
1661 select FB_CFB_COPYAREA
1662 select FB_CFB_IMAGEBLIT
1663 ---help---
1664 This is the frame buffer device driver for Trident PCI/AGP chipsets.
1665 Supported chipset families are TGUI 9440/96XX, 3DImage, Blade3D
1666 and Blade XP.
1667 There are also integrated versions of these chips called CyberXXXX,
1668 CyberImage or CyberBlade. These chips are mostly found in laptops
1669 but also on some motherboards including early VIA EPIA motherboards.
1670 For more information, read <file:Documentation/fb/tridentfb.txt>
1671
1672 Say Y if you have such a graphics board.
1673
1674 To compile this driver as a module, choose M here: the
1675 module will be called tridentfb.
1676
1677config FB_ARK
1678 tristate "ARK 2000PV support"
1679 depends on FB && PCI
1680 select FB_CFB_FILLRECT
1681 select FB_CFB_COPYAREA
1682 select FB_CFB_IMAGEBLIT
1683 select FB_TILEBLITTING
1684 select FB_SVGALIB
1685 select VGASTATE
1686 select FONT_8x16 if FRAMEBUFFER_CONSOLE
1687 ---help---
1688 Driver for PCI graphics boards with ARK 2000PV chip
1689 and ICS 5342 RAMDAC.
1690
1691config FB_PM3
1692 tristate "Permedia3 support"
1693 depends on FB && PCI
1694 select FB_CFB_FILLRECT
1695 select FB_CFB_COPYAREA
1696 select FB_CFB_IMAGEBLIT
1697 help
1698 This is the frame buffer device driver for the 3DLabs Permedia3
1699 chipset, used in Formac ProFormance III, 3DLabs Oxygen VX1 &
1700 similar boards, 3DLabs Permedia3 Create!, Appian Jeronimo 2000
1701 and maybe other boards.
1702
1703config FB_CARMINE
1704 tristate "Fujitsu carmine frame buffer support"
1705 depends on FB && PCI
1706 select FB_CFB_FILLRECT
1707 select FB_CFB_COPYAREA
1708 select FB_CFB_IMAGEBLIT
1709 help
1710 This is the frame buffer device driver for the Fujitsu Carmine chip.
1711 The driver provides two independent frame buffer devices.
1712
1713choice
1714 depends on FB_CARMINE
1715 prompt "DRAM timing"
1716 default FB_CARMINE_DRAM_EVAL
1717
1718config FB_CARMINE_DRAM_EVAL
1719 bool "Eval board timings"
1720 help
1721 Use timings which work on the eval card.
1722
1723config CARMINE_DRAM_CUSTOM
1724 bool "Custom board timings"
1725 help
1726 Use custom board timings.
1727endchoice
1728
1729config FB_AU1100
1730 bool "Au1100 LCD Driver"
1731 depends on (FB = y) && MIPS_ALCHEMY
1732 select FB_CFB_FILLRECT
1733 select FB_CFB_COPYAREA
1734 select FB_CFB_IMAGEBLIT
1735 help
1736 This is the framebuffer driver for the AMD Au1100 SOC. It can drive
1737 various panels and CRTs by passing in kernel cmd line option
1738 au1100fb:panel=<name>.
1739
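# Illustrative sketch only: the panel selection mentioned above is passed on
# the kernel command line, e.g. something like
#   au1100fb:panel=mypanel
# where "mypanel" is a placeholder for one of the panel names known to the
# driver.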
1740config FB_AU1200
1741 bool "Au1200/Au1300 LCD Driver"
1742 depends on (FB = y) && MIPS_ALCHEMY
1743 select FB_SYS_FILLRECT
1744 select FB_SYS_COPYAREA
1745 select FB_SYS_IMAGEBLIT
1746 select FB_SYS_FOPS
1747 help
1748 This is the framebuffer driver for the Au1200/Au1300 SOCs.
1749 It can drive various panels and CRTs by passing in kernel cmd line
1750 option au1200fb:panel=<name>.
1751
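# Illustrative sketch only: as with au1100fb above, the panel is selected on
# the kernel command line, e.g. au1200fb:panel=mypanel, with "mypanel" a
# placeholder for a panel name known to the driver.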
1752config FB_VT8500
1753 bool "VIA VT8500 framebuffer support"
1754 depends on (FB = y) && ARM && ARCH_VT8500
1755 select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
1756 select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
1757 select FB_SYS_IMAGEBLIT
1758 select FB_MODE_HELPERS
1759 select VIDEOMODE_HELPERS
1760 help
1761 This is the framebuffer driver for VIA VT8500 integrated LCD
1762 controller.
1763
1764config FB_WM8505
1765 bool "Wondermedia WM8xxx-series frame buffer support"
1766 depends on (FB = y) && ARM && ARCH_VT8500
1767 select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
1768 select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
1769 select FB_SYS_IMAGEBLIT
1770 select FB_MODE_HELPERS
1771 select VIDEOMODE_HELPERS
1772 help
1773 This is the framebuffer driver for WonderMedia WM8xxx-series
1774 integrated LCD controller. This driver covers the WM8505, WM8650
1775 and WM8850 SoCs.
1776
1777config FB_WMT_GE_ROPS
1778 bool "VT8500/WM8xxx accelerated raster ops support"
1779 depends on (FB = y) && (FB_VT8500 || FB_WM8505)
1780 default n
1781 help
1782 This adds support for accelerated raster operations on the
1783 VIA VT8500 and Wondermedia 85xx series SoCs.
1784
1785source "drivers/video/fbdev/geode/Kconfig"
1786
1787config FB_HIT
1788 tristate "HD64461 Frame Buffer support"
1789 depends on FB && HD64461
1790 select FB_CFB_FILLRECT
1791 select FB_CFB_COPYAREA
1792 select FB_CFB_IMAGEBLIT
1793 help
1794 This is the frame buffer device driver for the Hitachi HD64461 LCD
1795 frame buffer card.
1796
1797config FB_PMAG_AA
1798 bool "PMAG-AA TURBOchannel framebuffer support"
1799 depends on (FB = y) && TC
1800 select FB_CFB_FILLRECT
1801 select FB_CFB_COPYAREA
1802 select FB_CFB_IMAGEBLIT
1803 help
1804 Support for the PMAG-AA TURBOchannel framebuffer card (1280x1024x1)
1805 used mainly in the MIPS-based DECstation series.
1806
1807config FB_PMAG_BA
1808 tristate "PMAG-BA TURBOchannel framebuffer support"
1809 depends on FB && TC
1810 select FB_CFB_FILLRECT
1811 select FB_CFB_COPYAREA
1812 select FB_CFB_IMAGEBLIT
1813 help
1814 Support for the PMAG-BA TURBOchannel framebuffer card (1024x864x8)
1815 used mainly in the MIPS-based DECstation series.
1816
1817config FB_PMAGB_B
1818 tristate "PMAGB-B TURBOchannel framebuffer support"
1819 depends on FB && TC
1820 select FB_CFB_FILLRECT
1821 select FB_CFB_COPYAREA
1822 select FB_CFB_IMAGEBLIT
1823 help
1824 Support for the PMAGB-B TURBOchannel framebuffer card used mainly
1825 in the MIPS-based DECstation series. The card is currently only
1826 supported in 1280x1024x8 mode.
1827
1828config FB_MAXINE
1829 bool "Maxine (Personal DECstation) onboard framebuffer support"
1830 depends on (FB = y) && MACH_DECSTATION
1831 select FB_CFB_FILLRECT
1832 select FB_CFB_COPYAREA
1833 select FB_CFB_IMAGEBLIT
1834 help
1835 Support for the onboard framebuffer (1024x768x8) in the Personal
1836 DECstation series (Personal DECstation 5000/20, /25, /33, /50,
1837 Codename "Maxine").
1838
1839config FB_G364
1840 bool "G364 frame buffer support"
1841 depends on (FB = y) && (MIPS_MAGNUM_4000 || OLIVETTI_M700)
1842 select FB_CFB_FILLRECT
1843 select FB_CFB_COPYAREA
1844 select FB_CFB_IMAGEBLIT
1845 help
1846 The G364 driver is the framebuffer used in MIPS Magnum 4000 and
1847 Olivetti M700-10 systems.
1848
1849config FB_68328
1850 bool "Motorola 68328 native frame buffer support"
1851 depends on (FB = y) && (M68328 || M68EZ328 || M68VZ328)
1852 select FB_CFB_FILLRECT
1853 select FB_CFB_COPYAREA
1854 select FB_CFB_IMAGEBLIT
1855 help
1856 Say Y here if you want to support the built-in frame buffer of
1857 the Motorola 68328 CPU family.
1858
1859config FB_PXA168
1860 tristate "PXA168/910 LCD framebuffer support"
1861 depends on FB && (CPU_PXA168 || CPU_PXA910)
1862 select FB_CFB_FILLRECT
1863 select FB_CFB_COPYAREA
1864 select FB_CFB_IMAGEBLIT
1865 ---help---
1866 Frame buffer driver for the built-in LCD controller in the Marvell
1867 MMP processor.
1868
1869config FB_PXA
1870 tristate "PXA LCD framebuffer support"
1871 depends on FB && ARCH_PXA
1872 select FB_CFB_FILLRECT
1873 select FB_CFB_COPYAREA
1874 select FB_CFB_IMAGEBLIT
1875 ---help---
1876 Frame buffer driver for the built-in LCD controller in the Intel
1877 PXA2x0 processor.
1878
1879 This driver is also available as a module ( = code which can be
1880 inserted and removed from the running kernel whenever you want). The
1881 module will be called pxafb. If you want to compile it as a module,
1882 say M here and read <file:Documentation/kbuild/modules.txt>.
1883
1884 If unsure, say N.
1885
1886config FB_PXA_OVERLAY
1887 bool "Support PXA27x/PXA3xx Overlay(s) as framebuffer"
1888 default n
1889 depends on FB_PXA && (PXA27x || PXA3xx)
1890
1891config FB_PXA_SMARTPANEL
1892 bool "PXA Smartpanel LCD support"
1893 default n
1894 depends on FB_PXA
1895
1896config FB_PXA_PARAMETERS
1897 bool "PXA LCD command line parameters"
1898 default n
1899 depends on FB_PXA
1900 ---help---
1901 Enable the use of kernel command line or module parameters
1902 to configure the physical properties of the LCD panel when
1903 using the PXA LCD driver.
1904
1905 This option allows you to override the panel parameters
1906 supplied by the platform in order to support multiple
1907 different models of flatpanel. If you will only be using a
1908 single model of flatpanel then you can safely leave this
1909 option disabled.
1910
1911 <file:Documentation/fb/pxafb.txt> describes the available parameters.
1912
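# Illustrative sketch only: with this option enabled, a command line override
# as described above might look something like
#   video=pxafb:mode:640x480-16,active
# The authoritative list of keywords is in <file:Documentation/fb/pxafb.txt>;
# the mode and flags shown here are placeholders.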
1913config PXA3XX_GCU
1914 tristate "PXA3xx 2D graphics accelerator driver"
1915 depends on FB_PXA
1916 help
1917 Kernelspace driver for the 2D graphics controller unit (GCU)
1918 found on PXA3xx processors. There is a counterpart driver in the
1919 DirectFB suite, see http://www.directfb.org/
1920
1921 If you compile this as a module, it will be called pxa3xx_gcu.
1922
1923config FB_MBX
1924 tristate "2700G LCD framebuffer support"
1925 depends on FB && ARCH_PXA
1926 select FB_CFB_FILLRECT
1927 select FB_CFB_COPYAREA
1928 select FB_CFB_IMAGEBLIT
1929 ---help---
1930 Framebuffer driver for the Intel 2700G (Marathon) Graphics
1931 Accelerator
1932
1933config FB_MBX_DEBUG
1934 bool "Enable debugging info via debugfs"
1935 depends on FB_MBX && DEBUG_FS
1936 default n
1937 ---help---
1938 Enable this if you want debugging information using the debug
1939 filesystem (debugfs)
1940
1941 If unsure, say N.
1942
1943config FB_FSL_DIU
1944 tristate "Freescale DIU framebuffer support"
1945 depends on FB && FSL_SOC
1946 select FB_MODE_HELPERS
1947 select FB_CFB_FILLRECT
1948 select FB_CFB_COPYAREA
1949 select FB_CFB_IMAGEBLIT
1950 select PPC_LIB_RHEAP
1951 ---help---
1952 Framebuffer driver for the Freescale SoC DIU
1953
1954config FB_W100
1955 tristate "W100 frame buffer support"
1956 depends on FB && ARCH_PXA
1957 select FB_CFB_FILLRECT
1958 select FB_CFB_COPYAREA
1959 select FB_CFB_IMAGEBLIT
1960 ---help---
1961 Frame buffer driver for the w100 as found on the Sharp SL-Cxx series.
1962 It can also drive the w3220 chip found on iPAQ hx4700.
1963
1964 This driver is also available as a module ( = code which can be
1965 inserted and removed from the running kernel whenever you want). The
1966 module will be called w100fb. If you want to compile it as a module,
1967 say M here and read <file:Documentation/kbuild/modules.txt>.
1968
1969 If unsure, say N.
1970
1971config FB_SH_MOBILE_LCDC
1972 tristate "SuperH Mobile LCDC framebuffer support"
1973 depends on FB && (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
1974 select FB_SYS_FILLRECT
1975 select FB_SYS_COPYAREA
1976 select FB_SYS_IMAGEBLIT
1977 select FB_SYS_FOPS
1978 select FB_DEFERRED_IO
1979 select FB_BACKLIGHT
1980 select SH_MIPI_DSI if SH_LCD_MIPI_DSI
1981 ---help---
1982 Frame buffer driver for the on-chip SH-Mobile LCD controller.
1983
1984config FB_SH_MOBILE_HDMI
1985 tristate "SuperH Mobile HDMI controller support"
1986 depends on FB_SH_MOBILE_LCDC
1987 select FB_MODE_HELPERS
1988 select SOUND
1989 select SND
1990 select SND_SOC
1991 ---help---
1992 Driver for the on-chip SH-Mobile HDMI controller.
1993
1994config FB_TMIO
1995 tristate "Toshiba Mobile IO FrameBuffer support"
1996 depends on FB && MFD_CORE
1997 select FB_CFB_FILLRECT
1998 select FB_CFB_COPYAREA
1999 select FB_CFB_IMAGEBLIT
2000 ---help---
2001	 Frame buffer driver for the framebuffer integrated in the Toshiba
2002	 Mobile IO controller, as found on the Sharp SL-6000 series.
2003
2004 This driver is also available as a module ( = code which can be
2005 inserted and removed from the running kernel whenever you want). The
2006 module will be called tmiofb. If you want to compile it as a module,
2007 say M here and read <file:Documentation/kbuild/modules.txt>.
2008
2009 If unsure, say N.
2010
2011config FB_TMIO_ACCELL
2012 bool "tmiofb acceleration"
2013 depends on FB_TMIO
2014 default y
2015
2016config FB_S3C
2017 tristate "Samsung S3C framebuffer support"
2018 depends on FB && (CPU_S3C2416 || ARCH_S3C64XX || ARCH_S5P64X0 || \
2019 ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
2020 select FB_CFB_FILLRECT
2021 select FB_CFB_COPYAREA
2022 select FB_CFB_IMAGEBLIT
2023 ---help---
2024 Frame buffer driver for the built-in FB controller in the Samsung
2025 SoC line from the S3C2443 onwards, including the S3C2416, S3C2450,
2026 and the S3C64XX series such as the S3C6400 and S3C6410.
2027
2028 These chips all have the same basic framebuffer design with the
2029 actual capabilities depending on the chip. For instance the S3C6400
2030 and S3C6410 support 4 hardware windows whereas the S3C24XX series
2031 currently only have two.
2032
2033 Currently the support is only for the S3C6400 and S3C6410 SoCs.
2034
2035config FB_S3C_DEBUG_REGWRITE
2036 bool "Debug register writes"
2037 depends on FB_S3C
2038 ---help---
2039 Show all register writes via pr_debug()
2040
2041config FB_S3C2410
2042 tristate "S3C2410 LCD framebuffer support"
2043 depends on FB && ARCH_S3C24XX
2044 select FB_CFB_FILLRECT
2045 select FB_CFB_COPYAREA
2046 select FB_CFB_IMAGEBLIT
2047 ---help---
2048 Frame buffer driver for the built-in LCD controller in the Samsung
2049 S3C2410 processor.
2050
2051 This driver is also available as a module ( = code which can be
2052 inserted and removed from the running kernel whenever you want). The
2053 module will be called s3c2410fb. If you want to compile it as a module,
2054 say M here and read <file:Documentation/kbuild/modules.txt>.
2055
2056 If unsure, say N.
2057config FB_S3C2410_DEBUG
2058 bool "S3C2410 lcd debug messages"
2059 depends on FB_S3C2410
2060 help
2061	 Turn on debugging messages. Note that you can set/unset this at
2062	 run time through sysfs.
2063
2064config FB_NUC900
2065 bool "NUC900 LCD framebuffer support"
2066 depends on FB && ARCH_W90X900
2067 select FB_CFB_FILLRECT
2068 select FB_CFB_COPYAREA
2069 select FB_CFB_IMAGEBLIT
2070 ---help---
2071 Frame buffer driver for the built-in LCD controller in the Nuvoton
2072 NUC900 processor
2073
2074config GPM1040A0_320X240
2075 bool "Giantplus Technology GPM1040A0 320x240 Color TFT LCD"
2076 depends on FB_NUC900
2077
2078config FB_SM501
2079 tristate "Silicon Motion SM501 framebuffer support"
2080 depends on FB && MFD_SM501
2081 select FB_CFB_FILLRECT
2082 select FB_CFB_COPYAREA
2083 select FB_CFB_IMAGEBLIT
2084 ---help---
2085 Frame buffer driver for the CRT and LCD controllers in the Silicon
2086 Motion SM501.
2087
2088 This driver is also available as a module ( = code which can be
2089 inserted and removed from the running kernel whenever you want). The
2090 module will be called sm501fb. If you want to compile it as a module,
2091 say M here and read <file:Documentation/kbuild/modules.txt>.
2092
2093 If unsure, say N.
2094
2095config FB_SMSCUFX
2096 tristate "SMSC UFX6000/7000 USB Framebuffer support"
2097 depends on FB && USB
2098 select FB_MODE_HELPERS
2099 select FB_SYS_FILLRECT
2100 select FB_SYS_COPYAREA
2101 select FB_SYS_IMAGEBLIT
2102 select FB_SYS_FOPS
2103 select FB_DEFERRED_IO
2104 ---help---
2105 This is a kernel framebuffer driver for SMSC UFX USB devices.
2106 Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
2107 mplayer -vo fbdev. Supports both UFX6000 (USB 2.0) and UFX7000
2108 (USB 3.0) devices.
2109 To compile as a module, choose M here: the module name is smscufx.
2110
2111config FB_UDL
2112 tristate "Displaylink USB Framebuffer support"
2113 depends on FB && USB
2114 select FB_MODE_HELPERS
2115 select FB_SYS_FILLRECT
2116 select FB_SYS_COPYAREA
2117 select FB_SYS_IMAGEBLIT
2118 select FB_SYS_FOPS
2119 select FB_DEFERRED_IO
2120 ---help---
2121 This is a kernel framebuffer driver for DisplayLink USB devices.
2122 Supports fbdev clients like xf86-video-fbdev, kdrive, fbi, and
2123 mplayer -vo fbdev. Supports all USB 2.0 era DisplayLink devices.
2124 To compile as a module, choose M here: the module name is udlfb.
2125
2126config FB_IBM_GXT4500
2127 tristate "Framebuffer support for IBM GXT4000P/4500P/6000P/6500P adaptors"
2128 depends on FB && PPC
2129 select FB_CFB_FILLRECT
2130 select FB_CFB_COPYAREA
2131 select FB_CFB_IMAGEBLIT
2132 ---help---
2133 Say Y here to enable support for the IBM GXT4000P/6000P and
2134 GXT4500P/6500P display adaptor based on Raster Engine RC1000,
2135 found on some IBM System P (pSeries) machines. This driver
2136 doesn't use Geometry Engine GT1000.
2137
2138config FB_PS3
2139 tristate "PS3 GPU framebuffer driver"
2140 depends on FB && PS3_PS3AV
2141 select FB_SYS_FILLRECT
2142 select FB_SYS_COPYAREA
2143 select FB_SYS_IMAGEBLIT
2144 select FB_SYS_FOPS
2145 select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
2146 ---help---
2147 Include support for the virtual frame buffer in the PS3 platform.
2148
2149config FB_PS3_DEFAULT_SIZE_M
2150 int "PS3 default frame buffer size (in MiB)"
2151 depends on FB_PS3
2152 default 9
2153 ---help---
2154 This is the default size (in MiB) of the virtual frame buffer in
2155 the PS3.
2156 The default value can be overridden on the kernel command line
2157	 using the "ps3fb" option (e.g. "ps3fb=9M").
2158
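# Illustrative sketch only: following the help text above, overriding the
# default size on the kernel command line would look like
#   ps3fb=18M
# i.e. the "ps3fb" option followed by the desired size in MiB (18M is just an
# example value).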
2159config FB_XILINX
2160 tristate "Xilinx frame buffer support"
2161 depends on FB && (XILINX_VIRTEX || MICROBLAZE || ARCH_ZYNQ)
2162 select FB_CFB_FILLRECT
2163 select FB_CFB_COPYAREA
2164 select FB_CFB_IMAGEBLIT
2165 ---help---
2166 Include support for the Xilinx ML300/ML403 reference design
2167	 framebuffer. ML300 carries a 640*480 LCD display on the board;
2168	 ML403 uses a standard DB15 VGA connector.
2169
2170config FB_GOLDFISH
2171 tristate "Goldfish Framebuffer"
2172 depends on FB && HAS_DMA
2173 select FB_CFB_FILLRECT
2174 select FB_CFB_COPYAREA
2175 select FB_CFB_IMAGEBLIT
2176 ---help---
2177 Framebuffer driver for Goldfish Virtual Platform
2178
2179config FB_COBALT
2180 tristate "Cobalt server LCD frame buffer support"
2181 depends on FB && (MIPS_COBALT || MIPS_SEAD3)
2182
2183config FB_SH7760
2184 bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
2185 depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
2186 || CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
2187 select FB_CFB_FILLRECT
2188 select FB_CFB_COPYAREA
2189 select FB_CFB_IMAGEBLIT
2190 ---help---
2191 Support for the SH7760/SH7763/SH7720/SH7721 integrated
2192 (D)STN/TFT LCD Controller.
2193 Supports display resolutions up to 1024x1024 pixel, grayscale and
2194 color operation, with depths ranging from 1 bpp to 8 bpp monochrome
2195 and 8, 15 or 16 bpp color; 90 degrees clockwise display rotation for
2196 panels <= 320 pixel horizontal resolution.
2197
2198config FB_DA8XX
2199 tristate "DA8xx/OMAP-L1xx/AM335x Framebuffer support"
2200 depends on FB && (ARCH_DAVINCI_DA8XX || SOC_AM33XX)
2201 select FB_CFB_FILLRECT
2202 select FB_CFB_COPYAREA
2203 select FB_CFB_IMAGEBLIT
2204 select FB_CFB_REV_PIXELS_IN_BYTE
2205 select FB_MODE_HELPERS
2206 select VIDEOMODE_HELPERS
2207 ---help---
2208 This is the frame buffer device driver for the TI LCD controller
2209 found on DA8xx/OMAP-L1xx/AM335x SoCs.
2210 If unsure, say N.
2211
2212config FB_VIRTUAL
2213 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
2214 depends on FB
2215 select FB_SYS_FILLRECT
2216 select FB_SYS_COPYAREA
2217 select FB_SYS_IMAGEBLIT
2218 select FB_SYS_FOPS
2219 ---help---
2220 This is a `virtual' frame buffer device. It operates on a chunk of
2221 unswappable kernel memory instead of on the memory of a graphics
2222 board. This means you cannot see any output sent to this frame
2223 buffer device, while it does consume precious memory. The main use
2224 of this frame buffer device is testing and debugging the frame
2225 buffer subsystem. Do NOT enable it for normal systems! To protect
2226 the innocent, it has to be enabled explicitly at boot time using the
2227 kernel option `video=vfb:'.
2228
2229 To compile this driver as a module, choose M here: the
2230 module will be called vfb. In order to load it, you must use
2231 the vfb_enable=1 option.
2232
2233 If unsure, say N.
2234
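# Illustrative sketch only: per the help text above, the built-in test
# framebuffer is activated with the kernel option
#   video=vfb:
# and the modular build is loaded with something like
#   modprobe vfb vfb_enable=1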
2235config XEN_FBDEV_FRONTEND
2236 tristate "Xen virtual frame buffer support"
2237 depends on FB && XEN
2238 select FB_SYS_FILLRECT
2239 select FB_SYS_COPYAREA
2240 select FB_SYS_IMAGEBLIT
2241 select FB_SYS_FOPS
2242 select FB_DEFERRED_IO
2243 select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
2244 select XEN_XENBUS_FRONTEND
2245 default y
2246 help
2247 This driver implements the front-end of the Xen virtual
2248 frame buffer driver. It communicates with a back-end
2249 in another domain.
2250
2251config FB_METRONOME
2252 tristate "E-Ink Metronome/8track controller support"
2253 depends on FB
2254 select FB_SYS_FILLRECT
2255 select FB_SYS_COPYAREA
2256 select FB_SYS_IMAGEBLIT
2257 select FB_SYS_FOPS
2258 select FB_DEFERRED_IO
2259 help
2260 This driver implements support for the E-Ink Metronome
2261	 controller. The pre-release name for this device was 8track,
2262	 and some vendors may also have called it PVI-nnnn.
2263
2264config FB_MB862XX
2265 tristate "Fujitsu MB862xx GDC support"
2266 depends on FB
2267 depends on PCI || (OF && PPC)
2268 select FB_CFB_FILLRECT
2269 select FB_CFB_COPYAREA
2270 select FB_CFB_IMAGEBLIT
2271 ---help---
2272 Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers.
2273
2274choice
2275 prompt "GDC variant"
2276 depends on FB_MB862XX
2277
2278config FB_MB862XX_PCI_GDC
2279 bool "Carmine/Coral-P(A) GDC"
2280 depends on PCI
2281 ---help---
2282 This enables framebuffer support for Fujitsu Carmine/Coral-P(A)
2283 PCI graphics controller devices.
2284
2285config FB_MB862XX_LIME
2286 bool "Lime GDC"
2287 depends on OF && PPC
2288 select FB_FOREIGN_ENDIAN
2289 select FB_LITTLE_ENDIAN
2290 ---help---
2291 Framebuffer support for Fujitsu Lime GDC on host CPU bus.
2292
2293endchoice
2294
2295config FB_MB862XX_I2C
2296 bool "Support I2C bus on MB862XX GDC"
2297 depends on FB_MB862XX && I2C
2298 default y
2299 help
2300	 Selecting this option adds the Coral-P(A)/Lime GDC I2C bus adapter
2301	 driver to support accessing I2C devices on the controller's I2C bus.
2302 These are usually some video decoder chips.
2303
2304config FB_EP93XX
2305 tristate "EP93XX frame buffer support"
2306 depends on FB && ARCH_EP93XX
2307 select FB_CFB_FILLRECT
2308 select FB_CFB_COPYAREA
2309 select FB_CFB_IMAGEBLIT
2310 ---help---
2311 Framebuffer driver for the Cirrus Logic EP93XX series of processors.
2312 This driver is also available as a module. The module will be called
2313 ep93xx-fb.
2314
2315config FB_PRE_INIT_FB
2316 bool "Don't reinitialize, use bootloader's GDC/Display configuration"
2317 depends on FB && FB_MB862XX_LIME
2318 ---help---
2319 Select this option if display contents should be inherited as set by
2320 the bootloader.
2321
2322config FB_MSM
2323 tristate "MSM Framebuffer support"
2324 depends on FB && ARCH_MSM
2325 select FB_CFB_FILLRECT
2326 select FB_CFB_COPYAREA
2327 select FB_CFB_IMAGEBLIT
2328
2329config FB_MX3
2330 tristate "MX3 Framebuffer support"
2331 depends on FB && MX3_IPU
2332 select FB_CFB_FILLRECT
2333 select FB_CFB_COPYAREA
2334 select FB_CFB_IMAGEBLIT
2335 default y
2336 help
2337 This is a framebuffer device for the i.MX31 LCD Controller. So
2338 far only synchronous displays are supported. If you plan to use
2339 an LCD display with your i.MX31 system, say Y here.
2340
2341config FB_BROADSHEET
2342 tristate "E-Ink Broadsheet/Epson S1D13521 controller support"
2343 depends on FB
2344 select FB_SYS_FILLRECT
2345 select FB_SYS_COPYAREA
2346 select FB_SYS_IMAGEBLIT
2347 select FB_SYS_FOPS
2348 select FB_DEFERRED_IO
2349 help
2350 This driver implements support for the E-Ink Broadsheet
2351	 controller. The release name for this device was Epson S1D13521,
2352	 and it may also have been sold under other names when coupled with
2353	 a bridge adapter.
2354
2355config FB_AUO_K190X
2356 tristate "AUO-K190X EPD controller support"
2357 depends on FB
2358 select FB_SYS_FILLRECT
2359 select FB_SYS_COPYAREA
2360 select FB_SYS_IMAGEBLIT
2361 select FB_SYS_FOPS
2362 select FB_DEFERRED_IO
2363 help
2364 Provides support for epaper controllers from the K190X series
2365 of AUO. These controllers can be used to drive epaper displays
2366 from Sipix.
2367
2368 This option enables the common support, shared by the individual
2369 controller drivers. You will also have to enable the driver
2370 for the controller type used in your device.
2371
2372config FB_AUO_K1900
2373 tristate "AUO-K1900 EPD controller support"
2374 depends on FB && FB_AUO_K190X
2375 help
2376 This driver implements support for the AUO K1900 epd-controller.
2377 This controller can drive Sipix epaper displays but can only do
2378 serial updates, reducing the number of possible frames per second.
2379
2380config FB_AUO_K1901
2381 tristate "AUO-K1901 EPD controller support"
2382 depends on FB && FB_AUO_K190X
2383 help
2384 This driver implements support for the AUO K1901 epd-controller.
2385 This controller can drive Sipix epaper displays and supports
2386 concurrent updates, making higher frames per second possible.
2387
2388config FB_JZ4740
2389 tristate "JZ4740 LCD framebuffer support"
2390 depends on FB && MACH_JZ4740
2391 select FB_SYS_FILLRECT
2392 select FB_SYS_COPYAREA
2393 select FB_SYS_IMAGEBLIT
2394 help
2395 Framebuffer support for the JZ4740 SoC.
2396
2397config FB_MXS
2398 tristate "MXS LCD framebuffer support"
2399 depends on FB && ARCH_MXS
2400 select FB_CFB_FILLRECT
2401 select FB_CFB_COPYAREA
2402 select FB_CFB_IMAGEBLIT
2403 select FB_MODE_HELPERS
2404 select VIDEOMODE_HELPERS
2405 help
2406 Framebuffer support for the MXS SoC.
2407
2408config FB_PUV3_UNIGFX
2409 tristate "PKUnity v3 Unigfx framebuffer support"
2410 depends on FB && UNICORE32 && ARCH_PUV3
2411 select FB_SYS_FILLRECT
2412 select FB_SYS_COPYAREA
2413 select FB_SYS_IMAGEBLIT
2414 select FB_SYS_FOPS
2415 help
2416 Choose this option if you want to use the Unigfx device as a
2417	 framebuffer device, without PCI & AGP support.
2418
2419config FB_HYPERV
2420 tristate "Microsoft Hyper-V Synthetic Video support"
2421 depends on FB && HYPERV
2422 select FB_CFB_FILLRECT
2423 select FB_CFB_COPYAREA
2424 select FB_CFB_IMAGEBLIT
2425 help
2426 This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
2427
2428config FB_SIMPLE
2429 bool "Simple framebuffer support"
2430 depends on (FB = y)
2431 select FB_CFB_FILLRECT
2432 select FB_CFB_COPYAREA
2433 select FB_CFB_IMAGEBLIT
2434 help
2435 Say Y if you want support for a simple frame-buffer.
2436
2437 This driver assumes that the display hardware has been initialized
2438 before the kernel boots, and the kernel will simply render to the
2439 pre-allocated frame buffer surface.
2440
2441	 Configuration of the surface address, size, and format must be
2442	 provided through the device tree or plain old platform data.
2443
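# Illustrative sketch only: when the surface is described via device tree as
# mentioned above, the node conventionally looks roughly like
#   framebuffer@3e000000 {
#           compatible = "simple-framebuffer";
#           reg = <0x3e000000 0x200000>;
#           width = <1024>;
#           height = <600>;
#           stride = <(1024 * 2)>;
#           format = "r5g6b5";
#   };
# The address, geometry and format here are placeholders; see the
# simple-framebuffer device tree binding for the authoritative property list.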
2444source "drivers/video/fbdev/omap/Kconfig"
2445source "drivers/video/fbdev/omap2/Kconfig"
2446source "drivers/video/fbdev/exynos/Kconfig"
2447source "drivers/video/fbdev/mmp/Kconfig"
2448
2449config FB_SH_MOBILE_MERAM
2450 tristate "SuperH Mobile MERAM read ahead support"
2451 depends on (SUPERH || ARCH_SHMOBILE)
2452 select GENERIC_ALLOCATOR
2453 ---help---
2454 Enable MERAM support for the SuperH controller.
2455
2456 This will allow for caching of the framebuffer to provide more
2457 reliable access under heavy main memory bus traffic situations.
2458 Up to 4 memory channels can be configured, allowing 4 RGB or
2459 2 YCbCr framebuffers to be configured.
2460
2461config FB_SSD1307
2462 tristate "Solomon SSD1307 framebuffer support"
2463 depends on FB && I2C
2464 depends on OF
2465 depends on GPIOLIB
2466 select FB_SYS_FOPS
2467 select FB_SYS_FILLRECT
2468 select FB_SYS_COPYAREA
2469 select FB_SYS_IMAGEBLIT
2470 select FB_DEFERRED_IO
2471 select PWM
2472 help
2473 This driver implements support for the Solomon SSD1307
2474 OLED controller over I2C.
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
new file mode 100644
index 000000000000..0284f2a12538
--- /dev/null
+++ b/drivers/video/fbdev/Makefile
@@ -0,0 +1,152 @@
+# Makefile for the Linux video drivers.
+# 5 Aug 1999, James Simmons, <mailto:jsimmons@users.sf.net>
+# Rewritten to use lists instead of if-statements.
+
+# Each configuration option enables a list of files.
+
+obj-y += core/
+
+obj-$(CONFIG_EXYNOS_VIDEO) += exynos/
+
+obj-$(CONFIG_FB_MACMODES) += macmodes.o
+obj-$(CONFIG_FB_WMT_GE_ROPS) += wmt_ge_rops.o
+
+# Hardware specific drivers go first
+obj-$(CONFIG_FB_AMIGA) += amifb.o c2p_planar.o
+obj-$(CONFIG_FB_ARC) += arcfb.o
+obj-$(CONFIG_FB_CLPS711X) += clps711xfb.o
+obj-$(CONFIG_FB_CYBER2000) += cyber2000fb.o
+obj-$(CONFIG_FB_GRVGA) += grvga.o
+obj-$(CONFIG_FB_PM2) += pm2fb.o
+obj-$(CONFIG_FB_PM3) += pm3fb.o
+
+obj-$(CONFIG_FB_I740) += i740fb.o
+obj-$(CONFIG_FB_MATROX) += matrox/
+obj-$(CONFIG_FB_RIVA) += riva/
+obj-$(CONFIG_FB_NVIDIA) += nvidia/
+obj-$(CONFIG_FB_ATY) += aty/ macmodes.o
+obj-$(CONFIG_FB_ATY128) += aty/ macmodes.o
+obj-$(CONFIG_FB_RADEON) += aty/
+obj-$(CONFIG_FB_SIS) += sis/
+obj-$(CONFIG_FB_VIA) += via/
+obj-$(CONFIG_FB_KYRO) += kyro/
+obj-$(CONFIG_FB_SAVAGE) += savage/
+obj-$(CONFIG_FB_GEODE) += geode/
+obj-$(CONFIG_FB_MBX) += mbx/
+obj-$(CONFIG_FB_NEOMAGIC) += neofb.o
+obj-$(CONFIG_FB_3DFX) += tdfxfb.o
+obj-$(CONFIG_FB_CONTROL) += controlfb.o
+obj-$(CONFIG_FB_PLATINUM) += platinumfb.o
+obj-$(CONFIG_FB_VALKYRIE) += valkyriefb.o
+obj-$(CONFIG_FB_CT65550) += chipsfb.o
+obj-$(CONFIG_FB_IMSTT) += imsttfb.o
+obj-$(CONFIG_FB_FM2) += fm2fb.o
+obj-$(CONFIG_FB_VT8623) += vt8623fb.o
+obj-$(CONFIG_FB_TRIDENT) += tridentfb.o
+obj-$(CONFIG_FB_LE80578) += vermilion/
+obj-$(CONFIG_FB_S3) += s3fb.o
+obj-$(CONFIG_FB_ARK) += arkfb.o
+obj-$(CONFIG_FB_STI) += stifb.o
+obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o
+obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o
+obj-$(CONFIG_FB_CG3) += cg3.o sbuslib.o
+obj-$(CONFIG_FB_BW2) += bw2.o sbuslib.o
+obj-$(CONFIG_FB_CG14) += cg14.o sbuslib.o
+obj-$(CONFIG_FB_P9100) += p9100.o sbuslib.o
+obj-$(CONFIG_FB_TCX) += tcx.o sbuslib.o
+obj-$(CONFIG_FB_LEO) += leo.o sbuslib.o
+obj-$(CONFIG_FB_ACORN) += acornfb.o
+obj-$(CONFIG_FB_ATARI) += atafb.o c2p_iplan2.o atafb_mfb.o \
+			atafb_iplan2p2.o atafb_iplan2p4.o atafb_iplan2p8.o
+obj-$(CONFIG_FB_MAC) += macfb.o
+obj-$(CONFIG_FB_HECUBA) += hecubafb.o
+obj-$(CONFIG_FB_N411) += n411.o
+obj-$(CONFIG_FB_HGA) += hgafb.o
+obj-$(CONFIG_FB_XVR500) += sunxvr500.o
+obj-$(CONFIG_FB_XVR2500) += sunxvr2500.o
+obj-$(CONFIG_FB_XVR1000) += sunxvr1000.o
+obj-$(CONFIG_FB_IGA) += igafb.o
+obj-$(CONFIG_FB_APOLLO) += dnfb.o
+obj-$(CONFIG_FB_Q40) += q40fb.o
+obj-$(CONFIG_FB_TGA) += tgafb.o
+obj-$(CONFIG_FB_HP300) += hpfb.o
+obj-$(CONFIG_FB_G364) += g364fb.o
+obj-$(CONFIG_FB_EP93XX) += ep93xx-fb.o
+obj-$(CONFIG_FB_SA1100) += sa1100fb.o
+obj-$(CONFIG_FB_HIT) += hitfb.o
+obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o
+obj-$(CONFIG_FB_PVR2) += pvr2fb.o
+obj-$(CONFIG_FB_VOODOO1) += sstfb.o
+obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
+obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o
+obj-$(CONFIG_FB_68328) += 68328fb.o
+obj-$(CONFIG_FB_GBE) += gbefb.o
+obj-$(CONFIG_FB_CIRRUS) += cirrusfb.o
+obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o
+obj-$(CONFIG_FB_PXA) += pxafb.o
+obj-$(CONFIG_FB_PXA168) += pxa168fb.o
+obj-$(CONFIG_PXA3XX_GCU) += pxa3xx-gcu.o
+obj-$(CONFIG_MMP_DISP) += mmp/
+obj-$(CONFIG_FB_W100) += w100fb.o
+obj-$(CONFIG_FB_TMIO) += tmiofb.o
+obj-$(CONFIG_FB_AU1100) += au1100fb.o
+obj-$(CONFIG_FB_AU1200) += au1200fb.o
+obj-$(CONFIG_FB_VT8500) += vt8500lcdfb.o
+obj-$(CONFIG_FB_WM8505) += wm8505fb.o
+obj-$(CONFIG_FB_PMAG_AA) += pmag-aa-fb.o
+obj-$(CONFIG_FB_PMAG_BA) += pmag-ba-fb.o
+obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o
+obj-$(CONFIG_FB_MAXINE) += maxinefb.o
+obj-$(CONFIG_FB_METRONOME) += metronomefb.o
+obj-$(CONFIG_FB_BROADSHEET) += broadsheetfb.o
+obj-$(CONFIG_FB_AUO_K190X) += auo_k190x.o
+obj-$(CONFIG_FB_AUO_K1900) += auo_k1900fb.o
+obj-$(CONFIG_FB_AUO_K1901) += auo_k1901fb.o
+obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o
+obj-$(CONFIG_FB_SH7760) += sh7760fb.o
+obj-$(CONFIG_FB_IMX) += imxfb.o
+obj-$(CONFIG_FB_S3C) += s3c-fb.o
+obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
+obj-$(CONFIG_FB_FSL_DIU) += fsl-diu-fb.o
+obj-$(CONFIG_FB_COBALT) += cobalt_lcdfb.o
+obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
+obj-$(CONFIG_FB_PS3) += ps3fb.o
+obj-$(CONFIG_FB_SM501) += sm501fb.o
+obj-$(CONFIG_FB_UDL) += udlfb.o
+obj-$(CONFIG_FB_SMSCUFX) += smscufx.o
+obj-$(CONFIG_FB_XILINX) += xilinxfb.o
+obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o
+obj-$(CONFIG_FB_SH_MOBILE_HDMI) += sh_mobile_hdmi.o
+obj-$(CONFIG_FB_SH_MOBILE_MERAM) += sh_mobile_meram.o
+obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
+obj-$(CONFIG_FB_OMAP) += omap/
+obj-y += omap2/
+obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
+obj-$(CONFIG_FB_CARMINE) += carminefb.o
+obj-$(CONFIG_FB_MB862XX) += mb862xx/
+obj-$(CONFIG_FB_MSM) += msm/
+obj-$(CONFIG_FB_NUC900) += nuc900fb.o
+obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
+obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
+obj-$(CONFIG_FB_HYPERV) += hyperv_fb.o
+obj-$(CONFIG_FB_OPENCORES) += ocfb.o
+
+# Platform or fallback drivers go here
+obj-$(CONFIG_FB_UVESA) += uvesafb.o
+obj-$(CONFIG_FB_VESA) += vesafb.o
+obj-$(CONFIG_FB_EFI) += efifb.o
+obj-$(CONFIG_FB_VGA16) += vga16fb.o
+obj-$(CONFIG_FB_OF) += offb.o
+obj-$(CONFIG_FB_BF537_LQ035) += bf537-lq035.o
+obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o
+obj-$(CONFIG_FB_BFIN_LQ035Q1) += bfin-lq035q1-fb.o
+obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
+obj-$(CONFIG_FB_BFIN_7393) += bfin_adv7393fb.o
+obj-$(CONFIG_FB_MX3) += mx3fb.o
+obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
+obj-$(CONFIG_FB_MXS) += mxsfb.o
+obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o
+obj-$(CONFIG_FB_SIMPLE) += simplefb.o
+
+# the test framebuffer is last
+obj-$(CONFIG_FB_VIRTUAL) += vfb.o
diff --git a/drivers/video/acornfb.c b/drivers/video/fbdev/acornfb.c
index a305caea58ee..a305caea58ee 100644
--- a/drivers/video/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
diff --git a/drivers/video/acornfb.h b/drivers/video/fbdev/acornfb.h
index 175c8ff3367c..175c8ff3367c 100644
--- a/drivers/video/acornfb.h
+++ b/drivers/video/fbdev/acornfb.h
diff --git a/drivers/video/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 14d6b3793e0a..14d6b3793e0a 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
diff --git a/drivers/video/amifb.c b/drivers/video/fbdev/amifb.c
index 518f790ef88a..518f790ef88a 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/fbdev/amifb.c
diff --git a/drivers/video/arcfb.c b/drivers/video/fbdev/arcfb.c
index 1b0b233b8b39..1b0b233b8b39 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/fbdev/arcfb.c
diff --git a/drivers/video/arkfb.c b/drivers/video/fbdev/arkfb.c
index adc4ea2cc5a0..adc4ea2cc5a0 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
diff --git a/drivers/video/asiliantfb.c b/drivers/video/fbdev/asiliantfb.c
index 7e8ddf00ccc2..7e8ddf00ccc2 100644
--- a/drivers/video/asiliantfb.c
+++ b/drivers/video/fbdev/asiliantfb.c
diff --git a/drivers/video/atafb.c b/drivers/video/fbdev/atafb.c
index e21d1f58554c..e21d1f58554c 100644
--- a/drivers/video/atafb.c
+++ b/drivers/video/fbdev/atafb.c
diff --git a/drivers/video/atafb.h b/drivers/video/fbdev/atafb.h
index 014e05906cb1..014e05906cb1 100644
--- a/drivers/video/atafb.h
+++ b/drivers/video/fbdev/atafb.h
diff --git a/drivers/video/atafb_iplan2p2.c b/drivers/video/fbdev/atafb_iplan2p2.c
index 8cc9c50379d0..8cc9c50379d0 100644
--- a/drivers/video/atafb_iplan2p2.c
+++ b/drivers/video/fbdev/atafb_iplan2p2.c
diff --git a/drivers/video/atafb_iplan2p4.c b/drivers/video/fbdev/atafb_iplan2p4.c
index bee0d89463f7..bee0d89463f7 100644
--- a/drivers/video/atafb_iplan2p4.c
+++ b/drivers/video/fbdev/atafb_iplan2p4.c
diff --git a/drivers/video/atafb_iplan2p8.c b/drivers/video/fbdev/atafb_iplan2p8.c
index 356fb52ce443..356fb52ce443 100644
--- a/drivers/video/atafb_iplan2p8.c
+++ b/drivers/video/fbdev/atafb_iplan2p8.c
diff --git a/drivers/video/atafb_mfb.c b/drivers/video/fbdev/atafb_mfb.c
index 6a352d62eecf..6a352d62eecf 100644
--- a/drivers/video/atafb_mfb.c
+++ b/drivers/video/fbdev/atafb_mfb.c
diff --git a/drivers/video/atafb_utils.h b/drivers/video/fbdev/atafb_utils.h
index ac9e19dc5057..ac9e19dc5057 100644
--- a/drivers/video/atafb_utils.h
+++ b/drivers/video/fbdev/atafb_utils.h
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index e683b6ef9594..e683b6ef9594 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
diff --git a/drivers/video/aty/Makefile b/drivers/video/fbdev/aty/Makefile
index a6cc0e9ec790..a6cc0e9ec790 100644
--- a/drivers/video/aty/Makefile
+++ b/drivers/video/fbdev/aty/Makefile
diff --git a/drivers/video/aty/ati_ids.h b/drivers/video/fbdev/aty/ati_ids.h
index 3e9d28bcd9f8..3e9d28bcd9f8 100644
--- a/drivers/video/aty/ati_ids.h
+++ b/drivers/video/fbdev/aty/ati_ids.h
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index 52108be69e77..52108be69e77 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
diff --git a/drivers/video/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index 1f39a62f899b..1f39a62f899b 100644
--- a/drivers/video/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index c3d0074a32db..c3d0074a32db 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
diff --git a/drivers/video/aty/mach64_accel.c b/drivers/video/fbdev/aty/mach64_accel.c
index 182bd680141f..182bd680141f 100644
--- a/drivers/video/aty/mach64_accel.c
+++ b/drivers/video/fbdev/aty/mach64_accel.c
diff --git a/drivers/video/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index 51f29d627ceb..51f29d627ceb 100644
--- a/drivers/video/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
index 0fe02e22d9a4..2fa0317ab3c7 100644
--- a/drivers/video/aty/mach64_cursor.c
+++ b/drivers/video/fbdev/aty/mach64_cursor.c
@@ -5,7 +5,7 @@
 #include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/string.h>
-#include "../fb_draw.h"
+#include "../core/fb_draw.h"
 
 #include <asm/io.h>
 
diff --git a/drivers/video/aty/mach64_gx.c b/drivers/video/fbdev/aty/mach64_gx.c
index 10c988aef58e..10c988aef58e 100644
--- a/drivers/video/aty/mach64_gx.c
+++ b/drivers/video/fbdev/aty/mach64_gx.c
diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/fbdev/aty/radeon_accel.c
index a469a3d6edcb..a469a3d6edcb 100644
--- a/drivers/video/aty/radeon_accel.c
+++ b/drivers/video/fbdev/aty/radeon_accel.c
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/fbdev/aty/radeon_backlight.c
index db572df7e1ef..db572df7e1ef 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/fbdev/aty/radeon_backlight.c
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 26d80a4486fb..26d80a4486fb 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
diff --git a/drivers/video/aty/radeon_i2c.c b/drivers/video/fbdev/aty/radeon_i2c.c
index ab1d0fd76316..ab1d0fd76316 100644
--- a/drivers/video/aty/radeon_i2c.c
+++ b/drivers/video/fbdev/aty/radeon_i2c.c
diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/fbdev/aty/radeon_monitor.c
index bc078d50d8f1..bc078d50d8f1 100644
--- a/drivers/video/aty/radeon_monitor.c
+++ b/drivers/video/fbdev/aty/radeon_monitor.c
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index 46a12f1a93c3..46a12f1a93c3 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/fbdev/aty/radeonfb.h
index cb846044f57c..cb846044f57c 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/fbdev/aty/radeonfb.h
diff --git a/drivers/video/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 372d4aea9d1c..372d4aea9d1c 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
diff --git a/drivers/video/au1100fb.h b/drivers/video/fbdev/au1100fb.h
index 12d9642d5465..12d9642d5465 100644
--- a/drivers/video/au1100fb.h
+++ b/drivers/video/fbdev/au1100fb.h
diff --git a/drivers/video/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 4cfba78a1458..4cfba78a1458 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
diff --git a/drivers/video/au1200fb.h b/drivers/video/fbdev/au1200fb.h
index e2672714d8d4..e2672714d8d4 100644
--- a/drivers/video/au1200fb.h
+++ b/drivers/video/fbdev/au1200fb.h
diff --git a/drivers/video/auo_k1900fb.c b/drivers/video/fbdev/auo_k1900fb.c
index f5b668e77af3..f5b668e77af3 100644
--- a/drivers/video/auo_k1900fb.c
+++ b/drivers/video/fbdev/auo_k1900fb.c
diff --git a/drivers/video/auo_k1901fb.c b/drivers/video/fbdev/auo_k1901fb.c
index 12b9adcb75c5..12b9adcb75c5 100644
--- a/drivers/video/auo_k1901fb.c
+++ b/drivers/video/fbdev/auo_k1901fb.c
diff --git a/drivers/video/auo_k190x.c b/drivers/video/fbdev/auo_k190x.c
index 8d2499d1cafb..8d2499d1cafb 100644
--- a/drivers/video/auo_k190x.c
+++ b/drivers/video/fbdev/auo_k190x.c
diff --git a/drivers/video/auo_k190x.h b/drivers/video/fbdev/auo_k190x.h
index e35af1f51b28..e35af1f51b28 100644
--- a/drivers/video/auo_k190x.h
+++ b/drivers/video/fbdev/auo_k190x.h
diff --git a/drivers/video/bf537-lq035.c b/drivers/video/fbdev/bf537-lq035.c
index a82d2578d976..a82d2578d976 100644
--- a/drivers/video/bf537-lq035.c
+++ b/drivers/video/fbdev/bf537-lq035.c
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/fbdev/bf54x-lq043fb.c
index 42b8f9d11018..e2c42ad8515a 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/fbdev/bf54x-lq043fb.c
@@ -49,13 +49,13 @@
 #include <linux/spinlock.h>
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
+#include <linux/gpio.h>
 
 #include <asm/blackfin.h>
 #include <asm/irq.h>
 #include <asm/dpmc.h>
 #include <asm/dma-mapping.h>
 #include <asm/dma.h>
-#include <asm/gpio.h>
 #include <asm/portmux.h>
 
 #include <mach/bf54x-lq043.h>
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/fbdev/bfin-lq035q1-fb.c
index b594a58ff21d..b594a58ff21d 100644
--- a/drivers/video/bfin-lq035q1-fb.c
+++ b/drivers/video/fbdev/bfin-lq035q1-fb.c
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/fbdev/bfin-t350mcqb-fb.c
index b5cf1307a3d9..b5cf1307a3d9 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/fbdev/bfin-t350mcqb-fb.c
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/fbdev/bfin_adv7393fb.c
index a54f7f7d763b..a54f7f7d763b 100644
--- a/drivers/video/bfin_adv7393fb.c
+++ b/drivers/video/fbdev/bfin_adv7393fb.c
diff --git a/drivers/video/bfin_adv7393fb.h b/drivers/video/fbdev/bfin_adv7393fb.h
index cd591b5152a5..cd591b5152a5 100644
--- a/drivers/video/bfin_adv7393fb.h
+++ b/drivers/video/fbdev/bfin_adv7393fb.h
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 8556264b16b7..8556264b16b7 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
diff --git a/drivers/video/bt431.h b/drivers/video/fbdev/bt431.h
index 04e0cfbba538..04e0cfbba538 100644
--- a/drivers/video/bt431.h
+++ b/drivers/video/fbdev/bt431.h
diff --git a/drivers/video/bt455.h b/drivers/video/fbdev/bt455.h
index 80f61b03e9ae..80f61b03e9ae 100644
--- a/drivers/video/bt455.h
+++ b/drivers/video/fbdev/bt455.h
diff --git a/drivers/video/bw2.c b/drivers/video/fbdev/bw2.c
index bc123d6947a4..bc123d6947a4 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/fbdev/bw2.c
diff --git a/drivers/video/c2p.h b/drivers/video/fbdev/c2p.h
index 6c38d40427d8..6c38d40427d8 100644
--- a/drivers/video/c2p.h
+++ b/drivers/video/fbdev/c2p.h
diff --git a/drivers/video/c2p_core.h b/drivers/video/fbdev/c2p_core.h
index e1035a865fb9..e1035a865fb9 100644
--- a/drivers/video/c2p_core.h
+++ b/drivers/video/fbdev/c2p_core.h
diff --git a/drivers/video/c2p_iplan2.c b/drivers/video/fbdev/c2p_iplan2.c
index 19156dc6158c..19156dc6158c 100644
--- a/drivers/video/c2p_iplan2.c
+++ b/drivers/video/fbdev/c2p_iplan2.c
diff --git a/drivers/video/c2p_planar.c b/drivers/video/fbdev/c2p_planar.c
index ec7ac8526f06..ec7ac8526f06 100644
--- a/drivers/video/c2p_planar.c
+++ b/drivers/video/fbdev/c2p_planar.c
diff --git a/drivers/video/carminefb.c b/drivers/video/fbdev/carminefb.c
index 65f7c15f5fdb..65f7c15f5fdb 100644
--- a/drivers/video/carminefb.c
+++ b/drivers/video/fbdev/carminefb.c
diff --git a/drivers/video/carminefb.h b/drivers/video/fbdev/carminefb.h
index 05306de0c6b6..05306de0c6b6 100644
--- a/drivers/video/carminefb.h
+++ b/drivers/video/fbdev/carminefb.h
diff --git a/drivers/video/carminefb_regs.h b/drivers/video/fbdev/carminefb_regs.h
index 045215600b73..045215600b73 100644
--- a/drivers/video/carminefb_regs.h
+++ b/drivers/video/fbdev/carminefb_regs.h
diff --git a/drivers/video/cg14.c b/drivers/video/fbdev/cg14.c
index c79745b136bb..c79745b136bb 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/fbdev/cg14.c
diff --git a/drivers/video/cg3.c b/drivers/video/fbdev/cg3.c
index 64a89d5747ed..64a89d5747ed 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/fbdev/cg3.c
diff --git a/drivers/video/cg6.c b/drivers/video/fbdev/cg6.c
index 70781fea092a..70781fea092a 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/fbdev/cg6.c
diff --git a/drivers/video/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 206a66b61072..206a66b61072 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
diff --git a/drivers/video/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index d992aa5eb3f0..d992aa5eb3f0 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
diff --git a/drivers/video/clps711xfb.c b/drivers/video/fbdev/clps711xfb.c
index f00980607b8f..f00980607b8f 100644
--- a/drivers/video/clps711xfb.c
+++ b/drivers/video/fbdev/clps711xfb.c
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index d5533f4db1cf..d5533f4db1cf 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
diff --git a/drivers/video/controlfb.c b/drivers/video/fbdev/controlfb.c
index fdadef979238..fdadef979238 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
diff --git a/drivers/video/controlfb.h b/drivers/video/fbdev/controlfb.h
index 6026c60fc100..6026c60fc100 100644
--- a/drivers/video/controlfb.h
+++ b/drivers/video/fbdev/controlfb.h
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
new file mode 100644
index 000000000000..fa306538dac2
--- /dev/null
+++ b/drivers/video/fbdev/core/Makefile
@@ -0,0 +1,16 @@
+obj-y += fb_notify.o
+obj-$(CONFIG_FB) += fb.o
+fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
+	modedb.o fbcvt.o
+fb-objs := $(fb-y)
+
+obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
+obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
+obj-$(CONFIG_FB_CFB_IMAGEBLIT) += cfbimgblt.o
+obj-$(CONFIG_FB_SYS_FILLRECT) += sysfillrect.o
+obj-$(CONFIG_FB_SYS_COPYAREA) += syscopyarea.o
+obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o
+obj-$(CONFIG_FB_SYS_FOPS) += fb_sys_fops.o
+obj-$(CONFIG_FB_SVGALIB) += svgalib.o
+obj-$(CONFIG_FB_DDC) += fb_ddc.o
+obj-$(CONFIG_FB_DEFERRED_IO) += fb_defio.o
diff --git a/drivers/video/cfbcopyarea.c b/drivers/video/fbdev/core/cfbcopyarea.c
index bcb57235fcc7..bcb57235fcc7 100644
--- a/drivers/video/cfbcopyarea.c
+++ b/drivers/video/fbdev/core/cfbcopyarea.c
diff --git a/drivers/video/cfbfillrect.c b/drivers/video/fbdev/core/cfbfillrect.c
index ba9f58b2a5e8..ba9f58b2a5e8 100644
--- a/drivers/video/cfbfillrect.c
+++ b/drivers/video/fbdev/core/cfbfillrect.c
diff --git a/drivers/video/cfbimgblt.c b/drivers/video/fbdev/core/cfbimgblt.c
index a2bb276a8b24..a2bb276a8b24 100644
--- a/drivers/video/cfbimgblt.c
+++ b/drivers/video/fbdev/core/cfbimgblt.c
diff --git a/drivers/video/fb_ddc.c b/drivers/video/fbdev/core/fb_ddc.c
index 2b106f046fde..94322ccfedde 100644
--- a/drivers/video/fb_ddc.c
+++ b/drivers/video/fbdev/core/fb_ddc.c
@@ -15,7 +15,7 @@
 #include <linux/i2c-algo-bit.h>
 #include <linux/slab.h>
 
-#include "edid.h"
+#include "../edid.h"
 
 #define DDC_ADDR 0x50
 
diff --git a/drivers/video/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 900aa4ecd617..900aa4ecd617 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
diff --git a/drivers/video/fb_draw.h b/drivers/video/fbdev/core/fb_draw.h
index 624ee115f129..624ee115f129 100644
--- a/drivers/video/fb_draw.h
+++ b/drivers/video/fbdev/core/fb_draw.h
diff --git a/drivers/video/fb_notify.c b/drivers/video/fbdev/core/fb_notify.c
index 74c2da528884..74c2da528884 100644
--- a/drivers/video/fb_notify.c
+++ b/drivers/video/fbdev/core/fb_notify.c
diff --git a/drivers/video/fb_sys_fops.c b/drivers/video/fbdev/core/fb_sys_fops.c
index ff275d7f3eaf..ff275d7f3eaf 100644
--- a/drivers/video/fb_sys_fops.c
+++ b/drivers/video/fbdev/core/fb_sys_fops.c
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index f89245b8ba8e..f89245b8ba8e 100644
--- a/drivers/video/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
diff --git a/drivers/video/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c
index 7cb715dfc0e1..7cb715dfc0e1 100644
--- a/drivers/video/fbcvt.c
+++ b/drivers/video/fbdev/core/fbcvt.c
diff --git a/drivers/video/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index b6d5008f361f..b6d5008f361f 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
diff --git a/drivers/video/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 6103fa6fb54f..c204ebe6187e 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -37,7 +37,7 @@
 #include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #endif
-#include "edid.h"
+#include "../edid.h"
 
 /*
  * EDID parser
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 53444ac19fe0..53444ac19fe0 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
diff --git a/drivers/video/modedb.c b/drivers/video/fbdev/core/modedb.c
index a9a907c440d7..a9a907c440d7 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
diff --git a/drivers/video/svgalib.c b/drivers/video/fbdev/core/svgalib.c
index 9e01322fabe3..9e01322fabe3 100644
--- a/drivers/video/svgalib.c
+++ b/drivers/video/fbdev/core/svgalib.c
diff --git a/drivers/video/syscopyarea.c b/drivers/video/fbdev/core/syscopyarea.c
index 844a32fd38ed..844a32fd38ed 100644
--- a/drivers/video/syscopyarea.c
+++ b/drivers/video/fbdev/core/syscopyarea.c
diff --git a/drivers/video/sysfillrect.c b/drivers/video/fbdev/core/sysfillrect.c
index 33ee3d34f9d2..33ee3d34f9d2 100644
--- a/drivers/video/sysfillrect.c
+++ b/drivers/video/fbdev/core/sysfillrect.c
diff --git a/drivers/video/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
index a4d05b1b17d7..a4d05b1b17d7 100644
--- a/drivers/video/sysimgblt.c
+++ b/drivers/video/fbdev/core/sysimgblt.c
diff --git a/drivers/video/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index b0a950f36970..b0a950f36970 100644
--- a/drivers/video/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
diff --git a/drivers/video/cyber2000fb.h b/drivers/video/fbdev/cyber2000fb.h
index bad69102e774..bad69102e774 100644
--- a/drivers/video/cyber2000fb.h
+++ b/drivers/video/fbdev/cyber2000fb.h
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 0c0ba920ea48..6b23508ff0a5 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -663,15 +663,7 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 		(green << info->var.green.offset) |
 		(blue << info->var.blue.offset);
 
-	switch (info->var.bits_per_pixel) {
-	case 16:
-		((u16 *) (info->pseudo_palette))[regno] = v;
-		break;
-	case 24:
-	case 32:
-		((u32 *) (info->pseudo_palette))[regno] = v;
-		break;
-	}
+	((u32 *) (info->pseudo_palette))[regno] = v;
 	if (palette[0] != 0x4000) {
 		update_hw = 1;
 		palette[0] = 0x4000;
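
The hunk above drops the per-depth switch because, for truecolor visuals, the fbdev console layer reads info->pseudo_palette as an array of u32 entries regardless of var.bits_per_pixel, so caching u16 values at 16 bpp was the inconsistency being removed. A minimal sketch of that convention follows; the myfb_setcolreg name is an assumption, not the driver's actual symbol.

#include <linux/fb.h>

static int myfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			  unsigned blue, unsigned transp, struct fb_info *info)
{
	u32 v;

	if (regno >= 16)	/* only the 16 console palette slots are cached */
		return -EINVAL;

	/* scale the 16-bit components down to the visual's field widths */
	red   >>= 16 - info->var.red.length;
	green >>= 16 - info->var.green.length;
	blue  >>= 16 - info->var.blue.length;

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset);

	/* always stored as u32, whatever var.bits_per_pixel says */
	((u32 *)info->pseudo_palette)[regno] = v;
	return 0;
}
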
diff --git a/drivers/video/dnfb.c b/drivers/video/fbdev/dnfb.c
index 3526899da61b..3526899da61b 100644
--- a/drivers/video/dnfb.c
+++ b/drivers/video/fbdev/dnfb.c
diff --git a/drivers/video/edid.h b/drivers/video/fbdev/edid.h
index d03a232d90b2..d03a232d90b2 100644
--- a/drivers/video/edid.h
+++ b/drivers/video/fbdev/edid.h
diff --git a/drivers/video/efifb.c b/drivers/video/fbdev/efifb.c
index ae9618ff6735..ae9618ff6735 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/fbdev/efifb.c
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index 35a0f533f1a2..35a0f533f1a2 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/fbdev/exynos/Kconfig
index fcf2d48ac6d1..fcf2d48ac6d1 100644
--- a/drivers/video/exynos/Kconfig
+++ b/drivers/video/fbdev/exynos/Kconfig
diff --git a/drivers/video/exynos/Makefile b/drivers/video/fbdev/exynos/Makefile
index b5b1bd228abb..b5b1bd228abb 100644
--- a/drivers/video/exynos/Makefile
+++ b/drivers/video/fbdev/exynos/Makefile
diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
index cee9602f9a7b..cee9602f9a7b 100644
--- a/drivers/video/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c
index 85edabfdef5a..85edabfdef5a 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.h b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h
index 412552274df3..412552274df3 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.h
+++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.h
diff --git a/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c
index c148d06540c1..c148d06540c1 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
+++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.c
diff --git a/drivers/video/exynos/exynos_mipi_dsi_lowlevel.h b/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h
index 85460701c7ea..85460701c7ea 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_lowlevel.h
+++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi_lowlevel.h
diff --git a/drivers/video/exynos/exynos_mipi_dsi_regs.h b/drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h
index 4227106d3fd0..4227106d3fd0 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_regs.h
+++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi_regs.h
diff --git a/drivers/video/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c
index 29e70ed3f154..29e70ed3f154 100644
--- a/drivers/video/exynos/s6e8ax0.c
+++ b/drivers/video/fbdev/exynos/s6e8ax0.c
diff --git a/drivers/video/fb-puv3.c b/drivers/video/fbdev/fb-puv3.c
index 6db9ebd042a3..6db9ebd042a3 100644
--- a/drivers/video/fb-puv3.c
+++ b/drivers/video/fbdev/fb-puv3.c
diff --git a/drivers/video/ffb.c b/drivers/video/fbdev/ffb.c
index 4c4ffa61ae26..4c4ffa61ae26 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/fbdev/ffb.c
diff --git a/drivers/video/fm2fb.c b/drivers/video/fbdev/fm2fb.c
index e69d47af9932..e69d47af9932 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fbdev/fm2fb.c
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index e8758b9c3bcc..e8758b9c3bcc 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
diff --git a/drivers/video/g364fb.c b/drivers/video/fbdev/g364fb.c
index 223896cc5f7d..223896cc5f7d 100644
--- a/drivers/video/g364fb.c
+++ b/drivers/video/fbdev/g364fb.c
diff --git a/drivers/video/gbefb.c b/drivers/video/fbdev/gbefb.c
index 3ec65a878ac8..3ec65a878ac8 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/fbdev/gbefb.c
diff --git a/drivers/video/geode/Kconfig b/drivers/video/fbdev/geode/Kconfig
index 1e8555284786..1e8555284786 100644
--- a/drivers/video/geode/Kconfig
+++ b/drivers/video/fbdev/geode/Kconfig
diff --git a/drivers/video/geode/Makefile b/drivers/video/fbdev/geode/Makefile
index 5c98da126883..5c98da126883 100644
--- a/drivers/video/geode/Makefile
+++ b/drivers/video/fbdev/geode/Makefile
diff --git a/drivers/video/geode/display_gx.c b/drivers/video/fbdev/geode/display_gx.c
index f0af911a096d..f0af911a096d 100644
--- a/drivers/video/geode/display_gx.c
+++ b/drivers/video/fbdev/geode/display_gx.c
diff --git a/drivers/video/geode/display_gx1.c b/drivers/video/fbdev/geode/display_gx1.c
index 926d53eeb549..926d53eeb549 100644
--- a/drivers/video/geode/display_gx1.c
+++ b/drivers/video/fbdev/geode/display_gx1.c
diff --git a/drivers/video/geode/display_gx1.h b/drivers/video/fbdev/geode/display_gx1.h
index 671c05558c79..671c05558c79 100644
--- a/drivers/video/geode/display_gx1.h
+++ b/drivers/video/fbdev/geode/display_gx1.h
diff --git a/drivers/video/geode/geodefb.h b/drivers/video/fbdev/geode/geodefb.h
index ae04820e0c57..ae04820e0c57 100644
--- a/drivers/video/geode/geodefb.h
+++ b/drivers/video/fbdev/geode/geodefb.h
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/fbdev/geode/gx1fb_core.c
index 2794ba11f332..2794ba11f332 100644
--- a/drivers/video/geode/gx1fb_core.c
+++ b/drivers/video/fbdev/geode/gx1fb_core.c
diff --git a/drivers/video/geode/gxfb.h b/drivers/video/fbdev/geode/gxfb.h
index d19e9378b0c0..d19e9378b0c0 100644
--- a/drivers/video/geode/gxfb.h
+++ b/drivers/video/fbdev/geode/gxfb.h
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c
index 1790f14bab15..1790f14bab15 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/fbdev/geode/gxfb_core.c
diff --git a/drivers/video/geode/lxfb.h b/drivers/video/fbdev/geode/lxfb.h
index cfcd8090f313..cfcd8090f313 100644
--- a/drivers/video/geode/lxfb.h
+++ b/drivers/video/fbdev/geode/lxfb.h
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index 9e1d19d673a1..9e1d19d673a1 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/fbdev/geode/lxfb_ops.c
index 79e9abc72b83..79e9abc72b83 100644
--- a/drivers/video/geode/lxfb_ops.c
+++ b/drivers/video/fbdev/geode/lxfb_ops.c
diff --git a/drivers/video/geode/suspend_gx.c b/drivers/video/fbdev/geode/suspend_gx.c
index 1bb043d70c64..1bb043d70c64 100644
--- a/drivers/video/geode/suspend_gx.c
+++ b/drivers/video/fbdev/geode/suspend_gx.c
diff --git a/drivers/video/geode/video_cs5530.c b/drivers/video/fbdev/geode/video_cs5530.c
index 649c3943d431..649c3943d431 100644
--- a/drivers/video/geode/video_cs5530.c
+++ b/drivers/video/fbdev/geode/video_cs5530.c
diff --git a/drivers/video/geode/video_cs5530.h b/drivers/video/fbdev/geode/video_cs5530.h
index 56cecca7f1ce..56cecca7f1ce 100644
--- a/drivers/video/geode/video_cs5530.h
+++ b/drivers/video/fbdev/geode/video_cs5530.h
diff --git a/drivers/video/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c
index 6082f653c68a..6082f653c68a 100644
--- a/drivers/video/geode/video_gx.c
+++ b/drivers/video/fbdev/geode/video_gx.c
diff --git a/drivers/video/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index 7f6c9e6cfc6c..7f6c9e6cfc6c 100644
--- a/drivers/video/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
diff --git a/drivers/video/grvga.c b/drivers/video/fbdev/grvga.c
index c078701f15f6..c078701f15f6 100644
--- a/drivers/video/grvga.c
+++ b/drivers/video/fbdev/grvga.c
diff --git a/drivers/video/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index 135d78a02588..135d78a02588 100644
--- a/drivers/video/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
diff --git a/drivers/video/hecubafb.c b/drivers/video/fbdev/hecubafb.c
index f64120ec9192..f64120ec9192 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/fbdev/hecubafb.c
diff --git a/drivers/video/hgafb.c b/drivers/video/fbdev/hgafb.c
index 5ff9fe2116a4..5ff9fe2116a4 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
diff --git a/drivers/video/hitfb.c b/drivers/video/fbdev/hitfb.c
index a648d5186c6e..a648d5186c6e 100644
--- a/drivers/video/hitfb.c
+++ b/drivers/video/fbdev/hitfb.c
diff --git a/drivers/video/hpfb.c b/drivers/video/fbdev/hpfb.c
index a1b7e5fa9b09..a1b7e5fa9b09 100644
--- a/drivers/video/hpfb.c
+++ b/drivers/video/fbdev/hpfb.c
diff --git a/drivers/video/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index e23392ec5af3..e23392ec5af3 100644
--- a/drivers/video/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
diff --git a/drivers/video/i740_reg.h b/drivers/video/fbdev/i740_reg.h
index 91bac76549d7..91bac76549d7 100644
--- a/drivers/video/i740_reg.h
+++ b/drivers/video/fbdev/i740_reg.h
diff --git a/drivers/video/i740fb.c b/drivers/video/fbdev/i740fb.c
index ca7c9df193b0..ca7c9df193b0 100644
--- a/drivers/video/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
diff --git a/drivers/video/i810/Makefile b/drivers/video/fbdev/i810/Makefile
index 96e08c8ded97..96e08c8ded97 100644
--- a/drivers/video/i810/Makefile
+++ b/drivers/video/fbdev/i810/Makefile
diff --git a/drivers/video/i810/i810-i2c.c b/drivers/video/fbdev/i810/i810-i2c.c
index 7db17d0d8a8c..7db17d0d8a8c 100644
--- a/drivers/video/i810/i810-i2c.c
+++ b/drivers/video/fbdev/i810/i810-i2c.c
diff --git a/drivers/video/i810/i810.h b/drivers/video/fbdev/i810/i810.h
index 1414b73ac55b..1414b73ac55b 100644
--- a/drivers/video/i810/i810.h
+++ b/drivers/video/fbdev/i810/i810.h
diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
index 7672d2ea9b35..7672d2ea9b35 100644
--- a/drivers/video/i810/i810_accel.c
+++ b/drivers/video/fbdev/i810/i810_accel.c
diff --git a/drivers/video/i810/i810_dvt.c b/drivers/video/fbdev/i810/i810_dvt.c
index b4b3670667ab..b4b3670667ab 100644
--- a/drivers/video/i810/i810_dvt.c
+++ b/drivers/video/fbdev/i810/i810_dvt.c
diff --git a/drivers/video/i810/i810_gtf.c b/drivers/video/fbdev/i810/i810_gtf.c
index 9743d51e7f8c..9743d51e7f8c 100644
--- a/drivers/video/i810/i810_gtf.c
+++ b/drivers/video/fbdev/i810/i810_gtf.c
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/fbdev/i810/i810_main.c
index bb674e431741..bb674e431741 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/fbdev/i810/i810_main.c
diff --git a/drivers/video/i810/i810_main.h b/drivers/video/fbdev/i810/i810_main.h
index a25afaa534ba..a25afaa534ba 100644
--- a/drivers/video/i810/i810_main.h
+++ b/drivers/video/fbdev/i810/i810_main.h
diff --git a/drivers/video/i810/i810_regs.h b/drivers/video/fbdev/i810/i810_regs.h
index 91c6bd9d0d0d..91c6bd9d0d0d 100644
--- a/drivers/video/i810/i810_regs.h
+++ b/drivers/video/fbdev/i810/i810_regs.h
diff --git a/drivers/video/igafb.c b/drivers/video/fbdev/igafb.c
index 486f18897414..486f18897414 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/fbdev/igafb.c
diff --git a/drivers/video/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index aae10ce74f14..aae10ce74f14 100644
--- a/drivers/video/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
diff --git a/drivers/video/imxfb.c b/drivers/video/fbdev/imxfb.c
index f6e621684953..f6e621684953 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
diff --git a/drivers/video/intelfb/Makefile b/drivers/video/fbdev/intelfb/Makefile
index f7d631ebee8e..f7d631ebee8e 100644
--- a/drivers/video/intelfb/Makefile
+++ b/drivers/video/fbdev/intelfb/Makefile
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/fbdev/intelfb/intelfb.h
index 6b51175629c7..6b51175629c7 100644
--- a/drivers/video/intelfb/intelfb.h
+++ b/drivers/video/fbdev/intelfb/intelfb.h
diff --git a/drivers/video/intelfb/intelfb_i2c.c b/drivers/video/fbdev/intelfb/intelfb_i2c.c
index 3300bd31d9d7..3300bd31d9d7 100644
--- a/drivers/video/intelfb/intelfb_i2c.c
+++ b/drivers/video/fbdev/intelfb/intelfb_i2c.c
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
index b847d530471a..b847d530471a 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
diff --git a/drivers/video/intelfb/intelfbhw.c b/drivers/video/fbdev/intelfb/intelfbhw.c
index fbad61da359f..fbad61da359f 100644
--- a/drivers/video/intelfb/intelfbhw.c
+++ b/drivers/video/fbdev/intelfb/intelfbhw.c
diff --git a/drivers/video/intelfb/intelfbhw.h b/drivers/video/fbdev/intelfb/intelfbhw.h
index 216ca20f259f..216ca20f259f 100644
--- a/drivers/video/intelfb/intelfbhw.h
+++ b/drivers/video/fbdev/intelfb/intelfbhw.h
diff --git a/drivers/video/jz4740_fb.c b/drivers/video/fbdev/jz4740_fb.c
index 87790e9644d0..87790e9644d0 100644
--- a/drivers/video/jz4740_fb.c
+++ b/drivers/video/fbdev/jz4740_fb.c
diff --git a/drivers/video/kyro/Makefile b/drivers/video/fbdev/kyro/Makefile
index 2fd66f551bae..2fd66f551bae 100644
--- a/drivers/video/kyro/Makefile
+++ b/drivers/video/fbdev/kyro/Makefile
diff --git a/drivers/video/kyro/STG4000InitDevice.c b/drivers/video/fbdev/kyro/STG4000InitDevice.c
index 1d3f2080aa6f..1d3f2080aa6f 100644
--- a/drivers/video/kyro/STG4000InitDevice.c
+++ b/drivers/video/fbdev/kyro/STG4000InitDevice.c
diff --git a/drivers/video/kyro/STG4000Interface.h b/drivers/video/fbdev/kyro/STG4000Interface.h
index b7c83d5dfb13..b7c83d5dfb13 100644
--- a/drivers/video/kyro/STG4000Interface.h
+++ b/drivers/video/fbdev/kyro/STG4000Interface.h
diff --git a/drivers/video/kyro/STG4000OverlayDevice.c b/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
index 0aeeaa10708b..0aeeaa10708b 100644
--- a/drivers/video/kyro/STG4000OverlayDevice.c
+++ b/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
diff --git a/drivers/video/kyro/STG4000Ramdac.c b/drivers/video/fbdev/kyro/STG4000Ramdac.c
index e6ad037e4396..e6ad037e4396 100644
--- a/drivers/video/kyro/STG4000Ramdac.c
+++ b/drivers/video/fbdev/kyro/STG4000Ramdac.c
diff --git a/drivers/video/kyro/STG4000Reg.h b/drivers/video/fbdev/kyro/STG4000Reg.h
index 50f4670e9252..50f4670e9252 100644
--- a/drivers/video/kyro/STG4000Reg.h
+++ b/drivers/video/fbdev/kyro/STG4000Reg.h
diff --git a/drivers/video/kyro/STG4000VTG.c b/drivers/video/fbdev/kyro/STG4000VTG.c
index bd389709d234..bd389709d234 100644
--- a/drivers/video/kyro/STG4000VTG.c
+++ b/drivers/video/fbdev/kyro/STG4000VTG.c
diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index 65041e15fd59..65041e15fd59 100644
--- a/drivers/video/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
diff --git a/drivers/video/leo.c b/drivers/video/fbdev/leo.c
index 2c7f7d479fe2..2c7f7d479fe2 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/fbdev/leo.c
diff --git a/drivers/video/macfb.c b/drivers/video/fbdev/macfb.c
index cda7587cbc86..cda7587cbc86 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/fbdev/macfb.c
diff --git a/drivers/video/macmodes.c b/drivers/video/fbdev/macmodes.c
index af86c081d2be..af86c081d2be 100644
--- a/drivers/video/macmodes.c
+++ b/drivers/video/fbdev/macmodes.c
diff --git a/drivers/video/macmodes.h b/drivers/video/fbdev/macmodes.h
index b86ba08aac9e..b86ba08aac9e 100644
--- a/drivers/video/macmodes.h
+++ b/drivers/video/fbdev/macmodes.h
diff --git a/drivers/video/matrox/Makefile b/drivers/video/fbdev/matrox/Makefile
index f9c00ebe2530..f9c00ebe2530 100644
--- a/drivers/video/matrox/Makefile
+++ b/drivers/video/fbdev/matrox/Makefile
diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/fbdev/matrox/g450_pll.c
index c15f8a57498e..c15f8a57498e 100644
--- a/drivers/video/matrox/g450_pll.c
+++ b/drivers/video/fbdev/matrox/g450_pll.c
diff --git a/drivers/video/matrox/g450_pll.h b/drivers/video/fbdev/matrox/g450_pll.h
index aac615d18440..aac615d18440 100644
--- a/drivers/video/matrox/g450_pll.h
+++ b/drivers/video/fbdev/matrox/g450_pll.h
diff --git a/drivers/video/matrox/i2c-matroxfb.c b/drivers/video/fbdev/matrox/i2c-matroxfb.c
index 0fb280ead3dc..0fb280ead3dc 100644
--- a/drivers/video/matrox/i2c-matroxfb.c
+++ b/drivers/video/fbdev/matrox/i2c-matroxfb.c
diff --git a/drivers/video/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
index a01147fdf270..a01147fdf270 100644
--- a/drivers/video/matrox/matroxfb_DAC1064.c
+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
diff --git a/drivers/video/matrox/matroxfb_DAC1064.h b/drivers/video/fbdev/matrox/matroxfb_DAC1064.h
index 1e6e45b57b78..1e6e45b57b78 100644
--- a/drivers/video/matrox/matroxfb_DAC1064.h
+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.h
diff --git a/drivers/video/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
index 195ad7cac1ba..195ad7cac1ba 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
diff --git a/drivers/video/matrox/matroxfb_Ti3026.h b/drivers/video/fbdev/matrox/matroxfb_Ti3026.h
index 27872aaa0a17..27872aaa0a17 100644
--- a/drivers/video/matrox/matroxfb_Ti3026.h
+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.h
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/fbdev/matrox/matroxfb_accel.c
index 0d5cb85d071a..0d5cb85d071a 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/fbdev/matrox/matroxfb_accel.c
diff --git a/drivers/video/matrox/matroxfb_accel.h b/drivers/video/fbdev/matrox/matroxfb_accel.h
index 1e418e62c22d..1e418e62c22d 100644
--- a/drivers/video/matrox/matroxfb_accel.h
+++ b/drivers/video/fbdev/matrox/matroxfb_accel.h
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 7116c5309c7d..7116c5309c7d 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/fbdev/matrox/matroxfb_base.h
index 556d96ce40bf..556d96ce40bf 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/fbdev/matrox/matroxfb_base.h
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/fbdev/matrox/matroxfb_crtc2.c
index 02796a4317a9..02796a4317a9 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/fbdev/matrox/matroxfb_crtc2.c
diff --git a/drivers/video/matrox/matroxfb_crtc2.h b/drivers/video/fbdev/matrox/matroxfb_crtc2.h
index 1005582e843e..1005582e843e 100644
--- a/drivers/video/matrox/matroxfb_crtc2.h
+++ b/drivers/video/fbdev/matrox/matroxfb_crtc2.h
diff --git a/drivers/video/matrox/matroxfb_g450.c b/drivers/video/fbdev/matrox/matroxfb_g450.c
index cff0546ea6fd..cff0546ea6fd 100644
--- a/drivers/video/matrox/matroxfb_g450.c
+++ b/drivers/video/fbdev/matrox/matroxfb_g450.c
diff --git a/drivers/video/matrox/matroxfb_g450.h b/drivers/video/fbdev/matrox/matroxfb_g450.h
index 3a3e654444b8..3a3e654444b8 100644
--- a/drivers/video/matrox/matroxfb_g450.h
+++ b/drivers/video/fbdev/matrox/matroxfb_g450.h
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/fbdev/matrox/matroxfb_maven.c
index ee41a0f276b2..ee41a0f276b2 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/fbdev/matrox/matroxfb_maven.c
diff --git a/drivers/video/matrox/matroxfb_maven.h b/drivers/video/fbdev/matrox/matroxfb_maven.h
index 99eddec9f30c..99eddec9f30c 100644
--- a/drivers/video/matrox/matroxfb_maven.h
+++ b/drivers/video/fbdev/matrox/matroxfb_maven.h
diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/fbdev/matrox/matroxfb_misc.c
index 9948ca2a3046..9948ca2a3046 100644
--- a/drivers/video/matrox/matroxfb_misc.c
+++ b/drivers/video/fbdev/matrox/matroxfb_misc.c
diff --git a/drivers/video/matrox/matroxfb_misc.h b/drivers/video/fbdev/matrox/matroxfb_misc.h
index 351c823f1f74..351c823f1f74 100644
--- a/drivers/video/matrox/matroxfb_misc.h
+++ b/drivers/video/fbdev/matrox/matroxfb_misc.h
diff --git a/drivers/video/maxinefb.c b/drivers/video/fbdev/maxinefb.c
index 5cf52d3c8e75..5cf52d3c8e75 100644
--- a/drivers/video/maxinefb.c
+++ b/drivers/video/fbdev/maxinefb.c
diff --git a/drivers/video/mb862xx/Makefile b/drivers/video/fbdev/mb862xx/Makefile
index 5707ed0e31a7..5707ed0e31a7 100644
--- a/drivers/video/mb862xx/Makefile
+++ b/drivers/video/fbdev/mb862xx/Makefile
diff --git a/drivers/video/mb862xx/mb862xx-i2c.c b/drivers/video/fbdev/mb862xx/mb862xx-i2c.c
index c87e17afb3e2..c87e17afb3e2 100644
--- a/drivers/video/mb862xx/mb862xx-i2c.c
+++ b/drivers/video/fbdev/mb862xx/mb862xx-i2c.c
diff --git a/drivers/video/mb862xx/mb862xx_reg.h b/drivers/video/fbdev/mb862xx/mb862xx_reg.h
index 9df48b8edc94..9df48b8edc94 100644
--- a/drivers/video/mb862xx/mb862xx_reg.h
+++ b/drivers/video/fbdev/mb862xx/mb862xx_reg.h
diff --git a/drivers/video/mb862xx/mb862xxfb.h b/drivers/video/fbdev/mb862xx/mb862xxfb.h
index 8550630c1e01..8550630c1e01 100644
--- a/drivers/video/mb862xx/mb862xxfb.h
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb.h
diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
index fe92eed6da70..fe92eed6da70 100644
--- a/drivers/video/mb862xx/mb862xxfb_accel.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
diff --git a/drivers/video/mb862xx/mb862xxfb_accel.h b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.h
index 96a2dfef0f60..96a2dfef0f60 100644
--- a/drivers/video/mb862xx/mb862xxfb_accel.h
+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.h
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 0cd4c3318511..0cd4c3318511 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
diff --git a/drivers/video/mbx/Makefile b/drivers/video/fbdev/mbx/Makefile
index 16c1165cf9c7..16c1165cf9c7 100644
--- a/drivers/video/mbx/Makefile
+++ b/drivers/video/fbdev/mbx/Makefile
diff --git a/drivers/video/mbx/mbxdebugfs.c b/drivers/video/fbdev/mbx/mbxdebugfs.c
index 4449f249b0e7..4449f249b0e7 100644
--- a/drivers/video/mbx/mbxdebugfs.c
+++ b/drivers/video/fbdev/mbx/mbxdebugfs.c
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c
index f0a5392f5fd3..f0a5392f5fd3 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/fbdev/mbx/mbxfb.c
diff --git a/drivers/video/mbx/reg_bits.h b/drivers/video/fbdev/mbx/reg_bits.h
index 5f14b4befd71..5f14b4befd71 100644
--- a/drivers/video/mbx/reg_bits.h
+++ b/drivers/video/fbdev/mbx/reg_bits.h
diff --git a/drivers/video/mbx/regs.h b/drivers/video/fbdev/mbx/regs.h
index 063099d48839..063099d48839 100644
--- a/drivers/video/mbx/regs.h
+++ b/drivers/video/fbdev/mbx/regs.h
diff --git a/drivers/video/metronomefb.c b/drivers/video/fbdev/metronomefb.c
index 195cc2db4c2c..195cc2db4c2c 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/fbdev/metronomefb.c
diff --git a/drivers/video/mmp/Kconfig b/drivers/video/fbdev/mmp/Kconfig
index e9ea39e13722..d4a4ffc24749 100644
--- a/drivers/video/mmp/Kconfig
+++ b/drivers/video/fbdev/mmp/Kconfig
@@ -5,7 +5,7 @@ menuconfig MMP_DISP
 	  Marvell Display Subsystem support.
 
 if MMP_DISP
-source "drivers/video/mmp/hw/Kconfig"
-source "drivers/video/mmp/panel/Kconfig"
-source "drivers/video/mmp/fb/Kconfig"
+source "drivers/video/fbdev/mmp/hw/Kconfig"
+source "drivers/video/fbdev/mmp/panel/Kconfig"
+source "drivers/video/fbdev/mmp/fb/Kconfig"
 endif
diff --git a/drivers/video/mmp/Makefile b/drivers/video/fbdev/mmp/Makefile
index a014cb358bf8..a014cb358bf8 100644
--- a/drivers/video/mmp/Makefile
+++ b/drivers/video/fbdev/mmp/Makefile
diff --git a/drivers/video/mmp/core.c b/drivers/video/fbdev/mmp/core.c
index b563b920f159..b563b920f159 100644
--- a/drivers/video/mmp/core.c
+++ b/drivers/video/fbdev/mmp/core.c
diff --git a/drivers/video/mmp/fb/Kconfig b/drivers/video/fbdev/mmp/fb/Kconfig
index 9b0141f105f5..9b0141f105f5 100644
--- a/drivers/video/mmp/fb/Kconfig
+++ b/drivers/video/fbdev/mmp/fb/Kconfig
diff --git a/drivers/video/mmp/fb/Makefile b/drivers/video/fbdev/mmp/fb/Makefile
index 709fd1f76abe..709fd1f76abe 100644
--- a/drivers/video/mmp/fb/Makefile
+++ b/drivers/video/fbdev/mmp/fb/Makefile
diff --git a/drivers/video/mmp/fb/mmpfb.c b/drivers/video/fbdev/mmp/fb/mmpfb.c
index 7ab31eb76a8c..7ab31eb76a8c 100644
--- a/drivers/video/mmp/fb/mmpfb.c
+++ b/drivers/video/fbdev/mmp/fb/mmpfb.c
diff --git a/drivers/video/mmp/fb/mmpfb.h b/drivers/video/fbdev/mmp/fb/mmpfb.h
index 88c23c10a9ec..88c23c10a9ec 100644
--- a/drivers/video/mmp/fb/mmpfb.h
+++ b/drivers/video/fbdev/mmp/fb/mmpfb.h
diff --git a/drivers/video/mmp/hw/Kconfig b/drivers/video/fbdev/mmp/hw/Kconfig
index 02f109a20cd0..02f109a20cd0 100644
--- a/drivers/video/mmp/hw/Kconfig
+++ b/drivers/video/fbdev/mmp/hw/Kconfig
diff --git a/drivers/video/mmp/hw/Makefile b/drivers/video/fbdev/mmp/hw/Makefile
index 0000a714fedf..0000a714fedf 100644
--- a/drivers/video/mmp/hw/Makefile
+++ b/drivers/video/fbdev/mmp/hw/Makefile
diff --git a/drivers/video/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index 8621a9f2bdcc..8621a9f2bdcc 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
diff --git a/drivers/video/mmp/hw/mmp_ctrl.h b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
index 53301cfdb1ae..53301cfdb1ae 100644
--- a/drivers/video/mmp/hw/mmp_ctrl.h
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
diff --git a/drivers/video/mmp/hw/mmp_spi.c b/drivers/video/fbdev/mmp/hw/mmp_spi.c
index e62ca7bf0d5e..e62ca7bf0d5e 100644
--- a/drivers/video/mmp/hw/mmp_spi.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_spi.c
diff --git a/drivers/video/mmp/panel/Kconfig b/drivers/video/fbdev/mmp/panel/Kconfig
index 4b2c4f457b11..4b2c4f457b11 100644
--- a/drivers/video/mmp/panel/Kconfig
+++ b/drivers/video/fbdev/mmp/panel/Kconfig
diff --git a/drivers/video/mmp/panel/Makefile b/drivers/video/fbdev/mmp/panel/Makefile
index 2f91611c7e5e..2f91611c7e5e 100644
--- a/drivers/video/mmp/panel/Makefile
+++ b/drivers/video/fbdev/mmp/panel/Makefile
diff --git a/drivers/video/mmp/panel/tpo_tj032md01bw.c b/drivers/video/fbdev/mmp/panel/tpo_tj032md01bw.c
index 998978b08f5e..998978b08f5e 100644
--- a/drivers/video/mmp/panel/tpo_tj032md01bw.c
+++ b/drivers/video/fbdev/mmp/panel/tpo_tj032md01bw.c
diff --git a/drivers/video/msm/Makefile b/drivers/video/fbdev/msm/Makefile
index 802d6ae523fb..802d6ae523fb 100644
--- a/drivers/video/msm/Makefile
+++ b/drivers/video/fbdev/msm/Makefile
diff --git a/drivers/video/msm/mddi.c b/drivers/video/fbdev/msm/mddi.c
index e0f8011a3c4b..e0f8011a3c4b 100644
--- a/drivers/video/msm/mddi.c
+++ b/drivers/video/fbdev/msm/mddi.c
diff --git a/drivers/video/msm/mddi_client_dummy.c b/drivers/video/fbdev/msm/mddi_client_dummy.c
index f1b0dfcc9717..f1b0dfcc9717 100644
--- a/drivers/video/msm/mddi_client_dummy.c
+++ b/drivers/video/fbdev/msm/mddi_client_dummy.c
diff --git a/drivers/video/msm/mddi_client_nt35399.c b/drivers/video/fbdev/msm/mddi_client_nt35399.c
index f96df32e5509..f96df32e5509 100644
--- a/drivers/video/msm/mddi_client_nt35399.c
+++ b/drivers/video/fbdev/msm/mddi_client_nt35399.c
diff --git a/drivers/video/msm/mddi_client_toshiba.c b/drivers/video/fbdev/msm/mddi_client_toshiba.c
index 061d7dfebbf3..061d7dfebbf3 100644
--- a/drivers/video/msm/mddi_client_toshiba.c
+++ b/drivers/video/fbdev/msm/mddi_client_toshiba.c
diff --git a/drivers/video/msm/mddi_hw.h b/drivers/video/fbdev/msm/mddi_hw.h
index 45cc01fc1e7f..45cc01fc1e7f 100644
--- a/drivers/video/msm/mddi_hw.h
+++ b/drivers/video/fbdev/msm/mddi_hw.h
diff --git a/drivers/video/msm/mdp.c b/drivers/video/fbdev/msm/mdp.c
index 113c7876c855..113c7876c855 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/fbdev/msm/mdp.c
diff --git a/drivers/video/msm/mdp_csc_table.h b/drivers/video/fbdev/msm/mdp_csc_table.h
index d1cde30ead52..d1cde30ead52 100644
--- a/drivers/video/msm/mdp_csc_table.h
+++ b/drivers/video/fbdev/msm/mdp_csc_table.h
diff --git a/drivers/video/msm/mdp_hw.h b/drivers/video/fbdev/msm/mdp_hw.h
index 35848d741001..35848d741001 100644
--- a/drivers/video/msm/mdp_hw.h
+++ b/drivers/video/fbdev/msm/mdp_hw.h
diff --git a/drivers/video/msm/mdp_ppp.c b/drivers/video/fbdev/msm/mdp_ppp.c
index be6079cdfbb6..be6079cdfbb6 100644
--- a/drivers/video/msm/mdp_ppp.c
+++ b/drivers/video/fbdev/msm/mdp_ppp.c
diff --git a/drivers/video/msm/mdp_scale_tables.c b/drivers/video/fbdev/msm/mdp_scale_tables.c
index 604783b2e17c..604783b2e17c 100644
--- a/drivers/video/msm/mdp_scale_tables.c
+++ b/drivers/video/fbdev/msm/mdp_scale_tables.c
diff --git a/drivers/video/msm/mdp_scale_tables.h b/drivers/video/fbdev/msm/mdp_scale_tables.h
index 34077b1af603..34077b1af603 100644
--- a/drivers/video/msm/mdp_scale_tables.h
+++ b/drivers/video/fbdev/msm/mdp_scale_tables.h
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/fbdev/msm/msm_fb.c
index 1374803fbcd9..1374803fbcd9 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/fbdev/msm/msm_fb.c
diff --git a/drivers/video/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index 142e860fb527..142e860fb527 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
diff --git a/drivers/video/mxsfb.c b/drivers/video/fbdev/mxsfb.c
index accf48a2cce4..accf48a2cce4 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/fbdev/mxsfb.c
diff --git a/drivers/video/n411.c b/drivers/video/fbdev/n411.c
index 935830fea7b6..935830fea7b6 100644
--- a/drivers/video/n411.c
+++ b/drivers/video/fbdev/n411.c
diff --git a/drivers/video/neofb.c b/drivers/video/fbdev/neofb.c
index 44f99a60bb9b..44f99a60bb9b 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/fbdev/neofb.c
diff --git a/drivers/video/nuc900fb.c b/drivers/video/fbdev/nuc900fb.c
index 478f9808dee4..478f9808dee4 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/fbdev/nuc900fb.c
diff --git a/drivers/video/nuc900fb.h b/drivers/video/fbdev/nuc900fb.h
index 9a1ca6dbb6b2..9a1ca6dbb6b2 100644
--- a/drivers/video/nuc900fb.h
+++ b/drivers/video/fbdev/nuc900fb.h
diff --git a/drivers/video/nvidia/Makefile b/drivers/video/fbdev/nvidia/Makefile
index ca47432113e0..ca47432113e0 100644
--- a/drivers/video/nvidia/Makefile
+++ b/drivers/video/fbdev/nvidia/Makefile
diff --git a/drivers/video/nvidia/nv_accel.c b/drivers/video/fbdev/nvidia/nv_accel.c
index ad6472a894ea..ad6472a894ea 100644
--- a/drivers/video/nvidia/nv_accel.c
+++ b/drivers/video/fbdev/nvidia/nv_accel.c
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/fbdev/nvidia/nv_backlight.c
index 8471008aa6ff..8471008aa6ff 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/fbdev/nvidia/nv_backlight.c
diff --git a/drivers/video/nvidia/nv_dma.h b/drivers/video/fbdev/nvidia/nv_dma.h
index a7ed1c0acbbb..a7ed1c0acbbb 100644
--- a/drivers/video/nvidia/nv_dma.h
+++ b/drivers/video/fbdev/nvidia/nv_dma.h
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/fbdev/nvidia/nv_hw.c
index 81c80ac3c76f..81c80ac3c76f 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/fbdev/nvidia/nv_hw.c
diff --git a/drivers/video/nvidia/nv_i2c.c b/drivers/video/fbdev/nvidia/nv_i2c.c
index d7994a173245..d7994a173245 100644
--- a/drivers/video/nvidia/nv_i2c.c
+++ b/drivers/video/fbdev/nvidia/nv_i2c.c
diff --git a/drivers/video/nvidia/nv_local.h b/drivers/video/fbdev/nvidia/nv_local.h
index 68e508daa417..68e508daa417 100644
--- a/drivers/video/nvidia/nv_local.h
+++ b/drivers/video/fbdev/nvidia/nv_local.h
diff --git a/drivers/video/nvidia/nv_of.c b/drivers/video/fbdev/nvidia/nv_of.c
index 3bc13df4b120..3bc13df4b120 100644
--- a/drivers/video/nvidia/nv_of.c
+++ b/drivers/video/fbdev/nvidia/nv_of.c
diff --git a/drivers/video/nvidia/nv_proto.h b/drivers/video/fbdev/nvidia/nv_proto.h
index ff5c410355ea..ff5c410355ea 100644
--- a/drivers/video/nvidia/nv_proto.h
+++ b/drivers/video/fbdev/nvidia/nv_proto.h
diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/fbdev/nvidia/nv_setup.c
index 2f2e162134fa..2f2e162134fa 100644
--- a/drivers/video/nvidia/nv_setup.c
+++ b/drivers/video/fbdev/nvidia/nv_setup.c
diff --git a/drivers/video/nvidia/nv_type.h b/drivers/video/fbdev/nvidia/nv_type.h
index c03f7f55c76d..c03f7f55c76d 100644
--- a/drivers/video/nvidia/nv_type.h
+++ b/drivers/video/fbdev/nvidia/nv_type.h
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index def041204676..def041204676 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
diff --git a/drivers/video/ocfb.c b/drivers/video/fbdev/ocfb.c
index 7f9dc9bec309..7f9dc9bec309 100644
--- a/drivers/video/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
diff --git a/drivers/video/offb.c b/drivers/video/fbdev/offb.c
index 7d44d669d5b6..7d44d669d5b6 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/fbdev/offb.c
diff --git a/drivers/video/omap/Kconfig b/drivers/video/fbdev/omap/Kconfig
index 0bc3a936ce2b..0bc3a936ce2b 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/fbdev/omap/Kconfig
diff --git a/drivers/video/omap/Makefile b/drivers/video/fbdev/omap/Makefile
index 1927faffb5bc..1927faffb5bc 100644
--- a/drivers/video/omap/Makefile
+++ b/drivers/video/fbdev/omap/Makefile
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index a4ee65b8f918..a4ee65b8f918 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
diff --git a/drivers/video/omap/lcd_ams_delta.c b/drivers/video/fbdev/omap/lcd_ams_delta.c
index 4a5f2cd3d3bf..4a5f2cd3d3bf 100644
--- a/drivers/video/omap/lcd_ams_delta.c
+++ b/drivers/video/fbdev/omap/lcd_ams_delta.c
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/fbdev/omap/lcd_h3.c
index 49bdeca81e50..49bdeca81e50 100644
--- a/drivers/video/omap/lcd_h3.c
+++ b/drivers/video/fbdev/omap/lcd_h3.c
diff --git a/drivers/video/omap/lcd_htcherald.c b/drivers/video/fbdev/omap/lcd_htcherald.c
index 20f477851d54..20f477851d54 100644
--- a/drivers/video/omap/lcd_htcherald.c
+++ b/drivers/video/fbdev/omap/lcd_htcherald.c
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/fbdev/omap/lcd_inn1510.c
index 2ee423279e35..2ee423279e35 100644
--- a/drivers/video/omap/lcd_inn1510.c
+++ b/drivers/video/fbdev/omap/lcd_inn1510.c
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/fbdev/omap/lcd_inn1610.c
index e3d3d135aa48..e3d3d135aa48 100644
--- a/drivers/video/omap/lcd_inn1610.c
+++ b/drivers/video/fbdev/omap/lcd_inn1610.c
diff --git a/drivers/video/omap/lcd_mipid.c b/drivers/video/fbdev/omap/lcd_mipid.c
index 803fee618d57..803fee618d57 100644
--- a/drivers/video/omap/lcd_mipid.c
+++ b/drivers/video/fbdev/omap/lcd_mipid.c
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/fbdev/omap/lcd_osk.c
index 7fbe04bce0ed..7fbe04bce0ed 100644
--- a/drivers/video/omap/lcd_osk.c
+++ b/drivers/video/fbdev/omap/lcd_osk.c
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/fbdev/omap/lcd_palmte.c
index ff4fb624b904..ff4fb624b904 100644
--- a/drivers/video/omap/lcd_palmte.c
+++ b/drivers/video/fbdev/omap/lcd_palmte.c
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/fbdev/omap/lcd_palmtt.c
index aaf3c8ba1243..aaf3c8ba1243 100644
--- a/drivers/video/omap/lcd_palmtt.c
+++ b/drivers/video/fbdev/omap/lcd_palmtt.c
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/fbdev/omap/lcd_palmz71.c
index 3b7d8aa1cf34..3b7d8aa1cf34 100644
--- a/drivers/video/omap/lcd_palmz71.c
+++ b/drivers/video/fbdev/omap/lcd_palmz71.c
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/fbdev/omap/lcdc.c
index b52f62595f65..b52f62595f65 100644
--- a/drivers/video/omap/lcdc.c
+++ b/drivers/video/fbdev/omap/lcdc.c
diff --git a/drivers/video/omap/lcdc.h b/drivers/video/fbdev/omap/lcdc.h
index 845222270db3..845222270db3 100644
--- a/drivers/video/omap/lcdc.h
+++ b/drivers/video/fbdev/omap/lcdc.h
diff --git a/drivers/video/omap/omapfb.h b/drivers/video/fbdev/omap/omapfb.h
index 2921d20e4fba..2921d20e4fba 100644
--- a/drivers/video/omap/omapfb.h
+++ b/drivers/video/fbdev/omap/omapfb.h
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index e4fc6d9b5371..e4fc6d9b5371 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
diff --git a/drivers/video/omap/sossi.c b/drivers/video/fbdev/omap/sossi.c
index d4e7684e7045..d4e7684e7045 100644
--- a/drivers/video/omap/sossi.c
+++ b/drivers/video/fbdev/omap/sossi.c
diff --git a/drivers/video/fbdev/omap2/Kconfig b/drivers/video/fbdev/omap2/Kconfig
new file mode 100644
index 000000000000..c22955d2de9a
--- /dev/null
+++ b/drivers/video/fbdev/omap2/Kconfig
@@ -0,0 +1,10 @@
1config OMAP2_VRFB
2 bool
3
4if ARCH_OMAP2PLUS
5
6source "drivers/video/fbdev/omap2/dss/Kconfig"
7source "drivers/video/fbdev/omap2/omapfb/Kconfig"
8source "drivers/video/fbdev/omap2/displays-new/Kconfig"
9
10endif
diff --git a/drivers/video/omap2/Makefile b/drivers/video/fbdev/omap2/Makefile
index bf8127df8c71..bf8127df8c71 100644
--- a/drivers/video/omap2/Makefile
+++ b/drivers/video/fbdev/omap2/Makefile
diff --git a/drivers/video/omap2/displays-new/Kconfig b/drivers/video/fbdev/omap2/displays-new/Kconfig
index e6cfc38160d3..e6cfc38160d3 100644
--- a/drivers/video/omap2/displays-new/Kconfig
+++ b/drivers/video/fbdev/omap2/displays-new/Kconfig
diff --git a/drivers/video/omap2/displays-new/Makefile b/drivers/video/fbdev/omap2/displays-new/Makefile
index 0323a8a1c682..0323a8a1c682 100644
--- a/drivers/video/omap2/displays-new/Makefile
+++ b/drivers/video/fbdev/omap2/displays-new/Makefile
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
index 5ee3b5505f7f..5ee3b5505f7f 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
diff --git a/drivers/video/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
index 74de2bc50c4f..74de2bc50c4f 100644
--- a/drivers/video/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
diff --git a/drivers/video/omap2/displays-new/connector-hdmi.c b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
index 29ed21b9dce5..29ed21b9dce5 100644
--- a/drivers/video/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
diff --git a/drivers/video/omap2/displays-new/encoder-tfp410.c b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
index b4e9a42a79e6..b4e9a42a79e6 100644
--- a/drivers/video/omap2/displays-new/encoder-tfp410.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
diff --git a/drivers/video/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
index 7e33686171e3..7e33686171e3 100644
--- a/drivers/video/omap2/displays-new/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
diff --git a/drivers/video/omap2/displays-new/panel-dpi.c b/drivers/video/fbdev/omap2/displays-new/panel-dpi.c
index 5f8f7e7c81ef..5f8f7e7c81ef 100644
--- a/drivers/video/omap2/displays-new/panel-dpi.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-dpi.c
diff --git a/drivers/video/omap2/displays-new/panel-dsi-cm.c b/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c
index d6f14e8717e8..d6f14e8717e8 100644
--- a/drivers/video/omap2/displays-new/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c
diff --git a/drivers/video/omap2/displays-new/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
index 2e6b513222d9..2e6b513222d9 100644
--- a/drivers/video/omap2/displays-new/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
diff --git a/drivers/video/omap2/displays-new/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c
index 996fa004b48c..996fa004b48c 100644
--- a/drivers/video/omap2/displays-new/panel-nec-nl8048hl11.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c
diff --git a/drivers/video/omap2/displays-new/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
index b2f710be565d..b2f710be565d 100644
--- a/drivers/video/omap2/displays-new/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
diff --git a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
index c7ba4d8b928a..c7ba4d8b928a 100644
--- a/drivers/video/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
diff --git a/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
index fae6adc005a7..fae6adc005a7 100644
--- a/drivers/video/omap2/displays-new/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
diff --git a/drivers/video/omap2/displays-new/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
index 875b40263b33..875b40263b33 100644
--- a/drivers/video/omap2/displays-new/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/fbdev/omap2/dss/Kconfig
index dde4281663b1..dde4281663b1 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/fbdev/omap2/dss/Kconfig
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/fbdev/omap2/dss/Makefile
index 8aec8bda27cc..8aec8bda27cc 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/fbdev/omap2/dss/Makefile
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/fbdev/omap2/dss/apply.c
index 0a0b084ce65d..0a0b084ce65d 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/fbdev/omap2/dss/apply.c
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/fbdev/omap2/dss/core.c
index ffa45c894cd4..ffa45c894cd4 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/fbdev/omap2/dss/core.c
diff --git a/drivers/video/omap2/dss/dispc-compat.c b/drivers/video/fbdev/omap2/dss/dispc-compat.c
index 83779c2b292a..83779c2b292a 100644
--- a/drivers/video/omap2/dss/dispc-compat.c
+++ b/drivers/video/fbdev/omap2/dss/dispc-compat.c
diff --git a/drivers/video/omap2/dss/dispc-compat.h b/drivers/video/fbdev/omap2/dss/dispc-compat.h
index 14a69b3d4fb0..14a69b3d4fb0 100644
--- a/drivers/video/omap2/dss/dispc-compat.h
+++ b/drivers/video/fbdev/omap2/dss/dispc-compat.h
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/fbdev/omap2/dss/dispc.c
index 2bbdb7ff7daf..f18397c33e8f 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/dss/dispc.c
@@ -101,6 +101,8 @@ static struct {
101 void __iomem *base; 101 void __iomem *base;
102 102
103 int irq; 103 int irq;
104 irq_handler_t user_handler;
105 void *user_data;
104 106
105 unsigned long core_clk_rate; 107 unsigned long core_clk_rate;
106 unsigned long tv_pclk_rate; 108 unsigned long tv_pclk_rate;
@@ -113,6 +115,8 @@ static struct {
113 u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; 115 u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
114 116
115 const struct dispc_features *feat; 117 const struct dispc_features *feat;
118
119 bool is_enabled;
116} dispc; 120} dispc;
117 121
118enum omap_color_component { 122enum omap_color_component {
@@ -141,12 +145,18 @@ enum mgr_reg_fields {
141 DISPC_MGR_FLD_NUM, 145 DISPC_MGR_FLD_NUM,
142}; 146};
143 147
148struct dispc_reg_field {
149 u16 reg;
150 u8 high;
151 u8 low;
152};
153
144static const struct { 154static const struct {
145 const char *name; 155 const char *name;
146 u32 vsync_irq; 156 u32 vsync_irq;
147 u32 framedone_irq; 157 u32 framedone_irq;
148 u32 sync_lost_irq; 158 u32 sync_lost_irq;
149 struct reg_field reg_desc[DISPC_MGR_FLD_NUM]; 159 struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM];
150} mgr_desc[] = { 160} mgr_desc[] = {
151 [OMAP_DSS_CHANNEL_LCD] = { 161 [OMAP_DSS_CHANNEL_LCD] = {
152 .name = "LCD", 162 .name = "LCD",
@@ -238,13 +248,13 @@ static inline u32 dispc_read_reg(const u16 idx)
238 248
239static u32 mgr_fld_read(enum omap_channel channel, enum mgr_reg_fields regfld) 249static u32 mgr_fld_read(enum omap_channel channel, enum mgr_reg_fields regfld)
240{ 250{
241 const struct reg_field rfld = mgr_desc[channel].reg_desc[regfld]; 251 const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
242 return REG_GET(rfld.reg, rfld.high, rfld.low); 252 return REG_GET(rfld.reg, rfld.high, rfld.low);
243} 253}
244 254
245static void mgr_fld_write(enum omap_channel channel, 255static void mgr_fld_write(enum omap_channel channel,
246 enum mgr_reg_fields regfld, int val) { 256 enum mgr_reg_fields regfld, int val) {
247 const struct reg_field rfld = mgr_desc[channel].reg_desc[regfld]; 257 const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
248 REG_FLD_MOD(rfld.reg, val, rfld.high, rfld.low); 258 REG_FLD_MOD(rfld.reg, val, rfld.high, rfld.low);
249} 259}
250 260
@@ -3669,16 +3679,44 @@ static int __init dispc_init_features(struct platform_device *pdev)
3669 return 0; 3679 return 0;
3670} 3680}
3671 3681
3682static irqreturn_t dispc_irq_handler(int irq, void *arg)
3683{
3684 if (!dispc.is_enabled)
3685 return IRQ_NONE;
3686
3687 return dispc.user_handler(irq, dispc.user_data);
3688}
3689
3672int dispc_request_irq(irq_handler_t handler, void *dev_id) 3690int dispc_request_irq(irq_handler_t handler, void *dev_id)
3673{ 3691{
3674 return devm_request_irq(&dispc.pdev->dev, dispc.irq, handler, 3692 int r;
3675 IRQF_SHARED, "OMAP DISPC", dev_id); 3693
3694 if (dispc.user_handler != NULL)
3695 return -EBUSY;
3696
3697 dispc.user_handler = handler;
3698 dispc.user_data = dev_id;
3699
3700 /* ensure the dispc_irq_handler sees the values above */
3701 smp_wmb();
3702
3703 r = devm_request_irq(&dispc.pdev->dev, dispc.irq, dispc_irq_handler,
3704 IRQF_SHARED, "OMAP DISPC", &dispc);
3705 if (r) {
3706 dispc.user_handler = NULL;
3707 dispc.user_data = NULL;
3708 }
3709
3710 return r;
3676} 3711}
3677EXPORT_SYMBOL(dispc_request_irq); 3712EXPORT_SYMBOL(dispc_request_irq);
3678 3713
3679void dispc_free_irq(void *dev_id) 3714void dispc_free_irq(void *dev_id)
3680{ 3715{
3681 devm_free_irq(&dispc.pdev->dev, dispc.irq, dev_id); 3716 devm_free_irq(&dispc.pdev->dev, dispc.irq, &dispc);
3717
3718 dispc.user_handler = NULL;
3719 dispc.user_data = NULL;
3682} 3720}
3683EXPORT_SYMBOL(dispc_free_irq); 3721EXPORT_SYMBOL(dispc_free_irq);
3684 3722
@@ -3750,6 +3788,12 @@ static int __exit omap_dispchw_remove(struct platform_device *pdev)
3750 3788
3751static int dispc_runtime_suspend(struct device *dev) 3789static int dispc_runtime_suspend(struct device *dev)
3752{ 3790{
3791 dispc.is_enabled = false;
3792 /* ensure the dispc_irq_handler sees the is_enabled value */
3793 smp_wmb();
3794 /* wait for current handler to finish before turning the DISPC off */
3795 synchronize_irq(dispc.irq);
3796
3753 dispc_save_context(); 3797 dispc_save_context();
3754 3798
3755 return 0; 3799 return 0;
@@ -3763,12 +3807,15 @@ static int dispc_runtime_resume(struct device *dev)
3763 * _omap_dispc_initial_config(). We can thus use it to detect if 3807 * _omap_dispc_initial_config(). We can thus use it to detect if
3764 * we have lost register context. 3808 * we have lost register context.
3765 */ 3809 */
3766 if (REG_GET(DISPC_CONFIG, 2, 1) == OMAP_DSS_LOAD_FRAME_ONLY) 3810 if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
3767 return 0; 3811 _omap_dispc_initial_config();
3768 3812
3769 _omap_dispc_initial_config(); 3813 dispc_restore_context();
3814 }
3770 3815
3771 dispc_restore_context(); 3816 dispc.is_enabled = true;
3817 /* ensure the dispc_irq_handler sees the is_enabled value */
3818 smp_wmb();
3772 3819
3773 return 0; 3820 return 0;
3774} 3821}
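The dispc.c hunks above stop handing the caller's handler straight to devm_request_irq(): a private dispc_irq_handler() wrapper is registered instead, and it returns IRQ_NONE while dispc.is_enabled is false, so an interrupt arriving on the shared line during runtime suspend can no longer touch a powered-down block. The flag and handler pointers are published with smp_wmb(), and the suspend path waits in synchronize_irq() so a handler that has already started is allowed to finish. The following is a minimal sketch of the same idiom, not the driver's code; the mydev names are hypothetical and only the gating pattern is taken from the diff.

#include <linux/interrupt.h>
#include <linux/device.h>
#include <asm/barrier.h>

struct mydev {
        struct device *dev;
        int irq;
        bool is_enabled;
        irq_handler_t user_handler;
        void *user_data;
};

/* Wrapper that refuses to run the client handler while the device is off. */
static irqreturn_t mydev_irq_thunk(int irq, void *arg)
{
        struct mydev *md = arg;

        if (!md->is_enabled)
                return IRQ_NONE;        /* shared line: let other handlers run */

        return md->user_handler(irq, md->user_data);
}

static int mydev_request_irq(struct mydev *md, irq_handler_t handler, void *data)
{
        md->user_handler = handler;
        md->user_data = data;
        smp_wmb();              /* publish handler/data before the IRQ can fire */

        return devm_request_irq(md->dev, md->irq, mydev_irq_thunk,
                                IRQF_SHARED, "mydev", md);
}

static void mydev_suspend(struct mydev *md)
{
        md->is_enabled = false;
        smp_wmb();                      /* make the flag visible to the thunk */
        synchronize_irq(md->irq);       /* wait for an in-flight handler to exit */
}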
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/fbdev/omap2/dss/dispc.h
index 78edb449c763..78edb449c763 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/fbdev/omap2/dss/dispc.h
diff --git a/drivers/video/omap2/dss/dispc_coefs.c b/drivers/video/fbdev/omap2/dss/dispc_coefs.c
index 038c15b04215..038c15b04215 100644
--- a/drivers/video/omap2/dss/dispc_coefs.c
+++ b/drivers/video/fbdev/omap2/dss/dispc_coefs.c
diff --git a/drivers/video/omap2/dss/display-sysfs.c b/drivers/video/fbdev/omap2/dss/display-sysfs.c
index 5a2095a98ed8..5a2095a98ed8 100644
--- a/drivers/video/omap2/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/dss/display-sysfs.c
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
index 2412a0dd0c13..2412a0dd0c13 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/fbdev/omap2/dss/display.c
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/fbdev/omap2/dss/dpi.c
index 157921db447a..157921db447a 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/dss/dpi.c
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/fbdev/omap2/dss/dsi.c
index 121d1049d0bc..8be9b04d8849 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/dss/dsi.c
@@ -297,6 +297,8 @@ struct dsi_data {
297 297
298 int irq; 298 int irq;
299 299
300 bool is_enabled;
301
300 struct clk *dss_clk; 302 struct clk *dss_clk;
301 struct clk *sys_clk; 303 struct clk *sys_clk;
302 304
@@ -795,6 +797,9 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
795 dsidev = (struct platform_device *) arg; 797 dsidev = (struct platform_device *) arg;
796 dsi = dsi_get_dsidrv_data(dsidev); 798 dsi = dsi_get_dsidrv_data(dsidev);
797 799
800 if (!dsi->is_enabled)
801 return IRQ_NONE;
802
798 spin_lock(&dsi->irq_lock); 803 spin_lock(&dsi->irq_lock);
799 804
800 irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS); 805 irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
@@ -5671,6 +5676,15 @@ static int __exit omap_dsihw_remove(struct platform_device *dsidev)
5671 5676
5672static int dsi_runtime_suspend(struct device *dev) 5677static int dsi_runtime_suspend(struct device *dev)
5673{ 5678{
5679 struct platform_device *pdev = to_platform_device(dev);
5680 struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
5681
5682 dsi->is_enabled = false;
5683 /* ensure the irq handler sees the is_enabled value */
5684 smp_wmb();
5685 /* wait for current handler to finish before turning the DSI off */
5686 synchronize_irq(dsi->irq);
5687
5674 dispc_runtime_put(); 5688 dispc_runtime_put();
5675 5689
5676 return 0; 5690 return 0;
@@ -5678,12 +5692,18 @@ static int dsi_runtime_suspend(struct device *dev)
5678 5692
5679static int dsi_runtime_resume(struct device *dev) 5693static int dsi_runtime_resume(struct device *dev)
5680{ 5694{
5695 struct platform_device *pdev = to_platform_device(dev);
5696 struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
5681 int r; 5697 int r;
5682 5698
5683 r = dispc_runtime_get(); 5699 r = dispc_runtime_get();
5684 if (r) 5700 if (r)
5685 return r; 5701 return r;
5686 5702
5703 dsi->is_enabled = true;
5704 /* ensure the irq handler sees the is_enabled value */
5705 smp_wmb();
5706
5687 return 0; 5707 return 0;
5688} 5708}
5689 5709
diff --git a/drivers/video/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index a4b20aaf6142..a4b20aaf6142 100644
--- a/drivers/video/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/fbdev/omap2/dss/dss.c
index 825c019ddee7..d55266c0e029 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/fbdev/omap2/dss/dss.c
@@ -457,7 +457,7 @@ bool dss_div_calc(unsigned long pck, unsigned long fck_min,
457 fckd_stop = max(DIV_ROUND_UP(prate * m, fck_hw_max), 1ul); 457 fckd_stop = max(DIV_ROUND_UP(prate * m, fck_hw_max), 1ul);
458 458
459 for (fckd = fckd_start; fckd >= fckd_stop; --fckd) { 459 for (fckd = fckd_start; fckd >= fckd_stop; --fckd) {
460 fck = prate / fckd * m; 460 fck = DIV_ROUND_UP(prate, fckd) * m;
461 461
462 if (func(fck, data)) 462 if (func(fck, data))
463 return true; 463 return true;
@@ -506,7 +506,7 @@ static int dss_setup_default_clock(void)
506 506
507 fck_div = DIV_ROUND_UP(prate * dss.feat->dss_fck_multiplier, 507 fck_div = DIV_ROUND_UP(prate * dss.feat->dss_fck_multiplier,
508 max_dss_fck); 508 max_dss_fck);
509 fck = prate / fck_div * dss.feat->dss_fck_multiplier; 509 fck = DIV_ROUND_UP(prate, fck_div) * dss.feat->dss_fck_multiplier;
510 } 510 }
511 511
512 r = dss_set_fck_rate(fck); 512 r = dss_set_fck_rate(fck);
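Both dss.c hunks replace a truncating prate / div * m with DIV_ROUND_UP(prate, div) * m when converting a divider back into a clock rate, which removes the small downward error plain integer truncation introduces. A short userspace sketch of the difference; the macro mirrors the kernel definition and the sample rates are made up.

#include <stdio.h>

/* Same definition as the kernel's DIV_ROUND_UP in include/linux/kernel.h */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long prate = 266666666;        /* hypothetical parent rate, Hz */
        unsigned long m = 16;                   /* DSS fck multiplier */
        unsigned long fckd = 25;                /* divider under test */

        unsigned long truncated = prate / fckd * m;
        unsigned long rounded   = DIV_ROUND_UP(prate, fckd) * m;

        /* prints 170666656 vs 170666672: truncation under-reports the rate */
        printf("truncated:  %lu\nrounded up: %lu\n", truncated, rounded);
        return 0;
}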
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/fbdev/omap2/dss/dss.h
index 918fec182424..560078fcb198 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/fbdev/omap2/dss/dss.h
@@ -131,12 +131,6 @@ struct dsi_clock_info {
131 u16 lp_clk_div; 131 u16 lp_clk_div;
132}; 132};
133 133
134struct reg_field {
135 u16 reg;
136 u8 high;
137 u8 low;
138};
139
140struct dss_lcd_mgr_config { 134struct dss_lcd_mgr_config {
141 enum dss_io_pad_mode io_pad_mode; 135 enum dss_io_pad_mode io_pad_mode;
142 136
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/fbdev/omap2/dss/dss_features.c
index 7f8969191dc6..7f8969191dc6 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/dss/dss_features.c
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/fbdev/omap2/dss/dss_features.h
index e3ef3b714896..e3ef3b714896 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/fbdev/omap2/dss/dss_features.h
diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/fbdev/omap2/dss/hdmi.h
index e25681ff5a70..e25681ff5a70 100644
--- a/drivers/video/omap2/dss/hdmi.h
+++ b/drivers/video/fbdev/omap2/dss/hdmi.h
diff --git a/drivers/video/omap2/dss/hdmi4.c b/drivers/video/fbdev/omap2/dss/hdmi4.c
index f5f7944a1fd1..f5f7944a1fd1 100644
--- a/drivers/video/omap2/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi4.c
diff --git a/drivers/video/omap2/dss/hdmi4_core.c b/drivers/video/fbdev/omap2/dss/hdmi4_core.c
index 2eb04dcf807c..2eb04dcf807c 100644
--- a/drivers/video/omap2/dss/hdmi4_core.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi4_core.c
diff --git a/drivers/video/omap2/dss/hdmi4_core.h b/drivers/video/fbdev/omap2/dss/hdmi4_core.h
index bb646896fa82..bb646896fa82 100644
--- a/drivers/video/omap2/dss/hdmi4_core.h
+++ b/drivers/video/fbdev/omap2/dss/hdmi4_core.h
diff --git a/drivers/video/omap2/dss/hdmi_common.c b/drivers/video/fbdev/omap2/dss/hdmi_common.c
index b11afac8e068..0b12a3f62fe1 100644
--- a/drivers/video/omap2/dss/hdmi_common.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_common.c
@@ -347,17 +347,17 @@ int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
347 case 96000: 347 case 96000:
348 case 192000: 348 case 192000:
349 if (deep_color == 125) 349 if (deep_color == 125)
350 if (pclk == 27027 || pclk == 74250) 350 if (pclk == 27027000 || pclk == 74250000)
351 deep_color_correct = true; 351 deep_color_correct = true;
352 if (deep_color == 150) 352 if (deep_color == 150)
353 if (pclk == 27027) 353 if (pclk == 27027000)
354 deep_color_correct = true; 354 deep_color_correct = true;
355 break; 355 break;
356 case 44100: 356 case 44100:
357 case 88200: 357 case 88200:
358 case 176400: 358 case 176400:
359 if (deep_color == 125) 359 if (deep_color == 125)
360 if (pclk == 27027) 360 if (pclk == 27027000)
361 deep_color_correct = true; 361 deep_color_correct = true;
362 break; 362 break;
363 default: 363 default:
@@ -418,7 +418,7 @@ int hdmi_compute_acr(u32 pclk, u32 sample_freq, u32 *n, u32 *cts)
418 } 418 }
419 } 419 }
420 /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */ 420 /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
421 *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10); 421 *cts = (pclk/1000) * (*n / 128) * deep_color / (sample_freq / 10);
422 422
423 return 0; 423 return 0;
424} 424}
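With pclk now carried in Hz rather than kHz, the deep-colour special cases compare against 27027000 / 74250000, and the CTS line divides pclk back down by 1000 so the result keeps its old scale. As a quick sanity check of that formula, the common 74.25 MHz / 48 kHz case with N = 6144 (the value the HDMI spec recommends for 48 kHz) and no deep-colour correction (deep_color = 100) comes out to CTS = 74250; the snippet below just re-runs the new arithmetic in userspace.

#include <stdio.h>
#include <stdint.h>

/*
 * Same arithmetic as the updated hdmi_compute_acr() line:
 *   *cts = (pclk/1000) * (*n / 128) * deep_color / (sample_freq / 10);
 * pclk in Hz, deep_color == 100 means no deep-colour correction.
 */
static uint32_t cts(uint32_t pclk, uint32_t n, uint32_t sample_freq,
                    uint32_t deep_color)
{
        return (pclk / 1000) * (n / 128) * deep_color / (sample_freq / 10);
}

int main(void)
{
        /* 74.25 MHz pixel clock, 48 kHz audio, N = 6144 */
        printf("CTS = %u\n", cts(74250000, 6144, 48000, 100));  /* prints 74250 */
        return 0;
}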
diff --git a/drivers/video/omap2/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/dss/hdmi_phy.c
index dd376ce8da01..dd376ce8da01 100644
--- a/drivers/video/omap2/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_phy.c
diff --git a/drivers/video/omap2/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
index 5fc71215c303..5fc71215c303 100644
--- a/drivers/video/omap2/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
diff --git a/drivers/video/omap2/dss/hdmi_wp.c b/drivers/video/fbdev/omap2/dss/hdmi_wp.c
index f5f4ccf50d90..f5f4ccf50d90 100644
--- a/drivers/video/omap2/dss/hdmi_wp.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_wp.c
diff --git a/drivers/video/omap2/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/dss/manager-sysfs.c
index 37b59fe28dc8..37b59fe28dc8 100644
--- a/drivers/video/omap2/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/dss/manager-sysfs.c
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/fbdev/omap2/dss/manager.c
index 1aac9b4191a9..1aac9b4191a9 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/fbdev/omap2/dss/manager.c
diff --git a/drivers/video/omap2/dss/output.c b/drivers/video/fbdev/omap2/dss/output.c
index 2ab3afa615e8..2ab3afa615e8 100644
--- a/drivers/video/omap2/dss/output.c
+++ b/drivers/video/fbdev/omap2/dss/output.c
diff --git a/drivers/video/omap2/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/dss/overlay-sysfs.c
index 4cc5ddebfb34..4cc5ddebfb34 100644
--- a/drivers/video/omap2/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/dss/overlay-sysfs.c
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/fbdev/omap2/dss/overlay.c
index 2f7cee985cdd..2f7cee985cdd 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/fbdev/omap2/dss/overlay.c
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/fbdev/omap2/dss/rfbi.c
index c8a81a2b879c..c8a81a2b879c 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/fbdev/omap2/dss/rfbi.c
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/fbdev/omap2/dss/sdi.c
index 911dcc9173a6..911dcc9173a6 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/dss/sdi.c
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/fbdev/omap2/dss/venc.c
index 21d81113962b..21d81113962b 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/fbdev/omap2/dss/venc.c
diff --git a/drivers/video/omap2/dss/venc_panel.c b/drivers/video/fbdev/omap2/dss/venc_panel.c
index af68cd444d7e..af68cd444d7e 100644
--- a/drivers/video/omap2/dss/venc_panel.c
+++ b/drivers/video/fbdev/omap2/dss/venc_panel.c
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/fbdev/omap2/omapfb/Kconfig
index 4cb12ce68855..4cb12ce68855 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/fbdev/omap2/omapfb/Kconfig
diff --git a/drivers/video/omap2/omapfb/Makefile b/drivers/video/fbdev/omap2/omapfb/Makefile
index 51c2e00d9bf8..51c2e00d9bf8 100644
--- a/drivers/video/omap2/omapfb/Makefile
+++ b/drivers/video/fbdev/omap2/omapfb/Makefile
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 146b6f5428db..146b6f5428db 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index ec2d132c782d..ec2d132c782d 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index 18fa9e1d0033..18fa9e1d0033 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/fbdev/omap2/omapfb/omapfb.h
index 623cd872a367..623cd872a367 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb.h
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/fbdev/omap2/vrfb.c
index f346b02eee1d..f346b02eee1d 100644
--- a/drivers/video/omap2/vrfb.c
+++ b/drivers/video/fbdev/omap2/vrfb.c
diff --git a/drivers/video/p9100.c b/drivers/video/fbdev/p9100.c
index 367cea8f43f3..367cea8f43f3 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/fbdev/p9100.c
diff --git a/drivers/video/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index 4c9299576827..4c9299576827 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
diff --git a/drivers/video/platinumfb.h b/drivers/video/fbdev/platinumfb.h
index f6bd77cafd17..f6bd77cafd17 100644
--- a/drivers/video/platinumfb.h
+++ b/drivers/video/fbdev/platinumfb.h
diff --git a/drivers/video/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 3b85b647bc10..3b85b647bc10 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
diff --git a/drivers/video/pm3fb.c b/drivers/video/fbdev/pm3fb.c
index 4bf3273d0433..4bf3273d0433 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/fbdev/pm3fb.c
diff --git a/drivers/video/pmag-aa-fb.c b/drivers/video/fbdev/pmag-aa-fb.c
index 838424817de2..838424817de2 100644
--- a/drivers/video/pmag-aa-fb.c
+++ b/drivers/video/fbdev/pmag-aa-fb.c
diff --git a/drivers/video/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c
index 914a52ba8477..914a52ba8477 100644
--- a/drivers/video/pmag-ba-fb.c
+++ b/drivers/video/fbdev/pmag-ba-fb.c
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/fbdev/pmagb-b-fb.c
index 0822b6f8dddc..0822b6f8dddc 100644
--- a/drivers/video/pmagb-b-fb.c
+++ b/drivers/video/fbdev/pmagb-b-fb.c
diff --git a/drivers/video/ps3fb.c b/drivers/video/fbdev/ps3fb.c
index b269abd932aa..b269abd932aa 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/fbdev/ps3fb.c
diff --git a/drivers/video/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 167cffff3d4e..167cffff3d4e 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
diff --git a/drivers/video/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index c95b9e46d48f..c95b9e46d48f 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
diff --git a/drivers/video/pxa168fb.h b/drivers/video/fbdev/pxa168fb.h
index eee09279c524..eee09279c524 100644
--- a/drivers/video/pxa168fb.h
+++ b/drivers/video/fbdev/pxa168fb.h
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 417f9a27eb7d..417f9a27eb7d 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
diff --git a/drivers/video/pxa3xx-gcu.h b/drivers/video/fbdev/pxa3xx-gcu.h
index 0428ed03dc49..0428ed03dc49 100644
--- a/drivers/video/pxa3xx-gcu.h
+++ b/drivers/video/fbdev/pxa3xx-gcu.h
diff --git a/drivers/video/pxafb.c b/drivers/video/fbdev/pxafb.c
index 1ecd9cec2921..1ecd9cec2921 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
diff --git a/drivers/video/pxafb.h b/drivers/video/fbdev/pxafb.h
index 26ba9fa3f737..26ba9fa3f737 100644
--- a/drivers/video/pxafb.h
+++ b/drivers/video/fbdev/pxafb.h
diff --git a/drivers/video/q40fb.c b/drivers/video/fbdev/q40fb.c
index 7487f76f6275..7487f76f6275 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/fbdev/q40fb.c
diff --git a/drivers/video/riva/Makefile b/drivers/video/fbdev/riva/Makefile
index 8898c9915b02..8898c9915b02 100644
--- a/drivers/video/riva/Makefile
+++ b/drivers/video/fbdev/riva/Makefile
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index 8a8d7f060784..8a8d7f060784 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
diff --git a/drivers/video/riva/nv_driver.c b/drivers/video/fbdev/riva/nv_driver.c
index f3694cf17e58..f3694cf17e58 100644
--- a/drivers/video/riva/nv_driver.c
+++ b/drivers/video/fbdev/riva/nv_driver.c
diff --git a/drivers/video/riva/nv_type.h b/drivers/video/fbdev/riva/nv_type.h
index a69480c9a67c..a69480c9a67c 100644
--- a/drivers/video/riva/nv_type.h
+++ b/drivers/video/fbdev/riva/nv_type.h
diff --git a/drivers/video/riva/nvreg.h b/drivers/video/fbdev/riva/nvreg.h
index abfc167ae8d8..abfc167ae8d8 100644
--- a/drivers/video/riva/nvreg.h
+++ b/drivers/video/fbdev/riva/nvreg.h
diff --git a/drivers/video/riva/riva_hw.c b/drivers/video/fbdev/riva/riva_hw.c
index 78fdbf5178d7..78fdbf5178d7 100644
--- a/drivers/video/riva/riva_hw.c
+++ b/drivers/video/fbdev/riva/riva_hw.c
diff --git a/drivers/video/riva/riva_hw.h b/drivers/video/fbdev/riva/riva_hw.h
index c2769f73e0b2..c2769f73e0b2 100644
--- a/drivers/video/riva/riva_hw.h
+++ b/drivers/video/fbdev/riva/riva_hw.h
diff --git a/drivers/video/riva/riva_tbl.h b/drivers/video/fbdev/riva/riva_tbl.h
index 7ee7d72932d4..7ee7d72932d4 100644
--- a/drivers/video/riva/riva_tbl.h
+++ b/drivers/video/fbdev/riva/riva_tbl.h
diff --git a/drivers/video/riva/rivafb-i2c.c b/drivers/video/fbdev/riva/rivafb-i2c.c
index 6a183375ced1..6a183375ced1 100644
--- a/drivers/video/riva/rivafb-i2c.c
+++ b/drivers/video/fbdev/riva/rivafb-i2c.c
diff --git a/drivers/video/riva/rivafb.h b/drivers/video/fbdev/riva/rivafb.h
index d9f107b704c6..d9f107b704c6 100644
--- a/drivers/video/riva/rivafb.h
+++ b/drivers/video/fbdev/riva/rivafb.h
diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 83433cb0dfba..83433cb0dfba 100644
--- a/drivers/video/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
diff --git a/drivers/video/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index 62acae2694a9..62acae2694a9 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c
index 81af5a63e9e1..81af5a63e9e1 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/fbdev/s3c2410fb.c
diff --git a/drivers/video/s3c2410fb.h b/drivers/video/fbdev/s3c2410fb.h
index 47a17bd23011..47a17bd23011 100644
--- a/drivers/video/s3c2410fb.h
+++ b/drivers/video/fbdev/s3c2410fb.h
diff --git a/drivers/video/s3fb.c b/drivers/video/fbdev/s3fb.c
index 9a3f8f1c6aab..9a3f8f1c6aab 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
diff --git a/drivers/video/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 580c444ec301..580c444ec301 100644
--- a/drivers/video/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
diff --git a/drivers/video/sa1100fb.h b/drivers/video/fbdev/sa1100fb.h
index fc5d4292fad6..fc5d4292fad6 100644
--- a/drivers/video/sa1100fb.h
+++ b/drivers/video/fbdev/sa1100fb.h
diff --git a/drivers/video/savage/Makefile b/drivers/video/fbdev/savage/Makefile
index e09770fff8ea..e09770fff8ea 100644
--- a/drivers/video/savage/Makefile
+++ b/drivers/video/fbdev/savage/Makefile
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/fbdev/savage/savagefb-i2c.c
index 80fa87e2ae2f..80fa87e2ae2f 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/fbdev/savage/savagefb-i2c.c
diff --git a/drivers/video/savage/savagefb.h b/drivers/video/fbdev/savage/savagefb.h
index dcaab9012ca2..dcaab9012ca2 100644
--- a/drivers/video/savage/savagefb.h
+++ b/drivers/video/fbdev/savage/savagefb.h
diff --git a/drivers/video/savage/savagefb_accel.c b/drivers/video/fbdev/savage/savagefb_accel.c
index bfefa6234cf0..bfefa6234cf0 100644
--- a/drivers/video/savage/savagefb_accel.c
+++ b/drivers/video/fbdev/savage/savagefb_accel.c
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index 4dbf45f3b21a..4dbf45f3b21a 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
diff --git a/drivers/video/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index a350209ffbd3..a350209ffbd3 100644
--- a/drivers/video/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
diff --git a/drivers/video/sbuslib.h b/drivers/video/fbdev/sbuslib.h
index 7ba3250236bd..7ba3250236bd 100644
--- a/drivers/video/sbuslib.h
+++ b/drivers/video/fbdev/sbuslib.h
diff --git a/drivers/video/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c
index 1265b25f9f99..1265b25f9f99 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/fbdev/sh7760fb.c
diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/fbdev/sh_mipi_dsi.c
index 8f6e8ff620d4..8f6e8ff620d4 100644
--- a/drivers/video/sh_mipi_dsi.c
+++ b/drivers/video/fbdev/sh_mipi_dsi.c
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/fbdev/sh_mobile_hdmi.c
index 9a33ee0413fb..9a33ee0413fb 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/fbdev/sh_mobile_hdmi.c
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 2bcc84ac18c7..2bcc84ac18c7 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/fbdev/sh_mobile_lcdcfb.h
index f839adef1d90..f839adef1d90 100644
--- a/drivers/video/sh_mobile_lcdcfb.h
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.h
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/fbdev/sh_mobile_meram.c
index a297de5cc859..a297de5cc859 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/fbdev/sh_mobile_meram.c
diff --git a/drivers/video/simplefb.c b/drivers/video/fbdev/simplefb.c
index 210f3a02121a..210f3a02121a 100644
--- a/drivers/video/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
diff --git a/drivers/video/sis/300vtbl.h b/drivers/video/fbdev/sis/300vtbl.h
index e4b4a2626da4..e4b4a2626da4 100644
--- a/drivers/video/sis/300vtbl.h
+++ b/drivers/video/fbdev/sis/300vtbl.h
diff --git a/drivers/video/sis/310vtbl.h b/drivers/video/fbdev/sis/310vtbl.h
index 54fcbbf4ef63..54fcbbf4ef63 100644
--- a/drivers/video/sis/310vtbl.h
+++ b/drivers/video/fbdev/sis/310vtbl.h
diff --git a/drivers/video/sis/Makefile b/drivers/video/fbdev/sis/Makefile
index f7c0046e5b1d..f7c0046e5b1d 100644
--- a/drivers/video/sis/Makefile
+++ b/drivers/video/fbdev/sis/Makefile
diff --git a/drivers/video/sis/init.c b/drivers/video/fbdev/sis/init.c
index bd40f5ecd901..bd40f5ecd901 100644
--- a/drivers/video/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
diff --git a/drivers/video/sis/init.h b/drivers/video/fbdev/sis/init.h
index 85d6738b6c64..85d6738b6c64 100644
--- a/drivers/video/sis/init.h
+++ b/drivers/video/fbdev/sis/init.h
diff --git a/drivers/video/sis/init301.c b/drivers/video/fbdev/sis/init301.c
index a89e3cafd5ad..a89e3cafd5ad 100644
--- a/drivers/video/sis/init301.c
+++ b/drivers/video/fbdev/sis/init301.c
diff --git a/drivers/video/sis/init301.h b/drivers/video/fbdev/sis/init301.h
index 2112d6d7feda..2112d6d7feda 100644
--- a/drivers/video/sis/init301.h
+++ b/drivers/video/fbdev/sis/init301.h
diff --git a/drivers/video/sis/initdef.h b/drivers/video/fbdev/sis/initdef.h
index 264b55a5947b..264b55a5947b 100644
--- a/drivers/video/sis/initdef.h
+++ b/drivers/video/fbdev/sis/initdef.h
diff --git a/drivers/video/sis/initextlfb.c b/drivers/video/fbdev/sis/initextlfb.c
index 3ab18f5a3759..3ab18f5a3759 100644
--- a/drivers/video/sis/initextlfb.c
+++ b/drivers/video/fbdev/sis/initextlfb.c
diff --git a/drivers/video/sis/oem300.h b/drivers/video/fbdev/sis/oem300.h
index b73f26840143..b73f26840143 100644
--- a/drivers/video/sis/oem300.h
+++ b/drivers/video/fbdev/sis/oem300.h
diff --git a/drivers/video/sis/oem310.h b/drivers/video/fbdev/sis/oem310.h
index 8fce56e4482c..8fce56e4482c 100644
--- a/drivers/video/sis/oem310.h
+++ b/drivers/video/fbdev/sis/oem310.h
diff --git a/drivers/video/sis/sis.h b/drivers/video/fbdev/sis/sis.h
index 1987f1b7212f..1987f1b7212f 100644
--- a/drivers/video/sis/sis.h
+++ b/drivers/video/fbdev/sis/sis.h
diff --git a/drivers/video/sis/sis_accel.c b/drivers/video/fbdev/sis/sis_accel.c
index ceb434c95c0d..ceb434c95c0d 100644
--- a/drivers/video/sis/sis_accel.c
+++ b/drivers/video/fbdev/sis/sis_accel.c
diff --git a/drivers/video/sis/sis_accel.h b/drivers/video/fbdev/sis/sis_accel.h
index 30e03cdf6b85..30e03cdf6b85 100644
--- a/drivers/video/sis/sis_accel.h
+++ b/drivers/video/fbdev/sis/sis_accel.h
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 22ad028bf123..22ad028bf123 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
diff --git a/drivers/video/sis/sis_main.h b/drivers/video/fbdev/sis/sis_main.h
index 32e23c209430..32e23c209430 100644
--- a/drivers/video/sis/sis_main.h
+++ b/drivers/video/fbdev/sis/sis_main.h
diff --git a/drivers/video/sis/vgatypes.h b/drivers/video/fbdev/sis/vgatypes.h
index e3f9976cfef0..e3f9976cfef0 100644
--- a/drivers/video/sis/vgatypes.h
+++ b/drivers/video/fbdev/sis/vgatypes.h
diff --git a/drivers/video/sis/vstruct.h b/drivers/video/fbdev/sis/vstruct.h
index ea94d214dcff..ea94d214dcff 100644
--- a/drivers/video/sis/vstruct.h
+++ b/drivers/video/fbdev/sis/vstruct.h
diff --git a/drivers/video/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index fefde7c6add7..fefde7c6add7 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
diff --git a/drivers/video/sm501fb.c b/drivers/video/fbdev/sm501fb.c
index 1501979099dc..1501979099dc 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/fbdev/sm501fb.c
diff --git a/drivers/video/smscufx.c b/drivers/video/fbdev/smscufx.c
index d513ed6a49f2..d513ed6a49f2 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/fbdev/smscufx.c
diff --git a/drivers/video/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index f4daa59f0a80..f4daa59f0a80 100644
--- a/drivers/video/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
diff --git a/drivers/video/sstfb.c b/drivers/video/fbdev/sstfb.c
index f0cb279ef333..f0cb279ef333 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/fbdev/sstfb.c
diff --git a/drivers/video/sticore.h b/drivers/video/fbdev/sticore.h
index af1619536ac8..af1619536ac8 100644
--- a/drivers/video/sticore.h
+++ b/drivers/video/fbdev/sticore.h
diff --git a/drivers/video/stifb.c b/drivers/video/fbdev/stifb.c
index cfe8a2f905c5..cfe8a2f905c5 100644
--- a/drivers/video/stifb.c
+++ b/drivers/video/fbdev/stifb.c
diff --git a/drivers/video/sunxvr1000.c b/drivers/video/fbdev/sunxvr1000.c
index 58241b47a96d..58241b47a96d 100644
--- a/drivers/video/sunxvr1000.c
+++ b/drivers/video/fbdev/sunxvr1000.c
diff --git a/drivers/video/sunxvr2500.c b/drivers/video/fbdev/sunxvr2500.c
index 843b6bab0483..843b6bab0483 100644
--- a/drivers/video/sunxvr2500.c
+++ b/drivers/video/fbdev/sunxvr2500.c
diff --git a/drivers/video/sunxvr500.c b/drivers/video/fbdev/sunxvr500.c
index 387350d004df..387350d004df 100644
--- a/drivers/video/sunxvr500.c
+++ b/drivers/video/fbdev/sunxvr500.c
diff --git a/drivers/video/tcx.c b/drivers/video/fbdev/tcx.c
index 7fb2d696fac7..7fb2d696fac7 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/fbdev/tcx.c
diff --git a/drivers/video/tdfxfb.c b/drivers/video/fbdev/tdfxfb.c
index f761fe375f5b..f761fe375f5b 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/fbdev/tdfxfb.c
diff --git a/drivers/video/tgafb.c b/drivers/video/fbdev/tgafb.c
index 65ba9921506e..65ba9921506e 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
diff --git a/drivers/video/tmiofb.c b/drivers/video/fbdev/tmiofb.c
index 7fb4e321a431..7fb4e321a431 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/fbdev/tmiofb.c
diff --git a/drivers/video/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 7ed9a227f5ea..7ed9a227f5ea 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
diff --git a/drivers/video/udlfb.c b/drivers/video/fbdev/udlfb.c
index 77b890e4d296..77b890e4d296 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
diff --git a/drivers/video/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 509d452e8f91..509d452e8f91 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
diff --git a/drivers/video/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index 97cb9bd1d1dd..97cb9bd1d1dd 100644
--- a/drivers/video/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
diff --git a/drivers/video/valkyriefb.h b/drivers/video/fbdev/valkyriefb.h
index d787441e5a42..d787441e5a42 100644
--- a/drivers/video/valkyriefb.h
+++ b/drivers/video/fbdev/valkyriefb.h
diff --git a/drivers/video/vermilion/Makefile b/drivers/video/fbdev/vermilion/Makefile
index cc21a656153d..cc21a656153d 100644
--- a/drivers/video/vermilion/Makefile
+++ b/drivers/video/fbdev/vermilion/Makefile
diff --git a/drivers/video/vermilion/cr_pll.c b/drivers/video/fbdev/vermilion/cr_pll.c
index ebc6e6e0dd0f..ebc6e6e0dd0f 100644
--- a/drivers/video/vermilion/cr_pll.c
+++ b/drivers/video/fbdev/vermilion/cr_pll.c
diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/fbdev/vermilion/vermilion.c
index 048a66640b03..048a66640b03 100644
--- a/drivers/video/vermilion/vermilion.c
+++ b/drivers/video/fbdev/vermilion/vermilion.c
diff --git a/drivers/video/vermilion/vermilion.h b/drivers/video/fbdev/vermilion/vermilion.h
index 43d11ec197fc..43d11ec197fc 100644
--- a/drivers/video/vermilion/vermilion.h
+++ b/drivers/video/fbdev/vermilion/vermilion.h
diff --git a/drivers/video/vesafb.c b/drivers/video/fbdev/vesafb.c
index 6170e7f58640..6170e7f58640 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
diff --git a/drivers/video/vfb.c b/drivers/video/fbdev/vfb.c
index 70a897b1e458..70a897b1e458 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/fbdev/vfb.c
diff --git a/drivers/video/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index 283d335a759f..283d335a759f 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
diff --git a/drivers/video/via/Makefile b/drivers/video/fbdev/via/Makefile
index 159f26e6adb5..159f26e6adb5 100644
--- a/drivers/video/via/Makefile
+++ b/drivers/video/fbdev/via/Makefile
diff --git a/drivers/video/via/accel.c b/drivers/video/fbdev/via/accel.c
index 4b67b8e6030a..4b67b8e6030a 100644
--- a/drivers/video/via/accel.c
+++ b/drivers/video/fbdev/via/accel.c
diff --git a/drivers/video/via/accel.h b/drivers/video/fbdev/via/accel.h
index 79d5e10cc835..79d5e10cc835 100644
--- a/drivers/video/via/accel.h
+++ b/drivers/video/fbdev/via/accel.h
diff --git a/drivers/video/via/chip.h b/drivers/video/fbdev/via/chip.h
index d32a5076c20f..d32a5076c20f 100644
--- a/drivers/video/via/chip.h
+++ b/drivers/video/fbdev/via/chip.h
diff --git a/drivers/video/via/debug.h b/drivers/video/fbdev/via/debug.h
index 86eacc2017f3..86eacc2017f3 100644
--- a/drivers/video/via/debug.h
+++ b/drivers/video/fbdev/via/debug.h
diff --git a/drivers/video/via/dvi.c b/drivers/video/fbdev/via/dvi.c
index 7789553952d3..7789553952d3 100644
--- a/drivers/video/via/dvi.c
+++ b/drivers/video/fbdev/via/dvi.c
diff --git a/drivers/video/via/dvi.h b/drivers/video/fbdev/via/dvi.h
index 4c6bfba57d11..4c6bfba57d11 100644
--- a/drivers/video/via/dvi.h
+++ b/drivers/video/fbdev/via/dvi.h
diff --git a/drivers/video/via/global.c b/drivers/video/fbdev/via/global.c
index 3102171c1674..3102171c1674 100644
--- a/drivers/video/via/global.c
+++ b/drivers/video/fbdev/via/global.c
diff --git a/drivers/video/via/global.h b/drivers/video/fbdev/via/global.h
index 275dbbbd6b81..275dbbbd6b81 100644
--- a/drivers/video/via/global.h
+++ b/drivers/video/fbdev/via/global.h
diff --git a/drivers/video/via/hw.c b/drivers/video/fbdev/via/hw.c
index 22450908306c..22450908306c 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/fbdev/via/hw.c
diff --git a/drivers/video/via/hw.h b/drivers/video/fbdev/via/hw.h
index 3be073c58b03..3be073c58b03 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/fbdev/via/hw.h
diff --git a/drivers/video/via/ioctl.c b/drivers/video/fbdev/via/ioctl.c
index ea1c51428823..ea1c51428823 100644
--- a/drivers/video/via/ioctl.c
+++ b/drivers/video/fbdev/via/ioctl.c
diff --git a/drivers/video/via/ioctl.h b/drivers/video/fbdev/via/ioctl.h
index 6010d10b59e8..6010d10b59e8 100644
--- a/drivers/video/via/ioctl.h
+++ b/drivers/video/fbdev/via/ioctl.h
diff --git a/drivers/video/via/lcd.c b/drivers/video/fbdev/via/lcd.c
index 5d21ff436ec8..5d21ff436ec8 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/fbdev/via/lcd.c
diff --git a/drivers/video/via/lcd.h b/drivers/video/fbdev/via/lcd.h
index 5c988a063ad5..5c988a063ad5 100644
--- a/drivers/video/via/lcd.h
+++ b/drivers/video/fbdev/via/lcd.h
diff --git a/drivers/video/via/share.h b/drivers/video/fbdev/via/share.h
index 65c65c611e0a..65c65c611e0a 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/fbdev/via/share.h
diff --git a/drivers/video/via/tblDPASetting.c b/drivers/video/fbdev/via/tblDPASetting.c
index 73bb554e7c1e..73bb554e7c1e 100644
--- a/drivers/video/via/tblDPASetting.c
+++ b/drivers/video/fbdev/via/tblDPASetting.c
diff --git a/drivers/video/via/tblDPASetting.h b/drivers/video/fbdev/via/tblDPASetting.h
index 6db61519cb5d..6db61519cb5d 100644
--- a/drivers/video/via/tblDPASetting.h
+++ b/drivers/video/fbdev/via/tblDPASetting.h
diff --git a/drivers/video/via/via-core.c b/drivers/video/fbdev/via/via-core.c
index 6e274825fb31..6e274825fb31 100644
--- a/drivers/video/via/via-core.c
+++ b/drivers/video/fbdev/via/via-core.c
diff --git a/drivers/video/via/via-gpio.c b/drivers/video/fbdev/via/via-gpio.c
index e408679081ab..e408679081ab 100644
--- a/drivers/video/via/via-gpio.c
+++ b/drivers/video/fbdev/via/via-gpio.c
diff --git a/drivers/video/via/via_aux.c b/drivers/video/fbdev/via/via_aux.c
index 4a0a55cdac3d..4a0a55cdac3d 100644
--- a/drivers/video/via/via_aux.c
+++ b/drivers/video/fbdev/via/via_aux.c
diff --git a/drivers/video/via/via_aux.h b/drivers/video/fbdev/via/via_aux.h
index a8de3f038cea..a8de3f038cea 100644
--- a/drivers/video/via/via_aux.h
+++ b/drivers/video/fbdev/via/via_aux.h
diff --git a/drivers/video/via/via_aux_ch7301.c b/drivers/video/fbdev/via/via_aux_ch7301.c
index 1cbe5037a6b0..1cbe5037a6b0 100644
--- a/drivers/video/via/via_aux_ch7301.c
+++ b/drivers/video/fbdev/via/via_aux_ch7301.c
diff --git a/drivers/video/via/via_aux_edid.c b/drivers/video/fbdev/via/via_aux_edid.c
index 754d4509033f..754d4509033f 100644
--- a/drivers/video/via/via_aux_edid.c
+++ b/drivers/video/fbdev/via/via_aux_edid.c
diff --git a/drivers/video/via/via_aux_sii164.c b/drivers/video/fbdev/via/via_aux_sii164.c
index ca1b35f033b1..ca1b35f033b1 100644
--- a/drivers/video/via/via_aux_sii164.c
+++ b/drivers/video/fbdev/via/via_aux_sii164.c
diff --git a/drivers/video/via/via_aux_vt1621.c b/drivers/video/fbdev/via/via_aux_vt1621.c
index 38eca8479898..38eca8479898 100644
--- a/drivers/video/via/via_aux_vt1621.c
+++ b/drivers/video/fbdev/via/via_aux_vt1621.c
diff --git a/drivers/video/via/via_aux_vt1622.c b/drivers/video/fbdev/via/via_aux_vt1622.c
index 8c79c68ba683..8c79c68ba683 100644
--- a/drivers/video/via/via_aux_vt1622.c
+++ b/drivers/video/fbdev/via/via_aux_vt1622.c
diff --git a/drivers/video/via/via_aux_vt1625.c b/drivers/video/fbdev/via/via_aux_vt1625.c
index 03eb30165d36..03eb30165d36 100644
--- a/drivers/video/via/via_aux_vt1625.c
+++ b/drivers/video/fbdev/via/via_aux_vt1625.c
diff --git a/drivers/video/via/via_aux_vt1631.c b/drivers/video/fbdev/via/via_aux_vt1631.c
index 06e742f1f723..06e742f1f723 100644
--- a/drivers/video/via/via_aux_vt1631.c
+++ b/drivers/video/fbdev/via/via_aux_vt1631.c
diff --git a/drivers/video/via/via_aux_vt1632.c b/drivers/video/fbdev/via/via_aux_vt1632.c
index d24f4cd97401..d24f4cd97401 100644
--- a/drivers/video/via/via_aux_vt1632.c
+++ b/drivers/video/fbdev/via/via_aux_vt1632.c
diff --git a/drivers/video/via/via_aux_vt1636.c b/drivers/video/fbdev/via/via_aux_vt1636.c
index 9e015c101d4d..9e015c101d4d 100644
--- a/drivers/video/via/via_aux_vt1636.c
+++ b/drivers/video/fbdev/via/via_aux_vt1636.c
diff --git a/drivers/video/via/via_clock.c b/drivers/video/fbdev/via/via_clock.c
index db1e39277e32..db1e39277e32 100644
--- a/drivers/video/via/via_clock.c
+++ b/drivers/video/fbdev/via/via_clock.c
diff --git a/drivers/video/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
index 88714ae0d157..88714ae0d157 100644
--- a/drivers/video/via/via_clock.h
+++ b/drivers/video/fbdev/via/via_clock.h
diff --git a/drivers/video/via/via_i2c.c b/drivers/video/fbdev/via/via_i2c.c
index dd53058bbbb7..dd53058bbbb7 100644
--- a/drivers/video/via/via_i2c.c
+++ b/drivers/video/fbdev/via/via_i2c.c
diff --git a/drivers/video/via/via_modesetting.c b/drivers/video/fbdev/via/via_modesetting.c
index 0b414b09b9b4..0b414b09b9b4 100644
--- a/drivers/video/via/via_modesetting.c
+++ b/drivers/video/fbdev/via/via_modesetting.c
diff --git a/drivers/video/via/via_modesetting.h b/drivers/video/fbdev/via/via_modesetting.h
index f6a6503da3b3..f6a6503da3b3 100644
--- a/drivers/video/via/via_modesetting.h
+++ b/drivers/video/fbdev/via/via_modesetting.h
diff --git a/drivers/video/via/via_utility.c b/drivers/video/fbdev/via/via_utility.c
index 35458a5eadc8..35458a5eadc8 100644
--- a/drivers/video/via/via_utility.c
+++ b/drivers/video/fbdev/via/via_utility.c
diff --git a/drivers/video/via/via_utility.h b/drivers/video/fbdev/via/via_utility.h
index f23be1708c14..f23be1708c14 100644
--- a/drivers/video/via/via_utility.h
+++ b/drivers/video/fbdev/via/via_utility.h
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index 325c43c6ff97..325c43c6ff97 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
diff --git a/drivers/video/via/viafbdev.h b/drivers/video/fbdev/via/viafbdev.h
index f6b2ddf56e94..f6b2ddf56e94 100644
--- a/drivers/video/via/viafbdev.h
+++ b/drivers/video/fbdev/via/viafbdev.h
diff --git a/drivers/video/via/viamode.c b/drivers/video/fbdev/via/viamode.c
index 0666ab01cf4a..0666ab01cf4a 100644
--- a/drivers/video/via/viamode.c
+++ b/drivers/video/fbdev/via/viamode.c
diff --git a/drivers/video/via/viamode.h b/drivers/video/fbdev/via/viamode.h
index dd19106698e7..dd19106698e7 100644
--- a/drivers/video/via/viamode.h
+++ b/drivers/video/fbdev/via/viamode.h
diff --git a/drivers/video/via/vt1636.c b/drivers/video/fbdev/via/vt1636.c
index ee2903b472cf..ee2903b472cf 100644
--- a/drivers/video/via/vt1636.c
+++ b/drivers/video/fbdev/via/vt1636.c
diff --git a/drivers/video/via/vt1636.h b/drivers/video/fbdev/via/vt1636.h
index 4c1314e57468..4c1314e57468 100644
--- a/drivers/video/via/vt1636.h
+++ b/drivers/video/fbdev/via/vt1636.h
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index a8f2b280f796..a8f2b280f796 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
diff --git a/drivers/video/vt8500lcdfb.h b/drivers/video/fbdev/vt8500lcdfb.h
index 36ca3ca09d83..36ca3ca09d83 100644
--- a/drivers/video/vt8500lcdfb.h
+++ b/drivers/video/fbdev/vt8500lcdfb.h
diff --git a/drivers/video/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 5c7cbc6c6236..5c7cbc6c6236 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
diff --git a/drivers/video/w100fb.c b/drivers/video/fbdev/w100fb.c
index 10951c82f6ed..10951c82f6ed 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
diff --git a/drivers/video/w100fb.h b/drivers/video/fbdev/w100fb.h
index fffae7b4f6e9..fffae7b4f6e9 100644
--- a/drivers/video/w100fb.h
+++ b/drivers/video/fbdev/w100fb.h
diff --git a/drivers/video/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index 537d199612af..537d199612af 100644
--- a/drivers/video/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
diff --git a/drivers/video/wm8505fb_regs.h b/drivers/video/fbdev/wm8505fb_regs.h
index 4dd41668c6d1..4dd41668c6d1 100644
--- a/drivers/video/wm8505fb_regs.h
+++ b/drivers/video/fbdev/wm8505fb_regs.h
diff --git a/drivers/video/wmt_ge_rops.c b/drivers/video/fbdev/wmt_ge_rops.c
index b0a9f34b2e01..9df6fe78a44b 100644
--- a/drivers/video/wmt_ge_rops.c
+++ b/drivers/video/fbdev/wmt_ge_rops.c
@@ -18,7 +18,7 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/fb.h> 19#include <linux/fb.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include "fb_draw.h" 21#include "core/fb_draw.h"
22 22
23#define GE_COMMAND_OFF 0x00 23#define GE_COMMAND_OFF 0x00
24#define GE_DEPTH_OFF 0x04 24#define GE_DEPTH_OFF 0x04
diff --git a/drivers/video/wmt_ge_rops.h b/drivers/video/fbdev/wmt_ge_rops.h
index f73ec6377a46..f73ec6377a46 100644
--- a/drivers/video/wmt_ge_rops.h
+++ b/drivers/video/fbdev/wmt_ge_rops.h
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 901014bbc821..901014bbc821 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
diff --git a/drivers/video/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c
index 553cff2f3f4c..553cff2f3f4c 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/fbdev/xilinxfb.c
diff --git a/drivers/video/omap2/Kconfig b/drivers/video/omap2/Kconfig
deleted file mode 100644
index 63b23f87081d..000000000000
--- a/drivers/video/omap2/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
1config OMAP2_VRFB
2 bool
3
4if ARCH_OMAP2PLUS
5
6source "drivers/video/omap2/dss/Kconfig"
7source "drivers/video/omap2/omapfb/Kconfig"
8source "drivers/video/omap2/displays-new/Kconfig"
9
10endif
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 06990c6a1a69..61e706c0e00c 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -320,7 +320,7 @@ static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
320 struct pci_dev *pdev; 320 struct pci_dev *pdev;
321 struct tsi148_driver *bridge; 321 struct tsi148_driver *bridge;
322 322
323 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev); 323 pdev = to_pci_dev(tsi148_bridge->parent);
324 324
325 bridge = tsi148_bridge->driver_priv; 325 bridge = tsi148_bridge->driver_priv;
326 326
@@ -433,9 +433,7 @@ static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
433 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO); 433 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
434 434
435 if (sync != 0) { 435 if (sync != 0) {
436 pdev = container_of(tsi148_bridge->parent, 436 pdev = to_pci_dev(tsi148_bridge->parent);
437 struct pci_dev, dev);
438
439 synchronize_irq(pdev->irq); 437 synchronize_irq(pdev->irq);
440 } 438 }
441 } else { 439 } else {
@@ -741,7 +739,7 @@ static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
741 reg_join(vme_bound_high, vme_bound_low, &vme_bound); 739 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
742 reg_join(pci_offset_high, pci_offset_low, &pci_offset); 740 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
743 741
744 *pci_base = (dma_addr_t)vme_base + pci_offset; 742 *pci_base = (dma_addr_t)(*vme_base + pci_offset);
745 743
746 *enabled = 0; 744 *enabled = 0;
747 *aspace = 0; 745 *aspace = 0;
@@ -814,7 +812,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
814 812
815 tsi148_bridge = image->parent; 813 tsi148_bridge = image->parent;
816 814
817 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev); 815 pdev = to_pci_dev(tsi148_bridge->parent);
818 816
819 existing_size = (unsigned long long)(image->bus_resource.end - 817 existing_size = (unsigned long long)(image->bus_resource.end -
820 image->bus_resource.start); 818 image->bus_resource.start);
@@ -910,11 +908,15 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
910 unsigned long long pci_bound, vme_offset, pci_base; 908 unsigned long long pci_bound, vme_offset, pci_base;
911 struct vme_bridge *tsi148_bridge; 909 struct vme_bridge *tsi148_bridge;
912 struct tsi148_driver *bridge; 910 struct tsi148_driver *bridge;
911 struct pci_bus_region region;
912 struct pci_dev *pdev;
913 913
914 tsi148_bridge = image->parent; 914 tsi148_bridge = image->parent;
915 915
916 bridge = tsi148_bridge->driver_priv; 916 bridge = tsi148_bridge->driver_priv;
917 917
918 pdev = to_pci_dev(tsi148_bridge->parent);
919
918 /* Verify input data */ 920 /* Verify input data */
919 if (vme_base & 0xFFFF) { 921 if (vme_base & 0xFFFF) {
920 dev_err(tsi148_bridge->parent, "Invalid VME Window " 922 dev_err(tsi148_bridge->parent, "Invalid VME Window "
@@ -949,7 +951,9 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled,
949 pci_bound = 0; 951 pci_bound = 0;
950 vme_offset = 0; 952 vme_offset = 0;
951 } else { 953 } else {
952 pci_base = (unsigned long long)image->bus_resource.start; 954 pcibios_resource_to_bus(pdev->bus, &region,
955 &image->bus_resource);
956 pci_base = region.start;
953 957
954 /* 958 /*
955 * Bound address is a valid address for the window, adjust 959 * Bound address is a valid address for the window, adjust
@@ -2232,7 +2236,7 @@ static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2232 struct pci_dev *pdev; 2236 struct pci_dev *pdev;
2233 2237
2234 /* Find pci_dev container of dev */ 2238 /* Find pci_dev container of dev */
2235 pdev = container_of(parent, struct pci_dev, dev); 2239 pdev = to_pci_dev(parent);
2236 2240
2237 return pci_alloc_consistent(pdev, size, dma); 2241 return pci_alloc_consistent(pdev, size, dma);
2238} 2242}
@@ -2243,7 +2247,7 @@ static void tsi148_free_consistent(struct device *parent, size_t size,
2243 struct pci_dev *pdev; 2247 struct pci_dev *pdev;
2244 2248
2245 /* Find pci_dev container of dev */ 2249 /* Find pci_dev container of dev */
2246 pdev = container_of(parent, struct pci_dev, dev); 2250 pdev = to_pci_dev(parent);
2247 2251
2248 pci_free_consistent(pdev, size, vaddr, dma); 2252 pci_free_consistent(pdev, size, vaddr, dma);
2249} 2253}
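The tsi148 hunks above replace the open-coded container_of(dev, struct pci_dev, dev) with to_pci_dev() and, in tsi148_master_set(), translate the window's host resource into a PCI bus address with pcibios_resource_to_bus() rather than using the resource start directly. A minimal sketch of that pattern follows; get_window_bus_base() is a hypothetical helper, not a tsi148 function, and it assumes the parent device really is a PCI device.

#include <linux/pci.h>

/*
 * Sketch only: recover the owning pci_dev with to_pci_dev() and convert
 * a CPU-visible resource into the address space seen on the PCI bus
 * before it is programmed into a window register.
 */
static resource_size_t get_window_bus_base(struct device *parent,
					   struct resource *res)
{
	struct pci_dev *pdev = to_pci_dev(parent);	/* parent must be a PCI device */
	struct pci_bus_region region;

	pcibios_resource_to_bus(pdev->bus, &region, res);
	return region.start;
}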
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index b96f61b15dc6..ff52618cafbe 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -614,27 +614,11 @@ end:
614 return err; 614 return err;
615} 615}
616 616
617/* 617static int w1_family_notify(unsigned long action, struct w1_slave *sl)
618 * Handle sysfs file creation and removal here, before userspace is told that
619 * the device is added / removed from the system
620 */
621static int w1_bus_notify(struct notifier_block *nb, unsigned long action,
622 void *data)
623{ 618{
624 struct device *dev = data;
625 struct w1_slave *sl;
626 struct w1_family_ops *fops; 619 struct w1_family_ops *fops;
627 int err; 620 int err;
628 621
629 /*
630 * Only care about slave devices at the moment. Yes, we should use a
631 * separate "type" for this, but for now, look at the release function
632 * to know which type it is...
633 */
634 if (dev->release != w1_slave_release)
635 return 0;
636
637 sl = dev_to_w1_slave(dev);
638 fops = sl->family->fops; 622 fops = sl->family->fops;
639 623
640 if (!fops) 624 if (!fops)
@@ -673,10 +657,6 @@ static int w1_bus_notify(struct notifier_block *nb, unsigned long action,
673 return 0; 657 return 0;
674} 658}
675 659
676static struct notifier_block w1_bus_nb = {
677 .notifier_call = w1_bus_notify,
678};
679
680static int __w1_attach_slave_device(struct w1_slave *sl) 660static int __w1_attach_slave_device(struct w1_slave *sl)
681{ 661{
682 int err; 662 int err;
@@ -698,6 +678,9 @@ static int __w1_attach_slave_device(struct w1_slave *sl)
698 dev_dbg(&sl->dev, "%s: registering %s as %p.\n", __func__, 678 dev_dbg(&sl->dev, "%s: registering %s as %p.\n", __func__,
699 dev_name(&sl->dev), sl); 679 dev_name(&sl->dev), sl);
700 680
681 /* suppress for w1_family_notify before sending KOBJ_ADD */
682 dev_set_uevent_suppress(&sl->dev, true);
683
701 err = device_register(&sl->dev); 684 err = device_register(&sl->dev);
702 if (err < 0) { 685 if (err < 0) {
703 dev_err(&sl->dev, 686 dev_err(&sl->dev,
@@ -705,7 +688,7 @@ static int __w1_attach_slave_device(struct w1_slave *sl)
705 dev_name(&sl->dev), err); 688 dev_name(&sl->dev), err);
706 return err; 689 return err;
707 } 690 }
708 691 w1_family_notify(BUS_NOTIFY_ADD_DEVICE, sl);
709 692
710 dev_set_uevent_suppress(&sl->dev, false); 693 dev_set_uevent_suppress(&sl->dev, false);
711 kobject_uevent(&sl->dev.kobj, KOBJ_ADD); 694 kobject_uevent(&sl->dev.kobj, KOBJ_ADD);
@@ -799,6 +782,7 @@ int w1_unref_slave(struct w1_slave *sl)
799 msg.type = W1_SLAVE_REMOVE; 782 msg.type = W1_SLAVE_REMOVE;
800 w1_netlink_send(sl->master, &msg); 783 w1_netlink_send(sl->master, &msg);
801 784
785 w1_family_notify(BUS_NOTIFY_DEL_DEVICE, sl);
802 device_unregister(&sl->dev); 786 device_unregister(&sl->dev);
803 #ifdef DEBUG 787 #ifdef DEBUG
804 memset(sl, 0, sizeof(*sl)); 788 memset(sl, 0, sizeof(*sl));
@@ -1186,10 +1170,6 @@ static int __init w1_init(void)
1186 goto err_out_exit_init; 1170 goto err_out_exit_init;
1187 } 1171 }
1188 1172
1189 retval = bus_register_notifier(&w1_bus_type, &w1_bus_nb);
1190 if (retval)
1191 goto err_out_bus_unregister;
1192
1193 retval = driver_register(&w1_master_driver); 1173 retval = driver_register(&w1_master_driver);
1194 if (retval) { 1174 if (retval) {
1195 printk(KERN_ERR 1175 printk(KERN_ERR
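In w1.c the bus notifier is dropped in favour of calling w1_family_notify() directly, with uevents suppressed across device_register() so userspace only sees KOBJ_ADD once the family's sysfs files exist. A sketch of that ordering, assuming a hypothetical setup_family_sysfs() in place of w1_family_notify():

#include <linux/device.h>
#include <linux/kobject.h>

/* Hypothetical stand-in for w1_family_notify(BUS_NOTIFY_ADD_DEVICE, sl). */
void setup_family_sysfs(struct device *dev);

/*
 * Sketch of the registration ordering above: hold back the ADD uevent
 * until the extra attributes are in place, then announce the device
 * explicitly.
 */
static int register_with_deferred_uevent(struct device *dev)
{
	int err;

	dev_set_uevent_suppress(dev, true);	/* no KOBJ_ADD from device_register() */

	err = device_register(dev);
	if (err < 0)
		return err;

	setup_family_sysfs(dev);		/* create per-family sysfs files */

	dev_set_uevent_suppress(dev, false);
	kobject_uevent(&dev->kobj, KOBJ_ADD);	/* userspace now sees a complete device */
	return 0;
}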
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index 5234964fe001..a02704a59321 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -300,12 +300,6 @@ static int w1_process_command_root(struct cn_msg *msg,
300 struct w1_netlink_msg *w; 300 struct w1_netlink_msg *w;
301 u32 *id; 301 u32 *id;
302 302
303 if (mcmd->type != W1_LIST_MASTERS) {
304 printk(KERN_NOTICE "%s: msg: %x.%x, wrong type: %u, len: %u.\n",
305 __func__, msg->id.idx, msg->id.val, mcmd->type, mcmd->len);
306 return -EPROTO;
307 }
308
309 cn = kmalloc(PAGE_SIZE, GFP_KERNEL); 303 cn = kmalloc(PAGE_SIZE, GFP_KERNEL);
310 if (!cn) 304 if (!cn)
311 return -ENOMEM; 305 return -ENOMEM;
@@ -441,6 +435,9 @@ static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
441 w1_netlink_send_error(&node->block->msg, node->m, cmd, 435 w1_netlink_send_error(&node->block->msg, node->m, cmd,
442 node->block->portid, err); 436 node->block->portid, err);
443 437
438 /* ref taken in w1_search_slave or w1_search_master_id when building
439 * the block
440 */
444 if (sl) 441 if (sl)
445 w1_unref_slave(sl); 442 w1_unref_slave(sl);
446 else 443 else
@@ -503,30 +500,42 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
503 500
504 msg_len = msg->len; 501 msg_len = msg->len;
505 while (msg_len && !err) { 502 while (msg_len && !err) {
506 struct w1_reg_num id;
507 u16 mlen = m->len;
508 503
509 dev = NULL; 504 dev = NULL;
510 sl = NULL; 505 sl = NULL;
511 506
512 memcpy(&id, m->id.id, sizeof(id));
513#if 0
514 printk("%s: %02x.%012llx.%02x: type=%02x, len=%u.\n",
515 __func__, id.family, (unsigned long long)id.id, id.crc, m->type, m->len);
516#endif
517 if (m->len + sizeof(struct w1_netlink_msg) > msg_len) { 507 if (m->len + sizeof(struct w1_netlink_msg) > msg_len) {
518 err = -E2BIG; 508 err = -E2BIG;
519 break; 509 break;
520 } 510 }
521 511
512 /* execute on this thread, no need to process later */
513 if (m->type == W1_LIST_MASTERS) {
514 err = w1_process_command_root(msg, m, nsp->portid);
515 goto out_cont;
516 }
517
518 /* All following message types require additional data,
519 * check here before references are taken.
520 */
521 if (!m->len) {
522 err = -EPROTO;
523 goto out_cont;
524 }
525
526 /* both search calls take reference counts */
522 if (m->type == W1_MASTER_CMD) { 527 if (m->type == W1_MASTER_CMD) {
523 dev = w1_search_master_id(m->id.mst.id); 528 dev = w1_search_master_id(m->id.mst.id);
524 } else if (m->type == W1_SLAVE_CMD) { 529 } else if (m->type == W1_SLAVE_CMD) {
525 sl = w1_search_slave(&id); 530 sl = w1_search_slave((struct w1_reg_num *)m->id.id);
526 if (sl) 531 if (sl)
527 dev = sl->master; 532 dev = sl->master;
528 } else { 533 } else {
529 err = w1_process_command_root(msg, m, nsp->portid); 534 printk(KERN_NOTICE
535 "%s: msg: %x.%x, wrong type: %u, len: %u.\n",
536 __func__, msg->id.idx, msg->id.val,
537 m->type, m->len);
538 err = -EPROTO;
530 goto out_cont; 539 goto out_cont;
531 } 540 }
532 541
@@ -536,8 +545,6 @@ static void w1_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
536 } 545 }
537 546
538 err = 0; 547 err = 0;
539 if (!mlen)
540 goto out_cont;
541 548
542 atomic_inc(&block->refcnt); 549 atomic_inc(&block->refcnt);
543 node->async.cb = w1_process_cb; 550 node->async.cb = w1_process_cb;
@@ -557,7 +564,8 @@ out_cont:
557 if (err) 564 if (err)
558 w1_netlink_send_error(msg, m, NULL, nsp->portid, err); 565 w1_netlink_send_error(msg, m, NULL, nsp->portid, err);
559 msg_len -= sizeof(struct w1_netlink_msg) + m->len; 566 msg_len -= sizeof(struct w1_netlink_msg) + m->len;
560 m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len); 567 m = (struct w1_netlink_msg *)(((u8 *)m) +
568 sizeof(struct w1_netlink_msg) + m->len);
561 569
562 /* 570 /*
563 * Let's allow requests for nonexisting devices. 571 * Let's allow requests for nonexisting devices.
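The w1_netlink.c changes fold W1_LIST_MASTERS handling into w1_cn_callback(), reject zero-length payloads before any reference is taken, and report unknown message types with -EPROTO. The essential shape is a length-checked walk over variable-sized messages packed back to back; below is a compact, userspace-style C sketch, where struct msg_hdr is illustrative and not the kernel's w1_netlink_msg.

#include <stddef.h>
#include <stdint.h>

struct msg_hdr {
	uint8_t  type;
	uint16_t len;		/* payload bytes that follow the header */
	uint8_t  data[];
} __attribute__((packed));	/* packed, like an on-wire layout */

/*
 * Walk a buffer of consecutive messages, validating each declared
 * length against the bytes that actually remain before the payload is
 * used; this mirrors the check that returns -E2BIG above.
 */
static int walk_messages(const uint8_t *buf, size_t total,
			 int (*handle)(const struct msg_hdr *m))
{
	while (total >= sizeof(struct msg_hdr)) {
		const struct msg_hdr *m = (const struct msg_hdr *)buf;
		size_t step = sizeof(struct msg_hdr) + m->len;

		if (step > total)
			return -1;	/* truncated message */
		if (handle(m))
			return -1;	/* unknown type, empty payload, etc. */

		buf   += step;
		total -= step;
	}
	return 0;
}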
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 96109a9972b6..84b4bfb84344 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -66,7 +66,22 @@ static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
66static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly; 66static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
67static unsigned event_array_pages __read_mostly; 67static unsigned event_array_pages __read_mostly;
68 68
69/*
70 * sync_set_bit() and friends must be unsigned long aligned on non-x86
71 * platforms.
72 */
73#if !defined(CONFIG_X86) && BITS_PER_LONG > 32
74
75#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
76#define EVTCHN_FIFO_BIT(b, w) \
77 (((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b)
78
79#else
80
69#define BM(w) ((unsigned long *)(w)) 81#define BM(w) ((unsigned long *)(w))
82#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b
83
84#endif
70 85
71static inline event_word_t *event_word_from_port(unsigned port) 86static inline event_word_t *event_word_from_port(unsigned port)
72{ 87{
@@ -161,33 +176,38 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
161static void evtchn_fifo_clear_pending(unsigned port) 176static void evtchn_fifo_clear_pending(unsigned port)
162{ 177{
163 event_word_t *word = event_word_from_port(port); 178 event_word_t *word = event_word_from_port(port);
164 sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word)); 179 sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
165} 180}
166 181
167static void evtchn_fifo_set_pending(unsigned port) 182static void evtchn_fifo_set_pending(unsigned port)
168{ 183{
169 event_word_t *word = event_word_from_port(port); 184 event_word_t *word = event_word_from_port(port);
170 sync_set_bit(EVTCHN_FIFO_PENDING, BM(word)); 185 sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
171} 186}
172 187
173static bool evtchn_fifo_is_pending(unsigned port) 188static bool evtchn_fifo_is_pending(unsigned port)
174{ 189{
175 event_word_t *word = event_word_from_port(port); 190 event_word_t *word = event_word_from_port(port);
176 return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word)); 191 return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
177} 192}
178 193
179static bool evtchn_fifo_test_and_set_mask(unsigned port) 194static bool evtchn_fifo_test_and_set_mask(unsigned port)
180{ 195{
181 event_word_t *word = event_word_from_port(port); 196 event_word_t *word = event_word_from_port(port);
182 return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word)); 197 return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
183} 198}
184 199
185static void evtchn_fifo_mask(unsigned port) 200static void evtchn_fifo_mask(unsigned port)
186{ 201{
187 event_word_t *word = event_word_from_port(port); 202 event_word_t *word = event_word_from_port(port);
188 sync_set_bit(EVTCHN_FIFO_MASKED, BM(word)); 203 sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
189} 204}
190 205
206static bool evtchn_fifo_is_masked(unsigned port)
207{
208 event_word_t *word = event_word_from_port(port);
209 return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
210}
191/* 211/*
192 * Clear MASKED, spinning if BUSY is set. 212 * Clear MASKED, spinning if BUSY is set.
193 */ 213 */
@@ -211,7 +231,7 @@ static void evtchn_fifo_unmask(unsigned port)
211 BUG_ON(!irqs_disabled()); 231 BUG_ON(!irqs_disabled());
212 232
213 clear_masked(word); 233 clear_masked(word);
214 if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) { 234 if (evtchn_fifo_is_pending(port)) {
215 struct evtchn_unmask unmask = { .port = port }; 235 struct evtchn_unmask unmask = { .port = port };
216 (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); 236 (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
217 } 237 }
@@ -243,7 +263,7 @@ static void handle_irq_for_port(unsigned port)
243 263
244static void consume_one_event(unsigned cpu, 264static void consume_one_event(unsigned cpu,
245 struct evtchn_fifo_control_block *control_block, 265 struct evtchn_fifo_control_block *control_block,
246 unsigned priority, uint32_t *ready) 266 unsigned priority, unsigned long *ready)
247{ 267{
248 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); 268 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
249 uint32_t head; 269 uint32_t head;
@@ -273,10 +293,9 @@ static void consume_one_event(unsigned cpu,
273 * copy of the ready word. 293 * copy of the ready word.
274 */ 294 */
275 if (head == 0) 295 if (head == 0)
276 clear_bit(priority, BM(ready)); 296 clear_bit(priority, ready);
277 297
278 if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word)) 298 if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
279 && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word)))
280 handle_irq_for_port(port); 299 handle_irq_for_port(port);
281 300
282 q->head[priority] = head; 301 q->head[priority] = head;
@@ -285,7 +304,7 @@ static void consume_one_event(unsigned cpu,
285static void evtchn_fifo_handle_events(unsigned cpu) 304static void evtchn_fifo_handle_events(unsigned cpu)
286{ 305{
287 struct evtchn_fifo_control_block *control_block; 306 struct evtchn_fifo_control_block *control_block;
288 uint32_t ready; 307 unsigned long ready;
289 unsigned q; 308 unsigned q;
290 309
291 control_block = per_cpu(cpu_control_block, cpu); 310 control_block = per_cpu(cpu_control_block, cpu);
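The events_fifo.c rework exists because sync_set_bit() and friends operate on unsigned long and, on non-x86 64-bit platforms, require unsigned long alignment, while each event word is only 32 bits wide. BM() therefore rounds the word's address down to an 8-byte boundary and EVTCHN_FIFO_BIT() adds 32 to the bit number when the word sits in the upper half of that long; on x86 and on 32-bit builds the plain definitions are kept. A plain-C illustration of the arithmetic, with EXAMPLE_BIT standing in for EVTCHN_FIFO_PENDING:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_BIT 0	/* stand-in for EVTCHN_FIFO_PENDING */

/* Round a 32-bit word's address down to the containing 8-byte long. */
static unsigned long *aligned_word(uint32_t *w)
{
	return (unsigned long *)((uintptr_t)w & ~0x7UL);
}

/* Shift the bit index by 32 when the word is the upper half of that long. */
static unsigned aligned_bit(uint32_t *w, unsigned bit)
{
	return ((uintptr_t)w & 0x4UL) ? bit + 32 : bit;
}

int main(void)
{
	uint64_t backing = 0;
	/* Second 32-bit half; its address has bit 0x4 set on a 64-bit build. */
	uint32_t *upper = (uint32_t *)&backing + 1;

	printf("word %p -> long %p, bit %u\n",
	       (void *)upper, (void *)aligned_word(upper),
	       aligned_bit(upper, EXAMPLE_BIT));
	return 0;
}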
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index fc6c94c0b436..32f9236c959f 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -198,10 +198,32 @@ struct shutdown_handler {
198 void (*cb)(void); 198 void (*cb)(void);
199}; 199};
200 200
201static int poweroff_nb(struct notifier_block *cb, unsigned long code, void *unused)
202{
203 switch (code) {
204 case SYS_DOWN:
205 case SYS_HALT:
206 case SYS_POWER_OFF:
207 shutting_down = SHUTDOWN_POWEROFF;
208 default:
209 break;
210 }
211 return NOTIFY_DONE;
212}
201static void do_poweroff(void) 213static void do_poweroff(void)
202{ 214{
203 shutting_down = SHUTDOWN_POWEROFF; 215 switch (system_state) {
204 orderly_poweroff(false); 216 case SYSTEM_BOOTING:
217 orderly_poweroff(true);
218 break;
219 case SYSTEM_RUNNING:
220 orderly_poweroff(false);
221 break;
222 default:
223 /* Don't do it when we are halting/rebooting. */
224 pr_info("Ignoring Xen toolstack shutdown.\n");
225 break;
226 }
205} 227}
206 228
207static void do_reboot(void) 229static void do_reboot(void)
@@ -307,6 +329,10 @@ static struct xenbus_watch shutdown_watch = {
307 .callback = shutdown_handler 329 .callback = shutdown_handler
308}; 330};
309 331
332static struct notifier_block xen_reboot_nb = {
333 .notifier_call = poweroff_nb,
334};
335
310static int setup_shutdown_watcher(void) 336static int setup_shutdown_watcher(void)
311{ 337{
312 int err; 338 int err;
@@ -317,6 +343,7 @@ static int setup_shutdown_watcher(void)
317 return err; 343 return err;
318 } 344 }
319 345
346
320#ifdef CONFIG_MAGIC_SYSRQ 347#ifdef CONFIG_MAGIC_SYSRQ
321 err = register_xenbus_watch(&sysrq_watch); 348 err = register_xenbus_watch(&sysrq_watch);
322 if (err) { 349 if (err) {
@@ -345,6 +372,7 @@ int xen_setup_shutdown_event(void)
345 if (!xen_domain()) 372 if (!xen_domain())
346 return -ENODEV; 373 return -ENODEV;
347 register_xenstore_notifier(&xenstore_notifier); 374 register_xenstore_notifier(&xenstore_notifier);
375 register_reboot_notifier(&xen_reboot_nb);
348 376
349 return 0; 377 return 0;
350} 378}
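In manage.c a reboot notifier now records a native halt or poweroff by setting shutting_down, and do_poweroff() chooses between orderly_poweroff(true) and orderly_poweroff(false) based on system_state, ignoring toolstack requests once the machine is already halting or rebooting. A sketch of the reboot-notifier registration pattern; my_shutting_down and the function names here are illustrative, not the driver's symbols.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_shutting_down;	/* illustrative flag, not xen/manage.c's */

static int my_reboot_cb(struct notifier_block *nb, unsigned long code,
			void *unused)
{
	if (code == SYS_DOWN || code == SYS_HALT || code == SYS_POWER_OFF)
		my_shutting_down = 1;	/* a native shutdown is in progress */
	return NOTIFY_DONE;
}

static struct notifier_block my_reboot_nb = {
	.notifier_call = my_reboot_cb,
};

static int __init my_init(void)
{
	/* Pair with unregister_reboot_notifier() on the exit path. */
	return register_reboot_notifier(&my_reboot_nb);
}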
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 929dd46bb40c..607e41460c0d 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -217,7 +217,7 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
217 if (result == 0) { 217 if (result == 0) {
218 for (i = 0; i < op->value; i++) { 218 for (i = 0; i < op->value; i++) {
219 op->msix_entries[i].entry = entries[i].entry; 219 op->msix_entries[i].entry = entries[i].entry;
220 if (entries[i].vector) 220 if (entries[i].vector) {
221 op->msix_entries[i].vector = 221 op->msix_entries[i].vector =
222 xen_pirq_from_irq(entries[i].vector); 222 xen_pirq_from_irq(entries[i].vector);
223 if (unlikely(verbose_request)) 223 if (unlikely(verbose_request))
@@ -225,6 +225,7 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
225 "MSI-X[%d]: %d\n", 225 "MSI-X[%d]: %d\n",
226 pci_name(dev), i, 226 pci_name(dev), i,
227 op->msix_entries[i].vector); 227 op->msix_entries[i].vector);
228 }
228 } 229 }
229 } else 230 } else
230 pr_warn_ratelimited("%s: error enabling MSI-X for guest %u: err %d!\n", 231 pr_warn_ratelimited("%s: error enabling MSI-X for guest %u: err %d!\n",
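The pciback_ops.c hunk only adds braces, so that both the vector translation and the verbose log line run solely for MSI-X entries that actually received a vector. A self-contained illustration of that loop shape, with simplified types and translate_vector() as a hypothetical stand-in for xen_pirq_from_irq():

#include <stdio.h>

struct entry { int entry; int vector; };

static int translate_vector(int v)	/* hypothetical stand-in for xen_pirq_from_irq() */
{
	return v + 0x10;
}

static void copy_vectors(struct entry *out, const struct entry *in, int nvec)
{
	for (int i = 0; i < nvec; i++) {
		out[i].entry = in[i].entry;
		if (in[i].vector) {		/* only touch assigned vectors */
			out[i].vector = translate_vector(in[i].vector);
			printf("MSI-X[%d]: %d\n", i, out[i].vector);
		}
	}
}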
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 3165ce361b00..51afff96c515 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -137,6 +137,8 @@ unlock:
137 /* Publish this device. */ 137 /* Publish this device. */
138 if (!err) 138 if (!err)
139 err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid); 139 err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
140 else
141 kfree(dev_entry);
140 142
141out: 143out:
142 return err; 144 return err;
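The vpci.c change frees dev_entry when an earlier error means publish_cb() is never reached, closing a leak on that error path. A sketch of the general shape, assuming a hypothetical publish() callback and struct dev_entry rather than the xen-pciback types:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>

struct dev_entry {		/* illustrative, not xen-pciback's */
	struct list_head list;
	void *dev;
};

static int add_and_publish(struct list_head *head, void *dev,
			   int (*publish)(void *dev))
{
	struct dev_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);
	int err;

	if (!e)
		return -ENOMEM;
	e->dev = dev;

	err = publish(dev);
	if (err) {
		kfree(e);		/* nothing took ownership; avoid the leak */
		return err;
	}

	list_add_tail(&e->list, head);	/* ownership passes to the list */
	return 0;
}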
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index b6d5fff43d16..ba804f3d8278 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -50,6 +50,7 @@
50#include <xen/xenbus.h> 50#include <xen/xenbus.h>
51#include <xen/xen.h> 51#include <xen/xen.h>
52#include "xenbus_comms.h" 52#include "xenbus_comms.h"
53#include "xenbus_probe.h"
53 54
54struct xs_stored_msg { 55struct xs_stored_msg {
55 struct list_head list; 56 struct list_head list;
@@ -139,6 +140,29 @@ static int get_error(const char *errorstring)
139 return xsd_errors[i].errnum; 140 return xsd_errors[i].errnum;
140} 141}
141 142
143static bool xenbus_ok(void)
144{
145 switch (xen_store_domain_type) {
146 case XS_LOCAL:
147 switch (system_state) {
148 case SYSTEM_POWER_OFF:
149 case SYSTEM_RESTART:
150 case SYSTEM_HALT:
151 return false;
152 default:
153 break;
154 }
155 return true;
156 case XS_PV:
157 case XS_HVM:
158 /* FIXME: Could check that the remote domain is alive,
159 * but it is normally initial domain. */
160 return true;
161 default:
162 break;
163 }
164 return false;
165}
142static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) 166static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
143{ 167{
144 struct xs_stored_msg *msg; 168 struct xs_stored_msg *msg;
@@ -148,9 +172,20 @@ static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
148 172
149 while (list_empty(&xs_state.reply_list)) { 173 while (list_empty(&xs_state.reply_list)) {
150 spin_unlock(&xs_state.reply_lock); 174 spin_unlock(&xs_state.reply_lock);
151 /* XXX FIXME: Avoid synchronous wait for response here. */ 175 if (xenbus_ok())
152 wait_event(xs_state.reply_waitq, 176 /* XXX FIXME: Avoid synchronous wait for response here. */
153 !list_empty(&xs_state.reply_list)); 177 wait_event_timeout(xs_state.reply_waitq,
178 !list_empty(&xs_state.reply_list),
179 msecs_to_jiffies(500));
180 else {
181 /*
182 * If we are in the process of being shut-down there is
183 * no point of trying to contact XenBus - it is either
184 * killed (xenstored application) or the other domain
185 * has been killed or is unreachable.
186 */
187 return ERR_PTR(-EIO);
188 }
154 spin_lock(&xs_state.reply_lock); 189 spin_lock(&xs_state.reply_lock);
155 } 190 }
156 191
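These xenbus_xs.c hunks teach read_reply() to stop waiting when the store can no longer answer: xenbus_ok() reports whether the local xenstored (or the store domain) should still be alive, the wait becomes a 500 ms wait_event_timeout() so that check is re-evaluated periodically, and read_reply() returns ERR_PTR(-EIO) once the store is gone. A sketch of that bounded-wait pattern, with backend_alive() and have_reply() as illustrative predicates rather than the xenbus implementation:

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

static void *wait_for_reply(wait_queue_head_t *wq,
			    bool (*have_reply)(void),
			    bool (*backend_alive)(void))
{
	while (!have_reply()) {
		if (!backend_alive())
			return ERR_PTR(-EIO);	/* store is gone; don't block forever */
		/* Sleep at most 500 ms so the liveness check runs again. */
		wait_event_timeout(*wq, have_reply(),
				   msecs_to_jiffies(500));
	}
	return NULL;	/* a reply is queued; the caller dequeues it under its lock */
}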
@@ -215,6 +250,9 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
215 250
216 mutex_unlock(&xs_state.request_mutex); 251 mutex_unlock(&xs_state.request_mutex);
217 252
253 if (IS_ERR(ret))
254 return ret;
255
218 if ((msg->type == XS_TRANSACTION_END) || 256 if ((msg->type == XS_TRANSACTION_END) ||
219 ((req_msg.type == XS_TRANSACTION_START) && 257 ((req_msg.type == XS_TRANSACTION_START) &&
220 (msg->type == XS_ERROR))) 258 (msg->type == XS_ERROR)))