aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore3
-rw-r--r--Documentation/clk.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/exynos4415-clock.txt38
-rw-r--r--Documentation/devicetree/bindings/clock/exynos7-clock.txt93
-rw-r--r--Documentation/devicetree/bindings/clock/marvell,mmp2.txt21
-rw-r--r--Documentation/devicetree/bindings/clock/marvell,pxa168.txt21
-rw-r--r--Documentation/devicetree/bindings/clock/marvell,pxa910.txt21
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt18
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt8
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi.txt31
-rw-r--r--MAINTAINERS3
-rw-r--r--Makefile21
-rw-r--r--arch/arm/boot/dts/Makefile3
-rw-r--r--arch/arm/boot/dts/mmp2-brownstone.dts2
-rw-r--r--arch/arm/boot/dts/mmp2.dtsi29
-rw-r--r--arch/arm/boot/dts/pxa168-aspenite.dts2
-rw-r--r--arch/arm/boot/dts/pxa168.dtsi27
-rw-r--r--arch/arm/boot/dts/pxa910-dkb.dts2
-rw-r--r--arch/arm/boot/dts/pxa910.dtsi28
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi12
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi12
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi12
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi12
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi12
-rw-r--r--arch/arm/boot/dts/sun8i-a23.dtsi12
-rw-r--r--arch/arm/configs/ape6evm_defconfig2
-rw-r--r--arch/arm/configs/armadillo800eva_defconfig2
-rw-r--r--arch/arm/configs/bcm_defconfig2
-rw-r--r--arch/arm/configs/bockw_defconfig2
-rw-r--r--arch/arm/configs/davinci_all_defconfig2
-rw-r--r--arch/arm/configs/exynos_defconfig2
-rw-r--r--arch/arm/configs/ezx_defconfig1
-rw-r--r--arch/arm/configs/hisi_defconfig2
-rw-r--r--arch/arm/configs/imote2_defconfig1
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig2
-rw-r--r--arch/arm/configs/keystone_defconfig2
-rw-r--r--arch/arm/configs/kzm9g_defconfig2
-rw-r--r--arch/arm/configs/lager_defconfig2
-rw-r--r--arch/arm/configs/mackerel_defconfig1
-rw-r--r--arch/arm/configs/marzen_defconfig2
-rw-r--r--arch/arm/configs/omap1_defconfig1
-rw-r--r--arch/arm/configs/prima2_defconfig2
-rw-r--r--arch/arm/configs/sama5_defconfig2
-rw-r--r--arch/arm/configs/shmobile_defconfig2
-rw-r--r--arch/arm/configs/sunxi_defconfig2
-rw-r--r--arch/arm/configs/tegra_defconfig2
-rw-r--r--arch/arm/configs/u8500_defconfig2
-rw-r--r--arch/arm/configs/vt8500_v6_v7_defconfig2
-rw-r--r--arch/arm/include/asm/spinlock.h4
-rw-r--r--arch/arm/mach-mmp/Kconfig12
-rw-r--r--arch/arm/mach-mmp/mmp-dt.c57
-rw-r--r--arch/arm/mach-mmp/mmp2-dt.c26
-rw-r--r--arch/arm/mach-omap2/Kconfig6
-rw-r--r--arch/arm/mach-omap2/cclock3xxx_data.c23
-rw-r--r--arch/arm/mach-omap2/dpll3xxx.c6
-rw-r--r--arch/arm/mach-omap2/dpll44xx.c6
-rw-r--r--arch/arm64/include/asm/spinlock.h4
-rw-r--r--arch/cris/arch-v10/lib/usercopy.c14
-rw-r--r--arch/cris/arch-v32/drivers/Kconfig8
-rw-r--r--arch/cris/arch-v32/drivers/Makefile1
-rw-r--r--arch/cris/arch-v32/drivers/i2c.h1
-rw-r--r--arch/cris/arch-v32/drivers/sync_serial.c1430
-rw-r--r--arch/cris/arch-v32/kernel/debugport.c82
-rw-r--r--arch/cris/arch-v32/kernel/time.c29
-rw-r--r--arch/cris/arch-v32/lib/usercopy.c15
-rw-r--r--arch/cris/arch-v32/mach-fs/pinmux.c152
-rw-r--r--arch/cris/include/arch-v32/mach-fs/mach/pinmux.h2
-rw-r--r--arch/cris/include/asm/Kbuild4
-rw-r--r--arch/cris/include/uapi/asm/Kbuild4
-rw-r--r--arch/cris/kernel/crisksyms.c9
-rw-r--r--arch/cris/kernel/traps.c61
-rw-r--r--arch/cris/mm/init.c38
-rw-r--r--arch/cris/mm/ioremap.c3
-rw-r--r--arch/hexagon/include/asm/cache.h4
-rw-r--r--arch/hexagon/include/asm/cacheflush.h36
-rw-r--r--arch/hexagon/include/asm/io.h5
-rw-r--r--arch/hexagon/kernel/setup.c1
-rw-r--r--arch/hexagon/kernel/traps.c4
-rw-r--r--arch/hexagon/kernel/vmlinux.lds.S4
-rw-r--r--arch/hexagon/mm/cache.c10
-rw-r--r--arch/hexagon/mm/ioremap.c1
-rw-r--r--arch/ia64/include/asm/percpu.h4
-rw-r--r--arch/mips/alchemy/common/clock.c10
-rw-r--r--arch/mips/configs/db1xxx_defconfig2
-rw-r--r--arch/mips/configs/lemote2f_defconfig1
-rw-r--r--arch/mips/configs/loongson3_defconfig2
-rw-r--r--arch/mips/configs/nlm_xlp_defconfig2
-rw-r--r--arch/mips/configs/nlm_xlr_defconfig2
-rw-r--r--arch/mips/mm/gup.c2
-rw-r--r--arch/powerpc/configs/ps3_defconfig2
-rw-r--r--arch/s390/kvm/gaccess.c18
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sh/configs/apsh4ad0a_defconfig2
-rw-r--r--arch/sh/configs/sdk7786_defconfig2
-rw-r--r--arch/x86/boot/Makefile1
-rw-r--r--arch/x86/include/asm/spinlock.h8
-rw-r--r--arch/x86/kernel/cpu/Makefile1
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.sh2
-rw-r--r--arch/x86/mm/gup.c2
-rw-r--r--arch/x86/mm/init.c37
-rw-r--r--drivers/clk/at91/clk-programmable.c4
-rw-r--r--drivers/clk/bcm/clk-kona.c4
-rw-r--r--drivers/clk/clk-composite.c9
-rw-r--r--drivers/clk/clk-mux.c2
-rw-r--r--drivers/clk/clk-s2mps11.c2
-rw-r--r--drivers/clk/clk.c42
-rw-r--r--drivers/clk/hisilicon/clk-hi3620.c72
-rw-r--r--drivers/clk/mmp/Makefile7
-rw-r--r--drivers/clk/mmp/clk-frac.c74
-rw-r--r--drivers/clk/mmp/clk-gate.c133
-rw-r--r--drivers/clk/mmp/clk-mix.c513
-rw-r--r--drivers/clk/mmp/clk-mmp2.c6
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c334
-rw-r--r--drivers/clk/mmp/clk-of-pxa168.c279
-rw-r--r--drivers/clk/mmp/clk-of-pxa910.c301
-rw-r--r--drivers/clk/mmp/clk-pxa168.c6
-rw-r--r--drivers/clk/mmp/clk-pxa910.c6
-rw-r--r--drivers/clk/mmp/clk.c192
-rw-r--r--drivers/clk/mmp/clk.h226
-rw-r--r--drivers/clk/mmp/reset.c99
-rw-r--r--drivers/clk/mmp/reset.h31
-rw-r--r--drivers/clk/pxa/Makefile1
-rw-r--r--drivers/clk/pxa/clk-pxa.c45
-rw-r--r--drivers/clk/pxa/clk-pxa.h9
-rw-r--r--drivers/clk/pxa/clk-pxa25x.c273
-rw-r--r--drivers/clk/pxa/clk-pxa27x.c9
-rw-r--r--drivers/clk/qcom/clk-pll.c2
-rw-r--r--drivers/clk/qcom/clk-rcg.c20
-rw-r--r--drivers/clk/qcom/clk-rcg2.c28
-rw-r--r--drivers/clk/rockchip/Makefile1
-rw-r--r--drivers/clk/rockchip/clk-mmc-phase.c154
-rw-r--r--drivers/clk/rockchip/clk-pll.c81
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c79
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c246
-rw-r--r--drivers/clk/rockchip/clk.c20
-rw-r--r--drivers/clk/rockchip/clk.h45
-rw-r--r--drivers/clk/samsung/Makefile2
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c33
-rw-r--r--drivers/clk/samsung/clk-exynos4.c2
-rw-r--r--drivers/clk/samsung/clk-exynos4415.c1144
-rw-r--r--drivers/clk/samsung/clk-exynos5260.c185
-rw-r--r--drivers/clk/samsung/clk-exynos7.c743
-rw-r--r--drivers/clk/samsung/clk-pll.c25
-rw-r--r--drivers/clk/samsung/clk-pll.h4
-rw-r--r--drivers/clk/samsung/clk.c102
-rw-r--r--drivers/clk/samsung/clk.h43
-rw-r--r--drivers/clk/shmobile/clk-div6.c113
-rw-r--r--drivers/clk/sunxi/Makefile1
-rw-r--r--drivers/clk/sunxi/clk-a20-gmac.c7
-rw-r--r--drivers/clk/sunxi/clk-factors.c6
-rw-r--r--drivers/clk/sunxi/clk-factors.h3
-rw-r--r--drivers/clk/sunxi/clk-mod0.c1
-rw-r--r--drivers/clk/sunxi/clk-sun6i-ar100.c4
-rw-r--r--drivers/clk/sunxi/clk-sun8i-mbus.c1
-rw-r--r--drivers/clk/sunxi/clk-sun9i-core.c271
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c85
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c328
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c112
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c1599
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h80
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c1
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c10
-rw-r--r--drivers/macintosh/Kconfig10
-rw-r--r--drivers/macintosh/Makefile1
-rw-r--r--drivers/macintosh/therm_pm72.c2278
-rw-r--r--drivers/macintosh/therm_pm72.h326
-rw-r--r--drivers/scsi/53c700.c41
-rw-r--r--drivers/scsi/Kconfig17
-rw-r--r--drivers/scsi/advansys.c8
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c6
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c5
-rw-r--r--drivers/scsi/esas2r/esas2r_flash.c4
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c1
-rw-r--r--drivers/scsi/fcoe/fcoe.c1
-rw-r--r--drivers/scsi/fnic/fnic_main.c1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/ipr.c116
-rw-r--r--drivers/scsi/ipr.h4
-rw-r--r--drivers/scsi/isci/init.c1
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c5
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c5
-rw-r--r--drivers/scsi/mvsas/mv_init.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c1
-rw-r--r--drivers/scsi/pmcraid.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c12
-rw-r--r--drivers/scsi/scsi.c22
-rw-r--r--drivers/scsi/scsi_debug.c62
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_lib.c4
-rw-r--r--drivers/scsi/scsi_sysfs.c30
-rw-r--r--drivers/scsi/scsi_transport_spi.c2
-rw-r--r--drivers/scsi/storvsc_drv.c7
-rw-r--r--drivers/spi/spi-img-spfi.c4
-rw-r--r--drivers/spi/spi-meson-spifc.c4
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_compat25.h24
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c17
-rw-r--r--drivers/target/iscsi/iscsi_target.c15
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h1
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c11
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_transport.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c26
-rw-r--r--drivers/target/loopback/tcm_loop.c71
-rw-r--r--drivers/target/loopback/tcm_loop.h7
-rw-r--r--drivers/target/sbp/sbp_target.c2
-rw-r--r--drivers/target/target_core_configfs.c344
-rw-r--r--drivers/target/target_core_device.c90
-rw-r--r--drivers/target/target_core_file.c42
-rw-r--r--drivers/target/target_core_hba.c7
-rw-r--r--drivers/target/target_core_iblock.c42
-rw-r--r--drivers/target/target_core_internal.h28
-rw-r--r--drivers/target/target_core_pr.c125
-rw-r--r--drivers/target/target_core_pscsi.c28
-rw-r--r--drivers/target/target_core_rd.c41
-rw-r--r--drivers/target/target_core_sbc.c2
-rw-r--r--drivers/target/target_core_spc.c4
-rw-r--r--drivers/target/target_core_transport.c16
-rw-r--r--drivers/target/target_core_user.c42
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c8
-rw-r--r--drivers/tty/serial/8250/8250_omap.c14
-rw-r--r--drivers/usb/gadget/legacy/tcm_usb_gadget.c10
-rw-r--r--drivers/usb/host/isp1760-hcd.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c2
-rw-r--r--drivers/xen/xen-scsiback.c2
-rw-r--r--fs/binfmt_misc.c7
-rw-r--r--fs/btrfs/ctree.h4
-rw-r--r--fs/btrfs/disk-io.c6
-rw-r--r--fs/btrfs/extent-tree.c23
-rw-r--r--fs/btrfs/free-space-cache.c12
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/ecryptfs/crypto.c1
-rw-r--r--fs/ecryptfs/file.c12
-rw-r--r--fs/ecryptfs/keystore.c6
-rw-r--r--fs/ecryptfs/main.c16
-rw-r--r--fs/ext4/move_extent.c4
-rw-r--r--fs/kernfs/file.c22
-rw-r--r--fs/proc_namespace.c16
-rw-r--r--include/dt-bindings/clock/exynos4415.h360
-rw-r--r--include/dt-bindings/clock/exynos7-clk.h92
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h74
-rw-r--r--include/dt-bindings/clock/marvell,pxa168.h57
-rw-r--r--include/dt-bindings/clock/marvell,pxa910.h54
-rw-r--r--include/dt-bindings/clock/rk3288-cru.h13
-rw-r--r--include/linux/clk-provider.h8
-rw-r--r--include/linux/clk/ti.h4
-rw-r--r--include/linux/compiler.h74
-rw-r--r--include/linux/devfreq.h2
-rw-r--r--include/linux/migrate.h10
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/uio.h5
-rw-r--r--include/scsi/libsas.h1
-rw-r--r--include/scsi/scsi_host.h13
-rw-r--r--include/scsi/scsi_tcq.h36
-rw-r--r--include/target/target_core_backend.h43
-rw-r--r--include/target/target_core_backend_configfs.h120
-rw-r--r--include/target/target_core_base.h6
-rw-r--r--include/trace/events/target.h8
-rw-r--r--include/uapi/linux/target_core_user.h4
-rw-r--r--init/do_mounts.c6
-rw-r--r--kernel/power/Kconfig16
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/gup.c2
-rw-r--r--mm/memory.c11
-rw-r--r--mm/mempolicy.c5
-rw-r--r--mm/migrate.c21
-rw-r--r--mm/rmap.c3
-rw-r--r--mm/shmem.c2
-rw-r--r--net/socket.c1
-rw-r--r--scripts/Kbuild.include12
-rw-r--r--scripts/Makefile.clean10
-rw-r--r--scripts/Makefile.headersinst1
-rw-r--r--scripts/coccinelle/misc/bugon.cocci2
-rwxr-xr-xscripts/headers.sh2
-rw-r--r--scripts/kconfig/menu.c4
-rwxr-xr-xscripts/package/mkspec4
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c6
-rw-r--r--sound/firewire/oxfw/oxfw-proc.c2
-rw-r--r--sound/firewire/oxfw/oxfw-stream.c3
-rw-r--r--sound/firewire/oxfw/oxfw.c2
-rw-r--r--sound/pci/asihpi/hpi_internal.h6
-rw-r--r--sound/pci/asihpi/hpi_version.h6
-rw-r--r--sound/pci/asihpi/hpidspcd.c26
-rw-r--r--sound/pci/hda/hda_generic.c10
-rw-r--r--sound/pci/hda/hda_generic.h9
-rw-r--r--sound/pci/hda/hda_sysfs.c2
-rw-r--r--sound/pci/hda/patch_analog.c42
-rw-r--r--sound/pci/hda/patch_conexant.c4
-rw-r--r--sound/pci/hda/patch_hdmi.c6
-rw-r--r--sound/pci/hda/patch_realtek.c15
-rw-r--r--sound/pci/hda/patch_via.c2
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c2
-rw-r--r--sound/soc/codecs/Kconfig2
-rw-r--r--sound/soc/codecs/pcm512x-i2c.c7
-rw-r--r--sound/soc/codecs/rt5645.c4
-rw-r--r--sound/soc/intel/sst-haswell-pcm.c4
-rw-r--r--sound/soc/intel/sst/sst_acpi.c10
-rw-r--r--sound/soc/samsung/i2s.c2
-rw-r--r--sound/usb/mixer_maps.c15
-rw-r--r--sound/usb/mixer_scarlett.c2
-rw-r--r--sound/usb/quirks.c5
310 files changed, 10066 insertions, 6290 deletions
diff --git a/.gitignore b/.gitignore
index e213b27f3921..ce57b79670a5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -96,3 +96,6 @@ x509.genkey
96 96
97# Kconfig presets 97# Kconfig presets
98all.config 98all.config
99
100# Kdevelop4
101*.kdev4
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index 1fee72f4d331..4ff84623d5e1 100644
--- a/Documentation/clk.txt
+++ b/Documentation/clk.txt
@@ -74,7 +74,7 @@ the operations defined in clk.h:
74 long (*determine_rate)(struct clk_hw *hw, 74 long (*determine_rate)(struct clk_hw *hw,
75 unsigned long rate, 75 unsigned long rate,
76 unsigned long *best_parent_rate, 76 unsigned long *best_parent_rate,
77 struct clk **best_parent_clk); 77 struct clk_hw **best_parent_clk);
78 int (*set_parent)(struct clk_hw *hw, u8 index); 78 int (*set_parent)(struct clk_hw *hw, u8 index);
79 u8 (*get_parent)(struct clk_hw *hw); 79 u8 (*get_parent)(struct clk_hw *hw);
80 int (*set_rate)(struct clk_hw *hw, 80 int (*set_rate)(struct clk_hw *hw,
diff --git a/Documentation/devicetree/bindings/clock/exynos4415-clock.txt b/Documentation/devicetree/bindings/clock/exynos4415-clock.txt
new file mode 100644
index 000000000000..847d98bae8cf
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/exynos4415-clock.txt
@@ -0,0 +1,38 @@
1* Samsung Exynos4415 Clock Controller
2
3The Exynos4415 clock controller generates and supplies clock to various
4consumer devices within the Exynos4415 SoC.
5
6Required properties:
7
8- compatible: should be one of the following:
9 - "samsung,exynos4415-cmu" - for the main system clocks controller
10 (CMU_LEFTBUS, CMU_RIGHTBUS, CMU_TOP, CMU_CPU clock domains).
11 - "samsung,exynos4415-cmu-dmc" - for the Exynos4415 SoC DRAM Memory
12 Controller (DMC) domain clock controller.
13
14- reg: physical base address of the controller and length of memory mapped
15 region.
16
17- #clock-cells: should be 1.
18
19Each clock is assigned an identifier and client nodes can use this identifier
20to specify the clock which they consume.
21
22All available clocks are defined as preprocessor macros in
23dt-bindings/clock/exynos4415.h header and can be used in device
24tree sources.
25
26Example 1: An example of a clock controller node is listed below.
27
28 cmu: clock-controller@10030000 {
29 compatible = "samsung,exynos4415-cmu";
30 reg = <0x10030000 0x18000>;
31 #clock-cells = <1>;
32 };
33
34 cmu-dmc: clock-controller@105C0000 {
35 compatible = "samsung,exynos4415-cmu-dmc";
36 reg = <0x105C0000 0x3000>;
37 #clock-cells = <1>;
38 };
diff --git a/Documentation/devicetree/bindings/clock/exynos7-clock.txt b/Documentation/devicetree/bindings/clock/exynos7-clock.txt
new file mode 100644
index 000000000000..6d3d5f80c1c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/exynos7-clock.txt
@@ -0,0 +1,93 @@
1* Samsung Exynos7 Clock Controller
2
3Exynos7 clock controller has various blocks which are instantiated
4independently from the device-tree. These clock controllers
5generate and supply clocks to various hardware blocks within
6the SoC.
7
8Each clock is assigned an identifier and client nodes can use
9this identifier to specify the clock which they consume. All
10available clocks are defined as preprocessor macros in
11dt-bindings/clock/exynos7-clk.h header and can be used in
12device tree sources.
13
14External clocks:
15
16There are several clocks that are generated outside the SoC. It
17is expected that they are defined using standard clock bindings
18with following clock-output-names:
19
20 - "fin_pll" - PLL input clock from XXTI
21
22Required Properties for Clock Controller:
23
24 - compatible: clock controllers will use one of the following
25 compatible strings to indicate the clock controller
26 functionality.
27
28 - "samsung,exynos7-clock-topc"
29 - "samsung,exynos7-clock-top0"
30 - "samsung,exynos7-clock-top1"
31 - "samsung,exynos7-clock-ccore"
32 - "samsung,exynos7-clock-peric0"
33 - "samsung,exynos7-clock-peric1"
34 - "samsung,exynos7-clock-peris"
35 - "samsung,exynos7-clock-fsys0"
36 - "samsung,exynos7-clock-fsys1"
37
38 - reg: physical base address of the controller and the length of
39 memory mapped region.
40
41 - #clock-cells: should be 1.
42
43 - clocks: list of clock identifiers which are fed as the input to
44 the given clock controller. Please refer the next section to
45 find the input clocks for a given controller.
46
47- clock-names: list of names of clocks which are fed as the input
48 to the given clock controller.
49
50Input clocks for top0 clock controller:
51 - fin_pll
52 - dout_sclk_bus0_pll
53 - dout_sclk_bus1_pll
54 - dout_sclk_cc_pll
55 - dout_sclk_mfc_pll
56
57Input clocks for top1 clock controller:
58 - fin_pll
59 - dout_sclk_bus0_pll
60 - dout_sclk_bus1_pll
61 - dout_sclk_cc_pll
62 - dout_sclk_mfc_pll
63
64Input clocks for ccore clock controller:
65 - fin_pll
66 - dout_aclk_ccore_133
67
68Input clocks for peric0 clock controller:
69 - fin_pll
70 - dout_aclk_peric0_66
71 - sclk_uart0
72
73Input clocks for peric1 clock controller:
74 - fin_pll
75 - dout_aclk_peric1_66
76 - sclk_uart1
77 - sclk_uart2
78 - sclk_uart3
79
80Input clocks for peris clock controller:
81 - fin_pll
82 - dout_aclk_peris_66
83
84Input clocks for fsys0 clock controller:
85 - fin_pll
86 - dout_aclk_fsys0_200
87 - dout_sclk_mmc2
88
89Input clocks for fsys1 clock controller:
90 - fin_pll
91 - dout_aclk_fsys1_200
92 - dout_sclk_mmc0
93 - dout_sclk_mmc1
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
new file mode 100644
index 000000000000..af376a01f2b7
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
@@ -0,0 +1,21 @@
1* Marvell MMP2 Clock Controller
2
3The MMP2 clock subsystem generates and supplies clock to various
4controllers within the MMP2 SoC.
5
6Required Properties:
7
8- compatible: should be one of the following.
9 - "marvell,mmp2-clock" - controller compatible with MMP2 SoC.
10
11- reg: physical base address of the clock subsystem and length of memory mapped
 12 region. There are 3 places in the SoC that have clock control logic:
13 "mpmu", "apmu", "apbc". So three reg spaces need to be defined.
14
15- #clock-cells: should be 1.
16- #reset-cells: should be 1.
17
18Each clock is assigned an identifier and client nodes use this identifier
19to specify the clock which they consume.
20
 21All these identifiers can be found in <dt-bindings/clock/marvell,mmp2.h>.
diff --git a/Documentation/devicetree/bindings/clock/marvell,pxa168.txt b/Documentation/devicetree/bindings/clock/marvell,pxa168.txt
new file mode 100644
index 000000000000..c62eb1d173a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell,pxa168.txt
@@ -0,0 +1,21 @@
1* Marvell PXA168 Clock Controller
2
3The PXA168 clock subsystem generates and supplies clock to various
4controllers within the PXA168 SoC.
5
6Required Properties:
7
8- compatible: should be one of the following.
9 - "marvell,pxa168-clock" - controller compatible with PXA168 SoC.
10
11- reg: physical base address of the clock subsystem and length of memory mapped
 12 region. There are 3 places in the SoC that have clock control logic:
13 "mpmu", "apmu", "apbc". So three reg spaces need to be defined.
14
15- #clock-cells: should be 1.
16- #reset-cells: should be 1.
17
18Each clock is assigned an identifier and client nodes use this identifier
19to specify the clock which they consume.
20
 21All these identifiers can be found in <dt-bindings/clock/marvell,pxa168.h>.
diff --git a/Documentation/devicetree/bindings/clock/marvell,pxa910.txt b/Documentation/devicetree/bindings/clock/marvell,pxa910.txt
new file mode 100644
index 000000000000..d9f41f3c03a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/marvell,pxa910.txt
@@ -0,0 +1,21 @@
1* Marvell PXA910 Clock Controller
2
3The PXA910 clock subsystem generates and supplies clock to various
4controllers within the PXA910 SoC.
5
6Required Properties:
7
8- compatible: should be one of the following.
9 - "marvell,pxa910-clock" - controller compatible with PXA910 SoC.
10
11- reg: physical base address of the clock subsystem and length of memory mapped
 12 region. There are 4 places in the SoC that have clock control logic:
13 "mpmu", "apmu", "apbc", "apbcp". So four reg spaces need to be defined.
14
15- #clock-cells: should be 1.
16- #reset-cells: should be 1.
17
18Each clock is assigned an identifier and client nodes use this identifier
19to specify the clock which they consume.
20
 21All these identifiers can be found in <dt-bindings/clock/marvell,pxa910.h>.
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt
index 952e373178d2..054f65f9319c 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt
@@ -7,11 +7,16 @@ to 64.
7Required Properties: 7Required Properties:
8 8
9 - compatible: Must be one of the following 9 - compatible: Must be one of the following
10 - "renesas,r8a73a4-div6-clock" for R8A73A4 (R-Mobile APE6) DIV6 clocks
11 - "renesas,r8a7740-div6-clock" for R8A7740 (R-Mobile A1) DIV6 clocks
10 - "renesas,r8a7790-div6-clock" for R8A7790 (R-Car H2) DIV6 clocks 12 - "renesas,r8a7790-div6-clock" for R8A7790 (R-Car H2) DIV6 clocks
11 - "renesas,r8a7791-div6-clock" for R8A7791 (R-Car M2) DIV6 clocks 13 - "renesas,r8a7791-div6-clock" for R8A7791 (R-Car M2) DIV6 clocks
14 - "renesas,sh73a0-div6-clock" for SH73A0 (SH-Mobile AG5) DIV6 clocks
12 - "renesas,cpg-div6-clock" for generic DIV6 clocks 15 - "renesas,cpg-div6-clock" for generic DIV6 clocks
13 - reg: Base address and length of the memory resource used by the DIV6 clock 16 - reg: Base address and length of the memory resource used by the DIV6 clock
14 - clocks: Reference to the parent clock 17 - clocks: Reference to the parent clock(s); either one, four, or eight
18 clocks must be specified. For clocks with multiple parents, invalid
19 settings must be specified as "<0>".
15 - #clock-cells: Must be 0 20 - #clock-cells: Must be 0
16 - clock-output-names: The name of the clock as a free-form string 21 - clock-output-names: The name of the clock as a free-form string
17 22
@@ -19,10 +24,11 @@ Required Properties:
19Example 24Example
20------- 25-------
21 26
22 sd2_clk: sd2_clk@e6150078 { 27 sdhi2_clk: sdhi2_clk@e615007c {
23 compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock"; 28 compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
24 reg = <0 0xe6150078 0 4>; 29 reg = <0 0xe615007c 0 4>;
25 clocks = <&pll1_div2_clk>; 30 clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
31 <0>, <&extal2_clk>;
26 #clock-cells = <0>; 32 #clock-cells = <0>;
27 clock-output-names = "sd2"; 33 clock-output-names = "sdhi2ck";
28 }; 34 };
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
index a5f52238c80d..2e18676bd4b5 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
@@ -26,11 +26,11 @@ Required Properties:
26 must appear in the same order as the output clocks. 26 must appear in the same order as the output clocks.
27 - #clock-cells: Must be 1 27 - #clock-cells: Must be 1
28 - clock-output-names: The name of the clocks as free-form strings 28 - clock-output-names: The name of the clocks as free-form strings
29 - renesas,clock-indices: Indices of the gate clocks into the group (0 to 31) 29 - clock-indices: Indices of the gate clocks into the group (0 to 31)
30 30
31The clocks, clock-output-names and renesas,clock-indices properties contain one 31The clocks, clock-output-names and clock-indices properties contain one entry
32entry per gate clock. The MSTP groups are sparsely populated. Unimplemented 32per gate clock. The MSTP groups are sparsely populated. Unimplemented gate
33gate clocks must not be declared. 33clocks must not be declared.
34 34
35 35
36Example 36Example
diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt
index ed116df9c3e7..67b2b99f2b33 100644
--- a/Documentation/devicetree/bindings/clock/sunxi.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi.txt
@@ -10,14 +10,17 @@ Required properties:
10 "allwinner,sun4i-a10-pll1-clk" - for the main PLL clock and PLL4 10 "allwinner,sun4i-a10-pll1-clk" - for the main PLL clock and PLL4
11 "allwinner,sun6i-a31-pll1-clk" - for the main PLL clock on A31 11 "allwinner,sun6i-a31-pll1-clk" - for the main PLL clock on A31
12 "allwinner,sun8i-a23-pll1-clk" - for the main PLL clock on A23 12 "allwinner,sun8i-a23-pll1-clk" - for the main PLL clock on A23
13 "allwinner,sun9i-a80-pll4-clk" - for the peripheral PLLs on A80
13 "allwinner,sun4i-a10-pll5-clk" - for the PLL5 clock 14 "allwinner,sun4i-a10-pll5-clk" - for the PLL5 clock
14 "allwinner,sun4i-a10-pll6-clk" - for the PLL6 clock 15 "allwinner,sun4i-a10-pll6-clk" - for the PLL6 clock
15 "allwinner,sun6i-a31-pll6-clk" - for the PLL6 clock on A31 16 "allwinner,sun6i-a31-pll6-clk" - for the PLL6 clock on A31
17 "allwinner,sun9i-a80-gt-clk" - for the GT bus clock on A80
16 "allwinner,sun4i-a10-cpu-clk" - for the CPU multiplexer clock 18 "allwinner,sun4i-a10-cpu-clk" - for the CPU multiplexer clock
17 "allwinner,sun4i-a10-axi-clk" - for the AXI clock 19 "allwinner,sun4i-a10-axi-clk" - for the AXI clock
18 "allwinner,sun8i-a23-axi-clk" - for the AXI clock on A23 20 "allwinner,sun8i-a23-axi-clk" - for the AXI clock on A23
19 "allwinner,sun4i-a10-axi-gates-clk" - for the AXI gates 21 "allwinner,sun4i-a10-axi-gates-clk" - for the AXI gates
20 "allwinner,sun4i-a10-ahb-clk" - for the AHB clock 22 "allwinner,sun4i-a10-ahb-clk" - for the AHB clock
23 "allwinner,sun9i-a80-ahb-clk" - for the AHB bus clocks on A80
21 "allwinner,sun4i-a10-ahb-gates-clk" - for the AHB gates on A10 24 "allwinner,sun4i-a10-ahb-gates-clk" - for the AHB gates on A10
22 "allwinner,sun5i-a13-ahb-gates-clk" - for the AHB gates on A13 25 "allwinner,sun5i-a13-ahb-gates-clk" - for the AHB gates on A13
23 "allwinner,sun5i-a10s-ahb-gates-clk" - for the AHB gates on A10s 26 "allwinner,sun5i-a10s-ahb-gates-clk" - for the AHB gates on A10s
@@ -26,24 +29,29 @@ Required properties:
26 "allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31 29 "allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31
27 "allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31 30 "allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31
28 "allwinner,sun8i-a23-ahb1-gates-clk" - for the AHB1 gates on A23 31 "allwinner,sun8i-a23-ahb1-gates-clk" - for the AHB1 gates on A23
32 "allwinner,sun9i-a80-ahb0-gates-clk" - for the AHB0 gates on A80
33 "allwinner,sun9i-a80-ahb1-gates-clk" - for the AHB1 gates on A80
34 "allwinner,sun9i-a80-ahb2-gates-clk" - for the AHB2 gates on A80
29 "allwinner,sun4i-a10-apb0-clk" - for the APB0 clock 35 "allwinner,sun4i-a10-apb0-clk" - for the APB0 clock
30 "allwinner,sun6i-a31-apb0-clk" - for the APB0 clock on A31 36 "allwinner,sun6i-a31-apb0-clk" - for the APB0 clock on A31
31 "allwinner,sun8i-a23-apb0-clk" - for the APB0 clock on A23 37 "allwinner,sun8i-a23-apb0-clk" - for the APB0 clock on A23
38 "allwinner,sun9i-a80-apb0-clk" - for the APB0 bus clock on A80
32 "allwinner,sun4i-a10-apb0-gates-clk" - for the APB0 gates on A10 39 "allwinner,sun4i-a10-apb0-gates-clk" - for the APB0 gates on A10
33 "allwinner,sun5i-a13-apb0-gates-clk" - for the APB0 gates on A13 40 "allwinner,sun5i-a13-apb0-gates-clk" - for the APB0 gates on A13
34 "allwinner,sun5i-a10s-apb0-gates-clk" - for the APB0 gates on A10s 41 "allwinner,sun5i-a10s-apb0-gates-clk" - for the APB0 gates on A10s
35 "allwinner,sun6i-a31-apb0-gates-clk" - for the APB0 gates on A31 42 "allwinner,sun6i-a31-apb0-gates-clk" - for the APB0 gates on A31
36 "allwinner,sun7i-a20-apb0-gates-clk" - for the APB0 gates on A20 43 "allwinner,sun7i-a20-apb0-gates-clk" - for the APB0 gates on A20
37 "allwinner,sun8i-a23-apb0-gates-clk" - for the APB0 gates on A23 44 "allwinner,sun8i-a23-apb0-gates-clk" - for the APB0 gates on A23
45 "allwinner,sun9i-a80-apb0-gates-clk" - for the APB0 gates on A80
38 "allwinner,sun4i-a10-apb1-clk" - for the APB1 clock 46 "allwinner,sun4i-a10-apb1-clk" - for the APB1 clock
39 "allwinner,sun4i-a10-apb1-mux-clk" - for the APB1 clock muxing 47 "allwinner,sun9i-a80-apb1-clk" - for the APB1 bus clock on A80
40 "allwinner,sun4i-a10-apb1-gates-clk" - for the APB1 gates on A10 48 "allwinner,sun4i-a10-apb1-gates-clk" - for the APB1 gates on A10
41 "allwinner,sun5i-a13-apb1-gates-clk" - for the APB1 gates on A13 49 "allwinner,sun5i-a13-apb1-gates-clk" - for the APB1 gates on A13
42 "allwinner,sun5i-a10s-apb1-gates-clk" - for the APB1 gates on A10s 50 "allwinner,sun5i-a10s-apb1-gates-clk" - for the APB1 gates on A10s
43 "allwinner,sun6i-a31-apb1-gates-clk" - for the APB1 gates on A31 51 "allwinner,sun6i-a31-apb1-gates-clk" - for the APB1 gates on A31
44 "allwinner,sun7i-a20-apb1-gates-clk" - for the APB1 gates on A20 52 "allwinner,sun7i-a20-apb1-gates-clk" - for the APB1 gates on A20
45 "allwinner,sun8i-a23-apb1-gates-clk" - for the APB1 gates on A23 53 "allwinner,sun8i-a23-apb1-gates-clk" - for the APB1 gates on A23
46 "allwinner,sun6i-a31-apb2-div-clk" - for the APB2 gates on A31 54 "allwinner,sun9i-a80-apb1-gates-clk" - for the APB1 gates on A80
47 "allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31 55 "allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
48 "allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23 56 "allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
49 "allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13 57 "allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13
@@ -63,8 +71,9 @@ Required properties for all clocks:
63 multiplexed clocks, the list order must match the hardware 71 multiplexed clocks, the list order must match the hardware
64 programming order. 72 programming order.
65- #clock-cells : from common clock binding; shall be set to 0 except for 73- #clock-cells : from common clock binding; shall be set to 0 except for
66 "allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk" and 74 the following compatibles where it shall be set to 1:
67 "allwinner,sun4i-pll6-clk" where it shall be set to 1 75 "allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk",
76 "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk"
68- clock-output-names : shall be the corresponding names of the outputs. 77- clock-output-names : shall be the corresponding names of the outputs.
69 If the clock module only has one output, the name shall be the 78 If the clock module only has one output, the name shall be the
70 module name. 79 module name.
@@ -79,6 +88,12 @@ Clock consumers should specify the desired clocks they use with a
79"clocks" phandle cell. Consumers that are using a gated clock should 88"clocks" phandle cell. Consumers that are using a gated clock should
80provide an additional ID in their clock property. This ID is the 89provide an additional ID in their clock property. This ID is the
81offset of the bit controlling this particular gate in the register. 90offset of the bit controlling this particular gate in the register.
91For the other clocks with "#clock-cells" = 1, the additional ID shall
92refer to the index of the output.
93
94For "allwinner,sun6i-a31-pll6-clk", there are 2 outputs. The first output
95is the normal PLL6 output, or "pll6". The second output is rate doubled
96PLL6, or "pll6x2".
82 97
83For example: 98For example:
84 99
@@ -106,6 +121,14 @@ pll5: clk@01c20020 {
106 clock-output-names = "pll5_ddr", "pll5_other"; 121 clock-output-names = "pll5_ddr", "pll5_other";
107}; 122};
108 123
124pll6: clk@01c20028 {
125 #clock-cells = <1>;
126 compatible = "allwinner,sun6i-a31-pll6-clk";
127 reg = <0x01c20028 0x4>;
128 clocks = <&osc24M>;
129 clock-output-names = "pll6", "pll6x2";
130};
131
109cpu: cpu@01c20054 { 132cpu: cpu@01c20054 {
110 #clock-cells = <0>; 133 #clock-cells = <0>;
111 compatible = "allwinner,sun4i-a10-cpu-clk"; 134 compatible = "allwinner,sun4i-a10-cpu-clk";
diff --git a/MAINTAINERS b/MAINTAINERS
index 08f671dad3e9..ddb9ac8d32b3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2576,8 +2576,9 @@ F: drivers/media/platform/coda/
2576 2576
2577COMMON CLK FRAMEWORK 2577COMMON CLK FRAMEWORK
2578M: Mike Turquette <mturquette@linaro.org> 2578M: Mike Turquette <mturquette@linaro.org>
2579M: Stephen Boyd <sboyd@codeaurora.org>
2579L: linux-kernel@vger.kernel.org 2580L: linux-kernel@vger.kernel.org
2580T: git git://git.linaro.org/people/mturquette/linux.git 2581T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
2581S: Maintained 2582S: Maintained
2582F: drivers/clk/ 2583F: drivers/clk/
2583X: drivers/clk/clkdev.c 2584X: drivers/clk/clkdev.c
diff --git a/Makefile b/Makefile
index fd80c6e9bc23..b1c3254441f3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 18 2PATCHLEVEL = 19
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = 4EXTRAVERSION = -rc1
5NAME = Diseased Newt 5NAME = Diseased Newt
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -481,9 +481,10 @@ asm-generic:
481# of make so .config is not included in this case either (for *config). 481# of make so .config is not included in this case either (for *config).
482 482
483version_h := include/generated/uapi/linux/version.h 483version_h := include/generated/uapi/linux/version.h
484old_version_h := include/linux/version.h
484 485
485no-dot-config-targets := clean mrproper distclean \ 486no-dot-config-targets := clean mrproper distclean \
486 cscope gtags TAGS tags help %docs check% coccicheck \ 487 cscope gtags TAGS tags help% %docs check% coccicheck \
487 $(version_h) headers_% archheaders archscripts \ 488 $(version_h) headers_% archheaders archscripts \
488 kernelversion %src-pkg 489 kernelversion %src-pkg
489 490
@@ -1005,6 +1006,7 @@ endef
1005 1006
1006$(version_h): $(srctree)/Makefile FORCE 1007$(version_h): $(srctree)/Makefile FORCE
1007 $(call filechk,version.h) 1008 $(call filechk,version.h)
1009 $(Q)rm -f $(old_version_h)
1008 1010
1009include/generated/utsrelease.h: include/config/kernel.release FORCE 1011include/generated/utsrelease.h: include/config/kernel.release FORCE
1010 $(call filechk,utsrelease.h) 1012 $(call filechk,utsrelease.h)
@@ -1036,8 +1038,6 @@ firmware_install: FORCE
1036#Default location for installed headers 1038#Default location for installed headers
1037export INSTALL_HDR_PATH = $(objtree)/usr 1039export INSTALL_HDR_PATH = $(objtree)/usr
1038 1040
1039hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
1040
1041# If we do an all arch process set dst to asm-$(hdr-arch) 1041# If we do an all arch process set dst to asm-$(hdr-arch)
1042hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm) 1042hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
1043 1043
@@ -1175,7 +1175,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
1175 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \ 1175 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
1176 signing_key.priv signing_key.x509 x509.genkey \ 1176 signing_key.priv signing_key.x509 x509.genkey \
1177 extra_certificates signing_key.x509.keyid \ 1177 extra_certificates signing_key.x509.keyid \
1178 signing_key.x509.signer include/linux/version.h 1178 signing_key.x509.signer
1179 1179
1180# clean - Delete most, but leave enough to build external modules 1180# clean - Delete most, but leave enough to build external modules
1181# 1181#
@@ -1235,7 +1235,7 @@ rpm: include/config/kernel.release FORCE
1235# --------------------------------------------------------------------------- 1235# ---------------------------------------------------------------------------
1236 1236
1237boards := $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*_defconfig) 1237boards := $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*_defconfig)
1238boards := $(notdir $(boards)) 1238boards := $(sort $(notdir $(boards)))
1239board-dirs := $(dir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*/*_defconfig)) 1239board-dirs := $(dir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*/*_defconfig))
1240board-dirs := $(sort $(notdir $(board-dirs:/=))) 1240board-dirs := $(sort $(notdir $(board-dirs:/=)))
1241 1241
@@ -1326,7 +1326,7 @@ help-board-dirs := $(addprefix help-,$(board-dirs))
1326 1326
1327help-boards: $(help-board-dirs) 1327help-boards: $(help-board-dirs)
1328 1328
1329boards-per-dir = $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig)) 1329boards-per-dir = $(sort $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig)))
1330 1330
1331$(help-board-dirs): help-%: 1331$(help-board-dirs): help-%:
1332 @echo 'Architecture specific targets ($(SRCARCH) $*):' 1332 @echo 'Architecture specific targets ($(SRCARCH) $*):'
@@ -1581,11 +1581,6 @@ ifneq ($(cmd_files),)
1581 include $(cmd_files) 1581 include $(cmd_files)
1582endif 1582endif
1583 1583
1584# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.clean obj=dir
1585# Usage:
1586# $(Q)$(MAKE) $(clean)=dir
1587clean := -f $(srctree)/scripts/Makefile.clean obj
1588
1589endif # skip-makefile 1584endif # skip-makefile
1590 1585
1591PHONY += FORCE 1586PHONY += FORCE
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 6a3d9a6c4497..91bd5bd62857 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -177,6 +177,9 @@ dtb-$(CONFIG_MACH_KIRKWOOD) += kirkwood-b3.dtb \
177dtb-$(CONFIG_ARCH_LPC32XX) += ea3250.dtb phy3250.dtb 177dtb-$(CONFIG_ARCH_LPC32XX) += ea3250.dtb phy3250.dtb
178dtb-$(CONFIG_ARCH_MARCO) += marco-evb.dtb 178dtb-$(CONFIG_ARCH_MARCO) += marco-evb.dtb
179dtb-$(CONFIG_MACH_MESON6) += meson6-atv1200.dtb 179dtb-$(CONFIG_MACH_MESON6) += meson6-atv1200.dtb
180dtb-$(CONFIG_ARCH_MMP) += pxa168-aspenite.dtb \
181 pxa910-dkb.dtb \
182 mmp2-brownstone.dtb
180dtb-$(CONFIG_ARCH_MOXART) += moxart-uc7112lx.dtb 183dtb-$(CONFIG_ARCH_MOXART) += moxart-uc7112lx.dtb
181dtb-$(CONFIG_ARCH_MXC) += \ 184dtb-$(CONFIG_ARCH_MXC) += \
182 imx1-ads.dtb \ 185 imx1-ads.dtb \
diff --git a/arch/arm/boot/dts/mmp2-brownstone.dts b/arch/arm/boot/dts/mmp2-brownstone.dts
index 7f70a39459f6..350208c5e1ed 100644
--- a/arch/arm/boot/dts/mmp2-brownstone.dts
+++ b/arch/arm/boot/dts/mmp2-brownstone.dts
@@ -8,7 +8,7 @@
8 */ 8 */
9 9
10/dts-v1/; 10/dts-v1/;
11/include/ "mmp2.dtsi" 11#include "mmp2.dtsi"
12 12
13/ { 13/ {
14 model = "Marvell MMP2 Brownstone Development Board"; 14 model = "Marvell MMP2 Brownstone Development Board";
diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
index 4e8b08c628c7..766bbb8495b6 100644
--- a/arch/arm/boot/dts/mmp2.dtsi
+++ b/arch/arm/boot/dts/mmp2.dtsi
@@ -7,7 +7,8 @@
7 * publishhed by the Free Software Foundation. 7 * publishhed by the Free Software Foundation.
8 */ 8 */
9 9
10/include/ "skeleton.dtsi" 10#include "skeleton.dtsi"
11#include <dt-bindings/clock/marvell,mmp2.h>
11 12
12/ { 13/ {
13 aliases { 14 aliases {
@@ -135,6 +136,8 @@
135 compatible = "mrvl,mmp-uart"; 136 compatible = "mrvl,mmp-uart";
136 reg = <0xd4030000 0x1000>; 137 reg = <0xd4030000 0x1000>;
137 interrupts = <27>; 138 interrupts = <27>;
139 clocks = <&soc_clocks MMP2_CLK_UART0>;
140 resets = <&soc_clocks MMP2_CLK_UART0>;
138 status = "disabled"; 141 status = "disabled";
139 }; 142 };
140 143
@@ -142,6 +145,8 @@
142 compatible = "mrvl,mmp-uart"; 145 compatible = "mrvl,mmp-uart";
143 reg = <0xd4017000 0x1000>; 146 reg = <0xd4017000 0x1000>;
144 interrupts = <28>; 147 interrupts = <28>;
148 clocks = <&soc_clocks MMP2_CLK_UART1>;
149 resets = <&soc_clocks MMP2_CLK_UART1>;
145 status = "disabled"; 150 status = "disabled";
146 }; 151 };
147 152
@@ -149,6 +154,8 @@
149 compatible = "mrvl,mmp-uart"; 154 compatible = "mrvl,mmp-uart";
150 reg = <0xd4018000 0x1000>; 155 reg = <0xd4018000 0x1000>;
151 interrupts = <24>; 156 interrupts = <24>;
157 clocks = <&soc_clocks MMP2_CLK_UART2>;
158 resets = <&soc_clocks MMP2_CLK_UART2>;
152 status = "disabled"; 159 status = "disabled";
153 }; 160 };
154 161
@@ -156,6 +163,8 @@
156 compatible = "mrvl,mmp-uart"; 163 compatible = "mrvl,mmp-uart";
157 reg = <0xd4016000 0x1000>; 164 reg = <0xd4016000 0x1000>;
158 interrupts = <46>; 165 interrupts = <46>;
166 clocks = <&soc_clocks MMP2_CLK_UART3>;
167 resets = <&soc_clocks MMP2_CLK_UART3>;
159 status = "disabled"; 168 status = "disabled";
160 }; 169 };
161 170
@@ -168,6 +177,8 @@
168 #gpio-cells = <2>; 177 #gpio-cells = <2>;
169 interrupts = <49>; 178 interrupts = <49>;
170 interrupt-names = "gpio_mux"; 179 interrupt-names = "gpio_mux";
180 clocks = <&soc_clocks MMP2_CLK_GPIO>;
181 resets = <&soc_clocks MMP2_CLK_GPIO>;
171 interrupt-controller; 182 interrupt-controller;
172 #interrupt-cells = <1>; 183 #interrupt-cells = <1>;
173 ranges; 184 ranges;
@@ -201,6 +212,8 @@
201 compatible = "mrvl,mmp-twsi"; 212 compatible = "mrvl,mmp-twsi";
202 reg = <0xd4011000 0x1000>; 213 reg = <0xd4011000 0x1000>;
203 interrupts = <7>; 214 interrupts = <7>;
215 clocks = <&soc_clocks MMP2_CLK_TWSI0>;
216 resets = <&soc_clocks MMP2_CLK_TWSI0>;
204 #address-cells = <1>; 217 #address-cells = <1>;
205 #size-cells = <0>; 218 #size-cells = <0>;
206 mrvl,i2c-fast-mode; 219 mrvl,i2c-fast-mode;
@@ -211,6 +224,8 @@
211 compatible = "mrvl,mmp-twsi"; 224 compatible = "mrvl,mmp-twsi";
212 reg = <0xd4025000 0x1000>; 225 reg = <0xd4025000 0x1000>;
213 interrupts = <58>; 226 interrupts = <58>;
227 clocks = <&soc_clocks MMP2_CLK_TWSI1>;
228 resets = <&soc_clocks MMP2_CLK_TWSI1>;
214 status = "disabled"; 229 status = "disabled";
215 }; 230 };
216 231
@@ -220,8 +235,20 @@
220 interrupts = <1 0>; 235 interrupts = <1 0>;
221 interrupt-names = "rtc 1Hz", "rtc alarm"; 236 interrupt-names = "rtc 1Hz", "rtc alarm";
222 interrupt-parent = <&intcmux5>; 237 interrupt-parent = <&intcmux5>;
238 clocks = <&soc_clocks MMP2_CLK_RTC>;
239 resets = <&soc_clocks MMP2_CLK_RTC>;
223 status = "disabled"; 240 status = "disabled";
224 }; 241 };
225 }; 242 };
243
244 soc_clocks: clocks{
245 compatible = "marvell,mmp2-clock";
246 reg = <0xd4050000 0x1000>,
247 <0xd4282800 0x400>,
248 <0xd4015000 0x1000>;
249 reg-names = "mpmu", "apmu", "apbc";
250 #clock-cells = <1>;
251 #reset-cells = <1>;
252 };
226 }; 253 };
227}; 254};
diff --git a/arch/arm/boot/dts/pxa168-aspenite.dts b/arch/arm/boot/dts/pxa168-aspenite.dts
index e762facb3fa4..0a988b3fb248 100644
--- a/arch/arm/boot/dts/pxa168-aspenite.dts
+++ b/arch/arm/boot/dts/pxa168-aspenite.dts
@@ -8,7 +8,7 @@
8 */ 8 */
9 9
10/dts-v1/; 10/dts-v1/;
11/include/ "pxa168.dtsi" 11#include "pxa168.dtsi"
12 12
13/ { 13/ {
14 model = "Marvell PXA168 Aspenite Development Board"; 14 model = "Marvell PXA168 Aspenite Development Board";
diff --git a/arch/arm/boot/dts/pxa168.dtsi b/arch/arm/boot/dts/pxa168.dtsi
index 975dad21ac38..b899e25cbb1b 100644
--- a/arch/arm/boot/dts/pxa168.dtsi
+++ b/arch/arm/boot/dts/pxa168.dtsi
@@ -7,7 +7,8 @@
7 * publishhed by the Free Software Foundation. 7 * publishhed by the Free Software Foundation.
8 */ 8 */
9 9
10/include/ "skeleton.dtsi" 10#include "skeleton.dtsi"
11#include <dt-bindings/clock/marvell,pxa168.h>
11 12
12/ { 13/ {
13 aliases { 14 aliases {
@@ -59,6 +60,8 @@
59 compatible = "mrvl,mmp-uart"; 60 compatible = "mrvl,mmp-uart";
60 reg = <0xd4017000 0x1000>; 61 reg = <0xd4017000 0x1000>;
61 interrupts = <27>; 62 interrupts = <27>;
63 clocks = <&soc_clocks PXA168_CLK_UART0>;
64 resets = <&soc_clocks PXA168_CLK_UART0>;
62 status = "disabled"; 65 status = "disabled";
63 }; 66 };
64 67
@@ -66,6 +69,8 @@
66 compatible = "mrvl,mmp-uart"; 69 compatible = "mrvl,mmp-uart";
67 reg = <0xd4018000 0x1000>; 70 reg = <0xd4018000 0x1000>;
68 interrupts = <28>; 71 interrupts = <28>;
72 clocks = <&soc_clocks PXA168_CLK_UART1>;
73 resets = <&soc_clocks PXA168_CLK_UART1>;
69 status = "disabled"; 74 status = "disabled";
70 }; 75 };
71 76
@@ -73,6 +78,8 @@
73 compatible = "mrvl,mmp-uart"; 78 compatible = "mrvl,mmp-uart";
74 reg = <0xd4026000 0x1000>; 79 reg = <0xd4026000 0x1000>;
75 interrupts = <29>; 80 interrupts = <29>;
81 clocks = <&soc_clocks PXA168_CLK_UART2>;
82 resets = <&soc_clocks PXA168_CLK_UART2>;
76 status = "disabled"; 83 status = "disabled";
77 }; 84 };
78 85
@@ -84,6 +91,8 @@
84 gpio-controller; 91 gpio-controller;
85 #gpio-cells = <2>; 92 #gpio-cells = <2>;
86 interrupts = <49>; 93 interrupts = <49>;
94 clocks = <&soc_clocks PXA168_CLK_GPIO>;
95 resets = <&soc_clocks PXA168_CLK_GPIO>;
87 interrupt-names = "gpio_mux"; 96 interrupt-names = "gpio_mux";
88 interrupt-controller; 97 interrupt-controller;
89 #interrupt-cells = <1>; 98 #interrupt-cells = <1>;
@@ -110,6 +119,8 @@
110 compatible = "mrvl,mmp-twsi"; 119 compatible = "mrvl,mmp-twsi";
111 reg = <0xd4011000 0x1000>; 120 reg = <0xd4011000 0x1000>;
112 interrupts = <7>; 121 interrupts = <7>;
122 clocks = <&soc_clocks PXA168_CLK_TWSI0>;
123 resets = <&soc_clocks PXA168_CLK_TWSI0>;
113 mrvl,i2c-fast-mode; 124 mrvl,i2c-fast-mode;
114 status = "disabled"; 125 status = "disabled";
115 }; 126 };
@@ -118,6 +129,8 @@
118 compatible = "mrvl,mmp-twsi"; 129 compatible = "mrvl,mmp-twsi";
119 reg = <0xd4025000 0x1000>; 130 reg = <0xd4025000 0x1000>;
120 interrupts = <58>; 131 interrupts = <58>;
132 clocks = <&soc_clocks PXA168_CLK_TWSI1>;
133 resets = <&soc_clocks PXA168_CLK_TWSI1>;
121 status = "disabled"; 134 status = "disabled";
122 }; 135 };
123 136
@@ -126,8 +139,20 @@
126 reg = <0xd4010000 0x1000>; 139 reg = <0xd4010000 0x1000>;
127 interrupts = <5 6>; 140 interrupts = <5 6>;
128 interrupt-names = "rtc 1Hz", "rtc alarm"; 141 interrupt-names = "rtc 1Hz", "rtc alarm";
142 clocks = <&soc_clocks PXA168_CLK_RTC>;
143 resets = <&soc_clocks PXA168_CLK_RTC>;
129 status = "disabled"; 144 status = "disabled";
130 }; 145 };
131 }; 146 };
147
148 soc_clocks: clocks{
149 compatible = "marvell,pxa168-clock";
150 reg = <0xd4050000 0x1000>,
151 <0xd4282800 0x400>,
152 <0xd4015000 0x1000>;
153 reg-names = "mpmu", "apmu", "apbc";
154 #clock-cells = <1>;
155 #reset-cells = <1>;
156 };
132 }; 157 };
133}; 158};
diff --git a/arch/arm/boot/dts/pxa910-dkb.dts b/arch/arm/boot/dts/pxa910-dkb.dts
index 595492aa5053..c82f2810ec73 100644
--- a/arch/arm/boot/dts/pxa910-dkb.dts
+++ b/arch/arm/boot/dts/pxa910-dkb.dts
@@ -8,7 +8,7 @@
8 */ 8 */
9 9
10/dts-v1/; 10/dts-v1/;
11/include/ "pxa910.dtsi" 11#include "pxa910.dtsi"
12 12
13/ { 13/ {
14 model = "Marvell PXA910 DKB Development Board"; 14 model = "Marvell PXA910 DKB Development Board";
diff --git a/arch/arm/boot/dts/pxa910.dtsi b/arch/arm/boot/dts/pxa910.dtsi
index 0247c622f580..0868f6729be1 100644
--- a/arch/arm/boot/dts/pxa910.dtsi
+++ b/arch/arm/boot/dts/pxa910.dtsi
@@ -7,7 +7,8 @@
7 * publishhed by the Free Software Foundation. 7 * publishhed by the Free Software Foundation.
8 */ 8 */
9 9
10/include/ "skeleton.dtsi" 10#include "skeleton.dtsi"
11#include <dt-bindings/clock/marvell,pxa910.h>
11 12
12/ { 13/ {
13 aliases { 14 aliases {
@@ -71,6 +72,8 @@
71 compatible = "mrvl,mmp-uart"; 72 compatible = "mrvl,mmp-uart";
72 reg = <0xd4017000 0x1000>; 73 reg = <0xd4017000 0x1000>;
73 interrupts = <27>; 74 interrupts = <27>;
75 clocks = <&soc_clocks PXA910_CLK_UART0>;
76 resets = <&soc_clocks PXA910_CLK_UART0>;
74 status = "disabled"; 77 status = "disabled";
75 }; 78 };
76 79
@@ -78,6 +81,8 @@
78 compatible = "mrvl,mmp-uart"; 81 compatible = "mrvl,mmp-uart";
79 reg = <0xd4018000 0x1000>; 82 reg = <0xd4018000 0x1000>;
80 interrupts = <28>; 83 interrupts = <28>;
84 clocks = <&soc_clocks PXA910_CLK_UART1>;
85 resets = <&soc_clocks PXA910_CLK_UART1>;
81 status = "disabled"; 86 status = "disabled";
82 }; 87 };
83 88
@@ -85,6 +90,8 @@
85 compatible = "mrvl,mmp-uart"; 90 compatible = "mrvl,mmp-uart";
86 reg = <0xd4036000 0x1000>; 91 reg = <0xd4036000 0x1000>;
87 interrupts = <59>; 92 interrupts = <59>;
93 clocks = <&soc_clocks PXA910_CLK_UART2>;
94 resets = <&soc_clocks PXA910_CLK_UART2>;
88 status = "disabled"; 95 status = "disabled";
89 }; 96 };
90 97
@@ -97,6 +104,8 @@
97 #gpio-cells = <2>; 104 #gpio-cells = <2>;
98 interrupts = <49>; 105 interrupts = <49>;
99 interrupt-names = "gpio_mux"; 106 interrupt-names = "gpio_mux";
107 clocks = <&soc_clocks PXA910_CLK_GPIO>;
108 resets = <&soc_clocks PXA910_CLK_GPIO>;
100 interrupt-controller; 109 interrupt-controller;
101 #interrupt-cells = <1>; 110 #interrupt-cells = <1>;
102 ranges; 111 ranges;
@@ -124,6 +133,8 @@
124 #size-cells = <0>; 133 #size-cells = <0>;
125 reg = <0xd4011000 0x1000>; 134 reg = <0xd4011000 0x1000>;
126 interrupts = <7>; 135 interrupts = <7>;
136 clocks = <&soc_clocks PXA910_CLK_TWSI0>;
137 resets = <&soc_clocks PXA910_CLK_TWSI0>;
127 mrvl,i2c-fast-mode; 138 mrvl,i2c-fast-mode;
128 status = "disabled"; 139 status = "disabled";
129 }; 140 };
@@ -134,6 +145,8 @@
134 #size-cells = <0>; 145 #size-cells = <0>;
135 reg = <0xd4037000 0x1000>; 146 reg = <0xd4037000 0x1000>;
136 interrupts = <54>; 147 interrupts = <54>;
148 clocks = <&soc_clocks PXA910_CLK_TWSI1>;
149 resets = <&soc_clocks PXA910_CLK_TWSI1>;
137 status = "disabled"; 150 status = "disabled";
138 }; 151 };
139 152
@@ -142,8 +155,21 @@
142 reg = <0xd4010000 0x1000>; 155 reg = <0xd4010000 0x1000>;
143 interrupts = <5 6>; 156 interrupts = <5 6>;
144 interrupt-names = "rtc 1Hz", "rtc alarm"; 157 interrupt-names = "rtc 1Hz", "rtc alarm";
158 clocks = <&soc_clocks PXA910_CLK_RTC>;
159 resets = <&soc_clocks PXA910_CLK_RTC>;
145 status = "disabled"; 160 status = "disabled";
146 }; 161 };
147 }; 162 };
163
164 soc_clocks: clocks{
165 compatible = "marvell,pxa910-clock";
166 reg = <0xd4050000 0x1000>,
167 <0xd4282800 0x400>,
168 <0xd4015000 0x1000>,
169 <0xd403b000 0x1000>;
170 reg-names = "mpmu", "apmu", "apbc", "apbcp";
171 #clock-cells = <1>;
172 #reset-cells = <1>;
173 };
148 }; 174 };
149}; 175};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index e3ab942fd148..7b4099fcf817 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -188,19 +188,11 @@
188 "apb0_ir1", "apb0_keypad"; 188 "apb0_ir1", "apb0_keypad";
189 }; 189 };
190 190
191 apb1_mux: apb1_mux@01c20058 { 191 apb1: clk@01c20058 {
192 #clock-cells = <0>;
193 compatible = "allwinner,sun4i-a10-apb1-mux-clk";
194 reg = <0x01c20058 0x4>;
195 clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
196 clock-output-names = "apb1_mux";
197 };
198
199 apb1: apb1@01c20058 {
200 #clock-cells = <0>; 192 #clock-cells = <0>;
201 compatible = "allwinner,sun4i-a10-apb1-clk"; 193 compatible = "allwinner,sun4i-a10-apb1-clk";
202 reg = <0x01c20058 0x4>; 194 reg = <0x01c20058 0x4>;
203 clocks = <&apb1_mux>; 195 clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
204 clock-output-names = "apb1"; 196 clock-output-names = "apb1";
205 }; 197 };
206 198
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 81ad4b94e812..1b76667f3182 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -176,19 +176,11 @@
176 "apb0_ir", "apb0_keypad"; 176 "apb0_ir", "apb0_keypad";
177 }; 177 };
178 178
179 apb1_mux: apb1_mux@01c20058 { 179 apb1: clk@01c20058 {
180 #clock-cells = <0>;
181 compatible = "allwinner,sun4i-a10-apb1-mux-clk";
182 reg = <0x01c20058 0x4>;
183 clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
184 clock-output-names = "apb1_mux";
185 };
186
187 apb1: apb1@01c20058 {
188 #clock-cells = <0>; 180 #clock-cells = <0>;
189 compatible = "allwinner,sun4i-a10-apb1-clk"; 181 compatible = "allwinner,sun4i-a10-apb1-clk";
190 reg = <0x01c20058 0x4>; 182 reg = <0x01c20058 0x4>;
191 clocks = <&apb1_mux>; 183 clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
192 clock-output-names = "apb1"; 184 clock-output-names = "apb1";
193 }; 185 };
194 186
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index b131068f4f35..c35217ea1f64 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -161,19 +161,11 @@
161 clock-output-names = "apb0_codec", "apb0_pio", "apb0_ir"; 161 clock-output-names = "apb0_codec", "apb0_pio", "apb0_ir";
162 }; 162 };
163 163
164 apb1_mux: apb1_mux@01c20058 { 164 apb1: clk@01c20058 {
165 #clock-cells = <0>;
166 compatible = "allwinner,sun4i-a10-apb1-mux-clk";
167 reg = <0x01c20058 0x4>;
168 clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
169 clock-output-names = "apb1_mux";
170 };
171
172 apb1: apb1@01c20058 {
173 #clock-cells = <0>; 165 #clock-cells = <0>;
174 compatible = "allwinner,sun4i-a10-apb1-clk"; 166 compatible = "allwinner,sun4i-a10-apb1-clk";
175 reg = <0x01c20058 0x4>; 167 reg = <0x01c20058 0x4>;
176 clocks = <&apb1_mux>; 168 clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
177 clock-output-names = "apb1"; 169 clock-output-names = "apb1";
178 }; 170 };
179 171
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index a400172a8a52..f47156b6572b 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -229,19 +229,11 @@
229 "apb1_daudio1"; 229 "apb1_daudio1";
230 }; 230 };
231 231
232 apb2_mux: apb2_mux@01c20058 { 232 apb2: clk@01c20058 {
233 #clock-cells = <0>; 233 #clock-cells = <0>;
234 compatible = "allwinner,sun4i-a10-apb1-mux-clk"; 234 compatible = "allwinner,sun4i-a10-apb1-clk";
235 reg = <0x01c20058 0x4>; 235 reg = <0x01c20058 0x4>;
236 clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>; 236 clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
237 clock-output-names = "apb2_mux";
238 };
239
240 apb2: apb2@01c20058 {
241 #clock-cells = <0>;
242 compatible = "allwinner,sun6i-a31-apb2-div-clk";
243 reg = <0x01c20058 0x4>;
244 clocks = <&apb2_mux>;
245 clock-output-names = "apb2"; 237 clock-output-names = "apb2";
246 }; 238 };
247 239
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 82a524ce28ad..e21ce5992d56 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -236,19 +236,11 @@
236 "apb0_iis2", "apb0_keypad"; 236 "apb0_iis2", "apb0_keypad";
237 }; 237 };
238 238
239 apb1_mux: apb1_mux@01c20058 { 239 apb1: clk@01c20058 {
240 #clock-cells = <0>;
241 compatible = "allwinner,sun4i-a10-apb1-mux-clk";
242 reg = <0x01c20058 0x4>;
243 clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
244 clock-output-names = "apb1_mux";
245 };
246
247 apb1: apb1@01c20058 {
248 #clock-cells = <0>; 240 #clock-cells = <0>;
249 compatible = "allwinner,sun4i-a10-apb1-clk"; 241 compatible = "allwinner,sun4i-a10-apb1-clk";
250 reg = <0x01c20058 0x4>; 242 reg = <0x01c20058 0x4>;
251 clocks = <&apb1_mux>; 243 clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
252 clock-output-names = "apb1"; 244 clock-output-names = "apb1";
253 }; 245 };
254 246
diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi
index 6086adbf9d74..0746cd1024d7 100644
--- a/arch/arm/boot/dts/sun8i-a23.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23.dtsi
@@ -189,19 +189,11 @@
189 "apb1_daudio0", "apb1_daudio1"; 189 "apb1_daudio0", "apb1_daudio1";
190 }; 190 };
191 191
192 apb2_mux: apb2_mux_clk@01c20058 { 192 apb2: clk@01c20058 {
193 #clock-cells = <0>; 193 #clock-cells = <0>;
194 compatible = "allwinner,sun4i-a10-apb1-mux-clk"; 194 compatible = "allwinner,sun4i-a10-apb1-clk";
195 reg = <0x01c20058 0x4>; 195 reg = <0x01c20058 0x4>;
196 clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>; 196 clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
197 clock-output-names = "apb2_mux";
198 };
199
200 apb2: apb2_clk@01c20058 {
201 #clock-cells = <0>;
202 compatible = "allwinner,sun6i-a31-apb2-div-clk";
203 reg = <0x01c20058 0x4>;
204 clocks = <&apb2_mux>;
205 clock-output-names = "apb2"; 197 clock-output-names = "apb2";
206 }; 198 };
207 199
diff --git a/arch/arm/configs/ape6evm_defconfig b/arch/arm/configs/ape6evm_defconfig
index db81d8ce4c03..9e9a72e3d30f 100644
--- a/arch/arm/configs/ape6evm_defconfig
+++ b/arch/arm/configs/ape6evm_defconfig
@@ -33,7 +33,7 @@ CONFIG_ARM_APPENDED_DTB=y
33CONFIG_VFP=y 33CONFIG_VFP=y
34CONFIG_NEON=y 34CONFIG_NEON=y
35CONFIG_BINFMT_MISC=y 35CONFIG_BINFMT_MISC=y
36CONFIG_PM_RUNTIME=y 36CONFIG_PM=y
37CONFIG_NET=y 37CONFIG_NET=y
38CONFIG_PACKET=y 38CONFIG_PACKET=y
39CONFIG_UNIX=y 39CONFIG_UNIX=y
diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig
index d9675c68a399..5666e3700a82 100644
--- a/arch/arm/configs/armadillo800eva_defconfig
+++ b/arch/arm/configs/armadillo800eva_defconfig
@@ -43,7 +43,7 @@ CONFIG_KEXEC=y
43CONFIG_VFP=y 43CONFIG_VFP=y
44CONFIG_NEON=y 44CONFIG_NEON=y
45# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 45# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
46CONFIG_PM_RUNTIME=y 46CONFIG_PM=y
47CONFIG_NET=y 47CONFIG_NET=y
48CONFIG_PACKET=y 48CONFIG_PACKET=y
49CONFIG_UNIX=y 49CONFIG_UNIX=y
diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig
index 83a87e48901c..7117662bab2e 100644
--- a/arch/arm/configs/bcm_defconfig
+++ b/arch/arm/configs/bcm_defconfig
@@ -39,7 +39,7 @@ CONFIG_CPU_IDLE=y
39CONFIG_VFP=y 39CONFIG_VFP=y
40CONFIG_NEON=y 40CONFIG_NEON=y
41# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 41# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
42CONFIG_PM_RUNTIME=y 42CONFIG_PM=y
43CONFIG_NET=y 43CONFIG_NET=y
44CONFIG_PACKET=y 44CONFIG_PACKET=y
45CONFIG_PACKET_DIAG=y 45CONFIG_PACKET_DIAG=y
diff --git a/arch/arm/configs/bockw_defconfig b/arch/arm/configs/bockw_defconfig
index 1dde5daa84f9..3125e00f05ab 100644
--- a/arch/arm/configs/bockw_defconfig
+++ b/arch/arm/configs/bockw_defconfig
@@ -29,7 +29,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0
29CONFIG_ARM_APPENDED_DTB=y 29CONFIG_ARM_APPENDED_DTB=y
30CONFIG_VFP=y 30CONFIG_VFP=y
31# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 31# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
32CONFIG_PM_RUNTIME=y 32CONFIG_PM=y
33CONFIG_NET=y 33CONFIG_NET=y
34CONFIG_PACKET=y 34CONFIG_PACKET=y
35CONFIG_UNIX=y 35CONFIG_UNIX=y
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 759f9b0053e2..235842c9ba96 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -49,7 +49,7 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=m
49CONFIG_CPU_FREQ_GOV_POWERSAVE=m 49CONFIG_CPU_FREQ_GOV_POWERSAVE=m
50CONFIG_CPU_FREQ_GOV_ONDEMAND=m 50CONFIG_CPU_FREQ_GOV_ONDEMAND=m
51CONFIG_CPU_IDLE=y 51CONFIG_CPU_IDLE=y
52CONFIG_PM_RUNTIME=y 52CONFIG_PM=y
53CONFIG_NET=y 53CONFIG_NET=y
54CONFIG_PACKET=y 54CONFIG_PACKET=y
55CONFIG_UNIX=y 55CONFIG_UNIX=y
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index c41990729024..5ef14de00a29 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -27,7 +27,7 @@ CONFIG_ARM_ATAG_DTB_COMPAT=y
27CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M" 27CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
28CONFIG_VFP=y 28CONFIG_VFP=y
29CONFIG_NEON=y 29CONFIG_NEON=y
30CONFIG_PM_RUNTIME=y 30CONFIG_PM=y
31CONFIG_NET=y 31CONFIG_NET=y
32CONFIG_PACKET=y 32CONFIG_PACKET=y
33CONFIG_UNIX=y 33CONFIG_UNIX=y
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index eb440aae4283..ea316c4b890e 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -39,7 +39,6 @@ CONFIG_BINFMT_AOUT=m
39CONFIG_BINFMT_MISC=m 39CONFIG_BINFMT_MISC=m
40CONFIG_PM=y 40CONFIG_PM=y
41CONFIG_APM_EMULATION=y 41CONFIG_APM_EMULATION=y
42CONFIG_PM_RUNTIME=y
43CONFIG_NET=y 42CONFIG_NET=y
44CONFIG_PACKET=y 43CONFIG_PACKET=y
45CONFIG_UNIX=y 44CONFIG_UNIX=y
diff --git a/arch/arm/configs/hisi_defconfig b/arch/arm/configs/hisi_defconfig
index 1fe3621faf65..112543665dd7 100644
--- a/arch/arm/configs/hisi_defconfig
+++ b/arch/arm/configs/hisi_defconfig
@@ -18,7 +18,7 @@ CONFIG_ARM_APPENDED_DTB=y
18CONFIG_ARM_ATAG_DTB_COMPAT=y 18CONFIG_ARM_ATAG_DTB_COMPAT=y
19CONFIG_NEON=y 19CONFIG_NEON=y
20CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER=y 20CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER=y
21CONFIG_PM_RUNTIME=y 21CONFIG_PM=y
22CONFIG_NET=y 22CONFIG_NET=y
23CONFIG_PACKET=y 23CONFIG_PACKET=y
24CONFIG_UNIX=y 24CONFIG_UNIX=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 182e54692664..18e59feaa307 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -31,7 +31,6 @@ CONFIG_BINFMT_AOUT=m
31CONFIG_BINFMT_MISC=m 31CONFIG_BINFMT_MISC=m
32CONFIG_PM=y 32CONFIG_PM=y
33CONFIG_APM_EMULATION=y 33CONFIG_APM_EMULATION=y
34CONFIG_PM_RUNTIME=y
35CONFIG_NET=y 34CONFIG_NET=y
36CONFIG_PACKET=y 35CONFIG_PACKET=y
37CONFIG_UNIX=y 36CONFIG_UNIX=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index f707cd2691cf..7c2075a07eba 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -54,7 +54,7 @@ CONFIG_ARM_IMX6Q_CPUFREQ=y
54CONFIG_VFP=y 54CONFIG_VFP=y
55CONFIG_NEON=y 55CONFIG_NEON=y
56CONFIG_BINFMT_MISC=m 56CONFIG_BINFMT_MISC=m
57CONFIG_PM_RUNTIME=y 57CONFIG_PM=y
58CONFIG_PM_DEBUG=y 58CONFIG_PM_DEBUG=y
59CONFIG_PM_TEST_SUSPEND=y 59CONFIG_PM_TEST_SUSPEND=y
60CONFIG_NET=y 60CONFIG_NET=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 20a3ff99fae2..a2067cbfe173 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -30,7 +30,7 @@ CONFIG_HIGHMEM=y
30CONFIG_VFP=y 30CONFIG_VFP=y
31CONFIG_NEON=y 31CONFIG_NEON=y
32# CONFIG_SUSPEND is not set 32# CONFIG_SUSPEND is not set
33CONFIG_PM_RUNTIME=y 33CONFIG_PM=y
34CONFIG_NET=y 34CONFIG_NET=y
35CONFIG_PACKET=y 35CONFIG_PACKET=y
36CONFIG_UNIX=y 36CONFIG_UNIX=y
diff --git a/arch/arm/configs/kzm9g_defconfig b/arch/arm/configs/kzm9g_defconfig
index 8cb115d74fdf..5d63fc5d2d48 100644
--- a/arch/arm/configs/kzm9g_defconfig
+++ b/arch/arm/configs/kzm9g_defconfig
@@ -43,7 +43,7 @@ CONFIG_KEXEC=y
43CONFIG_VFP=y 43CONFIG_VFP=y
44CONFIG_NEON=y 44CONFIG_NEON=y
45# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 45# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
46CONFIG_PM_RUNTIME=y 46CONFIG_PM=y
47CONFIG_NET=y 47CONFIG_NET=y
48CONFIG_PACKET=y 48CONFIG_PACKET=y
49CONFIG_UNIX=y 49CONFIG_UNIX=y
diff --git a/arch/arm/configs/lager_defconfig b/arch/arm/configs/lager_defconfig
index 929c571ea29b..a82afc916a89 100644
--- a/arch/arm/configs/lager_defconfig
+++ b/arch/arm/configs/lager_defconfig
@@ -37,7 +37,7 @@ CONFIG_AUTO_ZRELADDR=y
37CONFIG_VFP=y 37CONFIG_VFP=y
38CONFIG_NEON=y 38CONFIG_NEON=y
39# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 39# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
40CONFIG_PM_RUNTIME=y 40CONFIG_PM=y
41CONFIG_NET=y 41CONFIG_NET=y
42CONFIG_PACKET=y 42CONFIG_PACKET=y
43CONFIG_UNIX=y 43CONFIG_UNIX=y
diff --git a/arch/arm/configs/mackerel_defconfig b/arch/arm/configs/mackerel_defconfig
index 57ececba2ae6..05a529311b4d 100644
--- a/arch/arm/configs/mackerel_defconfig
+++ b/arch/arm/configs/mackerel_defconfig
@@ -28,7 +28,6 @@ CONFIG_KEXEC=y
28CONFIG_VFP=y 28CONFIG_VFP=y
29# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 29# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
30CONFIG_PM=y 30CONFIG_PM=y
31CONFIG_PM_RUNTIME=y
32CONFIG_NET=y 31CONFIG_NET=y
33CONFIG_PACKET=y 32CONFIG_PACKET=y
34CONFIG_UNIX=y 33CONFIG_UNIX=y
diff --git a/arch/arm/configs/marzen_defconfig b/arch/arm/configs/marzen_defconfig
index ff91630d34e1..3c8b6d823189 100644
--- a/arch/arm/configs/marzen_defconfig
+++ b/arch/arm/configs/marzen_defconfig
@@ -33,7 +33,7 @@ CONFIG_ARM_APPENDED_DTB=y
33CONFIG_VFP=y 33CONFIG_VFP=y
34CONFIG_KEXEC=y 34CONFIG_KEXEC=y
35# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 35# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
36CONFIG_PM_RUNTIME=y 36CONFIG_PM=y
37CONFIG_NET=y 37CONFIG_NET=y
38CONFIG_PACKET=y 38CONFIG_PACKET=y
39CONFIG_UNIX=y 39CONFIG_UNIX=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 115cda9f3260..a7dce674f1be 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -63,7 +63,6 @@ CONFIG_FPE_NWFPE=y
63CONFIG_BINFMT_MISC=y 63CONFIG_BINFMT_MISC=y
64CONFIG_PM=y 64CONFIG_PM=y
65# CONFIG_SUSPEND is not set 65# CONFIG_SUSPEND is not set
66CONFIG_PM_RUNTIME=y
67CONFIG_NET=y 66CONFIG_NET=y
68CONFIG_PACKET=y 67CONFIG_PACKET=y
69CONFIG_UNIX=y 68CONFIG_UNIX=y
diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig
index 23591dba47a0..f610230b9c1f 100644
--- a/arch/arm/configs/prima2_defconfig
+++ b/arch/arm/configs/prima2_defconfig
@@ -18,7 +18,7 @@ CONFIG_PREEMPT=y
18CONFIG_AEABI=y 18CONFIG_AEABI=y
19CONFIG_KEXEC=y 19CONFIG_KEXEC=y
20CONFIG_BINFMT_MISC=y 20CONFIG_BINFMT_MISC=y
21CONFIG_PM_RUNTIME=y 21CONFIG_PM=y
22CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 22CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
23CONFIG_BLK_DEV_LOOP=y 23CONFIG_BLK_DEV_LOOP=y
24CONFIG_BLK_DEV_RAM=y 24CONFIG_BLK_DEV_RAM=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index b58fb32770a0..afa24799477a 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -32,7 +32,7 @@ CONFIG_VFP=y
32CONFIG_NEON=y 32CONFIG_NEON=y
33CONFIG_KERNEL_MODE_NEON=y 33CONFIG_KERNEL_MODE_NEON=y
34# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 34# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
35CONFIG_PM_RUNTIME=y 35CONFIG_PM=y
36CONFIG_PM_DEBUG=y 36CONFIG_PM_DEBUG=y
37CONFIG_PM_ADVANCED_DEBUG=y 37CONFIG_PM_ADVANCED_DEBUG=y
38CONFIG_NET=y 38CONFIG_NET=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index df2c0f514b0a..3df6ca0c1d1f 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -39,7 +39,7 @@ CONFIG_KEXEC=y
39CONFIG_VFP=y 39CONFIG_VFP=y
40CONFIG_NEON=y 40CONFIG_NEON=y
41# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 41# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
42CONFIG_PM_RUNTIME=y 42CONFIG_PM=y
43CONFIG_NET=y 43CONFIG_NET=y
44CONFIG_PACKET=y 44CONFIG_PACKET=y
45CONFIG_UNIX=y 45CONFIG_UNIX=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index f7ac0379850f..7a342d2780a8 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -11,7 +11,7 @@ CONFIG_ARM_APPENDED_DTB=y
11CONFIG_ARM_ATAG_DTB_COMPAT=y 11CONFIG_ARM_ATAG_DTB_COMPAT=y
12CONFIG_VFP=y 12CONFIG_VFP=y
13CONFIG_NEON=y 13CONFIG_NEON=y
14CONFIG_PM_RUNTIME=y 14CONFIG_PM=y
15CONFIG_NET=y 15CONFIG_NET=y
16CONFIG_PACKET=y 16CONFIG_PACKET=y
17CONFIG_UNIX=y 17CONFIG_UNIX=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 40750f93aa83..3ea9c3377ccb 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -46,7 +46,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
46CONFIG_CPU_IDLE=y 46CONFIG_CPU_IDLE=y
47CONFIG_VFP=y 47CONFIG_VFP=y
48CONFIG_NEON=y 48CONFIG_NEON=y
49CONFIG_PM_RUNTIME=y 49CONFIG_PM=y
50CONFIG_NET=y 50CONFIG_NET=y
51CONFIG_PACKET=y 51CONFIG_PACKET=y
52CONFIG_UNIX=y 52CONFIG_UNIX=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index d219d6a43238..6a1c9898fd03 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -25,7 +25,7 @@ CONFIG_CPU_IDLE=y
25CONFIG_ARM_U8500_CPUIDLE=y 25CONFIG_ARM_U8500_CPUIDLE=y
26CONFIG_VFP=y 26CONFIG_VFP=y
27CONFIG_NEON=y 27CONFIG_NEON=y
28CONFIG_PM_RUNTIME=y 28CONFIG_PM=y
29CONFIG_NET=y 29CONFIG_NET=y
30CONFIG_PACKET=y 30CONFIG_PACKET=y
31CONFIG_UNIX=y 31CONFIG_UNIX=y
diff --git a/arch/arm/configs/vt8500_v6_v7_defconfig b/arch/arm/configs/vt8500_v6_v7_defconfig
index 9e7a25639690..1bfaa7bfc392 100644
--- a/arch/arm/configs/vt8500_v6_v7_defconfig
+++ b/arch/arm/configs/vt8500_v6_v7_defconfig
@@ -16,7 +16,7 @@ CONFIG_ARM_APPENDED_DTB=y
16CONFIG_ARM_ATAG_DTB_COMPAT=y 16CONFIG_ARM_ATAG_DTB_COMPAT=y
17CONFIG_VFP=y 17CONFIG_VFP=y
18CONFIG_NEON=y 18CONFIG_NEON=y
19CONFIG_PM_RUNTIME=y 19CONFIG_PM=y
20CONFIG_NET=y 20CONFIG_NET=y
21CONFIG_UNIX=y 21CONFIG_UNIX=y
22CONFIG_INET=y 22CONFIG_INET=y
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ac4bfae26702..0fa418463f49 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -120,12 +120,12 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
120 120
121static inline int arch_spin_is_locked(arch_spinlock_t *lock) 121static inline int arch_spin_is_locked(arch_spinlock_t *lock)
122{ 122{
123 return !arch_spin_value_unlocked(ACCESS_ONCE(*lock)); 123 return !arch_spin_value_unlocked(READ_ONCE(*lock));
124} 124}
125 125
126static inline int arch_spin_is_contended(arch_spinlock_t *lock) 126static inline int arch_spin_is_contended(arch_spinlock_t *lock)
127{ 127{
128 struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets); 128 struct __raw_tickets tickets = READ_ONCE(lock->tickets);
129 return (tickets.next - tickets.owner) > 1; 129 return (tickets.next - tickets.owner) > 1;
130} 130}
131#define arch_spin_is_contended arch_spin_is_contended 131#define arch_spin_is_contended arch_spin_is_contended
diff --git a/arch/arm/mach-mmp/Kconfig b/arch/arm/mach-mmp/Kconfig
index ebdba87b9671..fdbfadf00c84 100644
--- a/arch/arm/mach-mmp/Kconfig
+++ b/arch/arm/mach-mmp/Kconfig
@@ -86,11 +86,12 @@ config MACH_GPLUGD
86 86
87config MACH_MMP_DT 87config MACH_MMP_DT
88 bool "Support MMP (ARMv5) platforms from device tree" 88 bool "Support MMP (ARMv5) platforms from device tree"
89 select CPU_PXA168
90 select CPU_PXA910
91 select USE_OF 89 select USE_OF
92 select PINCTRL 90 select PINCTRL
93 select PINCTRL_SINGLE 91 select PINCTRL_SINGLE
92 select COMMON_CLK
93 select ARCH_HAS_RESET_CONTROLLER
94 select CPU_MOHAWK
94 help 95 help
95 Include support for Marvell MMP2 based platforms using 96 Include support for Marvell MMP2 based platforms using
96 the device tree. Needn't select any other machine while 97 the device tree. Needn't select any other machine while
@@ -99,10 +100,12 @@ config MACH_MMP_DT
99config MACH_MMP2_DT 100config MACH_MMP2_DT
100 bool "Support MMP2 (ARMv7) platforms from device tree" 101 bool "Support MMP2 (ARMv7) platforms from device tree"
101 depends on !CPU_MOHAWK 102 depends on !CPU_MOHAWK
102 select CPU_MMP2
103 select USE_OF 103 select USE_OF
104 select PINCTRL 104 select PINCTRL
105 select PINCTRL_SINGLE 105 select PINCTRL_SINGLE
106 select COMMON_CLK
107 select ARCH_HAS_RESET_CONTROLLER
108 select CPU_PJ4
106 help 109 help
107 Include support for Marvell MMP2 based platforms using 110 Include support for Marvell MMP2 based platforms using
108 the device tree. 111 the device tree.
@@ -111,21 +114,18 @@ endmenu
111 114
112config CPU_PXA168 115config CPU_PXA168
113 bool 116 bool
114 select COMMON_CLK
115 select CPU_MOHAWK 117 select CPU_MOHAWK
116 help 118 help
117 Select code specific to PXA168 119 Select code specific to PXA168
118 120
119config CPU_PXA910 121config CPU_PXA910
120 bool 122 bool
121 select COMMON_CLK
122 select CPU_MOHAWK 123 select CPU_MOHAWK
123 help 124 help
124 Select code specific to PXA910 125 Select code specific to PXA910
125 126
126config CPU_MMP2 127config CPU_MMP2
127 bool 128 bool
128 select COMMON_CLK
129 select CPU_PJ4 129 select CPU_PJ4
130 help 130 help
131 Select code specific to MMP2. MMP2 is ARMv7 compatible. 131 Select code specific to MMP2. MMP2 is ARMv7 compatible.
diff --git a/arch/arm/mach-mmp/mmp-dt.c b/arch/arm/mach-mmp/mmp-dt.c
index cca529ceecb7..b2296c9309b8 100644
--- a/arch/arm/mach-mmp/mmp-dt.c
+++ b/arch/arm/mach-mmp/mmp-dt.c
@@ -11,63 +11,42 @@
11 11
12#include <linux/irqchip.h> 12#include <linux/irqchip.h>
13#include <linux/of_platform.h> 13#include <linux/of_platform.h>
14#include <linux/clk-provider.h>
14#include <asm/mach/arch.h> 15#include <asm/mach/arch.h>
15#include <asm/mach/time.h> 16#include <asm/mach/time.h>
17#include <asm/hardware/cache-tauros2.h>
16 18
17#include "common.h" 19#include "common.h"
18 20
19extern void __init mmp_dt_init_timer(void); 21extern void __init mmp_dt_init_timer(void);
20 22
21static const struct of_dev_auxdata pxa168_auxdata_lookup[] __initconst = { 23static const char *pxa168_dt_board_compat[] __initdata = {
22 OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4017000, "pxa2xx-uart.0", NULL), 24 "mrvl,pxa168-aspenite",
23 OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4018000, "pxa2xx-uart.1", NULL), 25 NULL,
24 OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4026000, "pxa2xx-uart.2", NULL),
25 OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4011000, "pxa2xx-i2c.0", NULL),
26 OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4025000, "pxa2xx-i2c.1", NULL),
27 OF_DEV_AUXDATA("marvell,mmp-gpio", 0xd4019000, "mmp-gpio", NULL),
28 OF_DEV_AUXDATA("mrvl,mmp-rtc", 0xd4010000, "sa1100-rtc", NULL),
29 {}
30}; 26};
31 27
32static const struct of_dev_auxdata pxa910_auxdata_lookup[] __initconst = { 28static const char *pxa910_dt_board_compat[] __initdata = {
33 OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4017000, "pxa2xx-uart.0", NULL), 29 "mrvl,pxa910-dkb",
34 OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4018000, "pxa2xx-uart.1", NULL), 30 NULL,
35 OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4036000, "pxa2xx-uart.2", NULL),
36 OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4011000, "pxa2xx-i2c.0", NULL),
37 OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4037000, "pxa2xx-i2c.1", NULL),
38 OF_DEV_AUXDATA("marvell,mmp-gpio", 0xd4019000, "mmp-gpio", NULL),
39 OF_DEV_AUXDATA("mrvl,mmp-rtc", 0xd4010000, "sa1100-rtc", NULL),
40 {}
41}; 31};
42 32
43static void __init pxa168_dt_init(void) 33static void __init mmp_init_time(void)
44{
45 of_platform_populate(NULL, of_default_bus_match_table,
46 pxa168_auxdata_lookup, NULL);
47}
48
49static void __init pxa910_dt_init(void)
50{ 34{
51 of_platform_populate(NULL, of_default_bus_match_table, 35#ifdef CONFIG_CACHE_TAUROS2
52 pxa910_auxdata_lookup, NULL); 36 tauros2_init(0);
37#endif
38 mmp_dt_init_timer();
39 of_clk_init(NULL);
53} 40}
54 41
55static const char *mmp_dt_board_compat[] __initdata = {
56 "mrvl,pxa168-aspenite",
57 "mrvl,pxa910-dkb",
58 NULL,
59};
60
61DT_MACHINE_START(PXA168_DT, "Marvell PXA168 (Device Tree Support)") 42DT_MACHINE_START(PXA168_DT, "Marvell PXA168 (Device Tree Support)")
62 .map_io = mmp_map_io, 43 .map_io = mmp_map_io,
63 .init_time = mmp_dt_init_timer, 44 .init_time = mmp_init_time,
64 .init_machine = pxa168_dt_init, 45 .dt_compat = pxa168_dt_board_compat,
65 .dt_compat = mmp_dt_board_compat,
66MACHINE_END 46MACHINE_END
67 47
68DT_MACHINE_START(PXA910_DT, "Marvell PXA910 (Device Tree Support)") 48DT_MACHINE_START(PXA910_DT, "Marvell PXA910 (Device Tree Support)")
69 .map_io = mmp_map_io, 49 .map_io = mmp_map_io,
70 .init_time = mmp_dt_init_timer, 50 .init_time = mmp_init_time,
71 .init_machine = pxa910_dt_init, 51 .dt_compat = pxa910_dt_board_compat,
72 .dt_compat = mmp_dt_board_compat,
73MACHINE_END 52MACHINE_END
diff --git a/arch/arm/mach-mmp/mmp2-dt.c b/arch/arm/mach-mmp/mmp2-dt.c
index 023cb453f157..998c0f533abc 100644
--- a/arch/arm/mach-mmp/mmp2-dt.c
+++ b/arch/arm/mach-mmp/mmp2-dt.c
@@ -12,29 +12,22 @@
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/irqchip.h> 13#include <linux/irqchip.h>
14#include <linux/of_platform.h> 14#include <linux/of_platform.h>
15#include <linux/clk-provider.h>
15#include <asm/mach/arch.h> 16#include <asm/mach/arch.h>
16#include <asm/mach/time.h> 17#include <asm/mach/time.h>
18#include <asm/hardware/cache-tauros2.h>
17 19
18#include "common.h" 20#include "common.h"
19 21
20extern void __init mmp_dt_init_timer(void); 22extern void __init mmp_dt_init_timer(void);
21 23
22static const struct of_dev_auxdata mmp2_auxdata_lookup[] __initconst = { 24static void __init mmp_init_time(void)
23 OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4030000, "pxa2xx-uart.0", NULL),
24 OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4017000, "pxa2xx-uart.1", NULL),
25 OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4018000, "pxa2xx-uart.2", NULL),
26 OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4016000, "pxa2xx-uart.3", NULL),
27 OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4011000, "pxa2xx-i2c.0", NULL),
28 OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4025000, "pxa2xx-i2c.1", NULL),
29 OF_DEV_AUXDATA("marvell,mmp-gpio", 0xd4019000, "mmp2-gpio", NULL),
30 OF_DEV_AUXDATA("mrvl,mmp-rtc", 0xd4010000, "sa1100-rtc", NULL),
31 {}
32};
33
34static void __init mmp2_dt_init(void)
35{ 25{
36 of_platform_populate(NULL, of_default_bus_match_table, 26#ifdef CONFIG_CACHE_TAUROS2
37 mmp2_auxdata_lookup, NULL); 27 tauros2_init(0);
28#endif
29 mmp_dt_init_timer();
30 of_clk_init(NULL);
38} 31}
39 32
40static const char *mmp2_dt_board_compat[] __initdata = { 33static const char *mmp2_dt_board_compat[] __initdata = {
@@ -44,7 +37,6 @@ static const char *mmp2_dt_board_compat[] __initdata = {
44 37
45DT_MACHINE_START(MMP2_DT, "Marvell MMP2 (Device Tree Support)") 38DT_MACHINE_START(MMP2_DT, "Marvell MMP2 (Device Tree Support)")
46 .map_io = mmp_map_io, 39 .map_io = mmp_map_io,
47 .init_time = mmp_dt_init_timer, 40 .init_time = mmp_init_time,
48 .init_machine = mmp2_dt_init,
49 .dt_compat = mmp2_dt_board_compat, 41 .dt_compat = mmp2_dt_board_compat,
50MACHINE_END 42MACHINE_END
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index f0edec199cd4..6ab656cc4f16 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -15,7 +15,7 @@ config ARCH_OMAP3
15 select ARM_CPU_SUSPEND if PM 15 select ARM_CPU_SUSPEND if PM
16 select OMAP_INTERCONNECT 16 select OMAP_INTERCONNECT
17 select PM_OPP if PM 17 select PM_OPP if PM
18 select PM_RUNTIME if CPU_IDLE 18 select PM if CPU_IDLE
19 select SOC_HAS_OMAP2_SDRC 19 select SOC_HAS_OMAP2_SDRC
20 20
21config ARCH_OMAP4 21config ARCH_OMAP4
@@ -32,7 +32,7 @@ config ARCH_OMAP4
32 select PL310_ERRATA_588369 if CACHE_L2X0 32 select PL310_ERRATA_588369 if CACHE_L2X0
33 select PL310_ERRATA_727915 if CACHE_L2X0 33 select PL310_ERRATA_727915 if CACHE_L2X0
34 select PM_OPP if PM 34 select PM_OPP if PM
35 select PM_RUNTIME if CPU_IDLE 35 select PM if CPU_IDLE
36 select ARM_ERRATA_754322 36 select ARM_ERRATA_754322
37 select ARM_ERRATA_775420 37 select ARM_ERRATA_775420
38 38
@@ -103,7 +103,7 @@ config ARCH_OMAP2PLUS_TYPICAL
103 select I2C_OMAP 103 select I2C_OMAP
104 select MENELAUS if ARCH_OMAP2 104 select MENELAUS if ARCH_OMAP2
105 select NEON if CPU_V7 105 select NEON if CPU_V7
106 select PM_RUNTIME 106 select PM
107 select REGULATOR 107 select REGULATOR
108 select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4 108 select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
109 select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4 109 select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 5c5ebb4db5f7..644ff3231bb8 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -111,6 +111,7 @@ static struct clk dpll3_ck;
111 111
112static const char *dpll3_ck_parent_names[] = { 112static const char *dpll3_ck_parent_names[] = {
113 "sys_ck", 113 "sys_ck",
114 "sys_ck",
114}; 115};
115 116
116static const struct clk_ops dpll3_ck_ops = { 117static const struct clk_ops dpll3_ck_ops = {
@@ -733,6 +734,10 @@ static const char *corex2_fck_parent_names[] = {
733DEFINE_STRUCT_CLK_HW_OMAP(corex2_fck, NULL); 734DEFINE_STRUCT_CLK_HW_OMAP(corex2_fck, NULL);
734DEFINE_STRUCT_CLK(corex2_fck, corex2_fck_parent_names, core_ck_ops); 735DEFINE_STRUCT_CLK(corex2_fck, corex2_fck_parent_names, core_ck_ops);
735 736
737static const char *cpefuse_fck_parent_names[] = {
738 "sys_ck",
739};
740
736static struct clk cpefuse_fck; 741static struct clk cpefuse_fck;
737 742
738static struct clk_hw_omap cpefuse_fck_hw = { 743static struct clk_hw_omap cpefuse_fck_hw = {
@@ -744,7 +749,7 @@ static struct clk_hw_omap cpefuse_fck_hw = {
744 .clkdm_name = "core_l4_clkdm", 749 .clkdm_name = "core_l4_clkdm",
745}; 750};
746 751
747DEFINE_STRUCT_CLK(cpefuse_fck, dpll3_ck_parent_names, aes2_ick_ops); 752DEFINE_STRUCT_CLK(cpefuse_fck, cpefuse_fck_parent_names, aes2_ick_ops);
748 753
749static struct clk csi2_96m_fck; 754static struct clk csi2_96m_fck;
750 755
@@ -775,7 +780,7 @@ static struct clk_hw_omap d2d_26m_fck_hw = {
775 .clkdm_name = "d2d_clkdm", 780 .clkdm_name = "d2d_clkdm",
776}; 781};
777 782
778DEFINE_STRUCT_CLK(d2d_26m_fck, dpll3_ck_parent_names, aes2_ick_ops); 783DEFINE_STRUCT_CLK(d2d_26m_fck, cpefuse_fck_parent_names, aes2_ick_ops);
779 784
780static struct clk des1_ick; 785static struct clk des1_ick;
781 786
@@ -1046,7 +1051,7 @@ static struct clk_hw_omap dss2_alwon_fck_hw = {
1046 .clkdm_name = "dss_clkdm", 1051 .clkdm_name = "dss_clkdm",
1047}; 1052};
1048 1053
1049DEFINE_STRUCT_CLK(dss2_alwon_fck, dpll3_ck_parent_names, aes2_ick_ops); 1054DEFINE_STRUCT_CLK(dss2_alwon_fck, cpefuse_fck_parent_names, aes2_ick_ops);
1050 1055
1051static struct clk dss_96m_fck; 1056static struct clk dss_96m_fck;
1052 1057
@@ -1368,7 +1373,7 @@ DEFINE_STRUCT_CLK(gpio1_dbck, gpio1_dbck_parent_names, aes2_ick_ops);
1368static struct clk wkup_l4_ick; 1373static struct clk wkup_l4_ick;
1369 1374
1370DEFINE_STRUCT_CLK_HW_OMAP(wkup_l4_ick, "wkup_clkdm"); 1375DEFINE_STRUCT_CLK_HW_OMAP(wkup_l4_ick, "wkup_clkdm");
1371DEFINE_STRUCT_CLK(wkup_l4_ick, dpll3_ck_parent_names, core_l4_ick_ops); 1376DEFINE_STRUCT_CLK(wkup_l4_ick, cpefuse_fck_parent_names, core_l4_ick_ops);
1372 1377
1373static struct clk gpio1_ick; 1378static struct clk gpio1_ick;
1374 1379
@@ -1862,7 +1867,7 @@ static struct clk_hw_omap hecc_ck_hw = {
1862 .clkdm_name = "core_l3_clkdm", 1867 .clkdm_name = "core_l3_clkdm",
1863}; 1868};
1864 1869
1865DEFINE_STRUCT_CLK(hecc_ck, dpll3_ck_parent_names, aes2_ick_ops); 1870DEFINE_STRUCT_CLK(hecc_ck, cpefuse_fck_parent_names, aes2_ick_ops);
1866 1871
1867static struct clk hsotgusb_fck_am35xx; 1872static struct clk hsotgusb_fck_am35xx;
1868 1873
@@ -1875,7 +1880,7 @@ static struct clk_hw_omap hsotgusb_fck_am35xx_hw = {
1875 .clkdm_name = "core_l3_clkdm", 1880 .clkdm_name = "core_l3_clkdm",
1876}; 1881};
1877 1882
1878DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, dpll3_ck_parent_names, aes2_ick_ops); 1883DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, cpefuse_fck_parent_names, aes2_ick_ops);
1879 1884
1880static struct clk hsotgusb_ick_3430es1; 1885static struct clk hsotgusb_ick_3430es1;
1881 1886
@@ -2411,7 +2416,7 @@ static struct clk_hw_omap modem_fck_hw = {
2411 .clkdm_name = "d2d_clkdm", 2416 .clkdm_name = "d2d_clkdm",
2412}; 2417};
2413 2418
2414DEFINE_STRUCT_CLK(modem_fck, dpll3_ck_parent_names, aes2_ick_ops); 2419DEFINE_STRUCT_CLK(modem_fck, cpefuse_fck_parent_names, aes2_ick_ops);
2415 2420
2416static struct clk mspro_fck; 2421static struct clk mspro_fck;
2417 2422
@@ -2710,7 +2715,7 @@ static struct clk_hw_omap sr1_fck_hw = {
2710 .clkdm_name = "wkup_clkdm", 2715 .clkdm_name = "wkup_clkdm",
2711}; 2716};
2712 2717
2713DEFINE_STRUCT_CLK(sr1_fck, dpll3_ck_parent_names, aes2_ick_ops); 2718DEFINE_STRUCT_CLK(sr1_fck, cpefuse_fck_parent_names, aes2_ick_ops);
2714 2719
2715static struct clk sr2_fck; 2720static struct clk sr2_fck;
2716 2721
@@ -2724,7 +2729,7 @@ static struct clk_hw_omap sr2_fck_hw = {
2724 .clkdm_name = "wkup_clkdm", 2729 .clkdm_name = "wkup_clkdm",
2725}; 2730};
2726 2731
2727DEFINE_STRUCT_CLK(sr2_fck, dpll3_ck_parent_names, aes2_ick_ops); 2732DEFINE_STRUCT_CLK(sr2_fck, cpefuse_fck_parent_names, aes2_ick_ops);
2728 2733
2729static struct clk sr_l4_ick; 2734static struct clk sr_l4_ick;
2730 2735
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index 20e120d071dd..c2da2a0fe5ad 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -474,7 +474,7 @@ void omap3_noncore_dpll_disable(struct clk_hw *hw)
474 */ 474 */
475long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate, 475long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate,
476 unsigned long *best_parent_rate, 476 unsigned long *best_parent_rate,
477 struct clk **best_parent_clk) 477 struct clk_hw **best_parent_clk)
478{ 478{
479 struct clk_hw_omap *clk = to_clk_hw_omap(hw); 479 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
480 struct dpll_data *dd; 480 struct dpll_data *dd;
@@ -488,10 +488,10 @@ long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate,
488 488
489 if (__clk_get_rate(dd->clk_bypass) == rate && 489 if (__clk_get_rate(dd->clk_bypass) == rate &&
490 (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { 490 (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
491 *best_parent_clk = dd->clk_bypass; 491 *best_parent_clk = __clk_get_hw(dd->clk_bypass);
492 } else { 492 } else {
493 rate = omap2_dpll_round_rate(hw, rate, best_parent_rate); 493 rate = omap2_dpll_round_rate(hw, rate, best_parent_rate);
494 *best_parent_clk = dd->clk_ref; 494 *best_parent_clk = __clk_get_hw(dd->clk_ref);
495 } 495 }
496 496
497 *best_parent_rate = rate; 497 *best_parent_rate = rate;
diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c
index 535822fcf4bb..0e58e5a85d53 100644
--- a/arch/arm/mach-omap2/dpll44xx.c
+++ b/arch/arm/mach-omap2/dpll44xx.c
@@ -223,7 +223,7 @@ out:
223 */ 223 */
224long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate, 224long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate,
225 unsigned long *best_parent_rate, 225 unsigned long *best_parent_rate,
226 struct clk **best_parent_clk) 226 struct clk_hw **best_parent_clk)
227{ 227{
228 struct clk_hw_omap *clk = to_clk_hw_omap(hw); 228 struct clk_hw_omap *clk = to_clk_hw_omap(hw);
229 struct dpll_data *dd; 229 struct dpll_data *dd;
@@ -237,11 +237,11 @@ long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate,
237 237
238 if (__clk_get_rate(dd->clk_bypass) == rate && 238 if (__clk_get_rate(dd->clk_bypass) == rate &&
239 (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) { 239 (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
240 *best_parent_clk = dd->clk_bypass; 240 *best_parent_clk = __clk_get_hw(dd->clk_bypass);
241 } else { 241 } else {
242 rate = omap4_dpll_regm4xen_round_rate(hw, rate, 242 rate = omap4_dpll_regm4xen_round_rate(hw, rate,
243 best_parent_rate); 243 best_parent_rate);
244 *best_parent_clk = dd->clk_ref; 244 *best_parent_clk = __clk_get_hw(dd->clk_ref);
245 } 245 }
246 246
247 *best_parent_rate = rate; 247 *best_parent_rate = rate;
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index c45b7b1b7197..cee128732435 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -99,12 +99,12 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
99 99
100static inline int arch_spin_is_locked(arch_spinlock_t *lock) 100static inline int arch_spin_is_locked(arch_spinlock_t *lock)
101{ 101{
102 return !arch_spin_value_unlocked(ACCESS_ONCE(*lock)); 102 return !arch_spin_value_unlocked(READ_ONCE(*lock));
103} 103}
104 104
105static inline int arch_spin_is_contended(arch_spinlock_t *lock) 105static inline int arch_spin_is_contended(arch_spinlock_t *lock)
106{ 106{
107 arch_spinlock_t lockval = ACCESS_ONCE(*lock); 107 arch_spinlock_t lockval = READ_ONCE(*lock);
108 return (lockval.next - lockval.owner) > 1; 108 return (lockval.next - lockval.owner) > 1;
109} 109}
110#define arch_spin_is_contended arch_spin_is_contended 110#define arch_spin_is_contended arch_spin_is_contended
diff --git a/arch/cris/arch-v10/lib/usercopy.c b/arch/cris/arch-v10/lib/usercopy.c
index b0a608da7bd1..b964c667aced 100644
--- a/arch/cris/arch-v10/lib/usercopy.c
+++ b/arch/cris/arch-v10/lib/usercopy.c
@@ -30,8 +30,7 @@
30/* Copy to userspace. This is based on the memcpy used for 30/* Copy to userspace. This is based on the memcpy used for
31 kernel-to-kernel copying; see "string.c". */ 31 kernel-to-kernel copying; see "string.c". */
32 32
33unsigned long 33unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
34__copy_user (void __user *pdst, const void *psrc, unsigned long pn)
35{ 34{
36 /* We want the parameters put in special registers. 35 /* We want the parameters put in special registers.
37 Make sure the compiler is able to make something useful of this. 36 Make sure the compiler is able to make something useful of this.
@@ -187,13 +186,14 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn)
187 186
188 return retn; 187 return retn;
189} 188}
189EXPORT_SYMBOL(__copy_user);
190 190
191/* Copy from user to kernel, zeroing the bytes that were inaccessible in 191/* Copy from user to kernel, zeroing the bytes that were inaccessible in
192 userland. The return-value is the number of bytes that were 192 userland. The return-value is the number of bytes that were
193 inaccessible. */ 193 inaccessible. */
194 194
195unsigned long 195unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
196__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn) 196 unsigned long pn)
197{ 197{
198 /* We want the parameters put in special registers. 198 /* We want the parameters put in special registers.
199 Make sure the compiler is able to make something useful of this. 199 Make sure the compiler is able to make something useful of this.
@@ -369,11 +369,10 @@ copy_exception_bytes:
369 369
370 return retn + n; 370 return retn + n;
371} 371}
372EXPORT_SYMBOL(__copy_user_zeroing);
372 373
373/* Zero userspace. */ 374/* Zero userspace. */
374 375unsigned long __do_clear_user(void __user *pto, unsigned long pn)
375unsigned long
376__do_clear_user (void __user *pto, unsigned long pn)
377{ 376{
378 /* We want the parameters put in special registers. 377 /* We want the parameters put in special registers.
379 Make sure the compiler is able to make something useful of this. 378 Make sure the compiler is able to make something useful of this.
@@ -521,3 +520,4 @@ __do_clear_user (void __user *pto, unsigned long pn)
521 520
522 return retn; 521 return retn;
523} 522}
523EXPORT_SYMBOL(__do_clear_user);
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 15a9ed1d579c..4fc16b44fff2 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -108,6 +108,7 @@ config ETRAX_AXISFLASHMAP
108 select MTD_JEDECPROBE 108 select MTD_JEDECPROBE
109 select MTD_BLOCK 109 select MTD_BLOCK
110 select MTD_COMPLEX_MAPPINGS 110 select MTD_COMPLEX_MAPPINGS
111 select MTD_MTDRAM
111 help 112 help
112 This option enables MTD mapping of flash devices. Needed to use 113 This option enables MTD mapping of flash devices. Needed to use
113 flash memories. If unsure, say Y. 114 flash memories. If unsure, say Y.
@@ -358,13 +359,6 @@ config ETRAX_SPI_MMC
358 default MMC 359 default MMC
359 select SPI 360 select SPI
360 select MMC_SPI 361 select MMC_SPI
361 select ETRAX_SPI_MMC_BOARD
362
363# For the parts that can't be a module (due to restrictions in
364# framework elsewhere).
365config ETRAX_SPI_MMC_BOARD
366 boolean
367 default n
368 362
369# While the board info is MMC_SPI only, the drivers are written to be 363# While the board info is MMC_SPI only, the drivers are written to be
370# independent of MMC_SPI, so we'll keep SPI non-dependent on the 364# independent of MMC_SPI, so we'll keep SPI non-dependent on the
diff --git a/arch/cris/arch-v32/drivers/Makefile b/arch/cris/arch-v32/drivers/Makefile
index 39aa3c117a86..15fbfefced2c 100644
--- a/arch/cris/arch-v32/drivers/Makefile
+++ b/arch/cris/arch-v32/drivers/Makefile
@@ -10,4 +10,3 @@ obj-$(CONFIG_ETRAX_IOP_FW_LOAD) += iop_fw_load.o
10obj-$(CONFIG_ETRAX_I2C) += i2c.o 10obj-$(CONFIG_ETRAX_I2C) += i2c.o
11obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o 11obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o
12obj-$(CONFIG_PCI) += pci/ 12obj-$(CONFIG_PCI) += pci/
13obj-$(CONFIG_ETRAX_SPI_MMC_BOARD) += board_mmcspi.o
diff --git a/arch/cris/arch-v32/drivers/i2c.h b/arch/cris/arch-v32/drivers/i2c.h
index c073cf4ba016..d9cc856f89fb 100644
--- a/arch/cris/arch-v32/drivers/i2c.h
+++ b/arch/cris/arch-v32/drivers/i2c.h
@@ -2,7 +2,6 @@
2#include <linux/init.h> 2#include <linux/init.h>
3 3
4/* High level I2C actions */ 4/* High level I2C actions */
5int __init i2c_init(void);
6int i2c_write(unsigned char theSlave, void *data, size_t nbytes); 5int i2c_write(unsigned char theSlave, void *data, size_t nbytes);
7int i2c_read(unsigned char theSlave, void *data, size_t nbytes); 6int i2c_read(unsigned char theSlave, void *data, size_t nbytes);
8int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue); 7int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue);
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 5a149134cfb5..08a313fc2241 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1,8 +1,7 @@
1/* 1/*
2 * Simple synchronous serial port driver for ETRAX FS and Artpec-3. 2 * Simple synchronous serial port driver for ETRAX FS and ARTPEC-3.
3 *
4 * Copyright (c) 2005 Axis Communications AB
5 * 3 *
4 * Copyright (c) 2005, 2008 Axis Communications AB
6 * Author: Mikael Starvik 5 * Author: Mikael Starvik
7 * 6 *
8 */ 7 */
@@ -16,16 +15,17 @@
16#include <linux/mutex.h> 15#include <linux/mutex.h>
17#include <linux/interrupt.h> 16#include <linux/interrupt.h>
18#include <linux/poll.h> 17#include <linux/poll.h>
19#include <linux/init.h> 18#include <linux/fs.h>
20#include <linux/timer.h> 19#include <linux/cdev.h>
21#include <linux/spinlock.h> 20#include <linux/device.h>
22#include <linux/wait.h> 21#include <linux/wait.h>
23 22
24#include <asm/io.h> 23#include <asm/io.h>
25#include <dma.h> 24#include <mach/dma.h>
26#include <pinmux.h> 25#include <pinmux.h>
27#include <hwregs/reg_rdwr.h> 26#include <hwregs/reg_rdwr.h>
28#include <hwregs/sser_defs.h> 27#include <hwregs/sser_defs.h>
28#include <hwregs/timer_defs.h>
29#include <hwregs/dma_defs.h> 29#include <hwregs/dma_defs.h>
30#include <hwregs/dma.h> 30#include <hwregs/dma.h>
31#include <hwregs/intr_vect_defs.h> 31#include <hwregs/intr_vect_defs.h>
@@ -59,22 +59,23 @@
59/* the rest of the data pointed out by Descr1 and set readp to the start */ 59/* the rest of the data pointed out by Descr1 and set readp to the start */
60/* of Descr2 */ 60/* of Descr2 */
61 61
62#define SYNC_SERIAL_MAJOR 125
63
64/* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */ 62/* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
65/* words can be handled */ 63/* words can be handled */
66#define IN_BUFFER_SIZE 12288 64#define IN_DESCR_SIZE SSP_INPUT_CHUNK_SIZE
67#define IN_DESCR_SIZE 256 65#define NBR_IN_DESCR (8*6)
68#define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE) 66#define IN_BUFFER_SIZE (IN_DESCR_SIZE * NBR_IN_DESCR)
69 67
70#define OUT_BUFFER_SIZE 1024*8
71#define NBR_OUT_DESCR 8 68#define NBR_OUT_DESCR 8
69#define OUT_BUFFER_SIZE (1024 * NBR_OUT_DESCR)
72 70
73#define DEFAULT_FRAME_RATE 0 71#define DEFAULT_FRAME_RATE 0
74#define DEFAULT_WORD_RATE 7 72#define DEFAULT_WORD_RATE 7
75 73
74/* To be removed when we move to pure udev. */
75#define SYNC_SERIAL_MAJOR 125
76
76/* NOTE: Enabling some debug will likely cause overrun or underrun, 77/* NOTE: Enabling some debug will likely cause overrun or underrun,
77 * especially if manual mode is use. 78 * especially if manual mode is used.
78 */ 79 */
79#define DEBUG(x) 80#define DEBUG(x)
80#define DEBUGREAD(x) 81#define DEBUGREAD(x)
@@ -85,11 +86,28 @@
85#define DEBUGTRDMA(x) 86#define DEBUGTRDMA(x)
86#define DEBUGOUTBUF(x) 87#define DEBUGOUTBUF(x)
87 88
88typedef struct sync_port 89enum syncser_irq_setup {
89{ 90 no_irq_setup = 0,
90 reg_scope_instances regi_sser; 91 dma_irq_setup = 1,
91 reg_scope_instances regi_dmain; 92 manual_irq_setup = 2,
92 reg_scope_instances regi_dmaout; 93};
94
95struct sync_port {
96 unsigned long regi_sser;
97 unsigned long regi_dmain;
98 unsigned long regi_dmaout;
99
100 /* Interrupt vectors. */
101 unsigned long dma_in_intr_vect; /* Used for DMA in. */
102 unsigned long dma_out_intr_vect; /* Used for DMA out. */
103 unsigned long syncser_intr_vect; /* Used when no DMA. */
104
105 /* DMA number for in and out. */
106 unsigned int dma_in_nbr;
107 unsigned int dma_out_nbr;
108
109 /* DMA owner. */
110 enum dma_owner req_dma;
93 111
94 char started; /* 1 if port has been started */ 112 char started; /* 1 if port has been started */
95 char port_nbr; /* Port 0 or 1 */ 113 char port_nbr; /* Port 0 or 1 */
@@ -99,22 +117,29 @@ typedef struct sync_port
99 char use_dma; /* 1 if port uses dma */ 117 char use_dma; /* 1 if port uses dma */
100 char tr_running; 118 char tr_running;
101 119
102 char init_irqs; 120 enum syncser_irq_setup init_irqs;
103 int output; 121 int output;
104 int input; 122 int input;
105 123
106 /* Next byte to be read by application */ 124 /* Next byte to be read by application */
107 volatile unsigned char *volatile readp; 125 unsigned char *readp;
108 /* Next byte to be written by etrax */ 126 /* Next byte to be written by etrax */
109 volatile unsigned char *volatile writep; 127 unsigned char *writep;
110 128
111 unsigned int in_buffer_size; 129 unsigned int in_buffer_size;
130 unsigned int in_buffer_len;
112 unsigned int inbufchunk; 131 unsigned int inbufchunk;
113 unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32))); 132 /* Data buffers for in and output. */
114 unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32))); 133 unsigned char out_buffer[OUT_BUFFER_SIZE] __aligned(32);
115 unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32))); 134 unsigned char in_buffer[IN_BUFFER_SIZE] __aligned(32);
116 struct dma_descr_data* next_rx_desc; 135 unsigned char flip[IN_BUFFER_SIZE] __aligned(32);
117 struct dma_descr_data* prev_rx_desc; 136 struct timespec timestamp[NBR_IN_DESCR];
137 struct dma_descr_data *next_rx_desc;
138 struct dma_descr_data *prev_rx_desc;
139
140 struct timeval last_timestamp;
141 int read_ts_idx;
142 int write_ts_idx;
118 143
119 /* Pointer to the first available descriptor in the ring, 144 /* Pointer to the first available descriptor in the ring,
120 * unless active_tr_descr == catch_tr_descr and a dma 145 * unless active_tr_descr == catch_tr_descr and a dma
@@ -135,114 +160,138 @@ typedef struct sync_port
135 /* Number of bytes currently locked for being read by DMA */ 160 /* Number of bytes currently locked for being read by DMA */
136 int out_buf_count; 161 int out_buf_count;
137 162
138 dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16))); 163 dma_descr_context in_context __aligned(32);
139 dma_descr_context in_context __attribute__ ((__aligned__(32))); 164 dma_descr_context out_context __aligned(32);
140 dma_descr_data out_descr[NBR_OUT_DESCR] 165 dma_descr_data in_descr[NBR_IN_DESCR] __aligned(16);
141 __attribute__ ((__aligned__(16))); 166 dma_descr_data out_descr[NBR_OUT_DESCR] __aligned(16);
142 dma_descr_context out_context __attribute__ ((__aligned__(32))); 167
143 wait_queue_head_t out_wait_q; 168 wait_queue_head_t out_wait_q;
144 wait_queue_head_t in_wait_q; 169 wait_queue_head_t in_wait_q;
145 170
146 spinlock_t lock; 171 spinlock_t lock;
147} sync_port; 172};
148 173
149static DEFINE_MUTEX(sync_serial_mutex); 174static DEFINE_MUTEX(sync_serial_mutex);
150static int etrax_sync_serial_init(void); 175static int etrax_sync_serial_init(void);
151static void initialize_port(int portnbr); 176static void initialize_port(int portnbr);
152static inline int sync_data_avail(struct sync_port *port); 177static inline int sync_data_avail(struct sync_port *port);
153 178
154static int sync_serial_open(struct inode *, struct file*); 179static int sync_serial_open(struct inode *, struct file *);
155static int sync_serial_release(struct inode*, struct file*); 180static int sync_serial_release(struct inode *, struct file *);
156static unsigned int sync_serial_poll(struct file *filp, poll_table *wait); 181static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
157 182
158static int sync_serial_ioctl(struct file *, 183static long sync_serial_ioctl(struct file *file,
159 unsigned int cmd, unsigned long arg); 184 unsigned int cmd, unsigned long arg);
160static ssize_t sync_serial_write(struct file * file, const char * buf, 185static int sync_serial_ioctl_unlocked(struct file *file,
186 unsigned int cmd, unsigned long arg);
187static ssize_t sync_serial_write(struct file *file, const char __user *buf,
161 size_t count, loff_t *ppos); 188 size_t count, loff_t *ppos);
162static ssize_t sync_serial_read(struct file *file, char *buf, 189static ssize_t sync_serial_read(struct file *file, char __user *buf,
163 size_t count, loff_t *ppos); 190 size_t count, loff_t *ppos);
164 191
165#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ 192#if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
166 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \ 193 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
167 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \ 194 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
168 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)) 195 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)))
169#define SYNC_SER_DMA 196#define SYNC_SER_DMA
197#else
198#define SYNC_SER_MANUAL
170#endif 199#endif
171 200
172static void send_word(sync_port* port);
173static void start_dma_out(struct sync_port *port, const char *data, int count);
174static void start_dma_in(sync_port* port);
175#ifdef SYNC_SER_DMA 201#ifdef SYNC_SER_DMA
202static void start_dma_out(struct sync_port *port, const char *data, int count);
203static void start_dma_in(struct sync_port *port);
176static irqreturn_t tr_interrupt(int irq, void *dev_id); 204static irqreturn_t tr_interrupt(int irq, void *dev_id);
177static irqreturn_t rx_interrupt(int irq, void *dev_id); 205static irqreturn_t rx_interrupt(int irq, void *dev_id);
178#endif 206#endif
179
180#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
181 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
182 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
183 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
184#define SYNC_SER_MANUAL
185#endif
186#ifdef SYNC_SER_MANUAL 207#ifdef SYNC_SER_MANUAL
208static void send_word(struct sync_port *port);
187static irqreturn_t manual_interrupt(int irq, void *dev_id); 209static irqreturn_t manual_interrupt(int irq, void *dev_id);
188#endif 210#endif
189 211
190#ifdef CONFIG_ETRAXFS /* ETRAX FS */ 212#define artpec_pinmux_alloc_fixed crisv32_pinmux_alloc_fixed
191#define OUT_DMA_NBR 4 213#define artpec_request_dma crisv32_request_dma
192#define IN_DMA_NBR 5 214#define artpec_free_dma crisv32_free_dma
193#define PINMUX_SSER pinmux_sser0 215
194#define SYNCSER_INST regi_sser0 216#ifdef CONFIG_ETRAXFS
195#define SYNCSER_INTR_VECT SSER0_INTR_VECT 217/* ETRAX FS */
196#define OUT_DMA_INST regi_dma4 218#define DMA_OUT_NBR0 SYNC_SER0_TX_DMA_NBR
197#define IN_DMA_INST regi_dma5 219#define DMA_IN_NBR0 SYNC_SER0_RX_DMA_NBR
198#define DMA_OUT_INTR_VECT DMA4_INTR_VECT 220#define DMA_OUT_NBR1 SYNC_SER1_TX_DMA_NBR
199#define DMA_IN_INTR_VECT DMA5_INTR_VECT 221#define DMA_IN_NBR1 SYNC_SER1_RX_DMA_NBR
200#define REQ_DMA_SYNCSER dma_sser0 222#define PINMUX_SSER0 pinmux_sser0
201#else /* Artpec-3 */ 223#define PINMUX_SSER1 pinmux_sser1
202#define OUT_DMA_NBR 6 224#define SYNCSER_INST0 regi_sser0
203#define IN_DMA_NBR 7 225#define SYNCSER_INST1 regi_sser1
204#define PINMUX_SSER pinmux_sser 226#define SYNCSER_INTR_VECT0 SSER0_INTR_VECT
205#define SYNCSER_INST regi_sser 227#define SYNCSER_INTR_VECT1 SSER1_INTR_VECT
206#define SYNCSER_INTR_VECT SSER_INTR_VECT 228#define OUT_DMA_INST0 regi_dma4
207#define OUT_DMA_INST regi_dma6 229#define IN_DMA_INST0 regi_dma5
208#define IN_DMA_INST regi_dma7 230#define DMA_OUT_INTR_VECT0 DMA4_INTR_VECT
209#define DMA_OUT_INTR_VECT DMA6_INTR_VECT 231#define DMA_OUT_INTR_VECT1 DMA7_INTR_VECT
210#define DMA_IN_INTR_VECT DMA7_INTR_VECT 232#define DMA_IN_INTR_VECT0 DMA5_INTR_VECT
211#define REQ_DMA_SYNCSER dma_sser 233#define DMA_IN_INTR_VECT1 DMA6_INTR_VECT
234#define REQ_DMA_SYNCSER0 dma_sser0
235#define REQ_DMA_SYNCSER1 dma_sser1
236#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
237#define PORT1_DMA 1
238#else
239#define PORT1_DMA 0
240#endif
241#elif defined(CONFIG_CRIS_MACH_ARTPEC3)
242/* ARTPEC-3 */
243#define DMA_OUT_NBR0 SYNC_SER_TX_DMA_NBR
244#define DMA_IN_NBR0 SYNC_SER_RX_DMA_NBR
245#define PINMUX_SSER0 pinmux_sser
246#define SYNCSER_INST0 regi_sser
247#define SYNCSER_INTR_VECT0 SSER_INTR_VECT
248#define OUT_DMA_INST0 regi_dma6
249#define IN_DMA_INST0 regi_dma7
250#define DMA_OUT_INTR_VECT0 DMA6_INTR_VECT
251#define DMA_IN_INTR_VECT0 DMA7_INTR_VECT
252#define REQ_DMA_SYNCSER0 dma_sser
253#define REQ_DMA_SYNCSER1 dma_sser
212#endif 254#endif
213 255
214/* The ports */
215static struct sync_port ports[]=
216{
217 {
218 .regi_sser = SYNCSER_INST,
219 .regi_dmaout = OUT_DMA_INST,
220 .regi_dmain = IN_DMA_INST,
221#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA) 256#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
222 .use_dma = 1, 257#define PORT0_DMA 1
223#else 258#else
224 .use_dma = 0, 259#define PORT0_DMA 0
225#endif 260#endif
226 }
227#ifdef CONFIG_ETRAXFS
228 ,
229 261
262/* The ports */
263static struct sync_port ports[] = {
230 { 264 {
231 .regi_sser = regi_sser1, 265 .regi_sser = SYNCSER_INST0,
232 .regi_dmaout = regi_dma6, 266 .regi_dmaout = OUT_DMA_INST0,
233 .regi_dmain = regi_dma7, 267 .regi_dmain = IN_DMA_INST0,
234#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA) 268 .use_dma = PORT0_DMA,
235 .use_dma = 1, 269 .dma_in_intr_vect = DMA_IN_INTR_VECT0,
236#else 270 .dma_out_intr_vect = DMA_OUT_INTR_VECT0,
237 .use_dma = 0, 271 .dma_in_nbr = DMA_IN_NBR0,
238#endif 272 .dma_out_nbr = DMA_OUT_NBR0,
239 } 273 .req_dma = REQ_DMA_SYNCSER0,
274 .syncser_intr_vect = SYNCSER_INTR_VECT0,
275 },
276#ifdef CONFIG_ETRAXFS
277 {
278 .regi_sser = SYNCSER_INST1,
279 .regi_dmaout = regi_dma6,
280 .regi_dmain = regi_dma7,
281 .use_dma = PORT1_DMA,
282 .dma_in_intr_vect = DMA_IN_INTR_VECT1,
283 .dma_out_intr_vect = DMA_OUT_INTR_VECT1,
284 .dma_in_nbr = DMA_IN_NBR1,
285 .dma_out_nbr = DMA_OUT_NBR1,
286 .req_dma = REQ_DMA_SYNCSER1,
287 .syncser_intr_vect = SYNCSER_INTR_VECT1,
288 },
240#endif 289#endif
241}; 290};
242 291
243#define NBR_PORTS ARRAY_SIZE(ports) 292#define NBR_PORTS ARRAY_SIZE(ports)
244 293
245static const struct file_operations sync_serial_fops = { 294static const struct file_operations syncser_fops = {
246 .owner = THIS_MODULE, 295 .owner = THIS_MODULE,
247 .write = sync_serial_write, 296 .write = sync_serial_write,
248 .read = sync_serial_read, 297 .read = sync_serial_read,
@@ -253,61 +302,40 @@ static const struct file_operations sync_serial_fops = {
253 .llseek = noop_llseek, 302 .llseek = noop_llseek,
254}; 303};
255 304
256static int __init etrax_sync_serial_init(void) 305static dev_t syncser_first;
257{ 306static int minor_count = NBR_PORTS;
258 ports[0].enabled = 0; 307#define SYNCSER_NAME "syncser"
259#ifdef CONFIG_ETRAXFS 308static struct cdev *syncser_cdev;
260 ports[1].enabled = 0; 309static struct class *syncser_class;
261#endif
262 if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
263 &sync_serial_fops) < 0) {
264 printk(KERN_WARNING
265 "Unable to get major for synchronous serial port\n");
266 return -EBUSY;
267 }
268
269 /* Initialize Ports */
270#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
271 if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
272 printk(KERN_WARNING
273 "Unable to alloc pins for synchronous serial port 0\n");
274 return -EIO;
275 }
276 ports[0].enabled = 1;
277 initialize_port(0);
278#endif
279
280#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
281 if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
282 printk(KERN_WARNING
283 "Unable to alloc pins for synchronous serial port 0\n");
284 return -EIO;
285 }
286 ports[1].enabled = 1;
287 initialize_port(1);
288#endif
289 310
290#ifdef CONFIG_ETRAXFS 311static void sync_serial_start_port(struct sync_port *port)
291 printk(KERN_INFO "ETRAX FS synchronous serial port driver\n"); 312{
292#else 313 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
293 printk(KERN_INFO "Artpec-3 synchronous serial port driver\n"); 314 reg_sser_rw_tr_cfg tr_cfg =
294#endif 315 REG_RD(sser, port->regi_sser, rw_tr_cfg);
295 return 0; 316 reg_sser_rw_rec_cfg rec_cfg =
317 REG_RD(sser, port->regi_sser, rw_rec_cfg);
318 cfg.en = regk_sser_yes;
319 tr_cfg.tr_en = regk_sser_yes;
320 rec_cfg.rec_en = regk_sser_yes;
321 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
322 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
323 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
324 port->started = 1;
296} 325}
297 326
298static void __init initialize_port(int portnbr) 327static void __init initialize_port(int portnbr)
299{ 328{
300 int __attribute__((unused)) i;
301 struct sync_port *port = &ports[portnbr]; 329 struct sync_port *port = &ports[portnbr];
302 reg_sser_rw_cfg cfg = {0}; 330 reg_sser_rw_cfg cfg = { 0 };
303 reg_sser_rw_frm_cfg frm_cfg = {0}; 331 reg_sser_rw_frm_cfg frm_cfg = { 0 };
304 reg_sser_rw_tr_cfg tr_cfg = {0}; 332 reg_sser_rw_tr_cfg tr_cfg = { 0 };
305 reg_sser_rw_rec_cfg rec_cfg = {0}; 333 reg_sser_rw_rec_cfg rec_cfg = { 0 };
306 334
307 DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr)); 335 DEBUG(pr_info("Init sync serial port %d\n", portnbr));
308 336
309 port->port_nbr = portnbr; 337 port->port_nbr = portnbr;
310 port->init_irqs = 1; 338 port->init_irqs = no_irq_setup;
311 339
312 port->out_rd_ptr = port->out_buffer; 340 port->out_rd_ptr = port->out_buffer;
313 port->out_buf_count = 0; 341 port->out_buf_count = 0;
@@ -318,10 +346,11 @@ static void __init initialize_port(int portnbr)
318 port->readp = port->flip; 346 port->readp = port->flip;
319 port->writep = port->flip; 347 port->writep = port->flip;
320 port->in_buffer_size = IN_BUFFER_SIZE; 348 port->in_buffer_size = IN_BUFFER_SIZE;
349 port->in_buffer_len = 0;
321 port->inbufchunk = IN_DESCR_SIZE; 350 port->inbufchunk = IN_DESCR_SIZE;
322 port->next_rx_desc = &port->in_descr[0]; 351
323 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1]; 352 port->read_ts_idx = 0;
324 port->prev_rx_desc->eol = 1; 353 port->write_ts_idx = 0;
325 354
326 init_waitqueue_head(&port->out_wait_q); 355 init_waitqueue_head(&port->out_wait_q);
327 init_waitqueue_head(&port->in_wait_q); 356 init_waitqueue_head(&port->in_wait_q);
@@ -368,14 +397,18 @@ static void __init initialize_port(int portnbr)
368 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); 397 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
369 398
370#ifdef SYNC_SER_DMA 399#ifdef SYNC_SER_DMA
371 /* Setup the descriptor ring for dma out/transmit. */ 400 {
372 for (i = 0; i < NBR_OUT_DESCR; i++) { 401 int i;
373 port->out_descr[i].wait = 0; 402 /* Setup the descriptor ring for dma out/transmit. */
374 port->out_descr[i].intr = 1; 403 for (i = 0; i < NBR_OUT_DESCR; i++) {
375 port->out_descr[i].eol = 0; 404 dma_descr_data *descr = &port->out_descr[i];
376 port->out_descr[i].out_eop = 0; 405 descr->wait = 0;
377 port->out_descr[i].next = 406 descr->intr = 1;
378 (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]); 407 descr->eol = 0;
408 descr->out_eop = 0;
409 descr->next =
410 (dma_descr_data *)virt_to_phys(&descr[i+1]);
411 }
379 } 412 }
380 413
381 /* Create a ring from the list. */ 414 /* Create a ring from the list. */
@@ -391,201 +424,116 @@ static void __init initialize_port(int portnbr)
391 424
392static inline int sync_data_avail(struct sync_port *port) 425static inline int sync_data_avail(struct sync_port *port)
393{ 426{
394 int avail; 427 return port->in_buffer_len;
395 unsigned char *start;
396 unsigned char *end;
397
398 start = (unsigned char*)port->readp; /* cast away volatile */
399 end = (unsigned char*)port->writep; /* cast away volatile */
400 /* 0123456789 0123456789
401 * ----- - -----
402 * ^rp ^wp ^wp ^rp
403 */
404
405 if (end >= start)
406 avail = end - start;
407 else
408 avail = port->in_buffer_size - (start - end);
409 return avail;
410}
411
412static inline int sync_data_avail_to_end(struct sync_port *port)
413{
414 int avail;
415 unsigned char *start;
416 unsigned char *end;
417
418 start = (unsigned char*)port->readp; /* cast away volatile */
419 end = (unsigned char*)port->writep; /* cast away volatile */
420 /* 0123456789 0123456789
421 * ----- -----
422 * ^rp ^wp ^wp ^rp
423 */
424
425 if (end >= start)
426 avail = end - start;
427 else
428 avail = port->flip + port->in_buffer_size - start;
429 return avail;
430} 428}
431 429
432static int sync_serial_open(struct inode *inode, struct file *file) 430static int sync_serial_open(struct inode *inode, struct file *file)
433{ 431{
432 int ret = 0;
434 int dev = iminor(inode); 433 int dev = iminor(inode);
435 int ret = -EBUSY; 434 struct sync_port *port;
436 sync_port *port; 435#ifdef SYNC_SER_DMA
437 reg_dma_rw_cfg cfg = {.en = regk_dma_yes}; 436 reg_dma_rw_cfg cfg = { .en = regk_dma_yes };
438 reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes}; 437 reg_dma_rw_intr_mask intr_mask = { .data = regk_dma_yes };
438#endif
439 439
440 mutex_lock(&sync_serial_mutex); 440 DEBUG(pr_debug("Open sync serial port %d\n", dev));
441 DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
442 441
443 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) 442 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
444 { 443 DEBUG(pr_info("Invalid minor %d\n", dev));
445 DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev)); 444 return -ENODEV;
446 ret = -ENODEV;
447 goto out;
448 } 445 }
449 port = &ports[dev]; 446 port = &ports[dev];
450 /* Allow open this device twice (assuming one reader and one writer) */ 447 /* Allow open this device twice (assuming one reader and one writer) */
451 if (port->busy == 2) 448 if (port->busy == 2) {
452 { 449 DEBUG(pr_info("syncser%d is busy\n", dev));
453 DEBUG(printk(KERN_DEBUG "Device is busy.. \n")); 450 return -EBUSY;
454 goto out;
455 } 451 }
456 452
453 mutex_lock(&sync_serial_mutex);
457 454
458 if (port->init_irqs) { 455 /* Clear any stale date left in the flip buffer */
459 if (port->use_dma) { 456 port->readp = port->writep = port->flip;
460 if (port == &ports[0]) { 457 port->in_buffer_len = 0;
461#ifdef SYNC_SER_DMA 458 port->read_ts_idx = 0;
462 if (request_irq(DMA_OUT_INTR_VECT, 459 port->write_ts_idx = 0;
463 tr_interrupt, 460
464 0, 461 if (port->init_irqs != no_irq_setup) {
465 "synchronous serial 0 dma tr", 462 /* Init only on first call. */
466 &ports[0])) { 463 port->busy++;
467 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); 464 mutex_unlock(&sync_serial_mutex);
468 goto out; 465 return 0;
469 } else if (request_irq(DMA_IN_INTR_VECT, 466 }
470 rx_interrupt, 467 if (port->use_dma) {
471 0,
472 "synchronous serial 1 dma rx",
473 &ports[0])) {
474 free_irq(DMA_OUT_INTR_VECT, &port[0]);
475 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
476 goto out;
477 } else if (crisv32_request_dma(OUT_DMA_NBR,
478 "synchronous serial 0 dma tr",
479 DMA_VERBOSE_ON_ERROR,
480 0,
481 REQ_DMA_SYNCSER)) {
482 free_irq(DMA_OUT_INTR_VECT, &port[0]);
483 free_irq(DMA_IN_INTR_VECT, &port[0]);
484 printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
485 goto out;
486 } else if (crisv32_request_dma(IN_DMA_NBR,
487 "synchronous serial 0 dma rec",
488 DMA_VERBOSE_ON_ERROR,
489 0,
490 REQ_DMA_SYNCSER)) {
491 crisv32_free_dma(OUT_DMA_NBR);
492 free_irq(DMA_OUT_INTR_VECT, &port[0]);
493 free_irq(DMA_IN_INTR_VECT, &port[0]);
494 printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
495 goto out;
496 }
497#endif
498 }
499#ifdef CONFIG_ETRAXFS
500 else if (port == &ports[1]) {
501#ifdef SYNC_SER_DMA 468#ifdef SYNC_SER_DMA
502 if (request_irq(DMA6_INTR_VECT, 469 const char *tmp;
503 tr_interrupt, 470 DEBUG(pr_info("Using DMA for syncser%d\n", dev));
504 0, 471
505 "synchronous serial 1 dma tr", 472 tmp = dev == 0 ? "syncser0 tx" : "syncser1 tx";
506 &ports[1])) { 473 if (request_irq(port->dma_out_intr_vect, tr_interrupt, 0,
507 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ"); 474 tmp, port)) {
508 goto out; 475 pr_err("Can't alloc syncser%d TX IRQ", dev);
509 } else if (request_irq(DMA7_INTR_VECT, 476 ret = -EBUSY;
510 rx_interrupt, 477 goto unlock_and_exit;
511 0, 478 }
512 "synchronous serial 1 dma rx", 479 if (artpec_request_dma(port->dma_out_nbr, tmp,
513 &ports[1])) { 480 DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
514 free_irq(DMA6_INTR_VECT, &ports[1]); 481 free_irq(port->dma_out_intr_vect, port);
515 printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ"); 482 pr_err("Can't alloc syncser%d TX DMA", dev);
516 goto out; 483 ret = -EBUSY;
517 } else if (crisv32_request_dma( 484 goto unlock_and_exit;
518 SYNC_SER1_TX_DMA_NBR, 485 }
519 "synchronous serial 1 dma tr", 486 tmp = dev == 0 ? "syncser0 rx" : "syncser1 rx";
520 DMA_VERBOSE_ON_ERROR, 487 if (request_irq(port->dma_in_intr_vect, rx_interrupt, 0,
521 0, 488 tmp, port)) {
522 dma_sser1)) { 489 artpec_free_dma(port->dma_out_nbr);
523 free_irq(DMA6_INTR_VECT, &ports[1]); 490 free_irq(port->dma_out_intr_vect, port);
524 free_irq(DMA7_INTR_VECT, &ports[1]); 491 pr_err("Can't alloc syncser%d RX IRQ", dev);
525 printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel"); 492 ret = -EBUSY;
526 goto out; 493 goto unlock_and_exit;
527 } else if (crisv32_request_dma( 494 }
528 SYNC_SER1_RX_DMA_NBR, 495 if (artpec_request_dma(port->dma_in_nbr, tmp,
529 "synchronous serial 3 dma rec", 496 DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
530 DMA_VERBOSE_ON_ERROR, 497 artpec_free_dma(port->dma_out_nbr);
531 0, 498 free_irq(port->dma_out_intr_vect, port);
532 dma_sser1)) { 499 free_irq(port->dma_in_intr_vect, port);
533 crisv32_free_dma(SYNC_SER1_TX_DMA_NBR); 500 pr_err("Can't alloc syncser%d RX DMA", dev);
534 free_irq(DMA6_INTR_VECT, &ports[1]); 501 ret = -EBUSY;
535 free_irq(DMA7_INTR_VECT, &ports[1]); 502 goto unlock_and_exit;
536 printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel"); 503 }
537 goto out; 504 /* Enable DMAs */
538 } 505 REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
539#endif 506 REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
540 } 507 /* Enable DMA IRQs */
508 REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
509 REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
510 /* Set up wordsize = 1 for DMAs. */
511 DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1);
512 DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1);
513
514 start_dma_in(port);
515 port->init_irqs = dma_irq_setup;
541#endif 516#endif
542 /* Enable DMAs */ 517 } else { /* !port->use_dma */
543 REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
544 REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
545 /* Enable DMA IRQs */
546 REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
547 REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
548 /* Set up wordsize = 1 for DMAs. */
549 DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
550 DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
551
552 start_dma_in(port);
553 port->init_irqs = 0;
554 } else { /* !port->use_dma */
555#ifdef SYNC_SER_MANUAL 518#ifdef SYNC_SER_MANUAL
556 if (port == &ports[0]) { 519 const char *tmp = dev == 0 ? "syncser0 manual irq" :
557 if (request_irq(SYNCSER_INTR_VECT, 520 "syncser1 manual irq";
558 manual_interrupt, 521 if (request_irq(port->syncser_intr_vect, manual_interrupt,
559 0, 522 0, tmp, port)) {
560 "synchronous serial manual irq", 523 pr_err("Can't alloc syncser%d manual irq",
561 &ports[0])) { 524 dev);
562 printk("Can't allocate sync serial manual irq"); 525 ret = -EBUSY;
563 goto out; 526 goto unlock_and_exit;
564 } 527 }
565 } 528 port->init_irqs = manual_irq_setup;
566#ifdef CONFIG_ETRAXFS
567 else if (port == &ports[1]) {
568 if (request_irq(SSER1_INTR_VECT,
569 manual_interrupt,
570 0,
571 "synchronous serial manual irq",
572 &ports[1])) {
573 printk(KERN_CRIT "Can't allocate sync serial manual irq");
574 goto out;
575 }
576 }
577#endif
578 port->init_irqs = 0;
579#else 529#else
580 panic("sync_serial: Manual mode not supported.\n"); 530 panic("sync_serial: Manual mode not supported\n");
581#endif /* SYNC_SER_MANUAL */ 531#endif /* SYNC_SER_MANUAL */
582 } 532 }
583
584 } /* port->init_irqs */
585
586 port->busy++; 533 port->busy++;
587 ret = 0; 534 ret = 0;
588out: 535
536unlock_and_exit:
589 mutex_unlock(&sync_serial_mutex); 537 mutex_unlock(&sync_serial_mutex);
590 return ret; 538 return ret;
591} 539}
@@ -593,18 +541,17 @@ out:
593static int sync_serial_release(struct inode *inode, struct file *file) 541static int sync_serial_release(struct inode *inode, struct file *file)
594{ 542{
595 int dev = iminor(inode); 543 int dev = iminor(inode);
596 sync_port *port; 544 struct sync_port *port;
597 545
598 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) 546 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
599 { 547 DEBUG(pr_info("Invalid minor %d\n", dev));
600 DEBUG(printk("Invalid minor %d\n", dev));
601 return -ENODEV; 548 return -ENODEV;
602 } 549 }
603 port = &ports[dev]; 550 port = &ports[dev];
604 if (port->busy) 551 if (port->busy)
605 port->busy--; 552 port->busy--;
606 if (!port->busy) 553 if (!port->busy)
607 /* XXX */ ; 554 /* XXX */;
608 return 0; 555 return 0;
609} 556}
610 557
@@ -612,21 +559,15 @@ static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
612{ 559{
613 int dev = iminor(file_inode(file)); 560 int dev = iminor(file_inode(file));
614 unsigned int mask = 0; 561 unsigned int mask = 0;
615 sync_port *port; 562 struct sync_port *port;
616 DEBUGPOLL( static unsigned int prev_mask = 0; ); 563 DEBUGPOLL(
564 static unsigned int prev_mask;
565 );
617 566
618 port = &ports[dev]; 567 port = &ports[dev];
619 568
620 if (!port->started) { 569 if (!port->started)
621 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); 570 sync_serial_start_port(port);
622 reg_sser_rw_rec_cfg rec_cfg =
623 REG_RD(sser, port->regi_sser, rw_rec_cfg);
624 cfg.en = regk_sser_yes;
625 rec_cfg.rec_en = port->input;
626 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
627 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
628 port->started = 1;
629 }
630 571
631 poll_wait(file, &port->out_wait_q, wait); 572 poll_wait(file, &port->out_wait_q, wait);
632 poll_wait(file, &port->in_wait_q, wait); 573 poll_wait(file, &port->in_wait_q, wait);
@@ -645,33 +586,175 @@ static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
645 if (port->input && sync_data_avail(port) >= port->inbufchunk) 586 if (port->input && sync_data_avail(port) >= port->inbufchunk)
646 mask |= POLLIN | POLLRDNORM; 587 mask |= POLLIN | POLLRDNORM;
647 588
648 DEBUGPOLL(if (mask != prev_mask) 589 DEBUGPOLL(
649 printk("sync_serial_poll: mask 0x%08X %s %s\n", mask, 590 if (mask != prev_mask)
650 mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":""); 591 pr_info("sync_serial_poll: mask 0x%08X %s %s\n",
651 prev_mask = mask; 592 mask,
652 ); 593 mask & POLLOUT ? "POLLOUT" : "",
594 mask & POLLIN ? "POLLIN" : "");
595 prev_mask = mask;
596 );
653 return mask; 597 return mask;
654} 598}
655 599
656static int sync_serial_ioctl(struct file *file, 600static ssize_t __sync_serial_read(struct file *file,
657 unsigned int cmd, unsigned long arg) 601 char __user *buf,
602 size_t count,
603 loff_t *ppos,
604 struct timespec *ts)
605{
606 unsigned long flags;
607 int dev = MINOR(file->f_dentry->d_inode->i_rdev);
608 int avail;
609 struct sync_port *port;
610 unsigned char *start;
611 unsigned char *end;
612
613 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
614 DEBUG(pr_info("Invalid minor %d\n", dev));
615 return -ENODEV;
616 }
617 port = &ports[dev];
618
619 if (!port->started)
620 sync_serial_start_port(port);
621
622 /* Calculate number of available bytes */
623 /* Save pointers to avoid that they are modified by interrupt */
624 spin_lock_irqsave(&port->lock, flags);
625 start = port->readp;
626 end = port->writep;
627 spin_unlock_irqrestore(&port->lock, flags);
628
629 while ((start == end) && !port->in_buffer_len) {
630 if (file->f_flags & O_NONBLOCK)
631 return -EAGAIN;
632
633 wait_event_interruptible(port->in_wait_q,
634 !(start == end && !port->full));
635
636 if (signal_pending(current))
637 return -EINTR;
638
639 spin_lock_irqsave(&port->lock, flags);
640 start = port->readp;
641 end = port->writep;
642 spin_unlock_irqrestore(&port->lock, flags);
643 }
644
645 DEBUGREAD(pr_info("R%d c %d ri %u wi %u /%u\n",
646 dev, count,
647 start - port->flip, end - port->flip,
648 port->in_buffer_size));
649
650 /* Lazy read, never return wrapped data. */
651 if (end > start)
652 avail = end - start;
653 else
654 avail = port->flip + port->in_buffer_size - start;
655
656 count = count > avail ? avail : count;
657 if (copy_to_user(buf, start, count))
658 return -EFAULT;
659
660 /* If timestamp requested, find timestamp of first returned byte
661 * and copy it.
662 * N.B: Applications that request timstamps MUST read data in
663 * chunks that are multiples of IN_DESCR_SIZE.
664 * Otherwise the timestamps will not be aligned to the data read.
665 */
666 if (ts != NULL) {
667 int idx = port->read_ts_idx;
668 memcpy(ts, &port->timestamp[idx], sizeof(struct timespec));
669 port->read_ts_idx += count / IN_DESCR_SIZE;
670 if (port->read_ts_idx >= NBR_IN_DESCR)
671 port->read_ts_idx = 0;
672 }
673
674 spin_lock_irqsave(&port->lock, flags);
675 port->readp += count;
676 /* Check for wrap */
677 if (port->readp >= port->flip + port->in_buffer_size)
678 port->readp = port->flip;
679 port->in_buffer_len -= count;
680 port->full = 0;
681 spin_unlock_irqrestore(&port->lock, flags);
682
683 DEBUGREAD(pr_info("r %d\n", count));
684
685 return count;
686}
687
688static ssize_t sync_serial_input(struct file *file, unsigned long arg)
689{
690 struct ssp_request req;
691 int count;
692 int ret;
693
694 /* Copy the request structure from user-mode. */
695 ret = copy_from_user(&req, (struct ssp_request __user *)arg,
696 sizeof(struct ssp_request));
697
698 if (ret) {
699 DEBUG(pr_info("sync_serial_input copy from user failed\n"));
700 return -EFAULT;
701 }
702
703 /* To get the timestamps aligned, make sure that 'len'
704 * is a multiple of IN_DESCR_SIZE.
705 */
706 if ((req.len % IN_DESCR_SIZE) != 0) {
707 DEBUG(pr_info("sync_serial: req.len %x, IN_DESCR_SIZE %x\n",
708 req.len, IN_DESCR_SIZE));
709 return -EFAULT;
710 }
711
712 /* Do the actual read. */
713 /* Note that req.buf is actually a pointer to user space. */
714 count = __sync_serial_read(file, req.buf, req.len,
715 NULL, &req.ts);
716
717 if (count < 0) {
718 DEBUG(pr_info("sync_serial_input read failed\n"));
719 return count;
720 }
721
722 /* Copy the request back to user-mode. */
723 ret = copy_to_user((struct ssp_request __user *)arg, &req,
724 sizeof(struct ssp_request));
725
726 if (ret) {
727 DEBUG(pr_info("syncser input copy2user failed\n"));
728 return -EFAULT;
729 }
730
731 /* Return the number of bytes read. */
732 return count;
733}
734
735
736static int sync_serial_ioctl_unlocked(struct file *file,
737 unsigned int cmd, unsigned long arg)
658{ 738{
659 int return_val = 0; 739 int return_val = 0;
660 int dma_w_size = regk_dma_set_w_size1; 740 int dma_w_size = regk_dma_set_w_size1;
661 int dev = iminor(file_inode(file)); 741 int dev = iminor(file_inode(file));
662 sync_port *port; 742 struct sync_port *port;
663 reg_sser_rw_tr_cfg tr_cfg; 743 reg_sser_rw_tr_cfg tr_cfg;
664 reg_sser_rw_rec_cfg rec_cfg; 744 reg_sser_rw_rec_cfg rec_cfg;
665 reg_sser_rw_frm_cfg frm_cfg; 745 reg_sser_rw_frm_cfg frm_cfg;
666 reg_sser_rw_cfg gen_cfg; 746 reg_sser_rw_cfg gen_cfg;
667 reg_sser_rw_intr_mask intr_mask; 747 reg_sser_rw_intr_mask intr_mask;
668 748
669 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) 749 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
670 { 750 DEBUG(pr_info("Invalid minor %d\n", dev));
671 DEBUG(printk("Invalid minor %d\n", dev));
672 return -1; 751 return -1;
673 } 752 }
674 port = &ports[dev]; 753
754 if (cmd == SSP_INPUT)
755 return sync_serial_input(file, arg);
756
757 port = &ports[dev];
675 spin_lock_irq(&port->lock); 758 spin_lock_irq(&port->lock);
676 759
677 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); 760 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
@@ -680,11 +763,9 @@ static int sync_serial_ioctl(struct file *file,
680 gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg); 763 gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
681 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); 764 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
682 765
683 switch(cmd) 766 switch (cmd) {
684 {
685 case SSP_SPEED: 767 case SSP_SPEED:
686 if (GET_SPEED(arg) == CODEC) 768 if (GET_SPEED(arg) == CODEC) {
687 {
688 unsigned int freq; 769 unsigned int freq;
689 770
690 gen_cfg.base_freq = regk_sser_f32; 771 gen_cfg.base_freq = regk_sser_f32;
@@ -701,15 +782,25 @@ static int sync_serial_ioctl(struct file *file,
701 case FREQ_256kHz: 782 case FREQ_256kHz:
702 gen_cfg.clk_div = 125 * 783 gen_cfg.clk_div = 125 *
703 (1 << (freq - FREQ_256kHz)) - 1; 784 (1 << (freq - FREQ_256kHz)) - 1;
704 break; 785 break;
705 case FREQ_512kHz: 786 case FREQ_512kHz:
706 gen_cfg.clk_div = 62; 787 gen_cfg.clk_div = 62;
707 break; 788 break;
708 case FREQ_1MHz: 789 case FREQ_1MHz:
709 case FREQ_2MHz: 790 case FREQ_2MHz:
710 case FREQ_4MHz: 791 case FREQ_4MHz:
711 gen_cfg.clk_div = 8 * (1 << freq) - 1; 792 gen_cfg.clk_div = 8 * (1 << freq) - 1;
712 break; 793 break;
794 }
795 } else if (GET_SPEED(arg) == CODEC_f32768) {
796 gen_cfg.base_freq = regk_sser_f32_768;
797 switch (GET_FREQ(arg)) {
798 case FREQ_4096kHz:
799 gen_cfg.clk_div = 7;
800 break;
801 default:
802 spin_unlock_irq(&port->lock);
803 return -EINVAL;
713 } 804 }
714 } else { 805 } else {
715 gen_cfg.base_freq = regk_sser_f29_493; 806 gen_cfg.base_freq = regk_sser_f29_493;
@@ -767,62 +858,64 @@ static int sync_serial_ioctl(struct file *file,
767 858
768 break; 859 break;
769 case SSP_MODE: 860 case SSP_MODE:
770 switch(arg) 861 switch (arg) {
771 { 862 case MASTER_OUTPUT:
772 case MASTER_OUTPUT: 863 port->output = 1;
773 port->output = 1; 864 port->input = 0;
774 port->input = 0; 865 frm_cfg.out_on = regk_sser_tr;
775 frm_cfg.out_on = regk_sser_tr; 866 frm_cfg.frame_pin_dir = regk_sser_out;
776 frm_cfg.frame_pin_dir = regk_sser_out; 867 gen_cfg.clk_dir = regk_sser_out;
777 gen_cfg.clk_dir = regk_sser_out; 868 break;
778 break; 869 case SLAVE_OUTPUT:
779 case SLAVE_OUTPUT: 870 port->output = 1;
780 port->output = 1; 871 port->input = 0;
781 port->input = 0; 872 frm_cfg.frame_pin_dir = regk_sser_in;
782 frm_cfg.frame_pin_dir = regk_sser_in; 873 gen_cfg.clk_dir = regk_sser_in;
783 gen_cfg.clk_dir = regk_sser_in; 874 break;
784 break; 875 case MASTER_INPUT:
785 case MASTER_INPUT: 876 port->output = 0;
786 port->output = 0; 877 port->input = 1;
787 port->input = 1; 878 frm_cfg.frame_pin_dir = regk_sser_out;
788 frm_cfg.frame_pin_dir = regk_sser_out; 879 frm_cfg.out_on = regk_sser_intern_tb;
789 frm_cfg.out_on = regk_sser_intern_tb; 880 gen_cfg.clk_dir = regk_sser_out;
790 gen_cfg.clk_dir = regk_sser_out; 881 break;
791 break; 882 case SLAVE_INPUT:
792 case SLAVE_INPUT: 883 port->output = 0;
793 port->output = 0; 884 port->input = 1;
794 port->input = 1; 885 frm_cfg.frame_pin_dir = regk_sser_in;
795 frm_cfg.frame_pin_dir = regk_sser_in; 886 gen_cfg.clk_dir = regk_sser_in;
796 gen_cfg.clk_dir = regk_sser_in; 887 break;
797 break; 888 case MASTER_BIDIR:
798 case MASTER_BIDIR: 889 port->output = 1;
799 port->output = 1; 890 port->input = 1;
800 port->input = 1; 891 frm_cfg.frame_pin_dir = regk_sser_out;
801 frm_cfg.frame_pin_dir = regk_sser_out; 892 frm_cfg.out_on = regk_sser_intern_tb;
802 frm_cfg.out_on = regk_sser_intern_tb; 893 gen_cfg.clk_dir = regk_sser_out;
803 gen_cfg.clk_dir = regk_sser_out; 894 break;
804 break; 895 case SLAVE_BIDIR:
805 case SLAVE_BIDIR: 896 port->output = 1;
806 port->output = 1; 897 port->input = 1;
807 port->input = 1; 898 frm_cfg.frame_pin_dir = regk_sser_in;
808 frm_cfg.frame_pin_dir = regk_sser_in; 899 gen_cfg.clk_dir = regk_sser_in;
809 gen_cfg.clk_dir = regk_sser_in; 900 break;
810 break; 901 default:
811 default: 902 spin_unlock_irq(&port->lock);
812 spin_unlock_irq(&port->lock); 903 return -EINVAL;
813 return -EINVAL;
814 } 904 }
815 if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT)) 905 if (!port->use_dma || arg == MASTER_OUTPUT ||
906 arg == SLAVE_OUTPUT)
816 intr_mask.rdav = regk_sser_yes; 907 intr_mask.rdav = regk_sser_yes;
817 break; 908 break;
818 case SSP_FRAME_SYNC: 909 case SSP_FRAME_SYNC:
819 if (arg & NORMAL_SYNC) { 910 if (arg & NORMAL_SYNC) {
820 frm_cfg.rec_delay = 1; 911 frm_cfg.rec_delay = 1;
821 frm_cfg.tr_delay = 1; 912 frm_cfg.tr_delay = 1;
822 } 913 } else if (arg & EARLY_SYNC)
823 else if (arg & EARLY_SYNC)
824 frm_cfg.rec_delay = frm_cfg.tr_delay = 0; 914 frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
825 else if (arg & SECOND_WORD_SYNC) { 915 else if (arg & LATE_SYNC) {
916 frm_cfg.tr_delay = 2;
917 frm_cfg.rec_delay = 2;
918 } else if (arg & SECOND_WORD_SYNC) {
826 frm_cfg.rec_delay = 7; 919 frm_cfg.rec_delay = 7;
827 frm_cfg.tr_delay = 1; 920 frm_cfg.tr_delay = 1;
828 } 921 }
@@ -914,15 +1007,12 @@ static int sync_serial_ioctl(struct file *file,
914 frm_cfg.type = regk_sser_level; 1007 frm_cfg.type = regk_sser_level;
915 frm_cfg.tr_delay = 1; 1008 frm_cfg.tr_delay = 1;
916 frm_cfg.level = regk_sser_neg_lo; 1009 frm_cfg.level = regk_sser_neg_lo;
917 if (arg & SPI_SLAVE) 1010 if (arg & SPI_SLAVE) {
918 {
919 rec_cfg.clk_pol = regk_sser_neg; 1011 rec_cfg.clk_pol = regk_sser_neg;
920 gen_cfg.clk_dir = regk_sser_in; 1012 gen_cfg.clk_dir = regk_sser_in;
921 port->input = 1; 1013 port->input = 1;
922 port->output = 0; 1014 port->output = 0;
923 } 1015 } else {
924 else
925 {
926 gen_cfg.out_clk_pol = regk_sser_pos; 1016 gen_cfg.out_clk_pol = regk_sser_pos;
927 port->input = 0; 1017 port->input = 0;
928 port->output = 1; 1018 port->output = 1;
@@ -965,19 +1055,19 @@ static int sync_serial_ioctl(struct file *file,
965} 1055}
966 1056
967static long sync_serial_ioctl(struct file *file, 1057static long sync_serial_ioctl(struct file *file,
968 unsigned int cmd, unsigned long arg) 1058 unsigned int cmd, unsigned long arg)
969{ 1059{
970 long ret; 1060 long ret;
971 1061
972 mutex_lock(&sync_serial_mutex); 1062 mutex_lock(&sync_serial_mutex);
973 ret = sync_serial_ioctl_unlocked(file, cmd, arg); 1063 ret = sync_serial_ioctl_unlocked(file, cmd, arg);
974 mutex_unlock(&sync_serial_mutex); 1064 mutex_unlock(&sync_serial_mutex);
975 1065
976 return ret; 1066 return ret;
977} 1067}
978 1068
979/* NOTE: sync_serial_write does not support concurrency */ 1069/* NOTE: sync_serial_write does not support concurrency */
980static ssize_t sync_serial_write(struct file *file, const char *buf, 1070static ssize_t sync_serial_write(struct file *file, const char __user *buf,
981 size_t count, loff_t *ppos) 1071 size_t count, loff_t *ppos)
982{ 1072{
983 int dev = iminor(file_inode(file)); 1073 int dev = iminor(file_inode(file));
@@ -993,7 +1083,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
993 unsigned char *buf_stop_ptr; /* Last byte + 1 */ 1083 unsigned char *buf_stop_ptr; /* Last byte + 1 */
994 1084
995 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { 1085 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
996 DEBUG(printk("Invalid minor %d\n", dev)); 1086 DEBUG(pr_info("Invalid minor %d\n", dev));
997 return -ENODEV; 1087 return -ENODEV;
998 } 1088 }
999 port = &ports[dev]; 1089 port = &ports[dev];
@@ -1006,9 +1096,9 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
1006 * |_________|___________________|________________________| 1096 * |_________|___________________|________________________|
1007 * ^ rd_ptr ^ wr_ptr 1097 * ^ rd_ptr ^ wr_ptr
1008 */ 1098 */
1009 DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n", 1099 DEBUGWRITE(pr_info("W d%d c %u a: %p c: %p\n",
1010 port->port_nbr, count, port->active_tr_descr, 1100 port->port_nbr, count, port->active_tr_descr,
1011 port->catch_tr_descr)); 1101 port->catch_tr_descr));
1012 1102
1013 /* Read variables that may be updated by interrupts */ 1103 /* Read variables that may be updated by interrupts */
1014 spin_lock_irqsave(&port->lock, flags); 1104 spin_lock_irqsave(&port->lock, flags);
@@ -1020,7 +1110,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
1020 if (port->tr_running && 1110 if (port->tr_running &&
1021 ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) || 1111 ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
1022 out_buf_count >= OUT_BUFFER_SIZE)) { 1112 out_buf_count >= OUT_BUFFER_SIZE)) {
1023 DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev)); 1113 DEBUGWRITE(pr_info("sser%d full\n", dev));
1024 return -EAGAIN; 1114 return -EAGAIN;
1025 } 1115 }
1026 1116
@@ -1043,15 +1133,16 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
1043 if (copy_from_user(wr_ptr, buf, trunc_count)) 1133 if (copy_from_user(wr_ptr, buf, trunc_count))
1044 return -EFAULT; 1134 return -EFAULT;
1045 1135
1046 DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n", 1136 DEBUGOUTBUF(pr_info("%-4d + %-4d = %-4d %p %p %p\n",
1047 out_buf_count, trunc_count, 1137 out_buf_count, trunc_count,
1048 port->out_buf_count, port->out_buffer, 1138 port->out_buf_count, port->out_buffer,
1049 wr_ptr, buf_stop_ptr)); 1139 wr_ptr, buf_stop_ptr));
1050 1140
1051 /* Make sure transmitter/receiver is running */ 1141 /* Make sure transmitter/receiver is running */
1052 if (!port->started) { 1142 if (!port->started) {
1053 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); 1143 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1054 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); 1144 reg_sser_rw_rec_cfg rec_cfg =
1145 REG_RD(sser, port->regi_sser, rw_rec_cfg);
1055 cfg.en = regk_sser_yes; 1146 cfg.en = regk_sser_yes;
1056 rec_cfg.rec_en = port->input; 1147 rec_cfg.rec_en = port->input;
1057 REG_WR(sser, port->regi_sser, rw_cfg, cfg); 1148 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
@@ -1068,8 +1159,11 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
1068 spin_lock_irqsave(&port->lock, flags); 1159 spin_lock_irqsave(&port->lock, flags);
1069 port->out_buf_count += trunc_count; 1160 port->out_buf_count += trunc_count;
1070 if (port->use_dma) { 1161 if (port->use_dma) {
1162#ifdef SYNC_SER_DMA
1071 start_dma_out(port, wr_ptr, trunc_count); 1163 start_dma_out(port, wr_ptr, trunc_count);
1164#endif
1072 } else if (!port->tr_running) { 1165 } else if (!port->tr_running) {
1166#ifdef SYNC_SER_MANUAL
1073 reg_sser_rw_intr_mask intr_mask; 1167 reg_sser_rw_intr_mask intr_mask;
1074 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); 1168 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1075 /* Start sender by writing data */ 1169 /* Start sender by writing data */
@@ -1077,14 +1171,15 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
1077 /* and enable transmitter ready IRQ */ 1171 /* and enable transmitter ready IRQ */
1078 intr_mask.trdy = 1; 1172 intr_mask.trdy = 1;
1079 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); 1173 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1174#endif
1080 } 1175 }
1081 spin_unlock_irqrestore(&port->lock, flags); 1176 spin_unlock_irqrestore(&port->lock, flags);
1082 1177
1083 /* Exit if non blocking */ 1178 /* Exit if non blocking */
1084 if (file->f_flags & O_NONBLOCK) { 1179 if (file->f_flags & O_NONBLOCK) {
1085 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n", 1180 DEBUGWRITE(pr_info("w d%d c %u %08x\n",
1086 port->port_nbr, trunc_count, 1181 port->port_nbr, trunc_count,
1087 REG_RD_INT(dma, port->regi_dmaout, r_intr))); 1182 REG_RD_INT(dma, port->regi_dmaout, r_intr)));
1088 return trunc_count; 1183 return trunc_count;
1089 } 1184 }
1090 1185
@@ -1094,105 +1189,32 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
1094 if (signal_pending(current)) 1189 if (signal_pending(current))
1095 return -EINTR; 1190 return -EINTR;
1096 1191
1097 DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n", 1192 DEBUGWRITE(pr_info("w d%d c %u\n", port->port_nbr, trunc_count));
1098 port->port_nbr, trunc_count));
1099 return trunc_count; 1193 return trunc_count;
1100} 1194}
1101 1195
1102static ssize_t sync_serial_read(struct file * file, char * buf, 1196static ssize_t sync_serial_read(struct file *file, char __user *buf,
1103 size_t count, loff_t *ppos) 1197 size_t count, loff_t *ppos)
1104{ 1198{
1105 int dev = iminor(file_inode(file)); 1199 return __sync_serial_read(file, buf, count, ppos, NULL);
1106 int avail;
1107 sync_port *port;
1108 unsigned char* start;
1109 unsigned char* end;
1110 unsigned long flags;
1111
1112 if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
1113 {
1114 DEBUG(printk("Invalid minor %d\n", dev));
1115 return -ENODEV;
1116 }
1117 port = &ports[dev];
1118
1119 DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
1120
1121 if (!port->started)
1122 {
1123 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
1124 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1125 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1126 cfg.en = regk_sser_yes;
1127 tr_cfg.tr_en = regk_sser_yes;
1128 rec_cfg.rec_en = regk_sser_yes;
1129 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
1130 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1131 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
1132 port->started = 1;
1133 }
1134
1135 /* Calculate number of available bytes */
1136 /* Save pointers to avoid that they are modified by interrupt */
1137 spin_lock_irqsave(&port->lock, flags);
1138 start = (unsigned char*)port->readp; /* cast away volatile */
1139 end = (unsigned char*)port->writep; /* cast away volatile */
1140 spin_unlock_irqrestore(&port->lock, flags);
1141 while ((start == end) && !port->full) /* No data */
1142 {
1143 DEBUGREAD(printk(KERN_DEBUG "&"));
1144 if (file->f_flags & O_NONBLOCK)
1145 return -EAGAIN;
1146
1147 wait_event_interruptible(port->in_wait_q,
1148 !(start == end && !port->full));
1149 if (signal_pending(current))
1150 return -EINTR;
1151
1152 spin_lock_irqsave(&port->lock, flags);
1153 start = (unsigned char*)port->readp; /* cast away volatile */
1154 end = (unsigned char*)port->writep; /* cast away volatile */
1155 spin_unlock_irqrestore(&port->lock, flags);
1156 }
1157
1158 /* Lazy read, never return wrapped data. */
1159 if (port->full)
1160 avail = port->in_buffer_size;
1161 else if (end > start)
1162 avail = end - start;
1163 else
1164 avail = port->flip + port->in_buffer_size - start;
1165
1166 count = count > avail ? avail : count;
1167 if (copy_to_user(buf, start, count))
1168 return -EFAULT;
1169 /* Disable interrupts while updating readp */
1170 spin_lock_irqsave(&port->lock, flags);
1171 port->readp += count;
1172 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1173 port->readp = port->flip;
1174 port->full = 0;
1175 spin_unlock_irqrestore(&port->lock, flags);
1176 DEBUGREAD(printk("r %d\n", count));
1177 return count;
1178} 1200}
1179 1201
1180static void send_word(sync_port* port) 1202#ifdef SYNC_SER_MANUAL
1203static void send_word(struct sync_port *port)
1181{ 1204{
1182 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); 1205 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1183 reg_sser_rw_tr_data tr_data = {0}; 1206 reg_sser_rw_tr_data tr_data = {0};
1184 1207
1185 switch(tr_cfg.sample_size) 1208 switch (tr_cfg.sample_size) {
1209 case 8:
1210 port->out_buf_count--;
1211 tr_data.data = *port->out_rd_ptr++;
1212 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1213 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1214 port->out_rd_ptr = port->out_buffer;
1215 break;
1216 case 12:
1186 { 1217 {
1187 case 8:
1188 port->out_buf_count--;
1189 tr_data.data = *port->out_rd_ptr++;
1190 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1191 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1192 port->out_rd_ptr = port->out_buffer;
1193 break;
1194 case 12:
1195 {
1196 int data = (*port->out_rd_ptr++) << 8; 1218 int data = (*port->out_rd_ptr++) << 8;
1197 data |= *port->out_rd_ptr++; 1219 data |= *port->out_rd_ptr++;
1198 port->out_buf_count -= 2; 1220 port->out_buf_count -= 2;
@@ -1200,8 +1222,8 @@ static void send_word(sync_port* port)
1200 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); 1222 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1201 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) 1223 if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
1202 port->out_rd_ptr = port->out_buffer; 1224 port->out_rd_ptr = port->out_buffer;
1225 break;
1203 } 1226 }
1204 break;
1205 case 16: 1227 case 16:
1206 port->out_buf_count -= 2; 1228 port->out_buf_count -= 2;
1207 tr_data.data = *(unsigned short *)port->out_rd_ptr; 1229 tr_data.data = *(unsigned short *)port->out_rd_ptr;
@@ -1233,27 +1255,28 @@ static void send_word(sync_port* port)
1233 break; 1255 break;
1234 } 1256 }
1235} 1257}
1258#endif
1236 1259
1237static void start_dma_out(struct sync_port *port, 1260#ifdef SYNC_SER_DMA
1238 const char *data, int count) 1261static void start_dma_out(struct sync_port *port, const char *data, int count)
1239{ 1262{
1240 port->active_tr_descr->buf = (char *) virt_to_phys((char *) data); 1263 port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
1241 port->active_tr_descr->after = port->active_tr_descr->buf + count; 1264 port->active_tr_descr->after = port->active_tr_descr->buf + count;
1242 port->active_tr_descr->intr = 1; 1265 port->active_tr_descr->intr = 1;
1243 1266
1244 port->active_tr_descr->eol = 1; 1267 port->active_tr_descr->eol = 1;
1245 port->prev_tr_descr->eol = 0; 1268 port->prev_tr_descr->eol = 0;
1246 1269
1247 DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n", 1270 DEBUGTRDMA(pr_info("Inserting eolr:%p eol@:%p\n",
1248 port->prev_tr_descr, port->active_tr_descr)); 1271 port->prev_tr_descr, port->active_tr_descr));
1249 port->prev_tr_descr = port->active_tr_descr; 1272 port->prev_tr_descr = port->active_tr_descr;
1250 port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next); 1273 port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);
1251 1274
1252 if (!port->tr_running) { 1275 if (!port->tr_running) {
1253 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, 1276 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
1254 rw_tr_cfg); 1277 rw_tr_cfg);
1255 1278
1256 port->out_context.next = 0; 1279 port->out_context.next = NULL;
1257 port->out_context.saved_data = 1280 port->out_context.saved_data =
1258 (dma_descr_data *)virt_to_phys(port->prev_tr_descr); 1281 (dma_descr_data *)virt_to_phys(port->prev_tr_descr);
1259 port->out_context.saved_data_buf = port->prev_tr_descr->buf; 1282 port->out_context.saved_data_buf = port->prev_tr_descr->buf;
@@ -1263,57 +1286,58 @@ static void start_dma_out(struct sync_port *port,
1263 1286
1264 tr_cfg.tr_en = regk_sser_yes; 1287 tr_cfg.tr_en = regk_sser_yes;
1265 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); 1288 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
1266 DEBUGTRDMA(printk(KERN_DEBUG "dma s\n");); 1289 DEBUGTRDMA(pr_info(KERN_INFO "dma s\n"););
1267 } else { 1290 } else {
1268 DMA_CONTINUE_DATA(port->regi_dmaout); 1291 DMA_CONTINUE_DATA(port->regi_dmaout);
1269 DEBUGTRDMA(printk(KERN_DEBUG "dma c\n");); 1292 DEBUGTRDMA(pr_info("dma c\n"););
1270 } 1293 }
1271 1294
1272 port->tr_running = 1; 1295 port->tr_running = 1;
1273} 1296}
1274 1297
1275static void start_dma_in(sync_port *port) 1298static void start_dma_in(struct sync_port *port)
1276{ 1299{
1277 int i; 1300 int i;
1278 char *buf; 1301 char *buf;
1302 unsigned long flags;
1303 spin_lock_irqsave(&port->lock, flags);
1279 port->writep = port->flip; 1304 port->writep = port->flip;
1305 spin_unlock_irqrestore(&port->lock, flags);
1280 1306
1281 if (port->writep > port->flip + port->in_buffer_size) { 1307 buf = (char *)virt_to_phys(port->in_buffer);
1282 panic("Offset too large in sync serial driver\n");
1283 return;
1284 }
1285 buf = (char*)virt_to_phys(port->in_buffer);
1286 for (i = 0; i < NBR_IN_DESCR; i++) { 1308 for (i = 0; i < NBR_IN_DESCR; i++) {
1287 port->in_descr[i].buf = buf; 1309 port->in_descr[i].buf = buf;
1288 port->in_descr[i].after = buf + port->inbufchunk; 1310 port->in_descr[i].after = buf + port->inbufchunk;
1289 port->in_descr[i].intr = 1; 1311 port->in_descr[i].intr = 1;
1290 port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]); 1312 port->in_descr[i].next =
1313 (dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
1291 port->in_descr[i].buf = buf; 1314 port->in_descr[i].buf = buf;
1292 buf += port->inbufchunk; 1315 buf += port->inbufchunk;
1293 } 1316 }
1294 /* Link the last descriptor to the first */ 1317 /* Link the last descriptor to the first */
1295 port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); 1318 port->in_descr[i-1].next =
1319 (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
1296 port->in_descr[i-1].eol = regk_sser_yes; 1320 port->in_descr[i-1].eol = regk_sser_yes;
1297 port->next_rx_desc = &port->in_descr[0]; 1321 port->next_rx_desc = &port->in_descr[0];
1298 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1]; 1322 port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
1299 port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); 1323 port->in_context.saved_data =
1324 (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
1300 port->in_context.saved_data_buf = port->in_descr[0].buf; 1325 port->in_context.saved_data_buf = port->in_descr[0].buf;
1301 DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context)); 1326 DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
1302} 1327}
1303 1328
1304#ifdef SYNC_SER_DMA
1305static irqreturn_t tr_interrupt(int irq, void *dev_id) 1329static irqreturn_t tr_interrupt(int irq, void *dev_id)
1306{ 1330{
1307 reg_dma_r_masked_intr masked; 1331 reg_dma_r_masked_intr masked;
1308 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes}; 1332 reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
1309 reg_dma_rw_stat stat; 1333 reg_dma_rw_stat stat;
1310 int i; 1334 int i;
1311 int found = 0; 1335 int found = 0;
1312 int stop_sser = 0; 1336 int stop_sser = 0;
1313 1337
1314 for (i = 0; i < NBR_PORTS; i++) { 1338 for (i = 0; i < NBR_PORTS; i++) {
1315 sync_port *port = &ports[i]; 1339 struct sync_port *port = &ports[i];
1316 if (!port->enabled || !port->use_dma) 1340 if (!port->enabled || !port->use_dma)
1317 continue; 1341 continue;
1318 1342
1319 /* IRQ active for the port? */ 1343 /* IRQ active for the port? */
@@ -1338,19 +1362,20 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
1338 int sent; 1362 int sent;
1339 sent = port->catch_tr_descr->after - 1363 sent = port->catch_tr_descr->after -
1340 port->catch_tr_descr->buf; 1364 port->catch_tr_descr->buf;
1341 DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t" 1365 DEBUGTXINT(pr_info("%-4d - %-4d = %-4d\t"
1342 "in descr %p (ac: %p)\n", 1366 "in descr %p (ac: %p)\n",
1343 port->out_buf_count, sent, 1367 port->out_buf_count, sent,
1344 port->out_buf_count - sent, 1368 port->out_buf_count - sent,
1345 port->catch_tr_descr, 1369 port->catch_tr_descr,
1346 port->active_tr_descr);); 1370 port->active_tr_descr););
1347 port->out_buf_count -= sent; 1371 port->out_buf_count -= sent;
1348 port->catch_tr_descr = 1372 port->catch_tr_descr =
1349 phys_to_virt((int) port->catch_tr_descr->next); 1373 phys_to_virt((int) port->catch_tr_descr->next);
1350 port->out_rd_ptr = 1374 port->out_rd_ptr =
1351 phys_to_virt((int) port->catch_tr_descr->buf); 1375 phys_to_virt((int) port->catch_tr_descr->buf);
1352 } else { 1376 } else {
1353 int i, sent; 1377 reg_sser_rw_tr_cfg tr_cfg;
1378 int j, sent;
1354 /* EOL handler. 1379 /* EOL handler.
1355 * Note that if an EOL was encountered during the irq 1380 * Note that if an EOL was encountered during the irq
1356 * locked section of sync_ser_write the DMA will be 1381 * locked section of sync_ser_write the DMA will be
@@ -1358,11 +1383,11 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
1358 * The remaining descriptors will be traversed by 1383 * The remaining descriptors will be traversed by
1359 * the descriptor interrupts as usual. 1384 * the descriptor interrupts as usual.
1360 */ 1385 */
1361 i = 0; 1386 j = 0;
1362 while (!port->catch_tr_descr->eol) { 1387 while (!port->catch_tr_descr->eol) {
1363 sent = port->catch_tr_descr->after - 1388 sent = port->catch_tr_descr->after -
1364 port->catch_tr_descr->buf; 1389 port->catch_tr_descr->buf;
1365 DEBUGOUTBUF(printk(KERN_DEBUG 1390 DEBUGOUTBUF(pr_info(
1366 "traversing descr %p -%d (%d)\n", 1391 "traversing descr %p -%d (%d)\n",
1367 port->catch_tr_descr, 1392 port->catch_tr_descr,
1368 sent, 1393 sent,
@@ -1370,16 +1395,15 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
1370 port->out_buf_count -= sent; 1395 port->out_buf_count -= sent;
1371 port->catch_tr_descr = phys_to_virt( 1396 port->catch_tr_descr = phys_to_virt(
1372 (int)port->catch_tr_descr->next); 1397 (int)port->catch_tr_descr->next);
1373 i++; 1398 j++;
1374 if (i >= NBR_OUT_DESCR) { 1399 if (j >= NBR_OUT_DESCR) {
1375 /* TODO: Reset and recover */ 1400 /* TODO: Reset and recover */
1376 panic("sync_serial: missing eol"); 1401 panic("sync_serial: missing eol");
1377 } 1402 }
1378 } 1403 }
1379 sent = port->catch_tr_descr->after - 1404 sent = port->catch_tr_descr->after -
1380 port->catch_tr_descr->buf; 1405 port->catch_tr_descr->buf;
1381 DEBUGOUTBUF(printk(KERN_DEBUG 1406 DEBUGOUTBUF(pr_info("eol at descr %p -%d (%d)\n",
1382 "eol at descr %p -%d (%d)\n",
1383 port->catch_tr_descr, 1407 port->catch_tr_descr,
1384 sent, 1408 sent,
1385 port->out_buf_count)); 1409 port->out_buf_count));
@@ -1394,15 +1418,13 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
1394 OUT_BUFFER_SIZE) 1418 OUT_BUFFER_SIZE)
1395 port->out_rd_ptr = port->out_buffer; 1419 port->out_rd_ptr = port->out_buffer;
1396 1420
1397 reg_sser_rw_tr_cfg tr_cfg = 1421 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1398 REG_RD(sser, port->regi_sser, rw_tr_cfg); 1422 DEBUGTXINT(pr_info(
1399 DEBUGTXINT(printk(KERN_DEBUG
1400 "tr_int DMA stop %d, set catch @ %p\n", 1423 "tr_int DMA stop %d, set catch @ %p\n",
1401 port->out_buf_count, 1424 port->out_buf_count,
1402 port->active_tr_descr)); 1425 port->active_tr_descr));
1403 if (port->out_buf_count != 0) 1426 if (port->out_buf_count != 0)
1404 printk(KERN_CRIT "sync_ser: buffer not " 1427 pr_err("sync_ser: buf not empty after eol\n");
1405 "empty after eol.\n");
1406 port->catch_tr_descr = port->active_tr_descr; 1428 port->catch_tr_descr = port->active_tr_descr;
1407 port->tr_running = 0; 1429 port->tr_running = 0;
1408 tr_cfg.tr_en = regk_sser_no; 1430 tr_cfg.tr_en = regk_sser_no;
@@ -1414,62 +1436,79 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
1414 return IRQ_RETVAL(found); 1436 return IRQ_RETVAL(found);
1415} /* tr_interrupt */ 1437} /* tr_interrupt */
1416 1438
1439
1440static inline void handle_rx_packet(struct sync_port *port)
1441{
1442 int idx;
1443 reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
1444 unsigned long flags;
1445
1446 DEBUGRXINT(pr_info(KERN_INFO "!"));
1447 spin_lock_irqsave(&port->lock, flags);
1448
1449 /* If we overrun the user experience is crap regardless if we
1450 * drop new or old data. Its much easier to get it right when
1451 * dropping new data so lets do that.
1452 */
1453 if ((port->writep + port->inbufchunk <=
1454 port->flip + port->in_buffer_size) &&
1455 (port->in_buffer_len + port->inbufchunk < IN_BUFFER_SIZE)) {
1456 memcpy(port->writep,
1457 phys_to_virt((unsigned)port->next_rx_desc->buf),
1458 port->inbufchunk);
1459 port->writep += port->inbufchunk;
1460 if (port->writep >= port->flip + port->in_buffer_size)
1461 port->writep = port->flip;
1462
1463 /* Timestamp the new data chunk. */
1464 if (port->write_ts_idx == NBR_IN_DESCR)
1465 port->write_ts_idx = 0;
1466 idx = port->write_ts_idx++;
1467 do_posix_clock_monotonic_gettime(&port->timestamp[idx]);
1468 port->in_buffer_len += port->inbufchunk;
1469 }
1470 spin_unlock_irqrestore(&port->lock, flags);
1471
1472 port->next_rx_desc->eol = 1;
1473 port->prev_rx_desc->eol = 0;
1474 /* Cache bug workaround */
1475 flush_dma_descr(port->prev_rx_desc, 0);
1476 port->prev_rx_desc = port->next_rx_desc;
1477 port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
1478 /* Cache bug workaround */
1479 flush_dma_descr(port->prev_rx_desc, 1);
1480 /* wake up the waiting process */
1481 wake_up_interruptible(&port->in_wait_q);
1482 DMA_CONTINUE(port->regi_dmain);
1483 REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
1484
1485}
1486
1417static irqreturn_t rx_interrupt(int irq, void *dev_id) 1487static irqreturn_t rx_interrupt(int irq, void *dev_id)
1418{ 1488{
1419 reg_dma_r_masked_intr masked; 1489 reg_dma_r_masked_intr masked;
1420 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1421 1490
1422 int i; 1491 int i;
1423 int found = 0; 1492 int found = 0;
1424 1493
1425 for (i = 0; i < NBR_PORTS; i++) 1494 DEBUG(pr_info("rx_interrupt\n"));
1426 { 1495
1427 sync_port *port = &ports[i]; 1496 for (i = 0; i < NBR_PORTS; i++) {
1497 struct sync_port *port = &ports[i];
1428 1498
1429 if (!port->enabled || !port->use_dma ) 1499 if (!port->enabled || !port->use_dma)
1430 continue; 1500 continue;
1431 1501
1432 masked = REG_RD(dma, port->regi_dmain, r_masked_intr); 1502 masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
1433 1503
1434 if (masked.data) /* Descriptor interrupt */ 1504 if (!masked.data)
1435 { 1505 continue;
1436 found = 1;
1437 while (REG_RD(dma, port->regi_dmain, rw_data) !=
1438 virt_to_phys(port->next_rx_desc)) {
1439 DEBUGRXINT(printk(KERN_DEBUG "!"));
1440 if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
1441 int first_size = port->flip + port->in_buffer_size - port->writep;
1442 memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
1443 memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
1444 port->writep = port->flip + port->inbufchunk - first_size;
1445 } else {
1446 memcpy((char*)port->writep,
1447 phys_to_virt((unsigned)port->next_rx_desc->buf),
1448 port->inbufchunk);
1449 port->writep += port->inbufchunk;
1450 if (port->writep >= port->flip + port->in_buffer_size)
1451 port->writep = port->flip;
1452 }
1453 if (port->writep == port->readp)
1454 {
1455 port->full = 1;
1456 }
1457
1458 port->next_rx_desc->eol = 1;
1459 port->prev_rx_desc->eol = 0;
1460 /* Cache bug workaround */
1461 flush_dma_descr(port->prev_rx_desc, 0);
1462 port->prev_rx_desc = port->next_rx_desc;
1463 port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
1464 /* Cache bug workaround */
1465 flush_dma_descr(port->prev_rx_desc, 1);
1466 /* wake up the waiting process */
1467 wake_up_interruptible(&port->in_wait_q);
1468 DMA_CONTINUE(port->regi_dmain);
1469 REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
1470 1506
1471 } 1507 /* Descriptor interrupt */
1472 } 1508 found = 1;
1509 while (REG_RD(dma, port->regi_dmain, rw_data) !=
1510 virt_to_phys(port->next_rx_desc))
1511 handle_rx_packet(port);
1473 } 1512 }
1474 return IRQ_RETVAL(found); 1513 return IRQ_RETVAL(found);
1475} /* rx_interrupt */ 1514} /* rx_interrupt */
@@ -1478,75 +1517,83 @@ static irqreturn_t rx_interrupt(int irq, void *dev_id)
1478#ifdef SYNC_SER_MANUAL 1517#ifdef SYNC_SER_MANUAL
1479static irqreturn_t manual_interrupt(int irq, void *dev_id) 1518static irqreturn_t manual_interrupt(int irq, void *dev_id)
1480{ 1519{
1520 unsigned long flags;
1481 int i; 1521 int i;
1482 int found = 0; 1522 int found = 0;
1483 reg_sser_r_masked_intr masked; 1523 reg_sser_r_masked_intr masked;
1484 1524
1485 for (i = 0; i < NBR_PORTS; i++) 1525 for (i = 0; i < NBR_PORTS; i++) {
1486 { 1526 struct sync_port *port = &ports[i];
1487 sync_port *port = &ports[i];
1488 1527
1489 if (!port->enabled || port->use_dma) 1528 if (!port->enabled || port->use_dma)
1490 {
1491 continue; 1529 continue;
1492 }
1493 1530
1494 masked = REG_RD(sser, port->regi_sser, r_masked_intr); 1531 masked = REG_RD(sser, port->regi_sser, r_masked_intr);
1495 if (masked.rdav) /* Data received? */ 1532 /* Data received? */
1496 { 1533 if (masked.rdav) {
1497 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); 1534 reg_sser_rw_rec_cfg rec_cfg =
1498 reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data); 1535 REG_RD(sser, port->regi_sser, rw_rec_cfg);
1536 reg_sser_r_rec_data data = REG_RD(sser,
1537 port->regi_sser, r_rec_data);
1499 found = 1; 1538 found = 1;
1500 /* Read data */ 1539 /* Read data */
1501 switch(rec_cfg.sample_size) 1540 spin_lock_irqsave(&port->lock, flags);
1502 { 1541 switch (rec_cfg.sample_size) {
1503 case 8: 1542 case 8:
1504 *port->writep++ = data.data & 0xff; 1543 *port->writep++ = data.data & 0xff;
1505 break; 1544 break;
1506 case 12: 1545 case 12:
1507 *port->writep = (data.data & 0x0ff0) >> 4; 1546 *port->writep = (data.data & 0x0ff0) >> 4;
1508 *(port->writep + 1) = data.data & 0x0f; 1547 *(port->writep + 1) = data.data & 0x0f;
1509 port->writep+=2; 1548 port->writep += 2;
1510 break; 1549 break;
1511 case 16: 1550 case 16:
1512 *(unsigned short*)port->writep = data.data; 1551 *(unsigned short *)port->writep = data.data;
1513 port->writep+=2; 1552 port->writep += 2;
1514 break; 1553 break;
1515 case 24: 1554 case 24:
1516 *(unsigned int*)port->writep = data.data; 1555 *(unsigned int *)port->writep = data.data;
1517 port->writep+=3; 1556 port->writep += 3;
1518 break; 1557 break;
1519 case 32: 1558 case 32:
1520 *(unsigned int*)port->writep = data.data; 1559 *(unsigned int *)port->writep = data.data;
1521 port->writep+=4; 1560 port->writep += 4;
1522 break; 1561 break;
1523 } 1562 }
1524 1563
1525 if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */ 1564 /* Wrap? */
1565 if (port->writep >= port->flip + port->in_buffer_size)
1526 port->writep = port->flip; 1566 port->writep = port->flip;
1527 if (port->writep == port->readp) { 1567 if (port->writep == port->readp) {
1528 /* receive buffer overrun, discard oldest data 1568 /* Receive buf overrun, discard oldest data */
1529 */
1530 port->readp++; 1569 port->readp++;
1531 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */ 1570 /* Wrap? */
1571 if (port->readp >= port->flip +
1572 port->in_buffer_size)
1532 port->readp = port->flip; 1573 port->readp = port->flip;
1533 } 1574 }
1575 spin_unlock_irqrestore(&port->lock, flags);
1534 if (sync_data_avail(port) >= port->inbufchunk) 1576 if (sync_data_avail(port) >= port->inbufchunk)
1535 wake_up_interruptible(&port->in_wait_q); /* Wake up application */ 1577 /* Wake up application */
1578 wake_up_interruptible(&port->in_wait_q);
1536 } 1579 }
1537 1580
1538 if (masked.trdy) /* Transmitter ready? */ 1581 /* Transmitter ready? */
1539 { 1582 if (masked.trdy) {
1540 found = 1; 1583 found = 1;
1541 if (port->out_buf_count > 0) /* More data to send */ 1584 /* More data to send */
1585 if (port->out_buf_count > 0)
1542 send_word(port); 1586 send_word(port);
1543 else /* transmission finished */ 1587 else {
1544 { 1588 /* Transmission finished */
1545 reg_sser_rw_intr_mask intr_mask; 1589 reg_sser_rw_intr_mask intr_mask;
1546 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); 1590 intr_mask = REG_RD(sser, port->regi_sser,
1591 rw_intr_mask);
1547 intr_mask.trdy = 0; 1592 intr_mask.trdy = 0;
1548 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); 1593 REG_WR(sser, port->regi_sser,
1549 wake_up_interruptible(&port->out_wait_q); /* Wake up application */ 1594 rw_intr_mask, intr_mask);
1595 /* Wake up application */
1596 wake_up_interruptible(&port->out_wait_q);
1550 } 1597 }
1551 } 1598 }
1552 } 1599 }
@@ -1554,4 +1601,109 @@ static irqreturn_t manual_interrupt(int irq, void *dev_id)
1554} 1601}
1555#endif 1602#endif
1556 1603
1604static int __init etrax_sync_serial_init(void)
1605{
1606#if 1
1607 /* This code will be removed when we move to udev for all devices. */
1608 syncser_first = MKDEV(SYNC_SERIAL_MAJOR, 0);
1609 if (register_chrdev_region(syncser_first, minor_count, SYNCSER_NAME)) {
1610 pr_err("Failed to register major %d\n", SYNC_SERIAL_MAJOR);
1611 return -1;
1612 }
1613#else
1614 /* Allocate dynamic major number. */
1615 if (alloc_chrdev_region(&syncser_first, 0, minor_count, SYNCSER_NAME)) {
1616 pr_err("Failed to allocate character device region\n");
1617 return -1;
1618 }
1619#endif
1620 syncser_cdev = cdev_alloc();
1621 if (!syncser_cdev) {
1622 pr_err("Failed to allocate cdev for syncser\n");
1623 unregister_chrdev_region(syncser_first, minor_count);
1624 return -1;
1625 }
1626 cdev_init(syncser_cdev, &syncser_fops);
1627
1628 /* Create a sysfs class for syncser */
1629 syncser_class = class_create(THIS_MODULE, "syncser_class");
1630
1631 /* Initialize Ports */
1632#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
1633 if (artpec_pinmux_alloc_fixed(PINMUX_SSER0)) {
1634 pr_warn("Unable to alloc pins for synchronous serial port 0\n");
1635 unregister_chrdev_region(syncser_first, minor_count);
1636 return -EIO;
1637 }
1638 initialize_port(0);
1639 ports[0].enabled = 1;
1640 /* Register with sysfs so udev can pick it up. */
1641 device_create(syncser_class, NULL, syncser_first, NULL,
1642 "%s%d", SYNCSER_NAME, 0);
1643#endif
1644
1645#if defined(CONFIG_ETRAXFS) && defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
1646 if (artpec_pinmux_alloc_fixed(PINMUX_SSER1)) {
1647 pr_warn("Unable to alloc pins for synchronous serial port 1\n");
1648 unregister_chrdev_region(syncser_first, minor_count);
1649 class_destroy(syncser_class);
1650 return -EIO;
1651 }
1652 initialize_port(1);
1653 ports[1].enabled = 1;
1654 /* Register with sysfs so udev can pick it up. */
1655 device_create(syncser_class, NULL, syncser_first, NULL,
1656 "%s%d", SYNCSER_NAME, 0);
1657#endif
1658
1659 /* Add it to system */
1660 if (cdev_add(syncser_cdev, syncser_first, minor_count) < 0) {
1661 pr_err("Failed to add syncser as char device\n");
1662 device_destroy(syncser_class, syncser_first);
1663 class_destroy(syncser_class);
1664 cdev_del(syncser_cdev);
1665 unregister_chrdev_region(syncser_first, minor_count);
1666 return -1;
1667 }
1668
1669
1670 pr_info("ARTPEC synchronous serial port (%s: %d, %d)\n",
1671 SYNCSER_NAME, MAJOR(syncser_first), MINOR(syncser_first));
1672
1673 return 0;
1674}
1675
1676static void __exit etrax_sync_serial_exit(void)
1677{
1678 int i;
1679 device_destroy(syncser_class, syncser_first);
1680 class_destroy(syncser_class);
1681
1682 if (syncser_cdev) {
1683 cdev_del(syncser_cdev);
1684 unregister_chrdev_region(syncser_first, minor_count);
1685 }
1686 for (i = 0; i < NBR_PORTS; i++) {
1687 struct sync_port *port = &ports[i];
1688 if (port->init_irqs == dma_irq_setup) {
1689 /* Free dma irqs and dma channels. */
1690#ifdef SYNC_SER_DMA
1691 artpec_free_dma(port->dma_in_nbr);
1692 artpec_free_dma(port->dma_out_nbr);
1693 free_irq(port->dma_out_intr_vect, port);
1694 free_irq(port->dma_in_intr_vect, port);
1695#endif
1696 } else if (port->init_irqs == manual_irq_setup) {
1697 /* Free manual irq. */
1698 free_irq(port->syncser_intr_vect, port);
1699 }
1700 }
1701
1702 pr_info("ARTPEC synchronous serial port unregistered\n");
1703}
1704
1557module_init(etrax_sync_serial_init); 1705module_init(etrax_sync_serial_init);
1706module_exit(etrax_sync_serial_exit);
1707
1708MODULE_LICENSE("GPL");
1709
diff --git a/arch/cris/arch-v32/kernel/debugport.c b/arch/cris/arch-v32/kernel/debugport.c
index 610909b003f6..02e33ebe51ec 100644
--- a/arch/cris/arch-v32/kernel/debugport.c
+++ b/arch/cris/arch-v32/kernel/debugport.c
@@ -3,7 +3,9 @@
3 */ 3 */
4 4
5#include <linux/console.h> 5#include <linux/console.h>
6#include <linux/kernel.h>
6#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/string.h>
7#include <hwregs/reg_rdwr.h> 9#include <hwregs/reg_rdwr.h>
8#include <hwregs/reg_map.h> 10#include <hwregs/reg_map.h>
9#include <hwregs/ser_defs.h> 11#include <hwregs/ser_defs.h>
@@ -65,6 +67,7 @@ struct dbg_port ports[] =
65 }, 67 },
66#endif 68#endif
67}; 69};
70
68static struct dbg_port *port = 71static struct dbg_port *port =
69#if defined(CONFIG_ETRAX_DEBUG_PORT0) 72#if defined(CONFIG_ETRAX_DEBUG_PORT0)
70 &ports[0]; 73 &ports[0];
@@ -97,14 +100,19 @@ static struct dbg_port *kgdb_port =
97#endif 100#endif
98#endif 101#endif
99 102
100static void 103static void start_port(struct dbg_port *p)
101start_port(struct dbg_port* p)
102{ 104{
103 if (!p) 105 /* Set up serial port registers */
104 return; 106 reg_ser_rw_tr_ctrl tr_ctrl = {0};
107 reg_ser_rw_tr_dma_en tr_dma_en = {0};
105 108
106 if (p->started) 109 reg_ser_rw_rec_ctrl rec_ctrl = {0};
110 reg_ser_rw_tr_baud_div tr_baud_div = {0};
111 reg_ser_rw_rec_baud_div rec_baud_div = {0};
112
113 if (!p || p->started)
107 return; 114 return;
115
108 p->started = 1; 116 p->started = 1;
109 117
110 if (p->nbr == 1) 118 if (p->nbr == 1)
@@ -118,36 +126,24 @@ start_port(struct dbg_port* p)
118 crisv32_pinmux_alloc_fixed(pinmux_ser4); 126 crisv32_pinmux_alloc_fixed(pinmux_ser4);
119#endif 127#endif
120 128
121 /* Set up serial port registers */
122 reg_ser_rw_tr_ctrl tr_ctrl = {0};
123 reg_ser_rw_tr_dma_en tr_dma_en = {0};
124
125 reg_ser_rw_rec_ctrl rec_ctrl = {0};
126 reg_ser_rw_tr_baud_div tr_baud_div = {0};
127 reg_ser_rw_rec_baud_div rec_baud_div = {0};
128
129 tr_ctrl.base_freq = rec_ctrl.base_freq = regk_ser_f29_493; 129 tr_ctrl.base_freq = rec_ctrl.base_freq = regk_ser_f29_493;
130 tr_dma_en.en = rec_ctrl.dma_mode = regk_ser_no; 130 tr_dma_en.en = rec_ctrl.dma_mode = regk_ser_no;
131 tr_baud_div.div = rec_baud_div.div = 29493000 / p->baudrate / 8; 131 tr_baud_div.div = rec_baud_div.div = 29493000 / p->baudrate / 8;
132 tr_ctrl.en = rec_ctrl.en = 1; 132 tr_ctrl.en = rec_ctrl.en = 1;
133 133
134 if (p->parity == 'O') 134 if (p->parity == 'O') {
135 {
136 tr_ctrl.par_en = regk_ser_yes; 135 tr_ctrl.par_en = regk_ser_yes;
137 tr_ctrl.par = regk_ser_odd; 136 tr_ctrl.par = regk_ser_odd;
138 rec_ctrl.par_en = regk_ser_yes; 137 rec_ctrl.par_en = regk_ser_yes;
139 rec_ctrl.par = regk_ser_odd; 138 rec_ctrl.par = regk_ser_odd;
140 } 139 } else if (p->parity == 'E') {
141 else if (p->parity == 'E')
142 {
143 tr_ctrl.par_en = regk_ser_yes; 140 tr_ctrl.par_en = regk_ser_yes;
144 tr_ctrl.par = regk_ser_even; 141 tr_ctrl.par = regk_ser_even;
145 rec_ctrl.par_en = regk_ser_yes; 142 rec_ctrl.par_en = regk_ser_yes;
146 rec_ctrl.par = regk_ser_odd; 143 rec_ctrl.par = regk_ser_odd;
147 } 144 }
148 145
149 if (p->bits == 7) 146 if (p->bits == 7) {
150 {
151 tr_ctrl.data_bits = regk_ser_bits7; 147 tr_ctrl.data_bits = regk_ser_bits7;
152 rec_ctrl.data_bits = regk_ser_bits7; 148 rec_ctrl.data_bits = regk_ser_bits7;
153 } 149 }
@@ -161,8 +157,7 @@ start_port(struct dbg_port* p)
161 157
162#ifdef CONFIG_ETRAX_KGDB 158#ifdef CONFIG_ETRAX_KGDB
163/* Use polling to get a single character from the kernel debug port */ 159/* Use polling to get a single character from the kernel debug port */
164int 160int getDebugChar(void)
165getDebugChar(void)
166{ 161{
167 reg_ser_rs_stat_din stat; 162 reg_ser_rs_stat_din stat;
168 reg_ser_rw_ack_intr ack_intr = { 0 }; 163 reg_ser_rw_ack_intr ack_intr = { 0 };
@@ -179,8 +174,7 @@ getDebugChar(void)
179} 174}
180 175
181/* Use polling to put a single character to the kernel debug port */ 176/* Use polling to put a single character to the kernel debug port */
182void 177void putDebugChar(int val)
183putDebugChar(int val)
184{ 178{
185 reg_ser_r_stat_din stat; 179 reg_ser_r_stat_din stat;
186 do { 180 do {
@@ -190,12 +184,48 @@ putDebugChar(int val)
190} 184}
191#endif /* CONFIG_ETRAX_KGDB */ 185#endif /* CONFIG_ETRAX_KGDB */
192 186
187static void __init early_putch(int c)
188{
189 reg_ser_r_stat_din stat;
190 /* Wait until transmitter is ready and send. */
191 do
192 stat = REG_RD(ser, port->instance, r_stat_din);
193 while (!stat.tr_rdy);
194 REG_WR_INT(ser, port->instance, rw_dout, c);
195}
196
197static void __init
198early_console_write(struct console *con, const char *s, unsigned n)
199{
200 extern void reset_watchdog(void);
201 int i;
202
203 /* Send data. */
204 for (i = 0; i < n; i++) {
205 /* TODO: the '\n' -> '\n\r' translation should be done at the
206 receiver. Remove it when the serial driver removes it. */
207 if (s[i] == '\n')
208 early_putch('\r');
209 early_putch(s[i]);
210 reset_watchdog();
211 }
212}
213
214static struct console early_console_dev __initdata = {
215 .name = "early",
216 .write = early_console_write,
217 .flags = CON_PRINTBUFFER | CON_BOOT,
218 .index = -1
219};
220
193/* Register console for printk's, etc. */ 221/* Register console for printk's, etc. */
194int __init 222int __init init_etrax_debug(void)
195init_etrax_debug(void)
196{ 223{
197 start_port(port); 224 start_port(port);
198 225
226 /* Register an early console if a debug port was chosen. */
227 register_console(&early_console_dev);
228
199#ifdef CONFIG_ETRAX_KGDB 229#ifdef CONFIG_ETRAX_KGDB
200 start_port(kgdb_port); 230 start_port(kgdb_port);
201#endif /* CONFIG_ETRAX_KGDB */ 231#endif /* CONFIG_ETRAX_KGDB */
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index ee66866538f8..eb74dabbeb96 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -14,6 +14,7 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/threads.h> 15#include <linux/threads.h>
16#include <linux/cpufreq.h> 16#include <linux/cpufreq.h>
17#include <linux/mm.h>
17#include <asm/types.h> 18#include <asm/types.h>
18#include <asm/signal.h> 19#include <asm/signal.h>
19#include <asm/io.h> 20#include <asm/io.h>
@@ -56,7 +57,6 @@ static int __init etrax_init_cont_rotime(void)
56} 57}
57arch_initcall(etrax_init_cont_rotime); 58arch_initcall(etrax_init_cont_rotime);
58 59
59
60unsigned long timer_regs[NR_CPUS] = 60unsigned long timer_regs[NR_CPUS] =
61{ 61{
62 regi_timer0, 62 regi_timer0,
@@ -68,9 +68,8 @@ unsigned long timer_regs[NR_CPUS] =
68extern int set_rtc_mmss(unsigned long nowtime); 68extern int set_rtc_mmss(unsigned long nowtime);
69 69
70#ifdef CONFIG_CPU_FREQ 70#ifdef CONFIG_CPU_FREQ
71static int 71static int cris_time_freq_notifier(struct notifier_block *nb,
72cris_time_freq_notifier(struct notifier_block *nb, unsigned long val, 72 unsigned long val, void *data);
73 void *data);
74 73
75static struct notifier_block cris_time_freq_notifier_block = { 74static struct notifier_block cris_time_freq_notifier_block = {
76 .notifier_call = cris_time_freq_notifier, 75 .notifier_call = cris_time_freq_notifier,
@@ -87,7 +86,6 @@ unsigned long get_ns_in_jiffie(void)
87 return ns; 86 return ns;
88} 87}
89 88
90
91/* From timer MDS describing the hardware watchdog: 89/* From timer MDS describing the hardware watchdog:
92 * 4.3.1 Watchdog Operation 90 * 4.3.1 Watchdog Operation
93 * The watchdog timer is an 8-bit timer with a configurable start value. 91 * The watchdog timer is an 8-bit timer with a configurable start value.
@@ -109,11 +107,18 @@ static short int watchdog_key = 42; /* arbitrary 7 bit number */
109 * is used though, so set this really low. */ 107 * is used though, so set this really low. */
110#define WATCHDOG_MIN_FREE_PAGES 8 108#define WATCHDOG_MIN_FREE_PAGES 8
111 109
110/* for reliable NICE_DOGGY behaviour */
111static int bite_in_progress;
112
112void reset_watchdog(void) 113void reset_watchdog(void)
113{ 114{
114#if defined(CONFIG_ETRAX_WATCHDOG) 115#if defined(CONFIG_ETRAX_WATCHDOG)
115 reg_timer_rw_wd_ctrl wd_ctrl = { 0 }; 116 reg_timer_rw_wd_ctrl wd_ctrl = { 0 };
116 117
118#if defined(CONFIG_ETRAX_WATCHDOG_NICE_DOGGY)
119 if (unlikely(bite_in_progress))
120 return;
121#endif
117 /* Only keep watchdog happy as long as we have memory left! */ 122 /* Only keep watchdog happy as long as we have memory left! */
118 if(nr_free_pages() > WATCHDOG_MIN_FREE_PAGES) { 123 if(nr_free_pages() > WATCHDOG_MIN_FREE_PAGES) {
119 /* Reset the watchdog with the inverse of the old key */ 124 /* Reset the watchdog with the inverse of the old key */
@@ -148,7 +153,9 @@ void handle_watchdog_bite(struct pt_regs *regs)
148#if defined(CONFIG_ETRAX_WATCHDOG) 153#if defined(CONFIG_ETRAX_WATCHDOG)
149 extern int cause_of_death; 154 extern int cause_of_death;
150 155
156 nmi_enter();
151 oops_in_progress = 1; 157 oops_in_progress = 1;
158 bite_in_progress = 1;
152 printk(KERN_WARNING "Watchdog bite\n"); 159 printk(KERN_WARNING "Watchdog bite\n");
153 160
154 /* Check if forced restart or unexpected watchdog */ 161 /* Check if forced restart or unexpected watchdog */
@@ -170,6 +177,7 @@ void handle_watchdog_bite(struct pt_regs *regs)
170 printk(KERN_WARNING "Oops: bitten by watchdog\n"); 177 printk(KERN_WARNING "Oops: bitten by watchdog\n");
171 show_registers(regs); 178 show_registers(regs);
172 oops_in_progress = 0; 179 oops_in_progress = 0;
180 printk("\n"); /* Flush mtdoops. */
173#ifndef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY 181#ifndef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
174 reset_watchdog(); 182 reset_watchdog();
175#endif 183#endif
@@ -202,7 +210,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
202 /* Reset watchdog otherwise it resets us! */ 210 /* Reset watchdog otherwise it resets us! */
203 reset_watchdog(); 211 reset_watchdog();
204 212
205 /* Update statistics. */ 213 /* Update statistics. */
206 update_process_times(user_mode(regs)); 214 update_process_times(user_mode(regs));
207 215
208 cris_do_profile(regs); /* Save profiling information */ 216 cris_do_profile(regs); /* Save profiling information */
@@ -213,7 +221,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
213 221
214 /* Call the real timer interrupt handler */ 222 /* Call the real timer interrupt handler */
215 xtime_update(1); 223 xtime_update(1);
216 return IRQ_HANDLED; 224 return IRQ_HANDLED;
217} 225}
218 226
219/* Timer is IRQF_SHARED so drivers can add stuff to the timer irq chain. */ 227/* Timer is IRQF_SHARED so drivers can add stuff to the timer irq chain. */
@@ -293,14 +301,13 @@ void __init time_init(void)
293 301
294#ifdef CONFIG_CPU_FREQ 302#ifdef CONFIG_CPU_FREQ
295 cpufreq_register_notifier(&cris_time_freq_notifier_block, 303 cpufreq_register_notifier(&cris_time_freq_notifier_block,
296 CPUFREQ_TRANSITION_NOTIFIER); 304 CPUFREQ_TRANSITION_NOTIFIER);
297#endif 305#endif
298} 306}
299 307
300#ifdef CONFIG_CPU_FREQ 308#ifdef CONFIG_CPU_FREQ
301static int 309static int cris_time_freq_notifier(struct notifier_block *nb,
302cris_time_freq_notifier(struct notifier_block *nb, unsigned long val, 310 unsigned long val, void *data)
303 void *data)
304{ 311{
305 struct cpufreq_freqs *freqs = data; 312 struct cpufreq_freqs *freqs = data;
306 if (val == CPUFREQ_POSTCHANGE) { 313 if (val == CPUFREQ_POSTCHANGE) {
diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c
index 0b5b70d5f58a..f0f335d8aa79 100644
--- a/arch/cris/arch-v32/lib/usercopy.c
+++ b/arch/cris/arch-v32/lib/usercopy.c
@@ -26,8 +26,7 @@
26/* Copy to userspace. This is based on the memcpy used for 26/* Copy to userspace. This is based on the memcpy used for
27 kernel-to-kernel copying; see "string.c". */ 27 kernel-to-kernel copying; see "string.c". */
28 28
29unsigned long 29unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
30__copy_user (void __user *pdst, const void *psrc, unsigned long pn)
31{ 30{
32 /* We want the parameters put in special registers. 31 /* We want the parameters put in special registers.
33 Make sure the compiler is able to make something useful of this. 32 Make sure the compiler is able to make something useful of this.
@@ -155,13 +154,13 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn)
155 154
156 return retn; 155 return retn;
157} 156}
157EXPORT_SYMBOL(__copy_user);
158 158
159/* Copy from user to kernel, zeroing the bytes that were inaccessible in 159/* Copy from user to kernel, zeroing the bytes that were inaccessible in
160 userland. The return-value is the number of bytes that were 160 userland. The return-value is the number of bytes that were
161 inaccessible. */ 161 inaccessible. */
162 162unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
163unsigned long 163 unsigned long pn)
164__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn)
165{ 164{
166 /* We want the parameters put in special registers. 165 /* We want the parameters put in special registers.
167 Make sure the compiler is able to make something useful of this. 166 Make sure the compiler is able to make something useful of this.
@@ -321,11 +320,10 @@ copy_exception_bytes:
321 320
322 return retn + n; 321 return retn + n;
323} 322}
323EXPORT_SYMBOL(__copy_user_zeroing);
324 324
325/* Zero userspace. */ 325/* Zero userspace. */
326 326unsigned long __do_clear_user(void __user *pto, unsigned long pn)
327unsigned long
328__do_clear_user (void __user *pto, unsigned long pn)
329{ 327{
330 /* We want the parameters put in special registers. 328 /* We want the parameters put in special registers.
331 Make sure the compiler is able to make something useful of this. 329 Make sure the compiler is able to make something useful of this.
@@ -468,3 +466,4 @@ __do_clear_user (void __user *pto, unsigned long pn)
468 466
469 return retn; 467 return retn;
470} 468}
469EXPORT_SYMBOL(__do_clear_user);
diff --git a/arch/cris/arch-v32/mach-fs/pinmux.c b/arch/cris/arch-v32/mach-fs/pinmux.c
index 38f29eec14a6..05a04708b8eb 100644
--- a/arch/cris/arch-v32/mach-fs/pinmux.c
+++ b/arch/cris/arch-v32/mach-fs/pinmux.c
@@ -26,7 +26,29 @@ static DEFINE_SPINLOCK(pinmux_lock);
26 26
27static void crisv32_pinmux_set(int port); 27static void crisv32_pinmux_set(int port);
28 28
29int crisv32_pinmux_init(void) 29static int __crisv32_pinmux_alloc(int port, int first_pin, int last_pin,
30 enum pin_mode mode)
31{
32 int i;
33
34 for (i = first_pin; i <= last_pin; i++) {
35 if ((pins[port][i] != pinmux_none)
36 && (pins[port][i] != pinmux_gpio)
37 && (pins[port][i] != mode)) {
38#ifdef DEBUG
39 panic("Pinmux alloc failed!\n");
40#endif
41 return -EPERM;
42 }
43 }
44
45 for (i = first_pin; i <= last_pin; i++)
46 pins[port][i] = mode;
47
48 crisv32_pinmux_set(port);
49}
50
51static int crisv32_pinmux_init(void)
30{ 52{
31 static int initialized; 53 static int initialized;
32 54
@@ -37,20 +59,20 @@ int crisv32_pinmux_init(void)
37 pa.pa0 = pa.pa1 = pa.pa2 = pa.pa3 = 59 pa.pa0 = pa.pa1 = pa.pa2 = pa.pa3 =
38 pa.pa4 = pa.pa5 = pa.pa6 = pa.pa7 = regk_pinmux_yes; 60 pa.pa4 = pa.pa5 = pa.pa6 = pa.pa7 = regk_pinmux_yes;
39 REG_WR(pinmux, regi_pinmux, rw_pa, pa); 61 REG_WR(pinmux, regi_pinmux, rw_pa, pa);
40 crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio); 62 __crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio);
41 crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio); 63 __crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio);
42 crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio); 64 __crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio);
43 crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio); 65 __crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio);
44 } 66 }
45 67
46 return 0; 68 return 0;
47} 69}
48 70
49int 71int crisv32_pinmux_alloc(int port, int first_pin, int last_pin,
50crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode) 72 enum pin_mode mode)
51{ 73{
52 int i;
53 unsigned long flags; 74 unsigned long flags;
75 int ret;
54 76
55 crisv32_pinmux_init(); 77 crisv32_pinmux_init();
56 78
@@ -59,26 +81,11 @@ crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
59 81
60 spin_lock_irqsave(&pinmux_lock, flags); 82 spin_lock_irqsave(&pinmux_lock, flags);
61 83
62 for (i = first_pin; i <= last_pin; i++) { 84 ret = __crisv32_pinmux_alloc(port, first_pin, last_pin, mode);
63 if ((pins[port][i] != pinmux_none)
64 && (pins[port][i] != pinmux_gpio)
65 && (pins[port][i] != mode)) {
66 spin_unlock_irqrestore(&pinmux_lock, flags);
67#ifdef DEBUG
68 panic("Pinmux alloc failed!\n");
69#endif
70 return -EPERM;
71 }
72 }
73
74 for (i = first_pin; i <= last_pin; i++)
75 pins[port][i] = mode;
76
77 crisv32_pinmux_set(port);
78 85
79 spin_unlock_irqrestore(&pinmux_lock, flags); 86 spin_unlock_irqrestore(&pinmux_lock, flags);
80 87
81 return 0; 88 return ret;
82} 89}
83 90
84int crisv32_pinmux_alloc_fixed(enum fixed_function function) 91int crisv32_pinmux_alloc_fixed(enum fixed_function function)
@@ -98,58 +105,58 @@ int crisv32_pinmux_alloc_fixed(enum fixed_function function)
98 105
99 switch (function) { 106 switch (function) {
100 case pinmux_ser1: 107 case pinmux_ser1:
101 ret = crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed); 108 ret = __crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed);
102 hwprot.ser1 = regk_pinmux_yes; 109 hwprot.ser1 = regk_pinmux_yes;
103 break; 110 break;
104 case pinmux_ser2: 111 case pinmux_ser2:
105 ret = crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed); 112 ret = __crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed);
106 hwprot.ser2 = regk_pinmux_yes; 113 hwprot.ser2 = regk_pinmux_yes;
107 break; 114 break;
108 case pinmux_ser3: 115 case pinmux_ser3:
109 ret = crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed); 116 ret = __crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed);
110 hwprot.ser3 = regk_pinmux_yes; 117 hwprot.ser3 = regk_pinmux_yes;
111 break; 118 break;
112 case pinmux_sser0: 119 case pinmux_sser0:
113 ret = crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed); 120 ret = __crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed);
114 ret |= crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed); 121 ret |= __crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
115 hwprot.sser0 = regk_pinmux_yes; 122 hwprot.sser0 = regk_pinmux_yes;
116 break; 123 break;
117 case pinmux_sser1: 124 case pinmux_sser1:
118 ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed); 125 ret = __crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
119 hwprot.sser1 = regk_pinmux_yes; 126 hwprot.sser1 = regk_pinmux_yes;
120 break; 127 break;
121 case pinmux_ata0: 128 case pinmux_ata0:
122 ret = crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed); 129 ret = __crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed);
123 ret |= crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed); 130 ret |= __crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed);
124 hwprot.ata0 = regk_pinmux_yes; 131 hwprot.ata0 = regk_pinmux_yes;
125 break; 132 break;
126 case pinmux_ata1: 133 case pinmux_ata1:
127 ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed); 134 ret = __crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
128 ret |= crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed); 135 ret |= __crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed);
129 hwprot.ata1 = regk_pinmux_yes; 136 hwprot.ata1 = regk_pinmux_yes;
130 break; 137 break;
131 case pinmux_ata2: 138 case pinmux_ata2:
132 ret = crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed); 139 ret = __crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed);
133 ret |= crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed); 140 ret |= __crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed);
134 hwprot.ata2 = regk_pinmux_yes; 141 hwprot.ata2 = regk_pinmux_yes;
135 break; 142 break;
136 case pinmux_ata3: 143 case pinmux_ata3:
137 ret = crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed); 144 ret = __crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed);
138 ret |= crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed); 145 ret |= __crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed);
139 hwprot.ata2 = regk_pinmux_yes; 146 hwprot.ata2 = regk_pinmux_yes;
140 break; 147 break;
141 case pinmux_ata: 148 case pinmux_ata:
142 ret = crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed); 149 ret = __crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed);
143 ret |= crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed); 150 ret |= __crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed);
144 hwprot.ata = regk_pinmux_yes; 151 hwprot.ata = regk_pinmux_yes;
145 break; 152 break;
146 case pinmux_eth1: 153 case pinmux_eth1:
147 ret = crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed); 154 ret = __crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed);
148 hwprot.eth1 = regk_pinmux_yes; 155 hwprot.eth1 = regk_pinmux_yes;
149 hwprot.eth1_mgm = regk_pinmux_yes; 156 hwprot.eth1_mgm = regk_pinmux_yes;
150 break; 157 break;
151 case pinmux_timer: 158 case pinmux_timer:
152 ret = crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed); 159 ret = __crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
153 hwprot.timer = regk_pinmux_yes; 160 hwprot.timer = regk_pinmux_yes;
154 spin_unlock_irqrestore(&pinmux_lock, flags); 161 spin_unlock_irqrestore(&pinmux_lock, flags);
155 return ret; 162 return ret;
@@ -188,9 +195,19 @@ void crisv32_pinmux_set(int port)
188#endif 195#endif
189} 196}
190 197
191int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin) 198static int __crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
192{ 199{
193 int i; 200 int i;
201
202 for (i = first_pin; i <= last_pin; i++)
203 pins[port][i] = pinmux_none;
204
205 crisv32_pinmux_set(port);
206 return 0;
207}
208
209int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
210{
194 unsigned long flags; 211 unsigned long flags;
195 212
196 crisv32_pinmux_init(); 213 crisv32_pinmux_init();
@@ -199,11 +216,7 @@ int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
199 return -EINVAL; 216 return -EINVAL;
200 217
201 spin_lock_irqsave(&pinmux_lock, flags); 218 spin_lock_irqsave(&pinmux_lock, flags);
202 219 __crisv32_pinmux_dealloc(port, first_pin, last_pin);
203 for (i = first_pin; i <= last_pin; i++)
204 pins[port][i] = pinmux_none;
205
206 crisv32_pinmux_set(port);
207 spin_unlock_irqrestore(&pinmux_lock, flags); 220 spin_unlock_irqrestore(&pinmux_lock, flags);
208 221
209 return 0; 222 return 0;
@@ -226,58 +239,58 @@ int crisv32_pinmux_dealloc_fixed(enum fixed_function function)
226 239
227 switch (function) { 240 switch (function) {
228 case pinmux_ser1: 241 case pinmux_ser1:
229 ret = crisv32_pinmux_dealloc(PORT_C, 4, 7); 242 ret = __crisv32_pinmux_dealloc(PORT_C, 4, 7);
230 hwprot.ser1 = regk_pinmux_no; 243 hwprot.ser1 = regk_pinmux_no;
231 break; 244 break;
232 case pinmux_ser2: 245 case pinmux_ser2:
233 ret = crisv32_pinmux_dealloc(PORT_C, 8, 11); 246 ret = __crisv32_pinmux_dealloc(PORT_C, 8, 11);
234 hwprot.ser2 = regk_pinmux_no; 247 hwprot.ser2 = regk_pinmux_no;
235 break; 248 break;
236 case pinmux_ser3: 249 case pinmux_ser3:
237 ret = crisv32_pinmux_dealloc(PORT_C, 12, 15); 250 ret = __crisv32_pinmux_dealloc(PORT_C, 12, 15);
238 hwprot.ser3 = regk_pinmux_no; 251 hwprot.ser3 = regk_pinmux_no;
239 break; 252 break;
240 case pinmux_sser0: 253 case pinmux_sser0:
241 ret = crisv32_pinmux_dealloc(PORT_C, 0, 3); 254 ret = __crisv32_pinmux_dealloc(PORT_C, 0, 3);
242 ret |= crisv32_pinmux_dealloc(PORT_C, 16, 16); 255 ret |= __crisv32_pinmux_dealloc(PORT_C, 16, 16);
243 hwprot.sser0 = regk_pinmux_no; 256 hwprot.sser0 = regk_pinmux_no;
244 break; 257 break;
245 case pinmux_sser1: 258 case pinmux_sser1:
246 ret = crisv32_pinmux_dealloc(PORT_D, 0, 4); 259 ret = __crisv32_pinmux_dealloc(PORT_D, 0, 4);
247 hwprot.sser1 = regk_pinmux_no; 260 hwprot.sser1 = regk_pinmux_no;
248 break; 261 break;
249 case pinmux_ata0: 262 case pinmux_ata0:
250 ret = crisv32_pinmux_dealloc(PORT_D, 5, 7); 263 ret = __crisv32_pinmux_dealloc(PORT_D, 5, 7);
251 ret |= crisv32_pinmux_dealloc(PORT_D, 15, 17); 264 ret |= __crisv32_pinmux_dealloc(PORT_D, 15, 17);
252 hwprot.ata0 = regk_pinmux_no; 265 hwprot.ata0 = regk_pinmux_no;
253 break; 266 break;
254 case pinmux_ata1: 267 case pinmux_ata1:
255 ret = crisv32_pinmux_dealloc(PORT_D, 0, 4); 268 ret = __crisv32_pinmux_dealloc(PORT_D, 0, 4);
256 ret |= crisv32_pinmux_dealloc(PORT_E, 17, 17); 269 ret |= __crisv32_pinmux_dealloc(PORT_E, 17, 17);
257 hwprot.ata1 = regk_pinmux_no; 270 hwprot.ata1 = regk_pinmux_no;
258 break; 271 break;
259 case pinmux_ata2: 272 case pinmux_ata2:
260 ret = crisv32_pinmux_dealloc(PORT_C, 11, 15); 273 ret = __crisv32_pinmux_dealloc(PORT_C, 11, 15);
261 ret |= crisv32_pinmux_dealloc(PORT_E, 3, 3); 274 ret |= __crisv32_pinmux_dealloc(PORT_E, 3, 3);
262 hwprot.ata2 = regk_pinmux_no; 275 hwprot.ata2 = regk_pinmux_no;
263 break; 276 break;
264 case pinmux_ata3: 277 case pinmux_ata3:
265 ret = crisv32_pinmux_dealloc(PORT_C, 8, 10); 278 ret = __crisv32_pinmux_dealloc(PORT_C, 8, 10);
266 ret |= crisv32_pinmux_dealloc(PORT_C, 0, 2); 279 ret |= __crisv32_pinmux_dealloc(PORT_C, 0, 2);
267 hwprot.ata2 = regk_pinmux_no; 280 hwprot.ata2 = regk_pinmux_no;
268 break; 281 break;
269 case pinmux_ata: 282 case pinmux_ata:
270 ret = crisv32_pinmux_dealloc(PORT_B, 0, 15); 283 ret = __crisv32_pinmux_dealloc(PORT_B, 0, 15);
271 ret |= crisv32_pinmux_dealloc(PORT_D, 8, 15); 284 ret |= __crisv32_pinmux_dealloc(PORT_D, 8, 15);
272 hwprot.ata = regk_pinmux_no; 285 hwprot.ata = regk_pinmux_no;
273 break; 286 break;
274 case pinmux_eth1: 287 case pinmux_eth1:
275 ret = crisv32_pinmux_dealloc(PORT_E, 0, 17); 288 ret = __crisv32_pinmux_dealloc(PORT_E, 0, 17);
276 hwprot.eth1 = regk_pinmux_no; 289 hwprot.eth1 = regk_pinmux_no;
277 hwprot.eth1_mgm = regk_pinmux_no; 290 hwprot.eth1_mgm = regk_pinmux_no;
278 break; 291 break;
279 case pinmux_timer: 292 case pinmux_timer:
280 ret = crisv32_pinmux_dealloc(PORT_C, 16, 16); 293 ret = __crisv32_pinmux_dealloc(PORT_C, 16, 16);
281 hwprot.timer = regk_pinmux_no; 294 hwprot.timer = regk_pinmux_no;
282 spin_unlock_irqrestore(&pinmux_lock, flags); 295 spin_unlock_irqrestore(&pinmux_lock, flags);
283 return ret; 296 return ret;
@@ -293,7 +306,8 @@ int crisv32_pinmux_dealloc_fixed(enum fixed_function function)
293 return ret; 306 return ret;
294} 307}
295 308
296void crisv32_pinmux_dump(void) 309#ifdef DEBUG
310static void crisv32_pinmux_dump(void)
297{ 311{
298 int i, j; 312 int i, j;
299 313
@@ -305,5 +319,5 @@ void crisv32_pinmux_dump(void)
305 printk(KERN_DEBUG " Pin %d = %d\n", j, pins[i][j]); 319 printk(KERN_DEBUG " Pin %d = %d\n", j, pins[i][j]);
306 } 320 }
307} 321}
308 322#endif
309__initcall(crisv32_pinmux_init); 323__initcall(crisv32_pinmux_init);
diff --git a/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h b/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
index c2b3036779df..09bf0c90d2d3 100644
--- a/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
+++ b/arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
@@ -28,11 +28,9 @@ enum fixed_function {
28 pinmux_timer 28 pinmux_timer
29}; 29};
30 30
31int crisv32_pinmux_init(void);
32int crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode); 31int crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode);
33int crisv32_pinmux_alloc_fixed(enum fixed_function function); 32int crisv32_pinmux_alloc_fixed(enum fixed_function function);
34int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin); 33int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin);
35int crisv32_pinmux_dealloc_fixed(enum fixed_function function); 34int crisv32_pinmux_dealloc_fixed(enum fixed_function function);
36void crisv32_pinmux_dump(void);
37 35
38#endif 36#endif
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index d5f124832fd1..889f2de050a3 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -1,8 +1,4 @@
1 1
2header-y += arch-v10/
3header-y += arch-v32/
4
5
6generic-y += barrier.h 2generic-y += barrier.h
7generic-y += clkdev.h 3generic-y += clkdev.h
8generic-y += cputime.h 4generic-y += cputime.h
diff --git a/arch/cris/include/uapi/asm/Kbuild b/arch/cris/include/uapi/asm/Kbuild
index 7d47b366ad82..01f66b8f15e5 100644
--- a/arch/cris/include/uapi/asm/Kbuild
+++ b/arch/cris/include/uapi/asm/Kbuild
@@ -1,8 +1,8 @@
1# UAPI Header export list 1# UAPI Header export list
2include include/uapi/asm-generic/Kbuild.asm 2include include/uapi/asm-generic/Kbuild.asm
3 3
4header-y += arch-v10/ 4header-y += ../arch-v10/arch/
5header-y += arch-v32/ 5header-y += ../arch-v32/arch/
6header-y += auxvec.h 6header-y += auxvec.h
7header-y += bitsperlong.h 7header-y += bitsperlong.h
8header-y += byteorder.h 8header-y += byteorder.h
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
index 5868cee20ebd..3908b942fd4c 100644
--- a/arch/cris/kernel/crisksyms.c
+++ b/arch/cris/kernel/crisksyms.c
@@ -47,16 +47,16 @@ EXPORT_SYMBOL(__negdi2);
47EXPORT_SYMBOL(__ioremap); 47EXPORT_SYMBOL(__ioremap);
48EXPORT_SYMBOL(iounmap); 48EXPORT_SYMBOL(iounmap);
49 49
50/* Userspace access functions */
51EXPORT_SYMBOL(__copy_user_zeroing);
52EXPORT_SYMBOL(__copy_user);
53
54#undef memcpy 50#undef memcpy
55#undef memset 51#undef memset
56extern void * memset(void *, int, __kernel_size_t); 52extern void * memset(void *, int, __kernel_size_t);
57extern void * memcpy(void *, const void *, __kernel_size_t); 53extern void * memcpy(void *, const void *, __kernel_size_t);
58EXPORT_SYMBOL(memcpy); 54EXPORT_SYMBOL(memcpy);
59EXPORT_SYMBOL(memset); 55EXPORT_SYMBOL(memset);
56#ifdef CONFIG_ETRAX_ARCH_V32
57#undef strcmp
58EXPORT_SYMBOL(strcmp);
59#endif
60 60
61#ifdef CONFIG_ETRAX_FAST_TIMER 61#ifdef CONFIG_ETRAX_FAST_TIMER
62/* Fast timer functions */ 62/* Fast timer functions */
@@ -66,3 +66,4 @@ EXPORT_SYMBOL(del_fast_timer);
66EXPORT_SYMBOL(schedule_usleep); 66EXPORT_SYMBOL(schedule_usleep);
67#endif 67#endif
68EXPORT_SYMBOL(csum_partial); 68EXPORT_SYMBOL(csum_partial);
69EXPORT_SYMBOL(csum_partial_copy_from_user);
diff --git a/arch/cris/kernel/traps.c b/arch/cris/kernel/traps.c
index 0ffda73734f5..da4c72401e27 100644
--- a/arch/cris/kernel/traps.c
+++ b/arch/cris/kernel/traps.c
@@ -14,6 +14,10 @@
14 14
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/utsname.h>
18#ifdef CONFIG_KALLSYMS
19#include <linux/kallsyms.h>
20#endif
17 21
18#include <asm/pgtable.h> 22#include <asm/pgtable.h>
19#include <asm/uaccess.h> 23#include <asm/uaccess.h>
@@ -34,25 +38,24 @@ static int kstack_depth_to_print = 24;
34 38
35void (*nmi_handler)(struct pt_regs *); 39void (*nmi_handler)(struct pt_regs *);
36 40
37void 41void show_trace(unsigned long *stack)
38show_trace(unsigned long *stack)
39{ 42{
40 unsigned long addr, module_start, module_end; 43 unsigned long addr, module_start, module_end;
41 extern char _stext, _etext; 44 extern char _stext, _etext;
42 int i; 45 int i;
43 46
44 printk("\nCall Trace: "); 47 pr_err("\nCall Trace: ");
45 48
46 i = 1; 49 i = 1;
47 module_start = VMALLOC_START; 50 module_start = VMALLOC_START;
48 module_end = VMALLOC_END; 51 module_end = VMALLOC_END;
49 52
50 while (((long)stack & (THREAD_SIZE-1)) != 0) { 53 while (((long)stack & (THREAD_SIZE - 1)) != 0) {
51 if (__get_user(addr, stack)) { 54 if (__get_user(addr, stack)) {
52 /* This message matches "failing address" marked 55 /* This message matches "failing address" marked
53 s390 in ksymoops, so lines containing it will 56 s390 in ksymoops, so lines containing it will
54 not be filtered out by ksymoops. */ 57 not be filtered out by ksymoops. */
55 printk("Failing address 0x%lx\n", (unsigned long)stack); 58 pr_err("Failing address 0x%lx\n", (unsigned long)stack);
56 break; 59 break;
57 } 60 }
58 stack++; 61 stack++;
@@ -68,10 +71,14 @@ show_trace(unsigned long *stack)
68 if (((addr >= (unsigned long)&_stext) && 71 if (((addr >= (unsigned long)&_stext) &&
69 (addr <= (unsigned long)&_etext)) || 72 (addr <= (unsigned long)&_etext)) ||
70 ((addr >= module_start) && (addr <= module_end))) { 73 ((addr >= module_start) && (addr <= module_end))) {
74#ifdef CONFIG_KALLSYMS
75 print_ip_sym(addr);
76#else
71 if (i && ((i % 8) == 0)) 77 if (i && ((i % 8) == 0))
72 printk("\n "); 78 pr_err("\n ");
73 printk("[<%08lx>] ", addr); 79 pr_err("[<%08lx>] ", addr);
74 i++; 80 i++;
81#endif
75 } 82 }
76 } 83 }
77} 84}
@@ -111,21 +118,21 @@ show_stack(struct task_struct *task, unsigned long *sp)
111 118
112 stack = sp; 119 stack = sp;
113 120
114 printk("\nStack from %08lx:\n ", (unsigned long)stack); 121 pr_err("\nStack from %08lx:\n ", (unsigned long)stack);
115 for (i = 0; i < kstack_depth_to_print; i++) { 122 for (i = 0; i < kstack_depth_to_print; i++) {
116 if (((long)stack & (THREAD_SIZE-1)) == 0) 123 if (((long)stack & (THREAD_SIZE-1)) == 0)
117 break; 124 break;
118 if (i && ((i % 8) == 0)) 125 if (i && ((i % 8) == 0))
119 printk("\n "); 126 pr_err("\n ");
120 if (__get_user(addr, stack)) { 127 if (__get_user(addr, stack)) {
121 /* This message matches "failing address" marked 128 /* This message matches "failing address" marked
122 s390 in ksymoops, so lines containing it will 129 s390 in ksymoops, so lines containing it will
123 not be filtered out by ksymoops. */ 130 not be filtered out by ksymoops. */
124 printk("Failing address 0x%lx\n", (unsigned long)stack); 131 pr_err("Failing address 0x%lx\n", (unsigned long)stack);
125 break; 132 break;
126 } 133 }
127 stack++; 134 stack++;
128 printk("%08lx ", addr); 135 pr_err("%08lx ", addr);
129 } 136 }
130 show_trace(sp); 137 show_trace(sp);
131} 138}
@@ -139,33 +146,32 @@ show_stack(void)
139 unsigned long *sp = (unsigned long *)rdusp(); 146 unsigned long *sp = (unsigned long *)rdusp();
140 int i; 147 int i;
141 148
142 printk("Stack dump [0x%08lx]:\n", (unsigned long)sp); 149 pr_err("Stack dump [0x%08lx]:\n", (unsigned long)sp);
143 for (i = 0; i < 16; i++) 150 for (i = 0; i < 16; i++)
144 printk("sp + %d: 0x%08lx\n", i*4, sp[i]); 151 pr_err("sp + %d: 0x%08lx\n", i*4, sp[i]);
145 return 0; 152 return 0;
146} 153}
147#endif 154#endif
148 155
149void 156void set_nmi_handler(void (*handler)(struct pt_regs *))
150set_nmi_handler(void (*handler)(struct pt_regs *))
151{ 157{
152 nmi_handler = handler; 158 nmi_handler = handler;
153 arch_enable_nmi(); 159 arch_enable_nmi();
154} 160}
155 161
156#ifdef CONFIG_DEBUG_NMI_OOPS 162#ifdef CONFIG_DEBUG_NMI_OOPS
157void 163void oops_nmi_handler(struct pt_regs *regs)
158oops_nmi_handler(struct pt_regs *regs)
159{ 164{
160 stop_watchdog(); 165 stop_watchdog();
161 oops_in_progress = 1; 166 oops_in_progress = 1;
162 printk("NMI!\n"); 167 pr_err("NMI!\n");
163 show_registers(regs); 168 show_registers(regs);
164 oops_in_progress = 0; 169 oops_in_progress = 0;
170 oops_exit();
171 pr_err("\n"); /* Flush mtdoops. */
165} 172}
166 173
167static int __init 174static int __init oops_nmi_register(void)
168oops_nmi_register(void)
169{ 175{
170 set_nmi_handler(oops_nmi_handler); 176 set_nmi_handler(oops_nmi_handler);
171 return 0; 177 return 0;
@@ -180,8 +186,7 @@ __initcall(oops_nmi_register);
180 * similar to an Oops dump, and if the kernel is configured to be a nice 186 * similar to an Oops dump, and if the kernel is configured to be a nice
181 * doggy, then halt instead of reboot. 187 * doggy, then halt instead of reboot.
182 */ 188 */
183void 189void watchdog_bite_hook(struct pt_regs *regs)
184watchdog_bite_hook(struct pt_regs *regs)
185{ 190{
186#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY 191#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
187 local_irq_disable(); 192 local_irq_disable();
@@ -196,8 +201,7 @@ watchdog_bite_hook(struct pt_regs *regs)
196} 201}
197 202
198/* This is normally the Oops function. */ 203/* This is normally the Oops function. */
199void 204void die_if_kernel(const char *str, struct pt_regs *regs, long err)
200die_if_kernel(const char *str, struct pt_regs *regs, long err)
201{ 205{
202 if (user_mode(regs)) 206 if (user_mode(regs))
203 return; 207 return;
@@ -211,13 +215,17 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
211 stop_watchdog(); 215 stop_watchdog();
212#endif 216#endif
213 217
218 oops_enter();
214 handle_BUG(regs); 219 handle_BUG(regs);
215 220
216 printk("%s: %04lx\n", str, err & 0xffff); 221 pr_err("Linux %s %s\n", utsname()->release, utsname()->version);
222 pr_err("%s: %04lx\n", str, err & 0xffff);
217 223
218 show_registers(regs); 224 show_registers(regs);
219 225
226 oops_exit();
220 oops_in_progress = 0; 227 oops_in_progress = 0;
228 pr_err("\n"); /* Flush mtdoops. */
221 229
222#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY 230#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
223 reset_watchdog(); 231 reset_watchdog();
@@ -225,8 +233,7 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
225 do_exit(SIGSEGV); 233 do_exit(SIGSEGV);
226} 234}
227 235
228void __init 236void __init trap_init(void)
229trap_init(void)
230{ 237{
231 /* Nothing needs to be done */ 238 /* Nothing needs to be done */
232} 239}
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
index c81af5bd9167..1e7fd45b60f8 100644
--- a/arch/cris/mm/init.c
+++ b/arch/cris/mm/init.c
@@ -11,13 +11,15 @@
11#include <linux/gfp.h> 11#include <linux/gfp.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/bootmem.h> 13#include <linux/bootmem.h>
14#include <linux/proc_fs.h>
15#include <linux/kcore.h>
14#include <asm/tlb.h> 16#include <asm/tlb.h>
15#include <asm/sections.h> 17#include <asm/sections.h>
16 18
17unsigned long empty_zero_page; 19unsigned long empty_zero_page;
20EXPORT_SYMBOL(empty_zero_page);
18 21
19void __init 22void __init mem_init(void)
20mem_init(void)
21{ 23{
22 BUG_ON(!mem_map); 24 BUG_ON(!mem_map);
23 25
@@ -31,10 +33,36 @@ mem_init(void)
31 mem_init_print_info(NULL); 33 mem_init_print_info(NULL);
32} 34}
33 35
34/* free the pages occupied by initialization code */ 36/* Free a range of init pages. Virtual addresses. */
35 37
36void 38void free_init_pages(const char *what, unsigned long begin, unsigned long end)
37free_initmem(void) 39{
40 unsigned long addr;
41
42 for (addr = begin; addr < end; addr += PAGE_SIZE) {
43 ClearPageReserved(virt_to_page(addr));
44 init_page_count(virt_to_page(addr));
45 free_page(addr);
46 totalram_pages++;
47 }
48
49 printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
50}
51
52/* Free the pages occupied by initialization code. */
53
54void free_initmem(void)
38{ 55{
39 free_initmem_default(-1); 56 free_initmem_default(-1);
40} 57}
58
59/* Free the pages occupied by initrd code. */
60
61#ifdef CONFIG_BLK_DEV_INITRD
62void free_initrd_mem(unsigned long start, unsigned long end)
63{
64 free_init_pages("initrd memory",
65 start,
66 end);
67}
68#endif
diff --git a/arch/cris/mm/ioremap.c b/arch/cris/mm/ioremap.c
index f9ca44bdea20..80fdb995a8ce 100644
--- a/arch/cris/mm/ioremap.c
+++ b/arch/cris/mm/ioremap.c
@@ -76,10 +76,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
76 * Must be freed with iounmap. 76 * Must be freed with iounmap.
77 */ 77 */
78 78
79void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size) 79void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
80{ 80{
81 return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0); 81 return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0);
82} 82}
83EXPORT_SYMBOL(ioremap_nocache);
83 84
84void iounmap(volatile void __iomem *addr) 85void iounmap(volatile void __iomem *addr)
85{ 86{
diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
index 263511719a4a..69952c184207 100644
--- a/arch/hexagon/include/asm/cache.h
+++ b/arch/hexagon/include/asm/cache.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Cache definitions for the Hexagon architecture 2 * Cache definitions for the Hexagon architecture
3 * 3 *
4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. 4 * Copyright (c) 2010-2011,2014 The Linux Foundation. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and 7 * it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,8 @@
25#define L1_CACHE_SHIFT (5) 25#define L1_CACHE_SHIFT (5)
26#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 26#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
27 27
28#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
29
28#define __cacheline_aligned __aligned(L1_CACHE_BYTES) 30#define __cacheline_aligned __aligned(L1_CACHE_BYTES)
29#define ____cacheline_aligned __aligned(L1_CACHE_BYTES) 31#define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
30 32
diff --git a/arch/hexagon/include/asm/cacheflush.h b/arch/hexagon/include/asm/cacheflush.h
index 49e0896ec240..b86f9f300e94 100644
--- a/arch/hexagon/include/asm/cacheflush.h
+++ b/arch/hexagon/include/asm/cacheflush.h
@@ -21,10 +21,7 @@
21#ifndef _ASM_CACHEFLUSH_H 21#ifndef _ASM_CACHEFLUSH_H
22#define _ASM_CACHEFLUSH_H 22#define _ASM_CACHEFLUSH_H
23 23
24#include <linux/cache.h> 24#include <linux/mm_types.h>
25#include <linux/mm.h>
26#include <asm/string.h>
27#include <asm-generic/cacheflush.h>
28 25
29/* Cache flushing: 26/* Cache flushing:
30 * 27 *
@@ -41,6 +38,20 @@
41#define LINESIZE 32 38#define LINESIZE 32
42#define LINEBITS 5 39#define LINEBITS 5
43 40
41#define flush_cache_all() do { } while (0)
42#define flush_cache_mm(mm) do { } while (0)
43#define flush_cache_dup_mm(mm) do { } while (0)
44#define flush_cache_range(vma, start, end) do { } while (0)
45#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
46#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
47#define flush_dcache_page(page) do { } while (0)
48#define flush_dcache_mmap_lock(mapping) do { } while (0)
49#define flush_dcache_mmap_unlock(mapping) do { } while (0)
50#define flush_icache_page(vma, pg) do { } while (0)
51#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
52#define flush_cache_vmap(start, end) do { } while (0)
53#define flush_cache_vunmap(start, end) do { } while (0)
54
44/* 55/*
45 * Flush Dcache range through current map. 56 * Flush Dcache range through current map.
46 */ 57 */
@@ -49,7 +60,6 @@ extern void flush_dcache_range(unsigned long start, unsigned long end);
49/* 60/*
50 * Flush Icache range through current map. 61 * Flush Icache range through current map.
51 */ 62 */
52#undef flush_icache_range
53extern void flush_icache_range(unsigned long start, unsigned long end); 63extern void flush_icache_range(unsigned long start, unsigned long end);
54 64
55/* 65/*
@@ -79,19 +89,11 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
79 /* generic_ptrace_pokedata doesn't wind up here, does it? */ 89 /* generic_ptrace_pokedata doesn't wind up here, does it? */
80} 90}
81 91
82#undef copy_to_user_page 92void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
83static inline void copy_to_user_page(struct vm_area_struct *vma, 93 unsigned long vaddr, void *dst, void *src, int len);
84 struct page *page,
85 unsigned long vaddr,
86 void *dst, void *src, int len)
87{
88 memcpy(dst, src, len);
89 if (vma->vm_flags & VM_EXEC) {
90 flush_icache_range((unsigned long) dst,
91 (unsigned long) dst + len);
92 }
93}
94 94
95#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
96 memcpy(dst, src, len)
95 97
96extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end); 98extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
97extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end); 99extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index 70298996e9b2..66f5e9a61efc 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -24,14 +24,9 @@
24#ifdef __KERNEL__ 24#ifdef __KERNEL__
25 25
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/delay.h>
28#include <linux/vmalloc.h>
29#include <asm/string.h>
30#include <asm/mem-layout.h>
31#include <asm/iomap.h> 27#include <asm/iomap.h>
32#include <asm/page.h> 28#include <asm/page.h>
33#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
34#include <asm/tlbflush.h>
35 30
36/* 31/*
37 * We don't have PCI yet. 32 * We don't have PCI yet.
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
index 0e7c1dbb37b2..6981949f5df3 100644
--- a/arch/hexagon/kernel/setup.c
+++ b/arch/hexagon/kernel/setup.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/delay.h>
22#include <linux/bootmem.h> 23#include <linux/bootmem.h>
23#include <linux/mmzone.h> 24#include <linux/mmzone.h>
24#include <linux/mm.h> 25#include <linux/mm.h>
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index 7858663352b9..110dab152f82 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Kernel traps/events for Hexagon processor 2 * Kernel traps/events for Hexagon processor
3 * 3 *
4 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. 4 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and 7 * it under the terms of the GNU General Public License version 2 and
@@ -423,7 +423,7 @@ void do_trap0(struct pt_regs *regs)
423 */ 423 */
424 info.si_code = TRAP_BRKPT; 424 info.si_code = TRAP_BRKPT;
425 info.si_addr = (void __user *) pt_elr(regs); 425 info.si_addr = (void __user *) pt_elr(regs);
426 send_sig_info(SIGTRAP, &info, current); 426 force_sig_info(SIGTRAP, &info, current);
427 } else { 427 } else {
428#ifdef CONFIG_KGDB 428#ifdef CONFIG_KGDB
429 kgdb_handle_exception(pt_cause(regs), SIGTRAP, 429 kgdb_handle_exception(pt_cause(regs), SIGTRAP,
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 44d8c47bae2f..5f268c1071b3 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -1,7 +1,7 @@
1/* 1/*
2 * Linker script for Hexagon kernel 2 * Linker script for Hexagon kernel
3 * 3 *
4 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. 4 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and 7 * it under the terms of the GNU General Public License version 2 and
@@ -59,7 +59,7 @@ SECTIONS
59 INIT_DATA_SECTION(PAGE_SIZE) 59 INIT_DATA_SECTION(PAGE_SIZE)
60 60
61 _sdata = .; 61 _sdata = .;
62 RW_DATA_SECTION(32,PAGE_SIZE,PAGE_SIZE) 62 RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE)
63 RO_DATA_SECTION(PAGE_SIZE) 63 RO_DATA_SECTION(PAGE_SIZE)
64 _edata = .; 64 _edata = .;
65 65
diff --git a/arch/hexagon/mm/cache.c b/arch/hexagon/mm/cache.c
index 0c76c802e31c..a7c6d827d8b6 100644
--- a/arch/hexagon/mm/cache.c
+++ b/arch/hexagon/mm/cache.c
@@ -127,3 +127,13 @@ void flush_cache_all_hexagon(void)
127 local_irq_restore(flags); 127 local_irq_restore(flags);
128 mb(); 128 mb();
129} 129}
130
131void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
132 unsigned long vaddr, void *dst, void *src, int len)
133{
134 memcpy(dst, src, len);
135 if (vma->vm_flags & VM_EXEC) {
136 flush_icache_range((unsigned long) dst,
137 (unsigned long) dst + len);
138 }
139}
diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c
index 5905fd5f97f6..d27d67224046 100644
--- a/arch/hexagon/mm/ioremap.c
+++ b/arch/hexagon/mm/ioremap.c
@@ -20,6 +20,7 @@
20 20
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/mm.h>
23 24
24void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size) 25void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
25{ 26{
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 14aa1c58912b..0ec484d2dcbc 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -35,8 +35,8 @@ extern void *per_cpu_init(void);
35 35
36/* 36/*
37 * Be extremely careful when taking the address of this variable! Due to virtual 37 * Be extremely careful when taking the address of this variable! Due to virtual
38 * remapping, it is different from the canonical address returned by __get_cpu_var(var)! 38 * remapping, it is different from the canonical address returned by this_cpu_ptr(&var)!
39 * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly 39 * On the positive side, using __ia64_per_cpu_var() instead of this_cpu_ptr() is slightly
40 * more efficient. 40 * more efficient.
41 */ 41 */
42#define __ia64_per_cpu_var(var) (*({ \ 42#define __ia64_per_cpu_var(var) (*({ \
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index 203e4403c366..48a9dfc55b51 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -374,7 +374,7 @@ static long alchemy_calc_div(unsigned long rate, unsigned long prate,
374 374
375static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, 375static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate,
376 unsigned long *best_parent_rate, 376 unsigned long *best_parent_rate,
377 struct clk **best_parent_clk, 377 struct clk_hw **best_parent_clk,
378 int scale, int maxdiv) 378 int scale, int maxdiv)
379{ 379{
380 struct clk *pc, *bpc, *free; 380 struct clk *pc, *bpc, *free;
@@ -453,7 +453,7 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate,
453 } 453 }
454 454
455 *best_parent_rate = bpr; 455 *best_parent_rate = bpr;
456 *best_parent_clk = bpc; 456 *best_parent_clk = __clk_get_hw(bpc);
457 return br; 457 return br;
458} 458}
459 459
@@ -547,7 +547,7 @@ static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
547 547
548static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate, 548static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate,
549 unsigned long *best_parent_rate, 549 unsigned long *best_parent_rate,
550 struct clk **best_parent_clk) 550 struct clk_hw **best_parent_clk)
551{ 551{
552 return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate, 552 return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate,
553 best_parent_clk, 2, 512); 553 best_parent_clk, 2, 512);
@@ -679,7 +679,7 @@ static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
679 679
680static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate, 680static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate,
681 unsigned long *best_parent_rate, 681 unsigned long *best_parent_rate,
682 struct clk **best_parent_clk) 682 struct clk_hw **best_parent_clk)
683{ 683{
684 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); 684 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
685 int scale, maxdiv; 685 int scale, maxdiv;
@@ -898,7 +898,7 @@ static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
898 898
899static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate, 899static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate,
900 unsigned long *best_parent_rate, 900 unsigned long *best_parent_rate,
901 struct clk **best_parent_clk) 901 struct clk_hw **best_parent_clk)
902{ 902{
903 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw); 903 struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
904 int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */ 904 int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
index 46e8f7676a15..3bdb72a70364 100644
--- a/arch/mips/configs/db1xxx_defconfig
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -36,7 +36,7 @@ CONFIG_PCI=y
36CONFIG_PCI_REALLOC_ENABLE_AUTO=y 36CONFIG_PCI_REALLOC_ENABLE_AUTO=y
37CONFIG_PCCARD=y 37CONFIG_PCCARD=y
38CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y 38CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y
39CONFIG_PM_RUNTIME=y 39CONFIG_PM=y
40CONFIG_NET=y 40CONFIG_NET=y
41CONFIG_PACKET=y 41CONFIG_PACKET=y
42CONFIG_PACKET_DIAG=y 42CONFIG_PACKET_DIAG=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 227a9de32246..e51aad9a94b1 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -37,7 +37,6 @@ CONFIG_MIPS32_N32=y
37CONFIG_PM=y 37CONFIG_PM=y
38CONFIG_HIBERNATION=y 38CONFIG_HIBERNATION=y
39CONFIG_PM_STD_PARTITION="/dev/hda3" 39CONFIG_PM_STD_PARTITION="/dev/hda3"
40CONFIG_PM_RUNTIME=y
41CONFIG_CPU_FREQ=y 40CONFIG_CPU_FREQ=y
42CONFIG_CPU_FREQ_DEBUG=y 41CONFIG_CPU_FREQ_DEBUG=y
43CONFIG_CPU_FREQ_STAT=m 42CONFIG_CPU_FREQ_STAT=m
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 1c6191ebd583..7eabcd2031ea 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -58,7 +58,7 @@ CONFIG_BINFMT_MISC=m
58CONFIG_MIPS32_COMPAT=y 58CONFIG_MIPS32_COMPAT=y
59CONFIG_MIPS32_O32=y 59CONFIG_MIPS32_O32=y
60CONFIG_MIPS32_N32=y 60CONFIG_MIPS32_N32=y
61CONFIG_PM_RUNTIME=y 61CONFIG_PM=y
62CONFIG_NET=y 62CONFIG_NET=y
63CONFIG_PACKET=y 63CONFIG_PACKET=y
64CONFIG_UNIX=y 64CONFIG_UNIX=y
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index 70509a48df82..b3d1d37f85ea 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -61,7 +61,7 @@ CONFIG_BINFMT_MISC=y
61CONFIG_MIPS32_COMPAT=y 61CONFIG_MIPS32_COMPAT=y
62CONFIG_MIPS32_O32=y 62CONFIG_MIPS32_O32=y
63CONFIG_MIPS32_N32=y 63CONFIG_MIPS32_N32=y
64CONFIG_PM_RUNTIME=y 64CONFIG_PM=y
65CONFIG_PM_DEBUG=y 65CONFIG_PM_DEBUG=y
66CONFIG_NET=y 66CONFIG_NET=y
67CONFIG_PACKET=y 67CONFIG_PACKET=y
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index 82207e8079f3..3d8016d6cf3e 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -41,7 +41,7 @@ CONFIG_PCI=y
41CONFIG_PCI_MSI=y 41CONFIG_PCI_MSI=y
42CONFIG_PCI_DEBUG=y 42CONFIG_PCI_DEBUG=y
43CONFIG_BINFMT_MISC=m 43CONFIG_BINFMT_MISC=m
44CONFIG_PM_RUNTIME=y 44CONFIG_PM=y
45CONFIG_PM_DEBUG=y 45CONFIG_PM_DEBUG=y
46CONFIG_NET=y 46CONFIG_NET=y
47CONFIG_PACKET=y 47CONFIG_PACKET=y
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 7cba480568c8..70795a67a276 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -30,7 +30,7 @@ retry:
30 30
31 return pte; 31 return pte;
32#else 32#else
33 return ACCESS_ONCE(*ptep); 33 return READ_ONCE(*ptep);
34#endif 34#endif
35} 35}
36 36
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 2e637c881d2b..879de5efb073 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -36,7 +36,7 @@ CONFIG_KEXEC=y
36CONFIG_SCHED_SMT=y 36CONFIG_SCHED_SMT=y
37CONFIG_CMDLINE_BOOL=y 37CONFIG_CMDLINE_BOOL=y
38CONFIG_CMDLINE="" 38CONFIG_CMDLINE=""
39CONFIG_PM_RUNTIME=y 39CONFIG_PM=y
40CONFIG_PM_DEBUG=y 40CONFIG_PM_DEBUG=y
41# CONFIG_SECCOMP is not set 41# CONFIG_SECCOMP is not set
42# CONFIG_PCI is not set 42# CONFIG_PCI is not set
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 8b9ccf02a2c5..8a1be9017730 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -227,12 +227,10 @@ static void ipte_lock_simple(struct kvm_vcpu *vcpu)
227 goto out; 227 goto out;
228 ic = &vcpu->kvm->arch.sca->ipte_control; 228 ic = &vcpu->kvm->arch.sca->ipte_control;
229 do { 229 do {
230 old = *ic; 230 old = READ_ONCE(*ic);
231 barrier();
232 while (old.k) { 231 while (old.k) {
233 cond_resched(); 232 cond_resched();
234 old = *ic; 233 old = READ_ONCE(*ic);
235 barrier();
236 } 234 }
237 new = old; 235 new = old;
238 new.k = 1; 236 new.k = 1;
@@ -251,8 +249,7 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
251 goto out; 249 goto out;
252 ic = &vcpu->kvm->arch.sca->ipte_control; 250 ic = &vcpu->kvm->arch.sca->ipte_control;
253 do { 251 do {
254 old = *ic; 252 old = READ_ONCE(*ic);
255 barrier();
256 new = old; 253 new = old;
257 new.k = 0; 254 new.k = 0;
258 } while (cmpxchg(&ic->val, old.val, new.val) != old.val); 255 } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
@@ -267,12 +264,10 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)
267 264
268 ic = &vcpu->kvm->arch.sca->ipte_control; 265 ic = &vcpu->kvm->arch.sca->ipte_control;
269 do { 266 do {
270 old = *ic; 267 old = READ_ONCE(*ic);
271 barrier();
272 while (old.kg) { 268 while (old.kg) {
273 cond_resched(); 269 cond_resched();
274 old = *ic; 270 old = READ_ONCE(*ic);
275 barrier();
276 } 271 }
277 new = old; 272 new = old;
278 new.k = 1; 273 new.k = 1;
@@ -286,8 +281,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
286 281
287 ic = &vcpu->kvm->arch.sca->ipte_control; 282 ic = &vcpu->kvm->arch.sca->ipte_control;
288 do { 283 do {
289 old = *ic; 284 old = READ_ONCE(*ic);
290 barrier();
291 new = old; 285 new = old;
292 new.kh--; 286 new.kh--;
293 if (!new.kh) 287 if (!new.kh)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index c6b6ee5f38b2..0f09f5285d5e 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -223,7 +223,7 @@ config CPU_SHX3
223config ARCH_SHMOBILE 223config ARCH_SHMOBILE
224 bool 224 bool
225 select ARCH_SUSPEND_POSSIBLE 225 select ARCH_SUSPEND_POSSIBLE
226 select PM_RUNTIME 226 select PM
227 227
228config CPU_HAS_PMU 228config CPU_HAS_PMU
229 depends on CPU_SH4 || CPU_SH4A 229 depends on CPU_SH4 || CPU_SH4A
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index ec70475da890..a8d975793b6d 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -47,7 +47,7 @@ CONFIG_PREEMPT=y
47CONFIG_BINFMT_MISC=y 47CONFIG_BINFMT_MISC=y
48CONFIG_PM=y 48CONFIG_PM=y
49CONFIG_PM_DEBUG=y 49CONFIG_PM_DEBUG=y
50CONFIG_PM_RUNTIME=y 50CONFIG_PM=y
51CONFIG_CPU_IDLE=y 51CONFIG_CPU_IDLE=y
52CONFIG_NET=y 52CONFIG_NET=y
53CONFIG_PACKET=y 53CONFIG_PACKET=y
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 76a76a295d74..e7e56a4131b4 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -82,7 +82,7 @@ CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
82CONFIG_BINFMT_MISC=y 82CONFIG_BINFMT_MISC=y
83CONFIG_PM=y 83CONFIG_PM=y
84CONFIG_PM_DEBUG=y 84CONFIG_PM_DEBUG=y
85CONFIG_PM_RUNTIME=y 85CONFIG_PM=y
86CONFIG_CPU_IDLE=y 86CONFIG_CPU_IDLE=y
87CONFIG_NET=y 87CONFIG_NET=y
88CONFIG_PACKET=y 88CONFIG_PACKET=y
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 5b016e2498f3..3db07f30636f 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -51,6 +51,7 @@ targets += cpustr.h
51$(obj)/cpustr.h: $(obj)/mkcpustr FORCE 51$(obj)/cpustr.h: $(obj)/mkcpustr FORCE
52 $(call if_changed,cpustr) 52 $(call if_changed,cpustr)
53endif 53endif
54clean-files += cpustr.h
54 55
55# --------------------------------------------------------------------------- 56# ---------------------------------------------------------------------------
56 57
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index a4efe477ceab..625660f8a2fc 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -92,7 +92,7 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
92 unsigned count = SPIN_THRESHOLD; 92 unsigned count = SPIN_THRESHOLD;
93 93
94 do { 94 do {
95 if (ACCESS_ONCE(lock->tickets.head) == inc.tail) 95 if (READ_ONCE(lock->tickets.head) == inc.tail)
96 goto out; 96 goto out;
97 cpu_relax(); 97 cpu_relax();
98 } while (--count); 98 } while (--count);
@@ -105,7 +105,7 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
105{ 105{
106 arch_spinlock_t old, new; 106 arch_spinlock_t old, new;
107 107
108 old.tickets = ACCESS_ONCE(lock->tickets); 108 old.tickets = READ_ONCE(lock->tickets);
109 if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG)) 109 if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
110 return 0; 110 return 0;
111 111
@@ -162,14 +162,14 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
162 162
163static inline int arch_spin_is_locked(arch_spinlock_t *lock) 163static inline int arch_spin_is_locked(arch_spinlock_t *lock)
164{ 164{
165 struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets); 165 struct __raw_tickets tmp = READ_ONCE(lock->tickets);
166 166
167 return tmp.tail != tmp.head; 167 return tmp.tail != tmp.head;
168} 168}
169 169
170static inline int arch_spin_is_contended(arch_spinlock_t *lock) 170static inline int arch_spin_is_contended(arch_spinlock_t *lock)
171{ 171{
172 struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets); 172 struct __raw_tickets tmp = READ_ONCE(lock->tickets);
173 173
174 return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC; 174 return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
175} 175}
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index e27b49d7c922..80091ae54c2b 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -66,3 +66,4 @@ targets += capflags.c
66$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE 66$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
67 $(call if_changed,mkcapflags) 67 $(call if_changed,mkcapflags)
68endif 68endif
69clean-files += capflags.c
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
index e2b22df964cd..36d99a337b49 100644
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -28,7 +28,7 @@ function dump_array()
28 # If the /* comment */ starts with a quote string, grab that. 28 # If the /* comment */ starts with a quote string, grab that.
29 VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')" 29 VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
30 [ -z "$VALUE" ] && VALUE="\"$NAME\"" 30 [ -z "$VALUE" ] && VALUE="\"$NAME\""
31 [ "$VALUE" == '""' ] && continue 31 [ "$VALUE" = '""' ] && continue
32 32
33 # Name is uppercase, VALUE is all lowercase 33 # Name is uppercase, VALUE is all lowercase
34 VALUE="$(echo "$VALUE" | tr A-Z a-z)" 34 VALUE="$(echo "$VALUE" | tr A-Z a-z)"
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 207d9aef662d..d7547824e763 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -15,7 +15,7 @@
15static inline pte_t gup_get_pte(pte_t *ptep) 15static inline pte_t gup_get_pte(pte_t *ptep)
16{ 16{
17#ifndef CONFIG_X86_PAE 17#ifndef CONFIG_X86_PAE
18 return ACCESS_ONCE(*ptep); 18 return READ_ONCE(*ptep);
19#else 19#else
20 /* 20 /*
21 * With get_user_pages_fast, we walk down the pagetables without taking 21 * With get_user_pages_fast, we walk down the pagetables without taking
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a97ee0801475..08a7d313538a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
438static unsigned long __init get_new_step_size(unsigned long step_size) 438static unsigned long __init get_new_step_size(unsigned long step_size)
439{ 439{
440 /* 440 /*
441 * Explain why we shift by 5 and why we don't have to worry about 441 * Initial mapped size is PMD_SIZE (2M).
442 * 'step_size << 5' overflowing:
443 *
444 * initial mapped size is PMD_SIZE (2M).
445 * We can not set step_size to be PUD_SIZE (1G) yet. 442 * We can not set step_size to be PUD_SIZE (1G) yet.
446 * In worse case, when we cross the 1G boundary, and 443 * In worse case, when we cross the 1G boundary, and
447 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k) 444 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
448 * to map 1G range with PTE. Use 5 as shift for now. 445 * to map 1G range with PTE. Hence we use one less than the
446 * difference of page table level shifts.
449 * 447 *
450 * Don't need to worry about overflow, on 32bit, when step_size 448 * Don't need to worry about overflow in the top-down case, on 32bit,
451 * is 0, round_down() returns 0 for start, and that turns it 449 * when step_size is 0, round_down() returns 0 for start, and that
452 * into 0x100000000ULL. 450 * turns it into 0x100000000ULL.
451 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
452 * needs to be taken into consideration by the code below.
453 */ 453 */
454 return step_size << 5; 454 return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
455} 455}
456 456
457/** 457/**
@@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
471 unsigned long step_size; 471 unsigned long step_size;
472 unsigned long addr; 472 unsigned long addr;
473 unsigned long mapped_ram_size = 0; 473 unsigned long mapped_ram_size = 0;
474 unsigned long new_mapped_ram_size;
475 474
476 /* xen has big range in reserved near end of ram, skip it at first.*/ 475 /* xen has big range in reserved near end of ram, skip it at first.*/
477 addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE); 476 addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
496 start = map_start; 495 start = map_start;
497 } else 496 } else
498 start = map_start; 497 start = map_start;
499 new_mapped_ram_size = init_range_memory_mapping(start, 498 mapped_ram_size += init_range_memory_mapping(start,
500 last_start); 499 last_start);
501 last_start = start; 500 last_start = start;
502 min_pfn_mapped = last_start >> PAGE_SHIFT; 501 min_pfn_mapped = last_start >> PAGE_SHIFT;
503 /* only increase step_size after big range get mapped */ 502 if (mapped_ram_size >= step_size)
504 if (new_mapped_ram_size > mapped_ram_size)
505 step_size = get_new_step_size(step_size); 503 step_size = get_new_step_size(step_size);
506 mapped_ram_size += new_mapped_ram_size;
507 } 504 }
508 505
509 if (real_end < map_end) 506 if (real_end < map_end)
@@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
524static void __init memory_map_bottom_up(unsigned long map_start, 521static void __init memory_map_bottom_up(unsigned long map_start,
525 unsigned long map_end) 522 unsigned long map_end)
526{ 523{
527 unsigned long next, new_mapped_ram_size, start; 524 unsigned long next, start;
528 unsigned long mapped_ram_size = 0; 525 unsigned long mapped_ram_size = 0;
529 /* step_size need to be small so pgt_buf from BRK could cover it */ 526 /* step_size need to be small so pgt_buf from BRK could cover it */
530 unsigned long step_size = PMD_SIZE; 527 unsigned long step_size = PMD_SIZE;
@@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
539 * for page table. 536 * for page table.
540 */ 537 */
541 while (start < map_end) { 538 while (start < map_end) {
542 if (map_end - start > step_size) { 539 if (step_size && map_end - start > step_size) {
543 next = round_up(start + 1, step_size); 540 next = round_up(start + 1, step_size);
544 if (next > map_end) 541 if (next > map_end)
545 next = map_end; 542 next = map_end;
546 } else 543 } else {
547 next = map_end; 544 next = map_end;
545 }
548 546
549 new_mapped_ram_size = init_range_memory_mapping(start, next); 547 mapped_ram_size += init_range_memory_mapping(start, next);
550 start = next; 548 start = next;
551 549
552 if (new_mapped_ram_size > mapped_ram_size) 550 if (mapped_ram_size >= step_size)
553 step_size = get_new_step_size(step_size); 551 step_size = get_new_step_size(step_size);
554 mapped_ram_size += new_mapped_ram_size;
555 } 552 }
556} 553}
557 554
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 62e2509f9df1..bbdb1b985c91 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -57,7 +57,7 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
57static long clk_programmable_determine_rate(struct clk_hw *hw, 57static long clk_programmable_determine_rate(struct clk_hw *hw,
58 unsigned long rate, 58 unsigned long rate,
59 unsigned long *best_parent_rate, 59 unsigned long *best_parent_rate,
60 struct clk **best_parent_clk) 60 struct clk_hw **best_parent_hw)
61{ 61{
62 struct clk *parent = NULL; 62 struct clk *parent = NULL;
63 long best_rate = -EINVAL; 63 long best_rate = -EINVAL;
@@ -84,7 +84,7 @@ static long clk_programmable_determine_rate(struct clk_hw *hw,
84 if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) { 84 if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) {
85 best_rate = tmp_rate; 85 best_rate = tmp_rate;
86 *best_parent_rate = parent_rate; 86 *best_parent_rate = parent_rate;
87 *best_parent_clk = parent; 87 *best_parent_hw = __clk_get_hw(parent);
88 } 88 }
89 89
90 if (!best_rate) 90 if (!best_rate)
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 95af2e665dd3..1c06f6f3a8c5 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -1032,7 +1032,7 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
1032} 1032}
1033 1033
1034static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate, 1034static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
1035 unsigned long *best_parent_rate, struct clk **best_parent) 1035 unsigned long *best_parent_rate, struct clk_hw **best_parent)
1036{ 1036{
1037 struct kona_clk *bcm_clk = to_kona_clk(hw); 1037 struct kona_clk *bcm_clk = to_kona_clk(hw);
1038 struct clk *clk = hw->clk; 1038 struct clk *clk = hw->clk;
@@ -1075,7 +1075,7 @@ static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
1075 if (delta < best_delta) { 1075 if (delta < best_delta) {
1076 best_delta = delta; 1076 best_delta = delta;
1077 best_rate = other_rate; 1077 best_rate = other_rate;
1078 *best_parent = parent; 1078 *best_parent = __clk_get_hw(parent);
1079 *best_parent_rate = parent_rate; 1079 *best_parent_rate = parent_rate;
1080 } 1080 }
1081 } 1081 }
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index b9355daf8065..4386697236a7 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -57,7 +57,7 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
57 57
58static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate, 58static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
59 unsigned long *best_parent_rate, 59 unsigned long *best_parent_rate,
60 struct clk **best_parent_p) 60 struct clk_hw **best_parent_p)
61{ 61{
62 struct clk_composite *composite = to_clk_composite(hw); 62 struct clk_composite *composite = to_clk_composite(hw);
63 const struct clk_ops *rate_ops = composite->rate_ops; 63 const struct clk_ops *rate_ops = composite->rate_ops;
@@ -80,8 +80,9 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
80 *best_parent_p = NULL; 80 *best_parent_p = NULL;
81 81
82 if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) { 82 if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) {
83 *best_parent_p = clk_get_parent(mux_hw->clk); 83 parent = clk_get_parent(mux_hw->clk);
84 *best_parent_rate = __clk_get_rate(*best_parent_p); 84 *best_parent_p = __clk_get_hw(parent);
85 *best_parent_rate = __clk_get_rate(parent);
85 86
86 return rate_ops->round_rate(rate_hw, rate, 87 return rate_ops->round_rate(rate_hw, rate,
87 best_parent_rate); 88 best_parent_rate);
@@ -103,7 +104,7 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
103 104
104 if (!rate_diff || !*best_parent_p 105 if (!rate_diff || !*best_parent_p
105 || best_rate_diff > rate_diff) { 106 || best_rate_diff > rate_diff) {
106 *best_parent_p = parent; 107 *best_parent_p = __clk_get_hw(parent);
107 *best_parent_rate = parent_rate; 108 *best_parent_rate = parent_rate;
108 best_rate_diff = rate_diff; 109 best_rate_diff = rate_diff;
109 best_rate = tmp_rate; 110 best_rate = tmp_rate;
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 4f96ff3ba728..6e1ecf94bf58 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -77,7 +77,7 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
77 77
78 else { 78 else {
79 if (mux->flags & CLK_MUX_INDEX_BIT) 79 if (mux->flags & CLK_MUX_INDEX_BIT)
80 index = (1 << ffs(index)); 80 index = 1 << index;
81 81
82 if (mux->flags & CLK_MUX_INDEX_ONE) 82 if (mux->flags & CLK_MUX_INDEX_ONE)
83 index++; 83 index++;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 87a41038237d..bfa1e64e267d 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -218,7 +218,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
218 default: 218 default:
219 dev_err(&pdev->dev, "Invalid device type\n"); 219 dev_err(&pdev->dev, "Invalid device type\n");
220 return -EINVAL; 220 return -EINVAL;
221 }; 221 }
222 222
223 /* Store clocks of_node in first element of s2mps11_clks array */ 223 /* Store clocks of_node in first element of s2mps11_clks array */
224 s2mps11_clks->clk_np = s2mps11_clk_parse_dt(pdev, clks_init); 224 s2mps11_clks->clk_np = s2mps11_clk_parse_dt(pdev, clks_init);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 4896ae9e23da..f4963b7d4e17 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -240,7 +240,6 @@ static const struct file_operations clk_dump_fops = {
240 .release = single_release, 240 .release = single_release,
241}; 241};
242 242
243/* caller must hold prepare_lock */
244static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry) 243static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
245{ 244{
246 struct dentry *d; 245 struct dentry *d;
@@ -354,13 +353,13 @@ out:
354 mutex_unlock(&clk_debug_lock); 353 mutex_unlock(&clk_debug_lock);
355} 354}
356 355
357struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode, 356struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
358 void *data, const struct file_operations *fops) 357 void *data, const struct file_operations *fops)
359{ 358{
360 struct dentry *d = NULL; 359 struct dentry *d = NULL;
361 360
362 if (clk->dentry) 361 if (hw->clk->dentry)
363 d = debugfs_create_file(name, mode, clk->dentry, data, fops); 362 d = debugfs_create_file(name, mode, hw->clk->dentry, data, fops);
364 363
365 return d; 364 return d;
366} 365}
@@ -574,11 +573,6 @@ unsigned int __clk_get_enable_count(struct clk *clk)
574 return !clk ? 0 : clk->enable_count; 573 return !clk ? 0 : clk->enable_count;
575} 574}
576 575
577unsigned int __clk_get_prepare_count(struct clk *clk)
578{
579 return !clk ? 0 : clk->prepare_count;
580}
581
582unsigned long __clk_get_rate(struct clk *clk) 576unsigned long __clk_get_rate(struct clk *clk)
583{ 577{
584 unsigned long ret; 578 unsigned long ret;
@@ -601,7 +595,7 @@ out:
601} 595}
602EXPORT_SYMBOL_GPL(__clk_get_rate); 596EXPORT_SYMBOL_GPL(__clk_get_rate);
603 597
604unsigned long __clk_get_accuracy(struct clk *clk) 598static unsigned long __clk_get_accuracy(struct clk *clk)
605{ 599{
606 if (!clk) 600 if (!clk)
607 return 0; 601 return 0;
@@ -707,7 +701,7 @@ struct clk *__clk_lookup(const char *name)
707 */ 701 */
708long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, 702long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
709 unsigned long *best_parent_rate, 703 unsigned long *best_parent_rate,
710 struct clk **best_parent_p) 704 struct clk_hw **best_parent_p)
711{ 705{
712 struct clk *clk = hw->clk, *parent, *best_parent = NULL; 706 struct clk *clk = hw->clk, *parent, *best_parent = NULL;
713 int i, num_parents; 707 int i, num_parents;
@@ -743,7 +737,7 @@ long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
743 737
744out: 738out:
745 if (best_parent) 739 if (best_parent)
746 *best_parent_p = best_parent; 740 *best_parent_p = best_parent->hw;
747 *best_parent_rate = best; 741 *best_parent_rate = best;
748 742
749 return best; 743 return best;
@@ -951,6 +945,7 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
951{ 945{
952 unsigned long parent_rate = 0; 946 unsigned long parent_rate = 0;
953 struct clk *parent; 947 struct clk *parent;
948 struct clk_hw *parent_hw;
954 949
955 if (!clk) 950 if (!clk)
956 return 0; 951 return 0;
@@ -959,10 +954,11 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
959 if (parent) 954 if (parent)
960 parent_rate = parent->rate; 955 parent_rate = parent->rate;
961 956
962 if (clk->ops->determine_rate) 957 if (clk->ops->determine_rate) {
958 parent_hw = parent ? parent->hw : NULL;
963 return clk->ops->determine_rate(clk->hw, rate, &parent_rate, 959 return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
964 &parent); 960 &parent_hw);
965 else if (clk->ops->round_rate) 961 } else if (clk->ops->round_rate)
966 return clk->ops->round_rate(clk->hw, rate, &parent_rate); 962 return clk->ops->round_rate(clk->hw, rate, &parent_rate);
967 else if (clk->flags & CLK_SET_RATE_PARENT) 963 else if (clk->flags & CLK_SET_RATE_PARENT)
968 return __clk_round_rate(clk->parent, rate); 964 return __clk_round_rate(clk->parent, rate);
@@ -1350,6 +1346,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
1350{ 1346{
1351 struct clk *top = clk; 1347 struct clk *top = clk;
1352 struct clk *old_parent, *parent; 1348 struct clk *old_parent, *parent;
1349 struct clk_hw *parent_hw;
1353 unsigned long best_parent_rate = 0; 1350 unsigned long best_parent_rate = 0;
1354 unsigned long new_rate; 1351 unsigned long new_rate;
1355 int p_index = 0; 1352 int p_index = 0;
@@ -1365,9 +1362,11 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
1365 1362
1366 /* find the closest rate and parent clk/rate */ 1363 /* find the closest rate and parent clk/rate */
1367 if (clk->ops->determine_rate) { 1364 if (clk->ops->determine_rate) {
1365 parent_hw = parent ? parent->hw : NULL;
1368 new_rate = clk->ops->determine_rate(clk->hw, rate, 1366 new_rate = clk->ops->determine_rate(clk->hw, rate,
1369 &best_parent_rate, 1367 &best_parent_rate,
1370 &parent); 1368 &parent_hw);
1369 parent = parent_hw->clk;
1371 } else if (clk->ops->round_rate) { 1370 } else if (clk->ops->round_rate) {
1372 new_rate = clk->ops->round_rate(clk->hw, rate, 1371 new_rate = clk->ops->round_rate(clk->hw, rate,
1373 &best_parent_rate); 1372 &best_parent_rate);
@@ -1614,7 +1613,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
1614 1613
1615 if (clk->num_parents == 1) { 1614 if (clk->num_parents == 1) {
1616 if (IS_ERR_OR_NULL(clk->parent)) 1615 if (IS_ERR_OR_NULL(clk->parent))
1617 ret = clk->parent = __clk_lookup(clk->parent_names[0]); 1616 clk->parent = __clk_lookup(clk->parent_names[0]);
1618 ret = clk->parent; 1617 ret = clk->parent;
1619 goto out; 1618 goto out;
1620 } 1619 }
@@ -1944,7 +1943,6 @@ int __clk_init(struct device *dev, struct clk *clk)
1944 else 1943 else
1945 clk->rate = 0; 1944 clk->rate = 0;
1946 1945
1947 clk_debug_register(clk);
1948 /* 1946 /*
1949 * walk the list of orphan clocks and reparent any that are children of 1947 * walk the list of orphan clocks and reparent any that are children of
1950 * this clock 1948 * this clock
@@ -1979,6 +1977,9 @@ int __clk_init(struct device *dev, struct clk *clk)
1979out: 1977out:
1980 clk_prepare_unlock(); 1978 clk_prepare_unlock();
1981 1979
1980 if (!ret)
1981 clk_debug_register(clk);
1982
1982 return ret; 1983 return ret;
1983} 1984}
1984 1985
@@ -2273,14 +2274,17 @@ int __clk_get(struct clk *clk)
2273 2274
2274void __clk_put(struct clk *clk) 2275void __clk_put(struct clk *clk)
2275{ 2276{
2277 struct module *owner;
2278
2276 if (!clk || WARN_ON_ONCE(IS_ERR(clk))) 2279 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2277 return; 2280 return;
2278 2281
2279 clk_prepare_lock(); 2282 clk_prepare_lock();
2283 owner = clk->owner;
2280 kref_put(&clk->ref, __clk_release); 2284 kref_put(&clk->ref, __clk_release);
2281 clk_prepare_unlock(); 2285 clk_prepare_unlock();
2282 2286
2283 module_put(clk->owner); 2287 module_put(owner);
2284} 2288}
2285 2289
2286/*** clk rate change notifiers ***/ 2290/*** clk rate change notifiers ***/
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index 339945d2503b..007144f81f50 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -38,44 +38,44 @@
38#include "clk.h" 38#include "clk.h"
39 39
40/* clock parent list */ 40/* clock parent list */
41static const char *timer0_mux_p[] __initdata = { "osc32k", "timerclk01", }; 41static const char *timer0_mux_p[] __initconst = { "osc32k", "timerclk01", };
42static const char *timer1_mux_p[] __initdata = { "osc32k", "timerclk01", }; 42static const char *timer1_mux_p[] __initconst = { "osc32k", "timerclk01", };
43static const char *timer2_mux_p[] __initdata = { "osc32k", "timerclk23", }; 43static const char *timer2_mux_p[] __initconst = { "osc32k", "timerclk23", };
44static const char *timer3_mux_p[] __initdata = { "osc32k", "timerclk23", }; 44static const char *timer3_mux_p[] __initconst = { "osc32k", "timerclk23", };
45static const char *timer4_mux_p[] __initdata = { "osc32k", "timerclk45", }; 45static const char *timer4_mux_p[] __initconst = { "osc32k", "timerclk45", };
46static const char *timer5_mux_p[] __initdata = { "osc32k", "timerclk45", }; 46static const char *timer5_mux_p[] __initconst = { "osc32k", "timerclk45", };
47static const char *timer6_mux_p[] __initdata = { "osc32k", "timerclk67", }; 47static const char *timer6_mux_p[] __initconst = { "osc32k", "timerclk67", };
48static const char *timer7_mux_p[] __initdata = { "osc32k", "timerclk67", }; 48static const char *timer7_mux_p[] __initconst = { "osc32k", "timerclk67", };
49static const char *timer8_mux_p[] __initdata = { "osc32k", "timerclk89", }; 49static const char *timer8_mux_p[] __initconst = { "osc32k", "timerclk89", };
50static const char *timer9_mux_p[] __initdata = { "osc32k", "timerclk89", }; 50static const char *timer9_mux_p[] __initconst = { "osc32k", "timerclk89", };
51static const char *uart0_mux_p[] __initdata = { "osc26m", "pclk", }; 51static const char *uart0_mux_p[] __initconst = { "osc26m", "pclk", };
52static const char *uart1_mux_p[] __initdata = { "osc26m", "pclk", }; 52static const char *uart1_mux_p[] __initconst = { "osc26m", "pclk", };
53static const char *uart2_mux_p[] __initdata = { "osc26m", "pclk", }; 53static const char *uart2_mux_p[] __initconst = { "osc26m", "pclk", };
54static const char *uart3_mux_p[] __initdata = { "osc26m", "pclk", }; 54static const char *uart3_mux_p[] __initconst = { "osc26m", "pclk", };
55static const char *uart4_mux_p[] __initdata = { "osc26m", "pclk", }; 55static const char *uart4_mux_p[] __initconst = { "osc26m", "pclk", };
56static const char *spi0_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", }; 56static const char *spi0_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
57static const char *spi1_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", }; 57static const char *spi1_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
58static const char *spi2_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", }; 58static const char *spi2_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
59/* share axi parent */ 59/* share axi parent */
60static const char *saxi_mux_p[] __initdata = { "armpll3", "armpll2", }; 60static const char *saxi_mux_p[] __initconst = { "armpll3", "armpll2", };
61static const char *pwm0_mux_p[] __initdata = { "osc32k", "osc26m", }; 61static const char *pwm0_mux_p[] __initconst = { "osc32k", "osc26m", };
62static const char *pwm1_mux_p[] __initdata = { "osc32k", "osc26m", }; 62static const char *pwm1_mux_p[] __initconst = { "osc32k", "osc26m", };
63static const char *sd_mux_p[] __initdata = { "armpll2", "armpll3", }; 63static const char *sd_mux_p[] __initconst = { "armpll2", "armpll3", };
64static const char *mmc1_mux_p[] __initdata = { "armpll2", "armpll3", }; 64static const char *mmc1_mux_p[] __initconst = { "armpll2", "armpll3", };
65static const char *mmc1_mux2_p[] __initdata = { "osc26m", "mmc1_div", }; 65static const char *mmc1_mux2_p[] __initconst = { "osc26m", "mmc1_div", };
66static const char *g2d_mux_p[] __initdata = { "armpll2", "armpll3", }; 66static const char *g2d_mux_p[] __initconst = { "armpll2", "armpll3", };
67static const char *venc_mux_p[] __initdata = { "armpll2", "armpll3", }; 67static const char *venc_mux_p[] __initconst = { "armpll2", "armpll3", };
68static const char *vdec_mux_p[] __initdata = { "armpll2", "armpll3", }; 68static const char *vdec_mux_p[] __initconst = { "armpll2", "armpll3", };
69static const char *vpp_mux_p[] __initdata = { "armpll2", "armpll3", }; 69static const char *vpp_mux_p[] __initconst = { "armpll2", "armpll3", };
70static const char *edc0_mux_p[] __initdata = { "armpll2", "armpll3", }; 70static const char *edc0_mux_p[] __initconst = { "armpll2", "armpll3", };
71static const char *ldi0_mux_p[] __initdata = { "armpll2", "armpll4", 71static const char *ldi0_mux_p[] __initconst = { "armpll2", "armpll4",
72 "armpll3", "armpll5", }; 72 "armpll3", "armpll5", };
73static const char *edc1_mux_p[] __initdata = { "armpll2", "armpll3", }; 73static const char *edc1_mux_p[] __initconst = { "armpll2", "armpll3", };
74static const char *ldi1_mux_p[] __initdata = { "armpll2", "armpll4", 74static const char *ldi1_mux_p[] __initconst = { "armpll2", "armpll4",
75 "armpll3", "armpll5", }; 75 "armpll3", "armpll5", };
76static const char *rclk_hsic_p[] __initdata = { "armpll3", "armpll2", }; 76static const char *rclk_hsic_p[] __initconst = { "armpll3", "armpll2", };
77static const char *mmc2_mux_p[] __initdata = { "armpll2", "armpll3", }; 77static const char *mmc2_mux_p[] __initconst = { "armpll2", "armpll3", };
78static const char *mmc3_mux_p[] __initdata = { "armpll2", "armpll3", }; 78static const char *mmc3_mux_p[] __initconst = { "armpll2", "armpll3", };
79 79
80 80
81/* fixed rate clocks */ 81/* fixed rate clocks */
@@ -296,7 +296,7 @@ static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw,
296 296
297static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate, 297static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
298 unsigned long *best_parent_rate, 298 unsigned long *best_parent_rate,
299 struct clk **best_parent_p) 299 struct clk_hw **best_parent_p)
300{ 300{
301 struct clk_mmc *mclk = to_mmc(hw); 301 struct clk_mmc *mclk = to_mmc(hw);
302 unsigned long best = 0; 302 unsigned long best = 0;
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index 392d78044ce3..3caaf7cc169c 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -2,7 +2,12 @@
2# Makefile for mmp specific clk 2# Makefile for mmp specific clk
3# 3#
4 4
5obj-y += clk-apbc.o clk-apmu.o clk-frac.o 5obj-y += clk-apbc.o clk-apmu.o clk-frac.o clk-mix.o clk-gate.o clk.o
6
7obj-$(CONFIG_RESET_CONTROLLER) += reset.o
8
9obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
10obj-$(CONFIG_MACH_MMP2_DT) += clk-of-mmp2.o
6 11
7obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o 12obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o
8obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o 13obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 23a56f561812..584a9927993b 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -22,19 +22,12 @@
22 * numerator/denominator = Fin / (Fout * factor) 22 * numerator/denominator = Fin / (Fout * factor)
23 */ 23 */
24 24
25#define to_clk_factor(hw) container_of(hw, struct clk_factor, hw) 25#define to_clk_factor(hw) container_of(hw, struct mmp_clk_factor, hw)
26struct clk_factor {
27 struct clk_hw hw;
28 void __iomem *base;
29 struct clk_factor_masks *masks;
30 struct clk_factor_tbl *ftbl;
31 unsigned int ftbl_cnt;
32};
33 26
34static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate, 27static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
35 unsigned long *prate) 28 unsigned long *prate)
36{ 29{
37 struct clk_factor *factor = to_clk_factor(hw); 30 struct mmp_clk_factor *factor = to_clk_factor(hw);
38 unsigned long rate = 0, prev_rate; 31 unsigned long rate = 0, prev_rate;
39 int i; 32 int i;
40 33
@@ -58,8 +51,8 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
58static unsigned long clk_factor_recalc_rate(struct clk_hw *hw, 51static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
59 unsigned long parent_rate) 52 unsigned long parent_rate)
60{ 53{
61 struct clk_factor *factor = to_clk_factor(hw); 54 struct mmp_clk_factor *factor = to_clk_factor(hw);
62 struct clk_factor_masks *masks = factor->masks; 55 struct mmp_clk_factor_masks *masks = factor->masks;
63 unsigned int val, num, den; 56 unsigned int val, num, den;
64 57
65 val = readl_relaxed(factor->base); 58 val = readl_relaxed(factor->base);
@@ -81,11 +74,12 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
81static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate, 74static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
82 unsigned long prate) 75 unsigned long prate)
83{ 76{
84 struct clk_factor *factor = to_clk_factor(hw); 77 struct mmp_clk_factor *factor = to_clk_factor(hw);
85 struct clk_factor_masks *masks = factor->masks; 78 struct mmp_clk_factor_masks *masks = factor->masks;
86 int i; 79 int i;
87 unsigned long val; 80 unsigned long val;
88 unsigned long prev_rate, rate = 0; 81 unsigned long prev_rate, rate = 0;
82 unsigned long flags = 0;
89 83
90 for (i = 0; i < factor->ftbl_cnt; i++) { 84 for (i = 0; i < factor->ftbl_cnt; i++) {
91 prev_rate = rate; 85 prev_rate = rate;
@@ -97,6 +91,9 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
97 if (i > 0) 91 if (i > 0)
98 i--; 92 i--;
99 93
94 if (factor->lock)
95 spin_lock_irqsave(factor->lock, flags);
96
100 val = readl_relaxed(factor->base); 97 val = readl_relaxed(factor->base);
101 98
102 val &= ~(masks->num_mask << masks->num_shift); 99 val &= ~(masks->num_mask << masks->num_shift);
@@ -107,21 +104,65 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
107 104
108 writel_relaxed(val, factor->base); 105 writel_relaxed(val, factor->base);
109 106
107 if (factor->lock)
108 spin_unlock_irqrestore(factor->lock, flags);
109
110 return 0; 110 return 0;
111} 111}
112 112
113static void clk_factor_init(struct clk_hw *hw)
114{
115 struct mmp_clk_factor *factor = to_clk_factor(hw);
116 struct mmp_clk_factor_masks *masks = factor->masks;
117 u32 val, num, den;
118 int i;
119 unsigned long flags = 0;
120
121 if (factor->lock)
122 spin_lock_irqsave(factor->lock, flags);
123
124 val = readl(factor->base);
125
126 /* calculate numerator */
127 num = (val >> masks->num_shift) & masks->num_mask;
128
129 /* calculate denominator */
130 den = (val >> masks->den_shift) & masks->den_mask;
131
132 for (i = 0; i < factor->ftbl_cnt; i++)
133 if (den == factor->ftbl[i].den && num == factor->ftbl[i].num)
134 break;
135
136 if (i >= factor->ftbl_cnt) {
137 val &= ~(masks->num_mask << masks->num_shift);
138 val |= (factor->ftbl[0].num & masks->num_mask) <<
139 masks->num_shift;
140
141 val &= ~(masks->den_mask << masks->den_shift);
142 val |= (factor->ftbl[0].den & masks->den_mask) <<
143 masks->den_shift;
144
145 writel(val, factor->base);
146 }
147
148 if (factor->lock)
149 spin_unlock_irqrestore(factor->lock, flags);
150}
151
113static struct clk_ops clk_factor_ops = { 152static struct clk_ops clk_factor_ops = {
114 .recalc_rate = clk_factor_recalc_rate, 153 .recalc_rate = clk_factor_recalc_rate,
115 .round_rate = clk_factor_round_rate, 154 .round_rate = clk_factor_round_rate,
116 .set_rate = clk_factor_set_rate, 155 .set_rate = clk_factor_set_rate,
156 .init = clk_factor_init,
117}; 157};
118 158
119struct clk *mmp_clk_register_factor(const char *name, const char *parent_name, 159struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
120 unsigned long flags, void __iomem *base, 160 unsigned long flags, void __iomem *base,
121 struct clk_factor_masks *masks, struct clk_factor_tbl *ftbl, 161 struct mmp_clk_factor_masks *masks,
122 unsigned int ftbl_cnt) 162 struct mmp_clk_factor_tbl *ftbl,
163 unsigned int ftbl_cnt, spinlock_t *lock)
123{ 164{
124 struct clk_factor *factor; 165 struct mmp_clk_factor *factor;
125 struct clk_init_data init; 166 struct clk_init_data init;
126 struct clk *clk; 167 struct clk *clk;
127 168
@@ -142,6 +183,7 @@ struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
142 factor->ftbl = ftbl; 183 factor->ftbl = ftbl;
143 factor->ftbl_cnt = ftbl_cnt; 184 factor->ftbl_cnt = ftbl_cnt;
144 factor->hw.init = &init; 185 factor->hw.init = &init;
186 factor->lock = lock;
145 187
146 init.name = name; 188 init.name = name;
147 init.ops = &clk_factor_ops; 189 init.ops = &clk_factor_ops;
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
new file mode 100644
index 000000000000..adbd9d64ded2
--- /dev/null
+++ b/drivers/clk/mmp/clk-gate.c
@@ -0,0 +1,133 @@
1/*
2 * mmp gate clock operation source file
3 *
4 * Copyright (C) 2014 Marvell
5 * Chao Xie <chao.xie@marvell.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/clk-provider.h>
13#include <linux/slab.h>
14#include <linux/io.h>
15#include <linux/err.h>
16#include <linux/delay.h>
17
18#include "clk.h"
19
20/*
21 * Some clocks will have mutiple bits to enable the clocks, and
22 * the bits to disable the clock is not same as enabling bits.
23 */
24
25#define to_clk_mmp_gate(hw) container_of(hw, struct mmp_clk_gate, hw)
26
27static int mmp_clk_gate_enable(struct clk_hw *hw)
28{
29 struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
30 struct clk *clk = hw->clk;
31 unsigned long flags = 0;
32 unsigned long rate;
33 u32 tmp;
34
35 if (gate->lock)
36 spin_lock_irqsave(gate->lock, flags);
37
38 tmp = readl(gate->reg);
39 tmp &= ~gate->mask;
40 tmp |= gate->val_enable;
41 writel(tmp, gate->reg);
42
43 if (gate->lock)
44 spin_unlock_irqrestore(gate->lock, flags);
45
46 if (gate->flags & MMP_CLK_GATE_NEED_DELAY) {
47 rate = __clk_get_rate(clk);
48 /* Need delay 2 cycles. */
49 udelay(2000000/rate);
50 }
51
52 return 0;
53}
54
55static void mmp_clk_gate_disable(struct clk_hw *hw)
56{
57 struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
58 unsigned long flags = 0;
59 u32 tmp;
60
61 if (gate->lock)
62 spin_lock_irqsave(gate->lock, flags);
63
64 tmp = readl(gate->reg);
65 tmp &= ~gate->mask;
66 tmp |= gate->val_disable;
67 writel(tmp, gate->reg);
68
69 if (gate->lock)
70 spin_unlock_irqrestore(gate->lock, flags);
71}
72
73static int mmp_clk_gate_is_enabled(struct clk_hw *hw)
74{
75 struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
76 unsigned long flags = 0;
77 u32 tmp;
78
79 if (gate->lock)
80 spin_lock_irqsave(gate->lock, flags);
81
82 tmp = readl(gate->reg);
83
84 if (gate->lock)
85 spin_unlock_irqrestore(gate->lock, flags);
86
87 return (tmp & gate->mask) == gate->val_enable;
88}
89
90const struct clk_ops mmp_clk_gate_ops = {
91 .enable = mmp_clk_gate_enable,
92 .disable = mmp_clk_gate_disable,
93 .is_enabled = mmp_clk_gate_is_enabled,
94};
95
96struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
97 const char *parent_name, unsigned long flags,
98 void __iomem *reg, u32 mask, u32 val_enable, u32 val_disable,
99 unsigned int gate_flags, spinlock_t *lock)
100{
101 struct mmp_clk_gate *gate;
102 struct clk *clk;
103 struct clk_init_data init;
104
105 /* allocate the gate */
106 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
107 if (!gate) {
108 pr_err("%s:%s could not allocate gate clk\n", __func__, name);
109 return ERR_PTR(-ENOMEM);
110 }
111
112 init.name = name;
113 init.ops = &mmp_clk_gate_ops;
114 init.flags = flags | CLK_IS_BASIC;
115 init.parent_names = (parent_name ? &parent_name : NULL);
116 init.num_parents = (parent_name ? 1 : 0);
117
118 /* struct clk_gate assignments */
119 gate->reg = reg;
120 gate->mask = mask;
121 gate->val_enable = val_enable;
122 gate->val_disable = val_disable;
123 gate->flags = gate_flags;
124 gate->lock = lock;
125 gate->hw.init = &init;
126
127 clk = clk_register(dev, &gate->hw);
128
129 if (IS_ERR(clk))
130 kfree(gate);
131
132 return clk;
133}
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
new file mode 100644
index 000000000000..48fa53c7ce5e
--- /dev/null
+++ b/drivers/clk/mmp/clk-mix.c
@@ -0,0 +1,513 @@
1/*
2 * mmp mix(div and mux) clock operation source file
3 *
4 * Copyright (C) 2014 Marvell
5 * Chao Xie <chao.xie@marvell.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/clk-provider.h>
13#include <linux/slab.h>
14#include <linux/io.h>
15#include <linux/err.h>
16
17#include "clk.h"
18
19/*
20 * The mix clock is a clock combined mux and div type clock.
21 * Because the div field and mux field need to be set at same
22 * time, we can not divide it into 2 types of clock
23 */
24
25#define to_clk_mix(hw) container_of(hw, struct mmp_clk_mix, hw)
26
27static unsigned int _get_maxdiv(struct mmp_clk_mix *mix)
28{
29 unsigned int div_mask = (1 << mix->reg_info.width_div) - 1;
30 unsigned int maxdiv = 0;
31 struct clk_div_table *clkt;
32
33 if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
34 return div_mask;
35 if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
36 return 1 << div_mask;
37 if (mix->div_table) {
38 for (clkt = mix->div_table; clkt->div; clkt++)
39 if (clkt->div > maxdiv)
40 maxdiv = clkt->div;
41 return maxdiv;
42 }
43 return div_mask + 1;
44}
45
46static unsigned int _get_div(struct mmp_clk_mix *mix, unsigned int val)
47{
48 struct clk_div_table *clkt;
49
50 if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
51 return val;
52 if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
53 return 1 << val;
54 if (mix->div_table) {
55 for (clkt = mix->div_table; clkt->div; clkt++)
56 if (clkt->val == val)
57 return clkt->div;
58 if (clkt->div == 0)
59 return 0;
60 }
61 return val + 1;
62}
63
64static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val)
65{
66 int num_parents = __clk_get_num_parents(mix->hw.clk);
67 int i;
68
69 if (mix->mux_flags & CLK_MUX_INDEX_BIT)
70 return ffs(val) - 1;
71 if (mix->mux_flags & CLK_MUX_INDEX_ONE)
72 return val - 1;
73 if (mix->mux_table) {
74 for (i = 0; i < num_parents; i++)
75 if (mix->mux_table[i] == val)
76 return i;
77 if (i == num_parents)
78 return 0;
79 }
80
81 return val;
82}
83static unsigned int _get_div_val(struct mmp_clk_mix *mix, unsigned int div)
84{
85 struct clk_div_table *clkt;
86
87 if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
88 return div;
89 if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
90 return __ffs(div);
91 if (mix->div_table) {
92 for (clkt = mix->div_table; clkt->div; clkt++)
93 if (clkt->div == div)
94 return clkt->val;
95 if (clkt->div == 0)
96 return 0;
97 }
98
99 return div - 1;
100}
101
102static unsigned int _get_mux_val(struct mmp_clk_mix *mix, unsigned int mux)
103{
104 if (mix->mux_table)
105 return mix->mux_table[mux];
106
107 return mux;
108}
109
110static void _filter_clk_table(struct mmp_clk_mix *mix,
111 struct mmp_clk_mix_clk_table *table,
112 unsigned int table_size)
113{
114 int i;
115 struct mmp_clk_mix_clk_table *item;
116 struct clk *parent, *clk;
117 unsigned long parent_rate;
118
119 clk = mix->hw.clk;
120
121 for (i = 0; i < table_size; i++) {
122 item = &table[i];
123 parent = clk_get_parent_by_index(clk, item->parent_index);
124 parent_rate = __clk_get_rate(parent);
125 if (parent_rate % item->rate) {
126 item->valid = 0;
127 } else {
128 item->divisor = parent_rate / item->rate;
129 item->valid = 1;
130 }
131 }
132}
133
134static int _set_rate(struct mmp_clk_mix *mix, u32 mux_val, u32 div_val,
135 unsigned int change_mux, unsigned int change_div)
136{
137 struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
138 u8 width, shift;
139 u32 mux_div, fc_req;
140 int ret, timeout = 50;
141 unsigned long flags = 0;
142
143 if (!change_mux && !change_div)
144 return -EINVAL;
145
146 if (mix->lock)
147 spin_lock_irqsave(mix->lock, flags);
148
149 if (mix->type == MMP_CLK_MIX_TYPE_V1
150 || mix->type == MMP_CLK_MIX_TYPE_V2)
151 mux_div = readl(ri->reg_clk_ctrl);
152 else
153 mux_div = readl(ri->reg_clk_sel);
154
155 if (change_div) {
156 width = ri->width_div;
157 shift = ri->shift_div;
158 mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
159 mux_div |= MMP_CLK_BITS_SET_VAL(div_val, width, shift);
160 }
161
162 if (change_mux) {
163 width = ri->width_mux;
164 shift = ri->shift_mux;
165 mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
166 mux_div |= MMP_CLK_BITS_SET_VAL(mux_val, width, shift);
167 }
168
169 if (mix->type == MMP_CLK_MIX_TYPE_V1) {
170 writel(mux_div, ri->reg_clk_ctrl);
171 } else if (mix->type == MMP_CLK_MIX_TYPE_V2) {
172 mux_div |= (1 << ri->bit_fc);
173 writel(mux_div, ri->reg_clk_ctrl);
174
175 do {
176 fc_req = readl(ri->reg_clk_ctrl);
177 timeout--;
178 if (!(fc_req & (1 << ri->bit_fc)))
179 break;
180 } while (timeout);
181
182 if (timeout == 0) {
183 pr_err("%s:%s cannot do frequency change\n",
184 __func__, __clk_get_name(mix->hw.clk));
185 ret = -EBUSY;
186 goto error;
187 }
188 } else {
189 fc_req = readl(ri->reg_clk_ctrl);
190 fc_req |= 1 << ri->bit_fc;
191 writel(fc_req, ri->reg_clk_ctrl);
192 writel(mux_div, ri->reg_clk_sel);
193 fc_req &= ~(1 << ri->bit_fc);
194 }
195
196 ret = 0;
197error:
198 if (mix->lock)
199 spin_unlock_irqrestore(mix->lock, flags);
200
201 return ret;
202}
203
204static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
205 unsigned long *best_parent_rate,
206 struct clk_hw **best_parent_clk)
207{
208 struct mmp_clk_mix *mix = to_clk_mix(hw);
209 struct mmp_clk_mix_clk_table *item;
210 struct clk *parent, *parent_best, *mix_clk;
211 unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best;
212 unsigned long gap, gap_best;
213 u32 div_val_max;
214 unsigned int div;
215 int i, j;
216
217 mix_clk = hw->clk;
218
219 parent = NULL;
220 mix_rate_best = 0;
221 parent_rate_best = 0;
222 gap_best = rate;
223 parent_best = NULL;
224
225 if (mix->table) {
226 for (i = 0; i < mix->table_size; i++) {
227 item = &mix->table[i];
228 if (item->valid == 0)
229 continue;
230 parent = clk_get_parent_by_index(mix_clk,
231 item->parent_index);
232 parent_rate = __clk_get_rate(parent);
233 mix_rate = parent_rate / item->divisor;
234 gap = abs(mix_rate - rate);
235 if (parent_best == NULL || gap < gap_best) {
236 parent_best = parent;
237 parent_rate_best = parent_rate;
238 mix_rate_best = mix_rate;
239 gap_best = gap;
240 if (gap_best == 0)
241 goto found;
242 }
243 }
244 } else {
245 for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
246 parent = clk_get_parent_by_index(mix_clk, i);
247 parent_rate = __clk_get_rate(parent);
248 div_val_max = _get_maxdiv(mix);
249 for (j = 0; j < div_val_max; j++) {
250 div = _get_div(mix, j);
251 mix_rate = parent_rate / div;
252 gap = abs(mix_rate - rate);
253 if (parent_best == NULL || gap < gap_best) {
254 parent_best = parent;
255 parent_rate_best = parent_rate;
256 mix_rate_best = mix_rate;
257 gap_best = gap;
258 if (gap_best == 0)
259 goto found;
260 }
261 }
262 }
263 }
264
265found:
266 *best_parent_rate = parent_rate_best;
267 *best_parent_clk = __clk_get_hw(parent_best);
268
269 return mix_rate_best;
270}
271
272static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
273 unsigned long rate,
274 unsigned long parent_rate,
275 u8 index)
276{
277 struct mmp_clk_mix *mix = to_clk_mix(hw);
278 unsigned int div;
279 u32 div_val, mux_val;
280
281 div = parent_rate / rate;
282 div_val = _get_div_val(mix, div);
283 mux_val = _get_mux_val(mix, index);
284
285 return _set_rate(mix, mux_val, div_val, 1, 1);
286}
287
288static u8 mmp_clk_mix_get_parent(struct clk_hw *hw)
289{
290 struct mmp_clk_mix *mix = to_clk_mix(hw);
291 struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
292 unsigned long flags = 0;
293 u32 mux_div = 0;
294 u8 width, shift;
295 u32 mux_val;
296
297 if (mix->lock)
298 spin_lock_irqsave(mix->lock, flags);
299
300 if (mix->type == MMP_CLK_MIX_TYPE_V1
301 || mix->type == MMP_CLK_MIX_TYPE_V2)
302 mux_div = readl(ri->reg_clk_ctrl);
303 else
304 mux_div = readl(ri->reg_clk_sel);
305
306 if (mix->lock)
307 spin_unlock_irqrestore(mix->lock, flags);
308
309 width = mix->reg_info.width_mux;
310 shift = mix->reg_info.shift_mux;
311
312 mux_val = MMP_CLK_BITS_GET_VAL(mux_div, width, shift);
313
314 return _get_mux(mix, mux_val);
315}
316
317static unsigned long mmp_clk_mix_recalc_rate(struct clk_hw *hw,
318 unsigned long parent_rate)
319{
320 struct mmp_clk_mix *mix = to_clk_mix(hw);
321 struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
322 unsigned long flags = 0;
323 u32 mux_div = 0;
324 u8 width, shift;
325 unsigned int div;
326
327 if (mix->lock)
328 spin_lock_irqsave(mix->lock, flags);
329
330 if (mix->type == MMP_CLK_MIX_TYPE_V1
331 || mix->type == MMP_CLK_MIX_TYPE_V2)
332 mux_div = readl(ri->reg_clk_ctrl);
333 else
334 mux_div = readl(ri->reg_clk_sel);
335
336 if (mix->lock)
337 spin_unlock_irqrestore(mix->lock, flags);
338
339 width = mix->reg_info.width_div;
340 shift = mix->reg_info.shift_div;
341
342 div = _get_div(mix, MMP_CLK_BITS_GET_VAL(mux_div, width, shift));
343
344 return parent_rate / div;
345}
346
347static int mmp_clk_set_parent(struct clk_hw *hw, u8 index)
348{
349 struct mmp_clk_mix *mix = to_clk_mix(hw);
350 struct mmp_clk_mix_clk_table *item;
351 int i;
352 u32 div_val, mux_val;
353
354 if (mix->table) {
355 for (i = 0; i < mix->table_size; i++) {
356 item = &mix->table[i];
357 if (item->valid == 0)
358 continue;
359 if (item->parent_index == index)
360 break;
361 }
362 if (i < mix->table_size) {
363 div_val = _get_div_val(mix, item->divisor);
364 mux_val = _get_mux_val(mix, item->parent_index);
365 } else
366 return -EINVAL;
367 } else {
368 mux_val = _get_mux_val(mix, index);
369 div_val = 0;
370 }
371
372 return _set_rate(mix, mux_val, div_val, 1, div_val ? 1 : 0);
373}
374
375static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
376 unsigned long best_parent_rate)
377{
378 struct mmp_clk_mix *mix = to_clk_mix(hw);
379 struct mmp_clk_mix_clk_table *item;
380 unsigned long parent_rate;
381 unsigned int best_divisor;
382 struct clk *mix_clk, *parent;
383 int i;
384
385 best_divisor = best_parent_rate / rate;
386
387 mix_clk = hw->clk;
388 if (mix->table) {
389 for (i = 0; i < mix->table_size; i++) {
390 item = &mix->table[i];
391 if (item->valid == 0)
392 continue;
393 parent = clk_get_parent_by_index(mix_clk,
394 item->parent_index);
395 parent_rate = __clk_get_rate(parent);
396 if (parent_rate == best_parent_rate
397 && item->divisor == best_divisor)
398 break;
399 }
400 if (i < mix->table_size)
401 return _set_rate(mix,
402 _get_mux_val(mix, item->parent_index),
403 _get_div_val(mix, item->divisor),
404 1, 1);
405 else
406 return -EINVAL;
407 } else {
408 for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
409 parent = clk_get_parent_by_index(mix_clk, i);
410 parent_rate = __clk_get_rate(parent);
411 if (parent_rate == best_parent_rate)
412 break;
413 }
414 if (i < __clk_get_num_parents(mix_clk))
415 return _set_rate(mix, _get_mux_val(mix, i),
416 _get_div_val(mix, best_divisor), 1, 1);
417 else
418 return -EINVAL;
419 }
420}
421
422static void mmp_clk_mix_init(struct clk_hw *hw)
423{
424 struct mmp_clk_mix *mix = to_clk_mix(hw);
425
426 if (mix->table)
427 _filter_clk_table(mix, mix->table, mix->table_size);
428}
429
430const struct clk_ops mmp_clk_mix_ops = {
431 .determine_rate = mmp_clk_mix_determine_rate,
432 .set_rate_and_parent = mmp_clk_mix_set_rate_and_parent,
433 .set_rate = mmp_clk_set_rate,
434 .set_parent = mmp_clk_set_parent,
435 .get_parent = mmp_clk_mix_get_parent,
436 .recalc_rate = mmp_clk_mix_recalc_rate,
437 .init = mmp_clk_mix_init,
438};
439
440struct clk *mmp_clk_register_mix(struct device *dev,
441 const char *name,
442 const char **parent_names,
443 u8 num_parents,
444 unsigned long flags,
445 struct mmp_clk_mix_config *config,
446 spinlock_t *lock)
447{
448 struct mmp_clk_mix *mix;
449 struct clk *clk;
450 struct clk_init_data init;
451 size_t table_bytes;
452
453 mix = kzalloc(sizeof(*mix), GFP_KERNEL);
454 if (!mix) {
455 pr_err("%s:%s: could not allocate mmp mix clk\n",
456 __func__, name);
457 return ERR_PTR(-ENOMEM);
458 }
459
460 init.name = name;
461 init.flags = flags | CLK_GET_RATE_NOCACHE;
462 init.parent_names = parent_names;
463 init.num_parents = num_parents;
464 init.ops = &mmp_clk_mix_ops;
465
466 memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
467 if (config->table) {
468 table_bytes = sizeof(*config->table) * config->table_size;
469 mix->table = kzalloc(table_bytes, GFP_KERNEL);
470 if (!mix->table) {
471 pr_err("%s:%s: could not allocate mmp mix table\n",
472 __func__, name);
473 kfree(mix);
474 return ERR_PTR(-ENOMEM);
475 }
476 memcpy(mix->table, config->table, table_bytes);
477 mix->table_size = config->table_size;
478 }
479
480 if (config->mux_table) {
481 table_bytes = sizeof(u32) * num_parents;
482 mix->mux_table = kzalloc(table_bytes, GFP_KERNEL);
483 if (!mix->mux_table) {
484 pr_err("%s:%s: could not allocate mmp mix mux-table\n",
485 __func__, name);
486 kfree(mix->table);
487 kfree(mix);
488 return ERR_PTR(-ENOMEM);
489 }
490 memcpy(mix->mux_table, config->mux_table, table_bytes);
491 }
492
493 mix->div_flags = config->div_flags;
494 mix->mux_flags = config->mux_flags;
495 mix->lock = lock;
496 mix->hw.init = &init;
497
498 if (config->reg_info.bit_fc >= 32)
499 mix->type = MMP_CLK_MIX_TYPE_V1;
500 else if (config->reg_info.reg_clk_sel)
501 mix->type = MMP_CLK_MIX_TYPE_V3;
502 else
503 mix->type = MMP_CLK_MIX_TYPE_V2;
504 clk = clk_register(dev, &mix->hw);
505
506 if (IS_ERR(clk)) {
507 kfree(mix->mux_table);
508 kfree(mix->table);
509 kfree(mix);
510 }
511
512 return clk;
513}
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index b2721cae257a..5c90a4230fa3 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -54,7 +54,7 @@
54 54
55static DEFINE_SPINLOCK(clk_lock); 55static DEFINE_SPINLOCK(clk_lock);
56 56
57static struct clk_factor_masks uart_factor_masks = { 57static struct mmp_clk_factor_masks uart_factor_masks = {
58 .factor = 2, 58 .factor = 2,
59 .num_mask = 0x1fff, 59 .num_mask = 0x1fff,
60 .den_mask = 0x1fff, 60 .den_mask = 0x1fff,
@@ -62,7 +62,7 @@ static struct clk_factor_masks uart_factor_masks = {
62 .den_shift = 0, 62 .den_shift = 0,
63}; 63};
64 64
65static struct clk_factor_tbl uart_factor_tbl[] = { 65static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
66 {.num = 14634, .den = 2165}, /*14.745MHZ */ 66 {.num = 14634, .den = 2165}, /*14.745MHZ */
67 {.num = 3521, .den = 689}, /*19.23MHZ */ 67 {.num = 3521, .den = 689}, /*19.23MHZ */
68 {.num = 9679, .den = 5728}, /*58.9824MHZ */ 68 {.num = 9679, .den = 5728}, /*58.9824MHZ */
@@ -191,7 +191,7 @@ void __init mmp2_clk_init(void)
191 clk = mmp_clk_register_factor("uart_pll", "pll1_4", 0, 191 clk = mmp_clk_register_factor("uart_pll", "pll1_4", 0,
192 mpmu_base + MPMU_UART_PLL, 192 mpmu_base + MPMU_UART_PLL,
193 &uart_factor_masks, uart_factor_tbl, 193 &uart_factor_masks, uart_factor_tbl,
194 ARRAY_SIZE(uart_factor_tbl)); 194 ARRAY_SIZE(uart_factor_tbl), &clk_lock);
195 clk_set_rate(clk, 14745600); 195 clk_set_rate(clk, 14745600);
196 clk_register_clkdev(clk, "uart_pll", NULL); 196 clk_register_clkdev(clk, "uart_pll", NULL);
197 197
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
new file mode 100644
index 000000000000..2cbc2b43ae52
--- /dev/null
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -0,0 +1,334 @@
1/*
2 * mmp2 clock framework source file
3 *
4 * Copyright (C) 2012 Marvell
5 * Chao Xie <xiechao.mail@gmail.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/spinlock.h>
15#include <linux/io.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/of_address.h>
19
20#include <dt-bindings/clock/marvell,mmp2.h>
21
22#include "clk.h"
23#include "reset.h"
24
25#define APBC_RTC 0x0
26#define APBC_TWSI0 0x4
27#define APBC_TWSI1 0x8
28#define APBC_TWSI2 0xc
29#define APBC_TWSI3 0x10
30#define APBC_TWSI4 0x7c
31#define APBC_TWSI5 0x80
32#define APBC_KPC 0x18
33#define APBC_UART0 0x2c
34#define APBC_UART1 0x30
35#define APBC_UART2 0x34
36#define APBC_UART3 0x88
37#define APBC_GPIO 0x38
38#define APBC_PWM0 0x3c
39#define APBC_PWM1 0x40
40#define APBC_PWM2 0x44
41#define APBC_PWM3 0x48
42#define APBC_SSP0 0x50
43#define APBC_SSP1 0x54
44#define APBC_SSP2 0x58
45#define APBC_SSP3 0x5c
46#define APMU_SDH0 0x54
47#define APMU_SDH1 0x58
48#define APMU_SDH2 0xe8
49#define APMU_SDH3 0xec
50#define APMU_USB 0x5c
51#define APMU_DISP0 0x4c
52#define APMU_DISP1 0x110
53#define APMU_CCIC0 0x50
54#define APMU_CCIC1 0xf4
55#define MPMU_UART_PLL 0x14
56
57struct mmp2_clk_unit {
58 struct mmp_clk_unit unit;
59 void __iomem *mpmu_base;
60 void __iomem *apmu_base;
61 void __iomem *apbc_base;
62};
63
64static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
65 {MMP2_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
66 {MMP2_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
67 {MMP2_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 800000000},
68 {MMP2_CLK_PLL2, "pll2", NULL, CLK_IS_ROOT, 960000000},
69 {MMP2_CLK_USB_PLL, "usb_pll", NULL, CLK_IS_ROOT, 480000000},
70};
71
72static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
73 {MMP2_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
74 {MMP2_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
75 {MMP2_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
76 {MMP2_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
77 {MMP2_CLK_PLL1_20, "pll1_20", "pll1_4", 1, 5, 0},
78 {MMP2_CLK_PLL1_3, "pll1_3", "pll1", 1, 3, 0},
79 {MMP2_CLK_PLL1_6, "pll1_6", "pll1_3", 1, 2, 0},
80 {MMP2_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
81 {MMP2_CLK_PLL2_2, "pll2_2", "pll2", 1, 2, 0},
82 {MMP2_CLK_PLL2_4, "pll2_4", "pll2_2", 1, 2, 0},
83 {MMP2_CLK_PLL2_8, "pll2_8", "pll2_4", 1, 2, 0},
84 {MMP2_CLK_PLL2_16, "pll2_16", "pll2_8", 1, 2, 0},
85 {MMP2_CLK_PLL2_3, "pll2_3", "pll2", 1, 3, 0},
86 {MMP2_CLK_PLL2_6, "pll2_6", "pll2_3", 1, 2, 0},
87 {MMP2_CLK_PLL2_12, "pll2_12", "pll2_6", 1, 2, 0},
88 {MMP2_CLK_VCTCXO_2, "vctcxo_2", "vctcxo", 1, 2, 0},
89 {MMP2_CLK_VCTCXO_4, "vctcxo_4", "vctcxo_2", 1, 2, 0},
90};
91
92static struct mmp_clk_factor_masks uart_factor_masks = {
93 .factor = 2,
94 .num_mask = 0x1fff,
95 .den_mask = 0x1fff,
96 .num_shift = 16,
97 .den_shift = 0,
98};
99
100static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
101 {.num = 14634, .den = 2165}, /*14.745MHZ */
102 {.num = 3521, .den = 689}, /*19.23MHZ */
103 {.num = 9679, .den = 5728}, /*58.9824MHZ */
104 {.num = 15850, .den = 9451}, /*59.429MHZ */
105};
106
107static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
108{
109 struct clk *clk;
110 struct mmp_clk_unit *unit = &pxa_unit->unit;
111
112 mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
113 ARRAY_SIZE(fixed_rate_clks));
114
115 mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
116 ARRAY_SIZE(fixed_factor_clks));
117
118 clk = mmp_clk_register_factor("uart_pll", "pll1_4",
119 CLK_SET_RATE_PARENT,
120 pxa_unit->mpmu_base + MPMU_UART_PLL,
121 &uart_factor_masks, uart_factor_tbl,
122 ARRAY_SIZE(uart_factor_tbl), NULL);
123 mmp_clk_add(unit, MMP2_CLK_UART_PLL, clk);
124}
125
126static DEFINE_SPINLOCK(uart0_lock);
127static DEFINE_SPINLOCK(uart1_lock);
128static DEFINE_SPINLOCK(uart2_lock);
129static const char *uart_parent_names[] = {"uart_pll", "vctcxo"};
130
131static DEFINE_SPINLOCK(ssp0_lock);
132static DEFINE_SPINLOCK(ssp1_lock);
133static DEFINE_SPINLOCK(ssp2_lock);
134static DEFINE_SPINLOCK(ssp3_lock);
135static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
136
137static DEFINE_SPINLOCK(reset_lock);
138
139static struct mmp_param_mux_clk apbc_mux_clks[] = {
140 {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
141 {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
142 {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART2, 4, 3, 0, &uart2_lock},
143 {0, "uart3_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART3, 4, 3, 0, &uart2_lock},
144 {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
145 {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
146 {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP2, 4, 3, 0, &ssp2_lock},
147 {0, "ssp3_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP3, 4, 3, 0, &ssp3_lock},
148};
149
/*
 * APBC gate clocks.
 * Fields: clk id, name, parent, flags, register offset, mask,
 * enable value, disable value, gate flags, lock.
 * RTC uses 0x87/0x83: the extra bit 7 keeps the RTC power/control bit set
 * alongside the usual enable bits.
 */
static struct mmp_param_gate_clk apbc_gate_clks[] = {
	{MMP2_CLK_TWSI0, "twsi0_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x7, 0x3, 0x0, 0, &reset_lock},
	{MMP2_CLK_TWSI1, "twsi1_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x7, 0x3, 0x0, 0, &reset_lock},
	{MMP2_CLK_TWSI2, "twsi2_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI2, 0x7, 0x3, 0x0, 0, &reset_lock},
	{MMP2_CLK_TWSI3, "twsi3_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI3, 0x7, 0x3, 0x0, 0, &reset_lock},
	{MMP2_CLK_TWSI4, "twsi4_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI4, 0x7, 0x3, 0x0, 0, &reset_lock},
	{MMP2_CLK_TWSI5, "twsi5_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI5, 0x7, 0x3, 0x0, 0, &reset_lock},
	{MMP2_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x7, 0x3, 0x0, 0, &reset_lock},
	{MMP2_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
	{MMP2_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x87, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
	{MMP2_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x7, 0x3, 0x0, 0, &reset_lock},
	{MMP2_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x7, 0x3, 0x0, 0, &reset_lock},
	{MMP2_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x7, 0x3, 0x0, 0, &reset_lock},
	{MMP2_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x7, 0x3, 0x0, 0, &reset_lock},
	/* The gate clocks has mux parent. */
	{MMP2_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x7, 0x3, 0x0, 0, &uart0_lock},
	{MMP2_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x7, 0x3, 0x0, 0, &uart1_lock},
	{MMP2_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBC_UART2, 0x7, 0x3, 0x0, 0, &uart2_lock},
	/* NOTE(review): uart3 shares &uart2_lock with uart2 — presumably a
	 * copy-paste from the entry above; verify against the lock set. */
	{MMP2_CLK_UART3, "uart3_clk", "uart3_mux", CLK_SET_RATE_PARENT, APBC_UART3, 0x7, 0x3, 0x0, 0, &uart2_lock},
	{MMP2_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x7, 0x3, 0x0, 0, &ssp0_lock},
	{MMP2_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x7, 0x3, 0x0, 0, &ssp1_lock},
	{MMP2_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x7, 0x3, 0x0, 0, &ssp2_lock},
	{MMP2_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x7, 0x3, 0x0, 0, &ssp3_lock},
};
174
175static void mmp2_apb_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
176{
177 struct mmp_clk_unit *unit = &pxa_unit->unit;
178
179 mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
180 ARRAY_SIZE(apbc_mux_clks));
181
182 mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
183 ARRAY_SIZE(apbc_gate_clks));
184}
185
/* One lock for all SDH units: they share the sdh_mix_clk rate register. */
static DEFINE_SPINLOCK(sdh_lock);
static const char *sdh_parent_names[] = {"pll1_4", "pll2", "usb_pll", "pll1"};
/* Mix (mux + divider) register layout for the shared SDH clock:
 * DEFINE_MIX_REG_INFO(div width, div shift, mux width, mux shift, fc bit). */
static struct mmp_clk_mix_config sdh_mix_config = {
	.reg_info = DEFINE_MIX_REG_INFO(4, 10, 2, 8, 32),
};

static DEFINE_SPINLOCK(usb_lock);

/* Display controllers: separate locks, one per APMU_DISPx register. */
static DEFINE_SPINLOCK(disp0_lock);
static DEFINE_SPINLOCK(disp1_lock);
static const char *disp_parent_names[] = {"pll1", "pll1_16", "pll2", "vctcxo"};

/* Camera interfaces: each CCIC has its own mix register layout. */
static DEFINE_SPINLOCK(ccic0_lock);
static DEFINE_SPINLOCK(ccic1_lock);
static const char *ccic_parent_names[] = {"pll1_2", "pll1_16", "vctcxo"};
static struct mmp_clk_mix_config ccic0_mix_config = {
	.reg_info = DEFINE_MIX_REG_INFO(4, 17, 2, 6, 32),
};
static struct mmp_clk_mix_config ccic1_mix_config = {
	.reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32),
};
207
/* APMU parent muxes for the two display controllers. */
static struct mmp_param_mux_clk apmu_mux_clks[] = {
	{MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock},
	{MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
};
212
/*
 * APMU divider clocks.
 * Fields: clk id, name, parent, flags, register offset,
 * div shift, div width, div flags, lock.
 */
static struct mmp_param_div_clk apmu_div_clks[] = {
	{0, "disp0_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 8, 4, 0, &disp0_lock},
	{0, "disp0_sphy_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 15, 5, 0, &disp0_lock},
	{0, "disp1_div", "disp1_mux", CLK_SET_RATE_PARENT, APMU_DISP1, 8, 4, 0, &disp1_lock},
	{0, "ccic0_sphy_div", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
	{0, "ccic1_sphy_div", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 10, 5, 0, &ccic1_lock},
};
220
221static struct mmp_param_gate_clk apmu_gate_clks[] = {
222 {MMP2_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
223 /* The gate clocks has mux parent. */
224 {MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
225 {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
226 {MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
227 {MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
228 {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
229 {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
230 {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock},
231 {MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock},
232 {MMP2_CLK_CCIC0, "ccic0_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
233 {MMP2_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
234 {MMP2_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
235 {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
236 {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
237 {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
238};
239
/*
 * Register the AXI-side (APMU) clocks: the three mix (mux+divider) clocks
 * first — their reg_info must be patched with the runtime-mapped register
 * address before registration — then the plain mux, divider and gate tables.
 */
static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
{
	struct clk *clk;
	struct mmp_clk_unit *unit = &pxa_unit->unit;

	/* sdh_mix_clk is referenced by the SDH gates by name only;
	 * no id is assigned, so the returned clk is intentionally unused. */
	sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->apmu_base + APMU_SDH0;
	clk = mmp_clk_register_mix(NULL, "sdh_mix_clk", sdh_parent_names,
				ARRAY_SIZE(sdh_parent_names),
				CLK_SET_RATE_PARENT,
				&sdh_mix_config, &sdh_lock);

	ccic0_mix_config.reg_info.reg_clk_ctrl = pxa_unit->apmu_base + APMU_CCIC0;
	clk = mmp_clk_register_mix(NULL, "ccic0_mix_clk", ccic_parent_names,
				ARRAY_SIZE(ccic_parent_names),
				CLK_SET_RATE_PARENT,
				&ccic0_mix_config, &ccic0_lock);
	mmp_clk_add(unit, MMP2_CLK_CCIC0_MIX, clk);

	ccic1_mix_config.reg_info.reg_clk_ctrl = pxa_unit->apmu_base + APMU_CCIC1;
	clk = mmp_clk_register_mix(NULL, "ccic1_mix_clk", ccic_parent_names,
				ARRAY_SIZE(ccic_parent_names),
				CLK_SET_RATE_PARENT,
				&ccic1_mix_config, &ccic1_lock);
	mmp_clk_add(unit, MMP2_CLK_CCIC1_MIX, clk);

	mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
				ARRAY_SIZE(apmu_mux_clks));

	mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
				ARRAY_SIZE(apmu_div_clks));

	mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
				ARRAY_SIZE(apmu_gate_clks));
}
274
275static void mmp2_clk_reset_init(struct device_node *np,
276 struct mmp2_clk_unit *pxa_unit)
277{
278 struct mmp_clk_reset_cell *cells;
279 int i, nr_resets;
280
281 nr_resets = ARRAY_SIZE(apbc_gate_clks);
282 cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
283 if (!cells)
284 return;
285
286 for (i = 0; i < nr_resets; i++) {
287 cells[i].clk_id = apbc_gate_clks[i].id;
288 cells[i].reg = pxa_unit->apbc_base + apbc_gate_clks[i].offset;
289 cells[i].flags = 0;
290 cells[i].lock = apbc_gate_clks[i].lock;
291 cells[i].bits = 0x4;
292 }
293
294 mmp_clk_reset_register(np, cells, nr_resets);
295}
296
297static void __init mmp2_clk_init(struct device_node *np)
298{
299 struct mmp2_clk_unit *pxa_unit;
300
301 pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
302 if (!pxa_unit)
303 return;
304
305 pxa_unit->mpmu_base = of_iomap(np, 0);
306 if (!pxa_unit->mpmu_base) {
307 pr_err("failed to map mpmu registers\n");
308 return;
309 }
310
311 pxa_unit->apmu_base = of_iomap(np, 1);
312 if (!pxa_unit->mpmu_base) {
313 pr_err("failed to map apmu registers\n");
314 return;
315 }
316
317 pxa_unit->apbc_base = of_iomap(np, 2);
318 if (!pxa_unit->apbc_base) {
319 pr_err("failed to map apbc registers\n");
320 return;
321 }
322
323 mmp_clk_init(np, &pxa_unit->unit, MMP2_NR_CLKS);
324
325 mmp2_pll_init(pxa_unit);
326
327 mmp2_apb_periph_clk_init(pxa_unit);
328
329 mmp2_axi_periph_clk_init(pxa_unit);
330
331 mmp2_clk_reset_init(np, pxa_unit);
332}
333
334CLK_OF_DECLARE(mmp2_clk, "marvell,mmp2-clock", mmp2_clk_init);
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
new file mode 100644
index 000000000000..5b1810dc4bd2
--- /dev/null
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -0,0 +1,279 @@
1/*
2 * pxa168 clock framework source file
3 *
4 * Copyright (C) 2012 Marvell
5 * Chao Xie <xiechao.mail@gmail.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/spinlock.h>
15#include <linux/io.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/of_address.h>
19
20#include <dt-bindings/clock/marvell,pxa168.h>
21
22#include "clk.h"
23#include "reset.h"
24
/* APBC (APB clock/reset) register offsets. */
#define APBC_RTC	0x28
#define APBC_TWSI0	0x2c
#define APBC_KPC	0x30
#define APBC_UART0	0x0
#define APBC_UART1	0x4
#define APBC_GPIO	0x8
#define APBC_PWM0	0xc
#define APBC_PWM1	0x10
#define APBC_PWM2	0x14
#define APBC_PWM3	0x18
#define APBC_SSP0	0x81c
#define APBC_SSP1	0x820
#define APBC_SSP2	0x84c
#define APBC_SSP3	0x858
#define APBC_SSP4	0x85c
#define APBC_TWSI1	0x6c
#define APBC_UART2	0x70
/* APMU (AXI power management unit) register offsets. */
#define APMU_SDH0	0x54
#define APMU_SDH1	0x58
#define APMU_USB	0x5c
#define APMU_DISP0	0x4c
#define APMU_CCIC0	0x50
#define APMU_DFC	0x60
/* MPMU register offset of the programmable UART PLL. */
#define MPMU_UART_PLL	0x14
49
/* Per-SoC clock state: the common mmp clock unit plus the three
 * iomapped register regions (MPMU, APMU, APBC). */
struct pxa168_clk_unit {
	struct mmp_clk_unit unit;
	void __iomem *mpmu_base;
	void __iomem *apmu_base;
	void __iomem *apbc_base;
};
56
/* Root clocks: 32 kHz, the 26 MHz VCTCXO and the 624 MHz PLL1. */
static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
	{PXA168_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
	{PXA168_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
	{PXA168_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
};

/* Fixed dividers derived from pll1 (name encodes the divisor,
 * e.g. pll1_13_1_5 = pll1 / 13 / 1.5). */
static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
	{PXA168_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
	{PXA168_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
	{PXA168_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
	{PXA168_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
	{PXA168_CLK_PLL1_6, "pll1_6", "pll1_2", 1, 3, 0},
	{PXA168_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
	{PXA168_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0},
	{PXA168_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0},
	{PXA168_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
	{PXA168_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
	{PXA168_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
	{PXA168_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
	{PXA168_CLK_PLL1_3_16, "pll1_3_16", "pll1", 3, 16, 0},
};

/* Bit layout of the MPMU_UART_PLL numerator/denominator register. */
static struct mmp_clk_factor_masks uart_factor_masks = {
	.factor = 2,
	.num_mask = 0x1fff,
	.den_mask = 0x1fff,
	.num_shift = 16,
	.den_shift = 0,
};

static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
	{.num = 8125, .den = 1536},	/*14.745MHZ */
};
90
91static void pxa168_pll_init(struct pxa168_clk_unit *pxa_unit)
92{
93 struct clk *clk;
94 struct mmp_clk_unit *unit = &pxa_unit->unit;
95
96 mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
97 ARRAY_SIZE(fixed_rate_clks));
98
99 mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
100 ARRAY_SIZE(fixed_factor_clks));
101
102 clk = mmp_clk_register_factor("uart_pll", "pll1_4",
103 CLK_SET_RATE_PARENT,
104 pxa_unit->mpmu_base + MPMU_UART_PLL,
105 &uart_factor_masks, uart_factor_tbl,
106 ARRAY_SIZE(uart_factor_tbl), NULL);
107 mmp_clk_add(unit, PXA168_CLK_UART_PLL, clk);
108}
109
/* Per-unit locks guarding the shared APBC UART/SSP control registers. */
static DEFINE_SPINLOCK(uart0_lock);
static DEFINE_SPINLOCK(uart1_lock);
static DEFINE_SPINLOCK(uart2_lock);
static const char *uart_parent_names[] = {"pll1_3_16", "uart_pll"};

static DEFINE_SPINLOCK(ssp0_lock);
static DEFINE_SPINLOCK(ssp1_lock);
static DEFINE_SPINLOCK(ssp2_lock);
static DEFINE_SPINLOCK(ssp3_lock);
static DEFINE_SPINLOCK(ssp4_lock);
static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};

/* Shared lock for gate clocks whose APBC register also carries the reset bit. */
static DEFINE_SPINLOCK(reset_lock);
123
/*
 * APBC parent muxes for the UART and SSP units.
 * Fields: clk id, name, parents, nr parents, flags, register offset,
 * mux shift, mux width, mux flags, lock.
 */
static struct mmp_param_mux_clk apbc_mux_clks[] = {
	{0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
	{0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
	{0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART2, 4, 3, 0, &uart2_lock},
	{0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
	{0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
	{0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP2, 4, 3, 0, &ssp2_lock},
	{0, "ssp3_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP3, 4, 3, 0, &ssp3_lock},
	{0, "ssp4_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP4, 4, 3, 0, &ssp4_lock},
};
134
/*
 * APBC gate clocks.
 * Fields: clk id, name, parent, flags, register offset, mask,
 * enable value, disable value, gate flags, lock.
 * KPC/RTC pass a NULL lock since each owns its register exclusively;
 * MMP_CLK_GATE_NEED_DELAY inserts a settle delay after gating.
 */
static struct mmp_param_gate_clk apbc_gate_clks[] = {
	{PXA168_CLK_TWSI0, "twsi0_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &reset_lock},
	{PXA168_CLK_TWSI1, "twsi1_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x3, 0x3, 0x0, 0, &reset_lock},
	{PXA168_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x3, 0x3, 0x0, 0, &reset_lock},
	{PXA168_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
	{PXA168_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
	{PXA168_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &reset_lock},
	{PXA168_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &reset_lock},
	{PXA168_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &reset_lock},
	{PXA168_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &reset_lock},
	/* The gate clocks has mux parent. */
	{PXA168_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x3, 0x3, 0x0, 0, &uart0_lock},
	{PXA168_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock},
	{PXA168_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBC_UART2, 0x3, 0x3, 0x0, 0, &uart2_lock},
	{PXA168_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x3, 0x3, 0x0, 0, &ssp0_lock},
	{PXA168_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x3, 0x3, 0x0, 0, &ssp1_lock},
	{PXA168_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x3, 0x3, 0x0, 0, &ssp2_lock},
	{PXA168_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x3, 0x3, 0x0, 0, &ssp3_lock},
	{PXA168_CLK_SSP4, "ssp4_clk", "ssp4_mux", CLK_SET_RATE_PARENT, APBC_SSP4, 0x3, 0x3, 0x0, 0, &ssp4_lock},
};
155
156static void pxa168_apb_periph_clk_init(struct pxa168_clk_unit *pxa_unit)
157{
158 struct mmp_clk_unit *unit = &pxa_unit->unit;
159
160 mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
161 ARRAY_SIZE(apbc_mux_clks));
162
163 mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
164 ARRAY_SIZE(apbc_gate_clks));
165
166}
167
/* Per-unit locks guarding the shared APMU control registers. */
static DEFINE_SPINLOCK(sdh0_lock);
static DEFINE_SPINLOCK(sdh1_lock);
static const char *sdh_parent_names[] = {"pll1_12", "pll1_13"};

static DEFINE_SPINLOCK(usb_lock);

static DEFINE_SPINLOCK(disp0_lock);
static const char *disp_parent_names[] = {"pll1_2", "pll1_12"};

static DEFINE_SPINLOCK(ccic0_lock);
static const char *ccic_parent_names[] = {"pll1_2", "pll1_12"};
static const char *ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
180
/* APMU parent muxes (single-bit selectors at shift 6/7 of each register). */
static struct mmp_param_mux_clk apmu_mux_clks[] = {
	{0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 1, 0, &sdh0_lock},
	{0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 1, 0, &sdh1_lock},
	{0, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 1, 0, &disp0_lock},
	{0, "ccic0_mux", ccic_parent_names, ARRAY_SIZE(ccic_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 6, 1, 0, &ccic0_lock},
	{0, "ccic0_phy_mux", ccic_phy_parent_names, ARRAY_SIZE(ccic_phy_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 7, 1, 0, &ccic0_lock},
};

/* Divider for the CCIC serial-PHY clock (bits 10..14 of APMU_CCIC0). */
static struct mmp_param_div_clk apmu_div_clks[] = {
	{0, "ccic0_sphy_div", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
};
192
/*
 * APMU (AXI) gate clocks.
 * NOTE(review): "usb_pll" is referenced as a parent but is not registered
 * anywhere in this file — presumably provided elsewhere; confirm it exists
 * in the clock tree before these gates are used.
 */
static struct mmp_param_gate_clk apmu_gate_clks[] = {
	{PXA168_CLK_DFC, "dfc_clk", "pll1_4", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, NULL},
	{PXA168_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
	{PXA168_CLK_SPH, "sph_clk", "usb_pll", 0, APMU_USB, 0x12, 0x12, 0x0, 0, &usb_lock},
	/* The gate clocks has mux parent. */
	{PXA168_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh0_lock},
	{PXA168_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh1_lock},
	{PXA168_CLK_DISP0, "disp0_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
	{PXA168_CLK_CCIC0, "ccic0_clk", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
	{PXA168_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_phy_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
	{PXA168_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
};
205
206static void pxa168_axi_periph_clk_init(struct pxa168_clk_unit *pxa_unit)
207{
208 struct mmp_clk_unit *unit = &pxa_unit->unit;
209
210 mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
211 ARRAY_SIZE(apmu_mux_clks));
212
213 mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
214 ARRAY_SIZE(apmu_div_clks));
215
216 mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
217 ARRAY_SIZE(apmu_gate_clks));
218}
219
220static void pxa168_clk_reset_init(struct device_node *np,
221 struct pxa168_clk_unit *pxa_unit)
222{
223 struct mmp_clk_reset_cell *cells;
224 int i, nr_resets;
225
226 nr_resets = ARRAY_SIZE(apbc_gate_clks);
227 cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
228 if (!cells)
229 return;
230
231 for (i = 0; i < nr_resets; i++) {
232 cells[i].clk_id = apbc_gate_clks[i].id;
233 cells[i].reg = pxa_unit->apbc_base + apbc_gate_clks[i].offset;
234 cells[i].flags = 0;
235 cells[i].lock = apbc_gate_clks[i].lock;
236 cells[i].bits = 0x4;
237 }
238
239 mmp_clk_reset_register(np, cells, nr_resets);
240}
241
242static void __init pxa168_clk_init(struct device_node *np)
243{
244 struct pxa168_clk_unit *pxa_unit;
245
246 pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
247 if (!pxa_unit)
248 return;
249
250 pxa_unit->mpmu_base = of_iomap(np, 0);
251 if (!pxa_unit->mpmu_base) {
252 pr_err("failed to map mpmu registers\n");
253 return;
254 }
255
256 pxa_unit->apmu_base = of_iomap(np, 1);
257 if (!pxa_unit->mpmu_base) {
258 pr_err("failed to map apmu registers\n");
259 return;
260 }
261
262 pxa_unit->apbc_base = of_iomap(np, 2);
263 if (!pxa_unit->apbc_base) {
264 pr_err("failed to map apbc registers\n");
265 return;
266 }
267
268 mmp_clk_init(np, &pxa_unit->unit, PXA168_NR_CLKS);
269
270 pxa168_pll_init(pxa_unit);
271
272 pxa168_apb_periph_clk_init(pxa_unit);
273
274 pxa168_axi_periph_clk_init(pxa_unit);
275
276 pxa168_clk_reset_init(np, pxa_unit);
277}
278
279CLK_OF_DECLARE(pxa168_clk, "marvell,pxa168-clock", pxa168_clk_init);
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
new file mode 100644
index 000000000000..5e3c80dad336
--- /dev/null
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -0,0 +1,301 @@
1/*
2 * pxa910 clock framework source file
3 *
4 * Copyright (C) 2012 Marvell
5 * Chao Xie <xiechao.mail@gmail.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/spinlock.h>
15#include <linux/io.h>
16#include <linux/delay.h>
17#include <linux/err.h>
18#include <linux/of_address.h>
19
20#include <dt-bindings/clock/marvell,pxa910.h>
21
22#include "clk.h"
23#include "reset.h"
24
/* APBC (APB clock/reset) register offsets. */
#define APBC_RTC	0x28
#define APBC_TWSI0	0x2c
/* NOTE(review): APBC_KPC (0x18) is identical to APBC_PWM3 (0x18) below —
 * verify against the PXA910 APBC register map; one of them may be wrong. */
#define APBC_KPC	0x18
#define APBC_UART0	0x0
#define APBC_UART1	0x4
#define APBC_GPIO	0x8
#define APBC_PWM0	0xc
#define APBC_PWM1	0x10
#define APBC_PWM2	0x14
#define APBC_PWM3	0x18
#define APBC_SSP0	0x1c
#define APBC_SSP1	0x20
#define APBC_SSP2	0x4c
/* APBCP (companion-processor APB) register offsets. */
#define APBCP_TWSI1	0x28
#define APBCP_UART2	0x1c
/* APMU (AXI power management unit) register offsets. */
#define APMU_SDH0	0x54
#define APMU_SDH1	0x58
#define APMU_USB	0x5c
#define APMU_DISP0	0x4c
#define APMU_CCIC0	0x50
#define APMU_DFC	0x60
/* MPMU register offset of the programmable UART PLL. */
#define MPMU_UART_PLL	0x14
47
/* Per-SoC clock state: the common mmp clock unit plus the four
 * iomapped register regions (MPMU, APMU, APBC, APBCP). */
struct pxa910_clk_unit {
	struct mmp_clk_unit unit;
	void __iomem *mpmu_base;
	void __iomem *apmu_base;
	void __iomem *apbc_base;
	void __iomem *apbcp_base;
};
55
/* Root clocks: 32 kHz, the 26 MHz VCTCXO and the 624 MHz PLL1. */
static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
	{PXA910_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
	{PXA910_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
	{PXA910_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
};

/* Fixed dividers derived from pll1 (name encodes the divisor,
 * e.g. pll1_13_1_5 = pll1 / 13 / 1.5). */
static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
	{PXA910_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
	{PXA910_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
	{PXA910_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
	{PXA910_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
	{PXA910_CLK_PLL1_6, "pll1_6", "pll1_2", 1, 3, 0},
	{PXA910_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
	{PXA910_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0},
	{PXA910_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0},
	{PXA910_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
	{PXA910_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
	{PXA910_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
	{PXA910_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
	{PXA910_CLK_PLL1_3_16, "pll1_3_16", "pll1", 3, 16, 0},
};

/* Bit layout of the MPMU_UART_PLL numerator/denominator register. */
static struct mmp_clk_factor_masks uart_factor_masks = {
	.factor = 2,
	.num_mask = 0x1fff,
	.den_mask = 0x1fff,
	.num_shift = 16,
	.den_shift = 0,
};

static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
	{.num = 8125, .den = 1536},	/*14.745MHZ */
};
89
90static void pxa910_pll_init(struct pxa910_clk_unit *pxa_unit)
91{
92 struct clk *clk;
93 struct mmp_clk_unit *unit = &pxa_unit->unit;
94
95 mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
96 ARRAY_SIZE(fixed_rate_clks));
97
98 mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
99 ARRAY_SIZE(fixed_factor_clks));
100
101 clk = mmp_clk_register_factor("uart_pll", "pll1_4",
102 CLK_SET_RATE_PARENT,
103 pxa_unit->mpmu_base + MPMU_UART_PLL,
104 &uart_factor_masks, uart_factor_tbl,
105 ARRAY_SIZE(uart_factor_tbl), NULL);
106 mmp_clk_add(unit, PXA910_CLK_UART_PLL, clk);
107}
108
/* Per-unit locks guarding the shared APBC/APBCP UART and SSP registers. */
static DEFINE_SPINLOCK(uart0_lock);
static DEFINE_SPINLOCK(uart1_lock);
static DEFINE_SPINLOCK(uart2_lock);
static const char *uart_parent_names[] = {"pll1_3_16", "uart_pll"};

static DEFINE_SPINLOCK(ssp0_lock);
static DEFINE_SPINLOCK(ssp1_lock);
static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};

/* Shared lock for gate clocks whose APBC register also carries the reset bit. */
static DEFINE_SPINLOCK(reset_lock);
119
/* APBC parent muxes for UART0/1 and SSP0/1. */
static struct mmp_param_mux_clk apbc_mux_clks[] = {
	{0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
	{0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
	{0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
	{0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
};

/* UART2 lives in the separate APBCP (companion-processor) block. */
static struct mmp_param_mux_clk apbcp_mux_clks[] = {
	{0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBCP_UART2, 4, 3, 0, &uart2_lock},
};
130
/*
 * APBC gate clocks.
 * Fields: clk id, name, parent, flags, register offset, mask,
 * enable value, disable value, gate flags, lock.
 * KPC/RTC pass a NULL lock since each owns its register exclusively;
 * MMP_CLK_GATE_NEED_DELAY inserts a settle delay after gating.
 */
static struct mmp_param_gate_clk apbc_gate_clks[] = {
	{PXA910_CLK_TWSI0, "twsi0_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &reset_lock},
	{PXA910_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x3, 0x3, 0x0, 0, &reset_lock},
	{PXA910_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
	{PXA910_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
	{PXA910_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &reset_lock},
	{PXA910_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &reset_lock},
	{PXA910_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &reset_lock},
	{PXA910_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &reset_lock},
	/* The gate clocks has mux parent. */
	{PXA910_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x3, 0x3, 0x0, 0, &uart0_lock},
	{PXA910_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock},
	{PXA910_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x3, 0x3, 0x0, 0, &ssp0_lock},
	{PXA910_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x3, 0x3, 0x0, 0, &ssp1_lock},
};

/* Gates in the separate APBCP (companion-processor) register block. */
static struct mmp_param_gate_clk apbcp_gate_clks[] = {
	{PXA910_CLK_TWSI1, "twsi1_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBCP_TWSI1, 0x3, 0x3, 0x0, 0, &reset_lock},
	/* The gate clocks has mux parent. */
	{PXA910_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBCP_UART2, 0x3, 0x3, 0x0, 0, &uart2_lock},
};
152
153static void pxa910_apb_periph_clk_init(struct pxa910_clk_unit *pxa_unit)
154{
155 struct mmp_clk_unit *unit = &pxa_unit->unit;
156
157 mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
158 ARRAY_SIZE(apbc_mux_clks));
159
160 mmp_register_mux_clks(unit, apbcp_mux_clks, pxa_unit->apbcp_base,
161 ARRAY_SIZE(apbcp_mux_clks));
162
163 mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
164 ARRAY_SIZE(apbc_gate_clks));
165
166 mmp_register_gate_clks(unit, apbcp_gate_clks, pxa_unit->apbcp_base,
167 ARRAY_SIZE(apbcp_gate_clks));
168}
169
/* Per-unit locks guarding the shared APMU control registers. */
static DEFINE_SPINLOCK(sdh0_lock);
static DEFINE_SPINLOCK(sdh1_lock);
static const char *sdh_parent_names[] = {"pll1_12", "pll1_13"};

static DEFINE_SPINLOCK(usb_lock);

static DEFINE_SPINLOCK(disp0_lock);
static const char *disp_parent_names[] = {"pll1_2", "pll1_12"};

static DEFINE_SPINLOCK(ccic0_lock);
static const char *ccic_parent_names[] = {"pll1_2", "pll1_12"};
static const char *ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
182
/* APMU parent muxes (single-bit selectors at shift 6/7 of each register). */
static struct mmp_param_mux_clk apmu_mux_clks[] = {
	{0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 1, 0, &sdh0_lock},
	{0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 1, 0, &sdh1_lock},
	{0, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 1, 0, &disp0_lock},
	{0, "ccic0_mux", ccic_parent_names, ARRAY_SIZE(ccic_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 6, 1, 0, &ccic0_lock},
	{0, "ccic0_phy_mux", ccic_phy_parent_names, ARRAY_SIZE(ccic_phy_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 7, 1, 0, &ccic0_lock},
};

/* Divider for the CCIC serial-PHY clock (bits 10..14 of APMU_CCIC0). */
static struct mmp_param_div_clk apmu_div_clks[] = {
	{0, "ccic0_sphy_div", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
};
194
/*
 * APMU (AXI) gate clocks.
 * NOTE(review): "usb_pll" is referenced as a parent but is not registered
 * anywhere in this file — presumably provided elsewhere; confirm it exists
 * in the clock tree before these gates are used.
 */
static struct mmp_param_gate_clk apmu_gate_clks[] = {
	{PXA910_CLK_DFC, "dfc_clk", "pll1_4", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, NULL},
	{PXA910_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
	{PXA910_CLK_SPH, "sph_clk", "usb_pll", 0, APMU_USB, 0x12, 0x12, 0x0, 0, &usb_lock},
	/* The gate clocks has mux parent. */
	{PXA910_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh0_lock},
	{PXA910_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh1_lock},
	{PXA910_CLK_DISP0, "disp0_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
	{PXA910_CLK_CCIC0, "ccic0_clk", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
	{PXA910_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_phy_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
	{PXA910_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
};
207
208static void pxa910_axi_periph_clk_init(struct pxa910_clk_unit *pxa_unit)
209{
210 struct mmp_clk_unit *unit = &pxa_unit->unit;
211
212 mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
213 ARRAY_SIZE(apmu_mux_clks));
214
215 mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
216 ARRAY_SIZE(apmu_div_clks));
217
218 mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
219 ARRAY_SIZE(apmu_gate_clks));
220}
221
222static void pxa910_clk_reset_init(struct device_node *np,
223 struct pxa910_clk_unit *pxa_unit)
224{
225 struct mmp_clk_reset_cell *cells;
226 int i, base, nr_resets_apbc, nr_resets_apbcp, nr_resets;
227
228 nr_resets_apbc = ARRAY_SIZE(apbc_gate_clks);
229 nr_resets_apbcp = ARRAY_SIZE(apbcp_gate_clks);
230 nr_resets = nr_resets_apbc + nr_resets_apbcp;
231 cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
232 if (!cells)
233 return;
234
235 base = 0;
236 for (i = 0; i < nr_resets_apbc; i++) {
237 cells[base + i].clk_id = apbc_gate_clks[i].id;
238 cells[base + i].reg =
239 pxa_unit->apbc_base + apbc_gate_clks[i].offset;
240 cells[base + i].flags = 0;
241 cells[base + i].lock = apbc_gate_clks[i].lock;
242 cells[base + i].bits = 0x4;
243 }
244
245 base = nr_resets_apbc;
246 for (i = 0; i < nr_resets_apbcp; i++) {
247 cells[base + i].clk_id = apbcp_gate_clks[i].id;
248 cells[base + i].reg =
249 pxa_unit->apbc_base + apbc_gate_clks[i].offset;
250 cells[base + i].flags = 0;
251 cells[base + i].lock = apbc_gate_clks[i].lock;
252 cells[base + i].bits = 0x4;
253 }
254
255 mmp_clk_reset_register(np, cells, nr_resets);
256}
257
258static void __init pxa910_clk_init(struct device_node *np)
259{
260 struct pxa910_clk_unit *pxa_unit;
261
262 pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
263 if (!pxa_unit)
264 return;
265
266 pxa_unit->mpmu_base = of_iomap(np, 0);
267 if (!pxa_unit->mpmu_base) {
268 pr_err("failed to map mpmu registers\n");
269 return;
270 }
271
272 pxa_unit->apmu_base = of_iomap(np, 1);
273 if (!pxa_unit->mpmu_base) {
274 pr_err("failed to map apmu registers\n");
275 return;
276 }
277
278 pxa_unit->apbc_base = of_iomap(np, 2);
279 if (!pxa_unit->apbc_base) {
280 pr_err("failed to map apbc registers\n");
281 return;
282 }
283
284 pxa_unit->apbcp_base = of_iomap(np, 3);
285 if (!pxa_unit->mpmu_base) {
286 pr_err("failed to map apbcp registers\n");
287 return;
288 }
289
290 mmp_clk_init(np, &pxa_unit->unit, PXA910_NR_CLKS);
291
292 pxa910_pll_init(pxa_unit);
293
294 pxa910_apb_periph_clk_init(pxa_unit);
295
296 pxa910_axi_periph_clk_init(pxa_unit);
297
298 pxa910_clk_reset_init(np, pxa_unit);
299}
300
301CLK_OF_DECLARE(pxa910_clk, "marvell,pxa910-clock", pxa910_clk_init);
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index 014396b028a2..93e967c0f972 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -47,7 +47,7 @@
47 47
48static DEFINE_SPINLOCK(clk_lock); 48static DEFINE_SPINLOCK(clk_lock);
49 49
50static struct clk_factor_masks uart_factor_masks = { 50static struct mmp_clk_factor_masks uart_factor_masks = {
51 .factor = 2, 51 .factor = 2,
52 .num_mask = 0x1fff, 52 .num_mask = 0x1fff,
53 .den_mask = 0x1fff, 53 .den_mask = 0x1fff,
@@ -55,7 +55,7 @@ static struct clk_factor_masks uart_factor_masks = {
55 .den_shift = 0, 55 .den_shift = 0,
56}; 56};
57 57
58static struct clk_factor_tbl uart_factor_tbl[] = { 58static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
59 {.num = 8125, .den = 1536}, /*14.745MHZ */ 59 {.num = 8125, .den = 1536}, /*14.745MHZ */
60}; 60};
61 61
@@ -158,7 +158,7 @@ void __init pxa168_clk_init(void)
158 uart_pll = mmp_clk_register_factor("uart_pll", "pll1_4", 0, 158 uart_pll = mmp_clk_register_factor("uart_pll", "pll1_4", 0,
159 mpmu_base + MPMU_UART_PLL, 159 mpmu_base + MPMU_UART_PLL,
160 &uart_factor_masks, uart_factor_tbl, 160 &uart_factor_masks, uart_factor_tbl,
161 ARRAY_SIZE(uart_factor_tbl)); 161 ARRAY_SIZE(uart_factor_tbl), &clk_lock);
162 clk_set_rate(uart_pll, 14745600); 162 clk_set_rate(uart_pll, 14745600);
163 clk_register_clkdev(uart_pll, "uart_pll", NULL); 163 clk_register_clkdev(uart_pll, "uart_pll", NULL);
164 164
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index 9efc6a47535d..993abcdb32cc 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -45,7 +45,7 @@
45 45
46static DEFINE_SPINLOCK(clk_lock); 46static DEFINE_SPINLOCK(clk_lock);
47 47
48static struct clk_factor_masks uart_factor_masks = { 48static struct mmp_clk_factor_masks uart_factor_masks = {
49 .factor = 2, 49 .factor = 2,
50 .num_mask = 0x1fff, 50 .num_mask = 0x1fff,
51 .den_mask = 0x1fff, 51 .den_mask = 0x1fff,
@@ -53,7 +53,7 @@ static struct clk_factor_masks uart_factor_masks = {
53 .den_shift = 0, 53 .den_shift = 0,
54}; 54};
55 55
56static struct clk_factor_tbl uart_factor_tbl[] = { 56static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
57 {.num = 8125, .den = 1536}, /*14.745MHZ */ 57 {.num = 8125, .den = 1536}, /*14.745MHZ */
58}; 58};
59 59
@@ -163,7 +163,7 @@ void __init pxa910_clk_init(void)
163 uart_pll = mmp_clk_register_factor("uart_pll", "pll1_4", 0, 163 uart_pll = mmp_clk_register_factor("uart_pll", "pll1_4", 0,
164 mpmu_base + MPMU_UART_PLL, 164 mpmu_base + MPMU_UART_PLL,
165 &uart_factor_masks, uart_factor_tbl, 165 &uart_factor_masks, uart_factor_tbl,
166 ARRAY_SIZE(uart_factor_tbl)); 166 ARRAY_SIZE(uart_factor_tbl), &clk_lock);
167 clk_set_rate(uart_pll, 14745600); 167 clk_set_rate(uart_pll, 14745600);
168 clk_register_clkdev(uart_pll, "uart_pll", NULL); 168 clk_register_clkdev(uart_pll, "uart_pll", NULL);
169 169
diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
new file mode 100644
index 000000000000..cf038ef54c59
--- /dev/null
+++ b/drivers/clk/mmp/clk.c
@@ -0,0 +1,192 @@
1#include <linux/io.h>
2#include <linux/clk.h>
3#include <linux/clk-provider.h>
4#include <linux/clkdev.h>
5#include <linux/of.h>
6#include <linux/of_address.h>
7
8#include "clk.h"
9
10void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit,
11 int nr_clks)
12{
13 static struct clk **clk_table;
14
15 clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
16 if (!clk_table)
17 return;
18
19 unit->clk_table = clk_table;
20 unit->nr_clks = nr_clks;
21 unit->clk_data.clks = clk_table;
22 unit->clk_data.clk_num = nr_clks;
23 of_clk_add_provider(np, of_clk_src_onecell_get, &unit->clk_data);
24}
25
26void mmp_register_fixed_rate_clks(struct mmp_clk_unit *unit,
27 struct mmp_param_fixed_rate_clk *clks,
28 int size)
29{
30 int i;
31 struct clk *clk;
32
33 for (i = 0; i < size; i++) {
34 clk = clk_register_fixed_rate(NULL, clks[i].name,
35 clks[i].parent_name,
36 clks[i].flags,
37 clks[i].fixed_rate);
38 if (IS_ERR(clk)) {
39 pr_err("%s: failed to register clock %s\n",
40 __func__, clks[i].name);
41 continue;
42 }
43 if (clks[i].id)
44 unit->clk_table[clks[i].id] = clk;
45 }
46}
47
48void mmp_register_fixed_factor_clks(struct mmp_clk_unit *unit,
49 struct mmp_param_fixed_factor_clk *clks,
50 int size)
51{
52 struct clk *clk;
53 int i;
54
55 for (i = 0; i < size; i++) {
56 clk = clk_register_fixed_factor(NULL, clks[i].name,
57 clks[i].parent_name,
58 clks[i].flags, clks[i].mult,
59 clks[i].div);
60 if (IS_ERR(clk)) {
61 pr_err("%s: failed to register clock %s\n",
62 __func__, clks[i].name);
63 continue;
64 }
65 if (clks[i].id)
66 unit->clk_table[clks[i].id] = clk;
67 }
68}
69
70void mmp_register_general_gate_clks(struct mmp_clk_unit *unit,
71 struct mmp_param_general_gate_clk *clks,
72 void __iomem *base, int size)
73{
74 struct clk *clk;
75 int i;
76
77 for (i = 0; i < size; i++) {
78 clk = clk_register_gate(NULL, clks[i].name,
79 clks[i].parent_name,
80 clks[i].flags,
81 base + clks[i].offset,
82 clks[i].bit_idx,
83 clks[i].gate_flags,
84 clks[i].lock);
85
86 if (IS_ERR(clk)) {
87 pr_err("%s: failed to register clock %s\n",
88 __func__, clks[i].name);
89 continue;
90 }
91 if (clks[i].id)
92 unit->clk_table[clks[i].id] = clk;
93 }
94}
95
96void mmp_register_gate_clks(struct mmp_clk_unit *unit,
97 struct mmp_param_gate_clk *clks,
98 void __iomem *base, int size)
99{
100 struct clk *clk;
101 int i;
102
103 for (i = 0; i < size; i++) {
104 clk = mmp_clk_register_gate(NULL, clks[i].name,
105 clks[i].parent_name,
106 clks[i].flags,
107 base + clks[i].offset,
108 clks[i].mask,
109 clks[i].val_enable,
110 clks[i].val_disable,
111 clks[i].gate_flags,
112 clks[i].lock);
113
114 if (IS_ERR(clk)) {
115 pr_err("%s: failed to register clock %s\n",
116 __func__, clks[i].name);
117 continue;
118 }
119 if (clks[i].id)
120 unit->clk_table[clks[i].id] = clk;
121 }
122}
123
124void mmp_register_mux_clks(struct mmp_clk_unit *unit,
125 struct mmp_param_mux_clk *clks,
126 void __iomem *base, int size)
127{
128 struct clk *clk;
129 int i;
130
131 for (i = 0; i < size; i++) {
132 clk = clk_register_mux(NULL, clks[i].name,
133 clks[i].parent_name,
134 clks[i].num_parents,
135 clks[i].flags,
136 base + clks[i].offset,
137 clks[i].shift,
138 clks[i].width,
139 clks[i].mux_flags,
140 clks[i].lock);
141
142 if (IS_ERR(clk)) {
143 pr_err("%s: failed to register clock %s\n",
144 __func__, clks[i].name);
145 continue;
146 }
147 if (clks[i].id)
148 unit->clk_table[clks[i].id] = clk;
149 }
150}
151
152void mmp_register_div_clks(struct mmp_clk_unit *unit,
153 struct mmp_param_div_clk *clks,
154 void __iomem *base, int size)
155{
156 struct clk *clk;
157 int i;
158
159 for (i = 0; i < size; i++) {
160 clk = clk_register_divider(NULL, clks[i].name,
161 clks[i].parent_name,
162 clks[i].flags,
163 base + clks[i].offset,
164 clks[i].shift,
165 clks[i].width,
166 clks[i].div_flags,
167 clks[i].lock);
168
169 if (IS_ERR(clk)) {
170 pr_err("%s: failed to register clock %s\n",
171 __func__, clks[i].name);
172 continue;
173 }
174 if (clks[i].id)
175 unit->clk_table[clks[i].id] = clk;
176 }
177}
178
179void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
180 struct clk *clk)
181{
182 if (IS_ERR_OR_NULL(clk)) {
183 pr_err("CLK %d has invalid pointer %p\n", id, clk);
184 return;
185 }
186 if (id > unit->nr_clks) {
187 pr_err("CLK %d is invalid\n", id);
188 return;
189 }
190
191 unit->clk_table[id] = clk;
192}
diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h
index ab86dd4a416a..adf9b711b037 100644
--- a/drivers/clk/mmp/clk.h
+++ b/drivers/clk/mmp/clk.h
@@ -7,19 +7,123 @@
7#define APBC_NO_BUS_CTRL BIT(0) 7#define APBC_NO_BUS_CTRL BIT(0)
8#define APBC_POWER_CTRL BIT(1) 8#define APBC_POWER_CTRL BIT(1)
9 9
10struct clk_factor_masks { 10
11 unsigned int factor; 11/* Clock type "factor" */
12 unsigned int num_mask; 12struct mmp_clk_factor_masks {
13 unsigned int den_mask; 13 unsigned int factor;
14 unsigned int num_shift; 14 unsigned int num_mask;
15 unsigned int den_shift; 15 unsigned int den_mask;
16 unsigned int num_shift;
17 unsigned int den_shift;
16}; 18};
17 19
18struct clk_factor_tbl { 20struct mmp_clk_factor_tbl {
19 unsigned int num; 21 unsigned int num;
20 unsigned int den; 22 unsigned int den;
21}; 23};
22 24
25struct mmp_clk_factor {
26 struct clk_hw hw;
27 void __iomem *base;
28 struct mmp_clk_factor_masks *masks;
29 struct mmp_clk_factor_tbl *ftbl;
30 unsigned int ftbl_cnt;
31 spinlock_t *lock;
32};
33
34extern struct clk *mmp_clk_register_factor(const char *name,
35 const char *parent_name, unsigned long flags,
36 void __iomem *base, struct mmp_clk_factor_masks *masks,
37 struct mmp_clk_factor_tbl *ftbl, unsigned int ftbl_cnt,
38 spinlock_t *lock);
39
40/* Clock type "mix" */
41#define MMP_CLK_BITS_MASK(width, shift) \
42 (((1 << (width)) - 1) << (shift))
43#define MMP_CLK_BITS_GET_VAL(data, width, shift) \
44 ((data & MMP_CLK_BITS_MASK(width, shift)) >> (shift))
45#define MMP_CLK_BITS_SET_VAL(val, width, shift) \
46 (((val) << (shift)) & MMP_CLK_BITS_MASK(width, shift))
47
48enum {
49 MMP_CLK_MIX_TYPE_V1,
50 MMP_CLK_MIX_TYPE_V2,
51 MMP_CLK_MIX_TYPE_V3,
52};
53
54/* The register layout */
55struct mmp_clk_mix_reg_info {
56 void __iomem *reg_clk_ctrl;
57 void __iomem *reg_clk_sel;
58 u8 width_div;
59 u8 shift_div;
60 u8 width_mux;
61 u8 shift_mux;
62 u8 bit_fc;
63};
64
65/* The suggested clock table from user. */
66struct mmp_clk_mix_clk_table {
67 unsigned long rate;
68 u8 parent_index;
69 unsigned int divisor;
70 unsigned int valid;
71};
72
73struct mmp_clk_mix_config {
74 struct mmp_clk_mix_reg_info reg_info;
75 struct mmp_clk_mix_clk_table *table;
76 unsigned int table_size;
77 u32 *mux_table;
78 struct clk_div_table *div_table;
79 u8 div_flags;
80 u8 mux_flags;
81};
82
83struct mmp_clk_mix {
84 struct clk_hw hw;
85 struct mmp_clk_mix_reg_info reg_info;
86 struct mmp_clk_mix_clk_table *table;
87 u32 *mux_table;
88 struct clk_div_table *div_table;
89 unsigned int table_size;
90 u8 div_flags;
91 u8 mux_flags;
92 unsigned int type;
93 spinlock_t *lock;
94};
95
96extern const struct clk_ops mmp_clk_mix_ops;
97extern struct clk *mmp_clk_register_mix(struct device *dev,
98 const char *name,
99 const char **parent_names,
100 u8 num_parents,
101 unsigned long flags,
102 struct mmp_clk_mix_config *config,
103 spinlock_t *lock);
104
105
106/* Clock type "gate". MMP private gate */
107#define MMP_CLK_GATE_NEED_DELAY BIT(0)
108
109struct mmp_clk_gate {
110 struct clk_hw hw;
111 void __iomem *reg;
112 u32 mask;
113 u32 val_enable;
114 u32 val_disable;
115 unsigned int flags;
116 spinlock_t *lock;
117};
118
119extern const struct clk_ops mmp_clk_gate_ops;
120extern struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
121 const char *parent_name, unsigned long flags,
122 void __iomem *reg, u32 mask, u32 val_enable,
123 u32 val_disable, unsigned int gate_flags,
124 spinlock_t *lock);
125
126
23extern struct clk *mmp_clk_register_pll2(const char *name, 127extern struct clk *mmp_clk_register_pll2(const char *name,
24 const char *parent_name, unsigned long flags); 128 const char *parent_name, unsigned long flags);
25extern struct clk *mmp_clk_register_apbc(const char *name, 129extern struct clk *mmp_clk_register_apbc(const char *name,
@@ -28,8 +132,108 @@ extern struct clk *mmp_clk_register_apbc(const char *name,
28extern struct clk *mmp_clk_register_apmu(const char *name, 132extern struct clk *mmp_clk_register_apmu(const char *name,
29 const char *parent_name, void __iomem *base, u32 enable_mask, 133 const char *parent_name, void __iomem *base, u32 enable_mask,
30 spinlock_t *lock); 134 spinlock_t *lock);
31extern struct clk *mmp_clk_register_factor(const char *name, 135
32 const char *parent_name, unsigned long flags, 136struct mmp_clk_unit {
33 void __iomem *base, struct clk_factor_masks *masks, 137 unsigned int nr_clks;
34 struct clk_factor_tbl *ftbl, unsigned int ftbl_cnt); 138 struct clk **clk_table;
139 struct clk_onecell_data clk_data;
140};
141
142struct mmp_param_fixed_rate_clk {
143 unsigned int id;
144 char *name;
145 const char *parent_name;
146 unsigned long flags;
147 unsigned long fixed_rate;
148};
149void mmp_register_fixed_rate_clks(struct mmp_clk_unit *unit,
150 struct mmp_param_fixed_rate_clk *clks,
151 int size);
152
153struct mmp_param_fixed_factor_clk {
154 unsigned int id;
155 char *name;
156 const char *parent_name;
157 unsigned long mult;
158 unsigned long div;
159 unsigned long flags;
160};
161void mmp_register_fixed_factor_clks(struct mmp_clk_unit *unit,
162 struct mmp_param_fixed_factor_clk *clks,
163 int size);
164
165struct mmp_param_general_gate_clk {
166 unsigned int id;
167 const char *name;
168 const char *parent_name;
169 unsigned long flags;
170 unsigned long offset;
171 u8 bit_idx;
172 u8 gate_flags;
173 spinlock_t *lock;
174};
175void mmp_register_general_gate_clks(struct mmp_clk_unit *unit,
176 struct mmp_param_general_gate_clk *clks,
177 void __iomem *base, int size);
178
179struct mmp_param_gate_clk {
180 unsigned int id;
181 char *name;
182 const char *parent_name;
183 unsigned long flags;
184 unsigned long offset;
185 u32 mask;
186 u32 val_enable;
187 u32 val_disable;
188 unsigned int gate_flags;
189 spinlock_t *lock;
190};
191void mmp_register_gate_clks(struct mmp_clk_unit *unit,
192 struct mmp_param_gate_clk *clks,
193 void __iomem *base, int size);
194
195struct mmp_param_mux_clk {
196 unsigned int id;
197 char *name;
198 const char **parent_name;
199 u8 num_parents;
200 unsigned long flags;
201 unsigned long offset;
202 u8 shift;
203 u8 width;
204 u8 mux_flags;
205 spinlock_t *lock;
206};
207void mmp_register_mux_clks(struct mmp_clk_unit *unit,
208 struct mmp_param_mux_clk *clks,
209 void __iomem *base, int size);
210
211struct mmp_param_div_clk {
212 unsigned int id;
213 char *name;
214 const char *parent_name;
215 unsigned long flags;
216 unsigned long offset;
217 u8 shift;
218 u8 width;
219 u8 div_flags;
220 spinlock_t *lock;
221};
222void mmp_register_div_clks(struct mmp_clk_unit *unit,
223 struct mmp_param_div_clk *clks,
224 void __iomem *base, int size);
225
226#define DEFINE_MIX_REG_INFO(w_d, s_d, w_m, s_m, fc) \
227{ \
228 .width_div = (w_d), \
229 .shift_div = (s_d), \
230 .width_mux = (w_m), \
231 .shift_mux = (s_m), \
232 .bit_fc = (fc), \
233}
234
235void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit,
236 int nr_clks);
237void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
238 struct clk *clk);
35#endif 239#endif
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c
new file mode 100644
index 000000000000..b54da1fe73f0
--- /dev/null
+++ b/drivers/clk/mmp/reset.c
@@ -0,0 +1,99 @@
1#include <linux/slab.h>
2#include <linux/io.h>
3#include <linux/of.h>
4#include <linux/of_address.h>
5#include <linux/reset-controller.h>
6
7#include "reset.h"
8
9#define rcdev_to_unit(rcdev) container_of(rcdev, struct mmp_clk_reset_unit, rcdev)
10
11static int mmp_of_reset_xlate(struct reset_controller_dev *rcdev,
12 const struct of_phandle_args *reset_spec)
13{
14 struct mmp_clk_reset_unit *unit = rcdev_to_unit(rcdev);
15 struct mmp_clk_reset_cell *cell;
16 int i;
17
18 if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells))
19 return -EINVAL;
20
21 for (i = 0; i < rcdev->nr_resets; i++) {
22 cell = &unit->cells[i];
23 if (cell->clk_id == reset_spec->args[0])
24 break;
25 }
26
27 if (i == rcdev->nr_resets)
28 return -EINVAL;
29
30 return i;
31}
32
33static int mmp_clk_reset_assert(struct reset_controller_dev *rcdev,
34 unsigned long id)
35{
36 struct mmp_clk_reset_unit *unit = rcdev_to_unit(rcdev);
37 struct mmp_clk_reset_cell *cell;
38 unsigned long flags = 0;
39 u32 val;
40
41 cell = &unit->cells[id];
42 if (cell->lock)
43 spin_lock_irqsave(cell->lock, flags);
44
45 val = readl(cell->reg);
46 val |= cell->bits;
47 writel(val, cell->reg);
48
49 if (cell->lock)
50 spin_unlock_irqrestore(cell->lock, flags);
51
52 return 0;
53}
54
55static int mmp_clk_reset_deassert(struct reset_controller_dev *rcdev,
56 unsigned long id)
57{
58 struct mmp_clk_reset_unit *unit = rcdev_to_unit(rcdev);
59 struct mmp_clk_reset_cell *cell;
60 unsigned long flags = 0;
61 u32 val;
62
63 cell = &unit->cells[id];
64 if (cell->lock)
65 spin_lock_irqsave(cell->lock, flags);
66
67 val = readl(cell->reg);
68 val &= ~cell->bits;
69 writel(val, cell->reg);
70
71 if (cell->lock)
72 spin_unlock_irqrestore(cell->lock, flags);
73
74 return 0;
75}
76
77static struct reset_control_ops mmp_clk_reset_ops = {
78 .assert = mmp_clk_reset_assert,
79 .deassert = mmp_clk_reset_deassert,
80};
81
82void mmp_clk_reset_register(struct device_node *np,
83 struct mmp_clk_reset_cell *cells, int nr_resets)
84{
85 struct mmp_clk_reset_unit *unit;
86
87 unit = kzalloc(sizeof(*unit), GFP_KERNEL);
88 if (!unit)
89 return;
90
91 unit->cells = cells;
92 unit->rcdev.of_reset_n_cells = 1;
93 unit->rcdev.nr_resets = nr_resets;
94 unit->rcdev.ops = &mmp_clk_reset_ops;
95 unit->rcdev.of_node = np;
96 unit->rcdev.of_xlate = mmp_of_reset_xlate;
97
98 reset_controller_register(&unit->rcdev);
99}
diff --git a/drivers/clk/mmp/reset.h b/drivers/clk/mmp/reset.h
new file mode 100644
index 000000000000..be8b1a7000f7
--- /dev/null
+++ b/drivers/clk/mmp/reset.h
@@ -0,0 +1,31 @@
1#ifndef __MACH_MMP_CLK_RESET_H
2#define __MACH_MMP_CLK_RESET_H
3
4#include <linux/reset-controller.h>
5
6#define MMP_RESET_INVERT 1
7
8struct mmp_clk_reset_cell {
9 unsigned int clk_id;
10 void __iomem *reg;
11 u32 bits;
12 unsigned int flags;
13 spinlock_t *lock;
14};
15
16struct mmp_clk_reset_unit {
17 struct reset_controller_dev rcdev;
18 struct mmp_clk_reset_cell *cells;
19};
20
21#ifdef CONFIG_RESET_CONTROLLER
22void mmp_clk_reset_register(struct device_node *np,
23 struct mmp_clk_reset_cell *cells, int nr_resets);
24#else
25static inline void mmp_clk_reset_register(struct device_node *np,
26 struct mmp_clk_reset_cell *cells, int nr_resets)
27{
28}
29#endif
30
31#endif
diff --git a/drivers/clk/pxa/Makefile b/drivers/clk/pxa/Makefile
index 4ff2abcd500b..38e915344605 100644
--- a/drivers/clk/pxa/Makefile
+++ b/drivers/clk/pxa/Makefile
@@ -1,2 +1,3 @@
1obj-y += clk-pxa.o 1obj-y += clk-pxa.o
2obj-$(CONFIG_PXA25x) += clk-pxa25x.o
2obj-$(CONFIG_PXA27x) += clk-pxa27x.o 3obj-$(CONFIG_PXA27x) += clk-pxa27x.o
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index ef3c05389c0a..4e834753ab09 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -26,12 +26,20 @@ static struct clk_onecell_data onecell_data = {
26 .clk_num = CLK_MAX, 26 .clk_num = CLK_MAX,
27}; 27};
28 28
29#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk_cken, hw) 29struct pxa_clk {
30 struct clk_hw hw;
31 struct clk_fixed_factor lp;
32 struct clk_fixed_factor hp;
33 struct clk_gate gate;
34 bool (*is_in_low_power)(void);
35};
36
37#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk, hw)
30 38
31static unsigned long cken_recalc_rate(struct clk_hw *hw, 39static unsigned long cken_recalc_rate(struct clk_hw *hw,
32 unsigned long parent_rate) 40 unsigned long parent_rate)
33{ 41{
34 struct pxa_clk_cken *pclk = to_pxa_clk(hw); 42 struct pxa_clk *pclk = to_pxa_clk(hw);
35 struct clk_fixed_factor *fix; 43 struct clk_fixed_factor *fix;
36 44
37 if (!pclk->is_in_low_power || pclk->is_in_low_power()) 45 if (!pclk->is_in_low_power || pclk->is_in_low_power())
@@ -48,7 +56,7 @@ static struct clk_ops cken_rate_ops = {
48 56
49static u8 cken_get_parent(struct clk_hw *hw) 57static u8 cken_get_parent(struct clk_hw *hw)
50{ 58{
51 struct pxa_clk_cken *pclk = to_pxa_clk(hw); 59 struct pxa_clk *pclk = to_pxa_clk(hw);
52 60
53 if (!pclk->is_in_low_power) 61 if (!pclk->is_in_low_power)
54 return 0; 62 return 0;
@@ -69,29 +77,32 @@ void __init clkdev_pxa_register(int ckid, const char *con_id,
69 clk_register_clkdev(clk, con_id, dev_id); 77 clk_register_clkdev(clk, con_id, dev_id);
70} 78}
71 79
72int __init clk_pxa_cken_init(struct pxa_clk_cken *clks, int nb_clks) 80int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
73{ 81{
74 int i; 82 int i;
75 struct pxa_clk_cken *pclk; 83 struct pxa_clk *pxa_clk;
76 struct clk *clk; 84 struct clk *clk;
77 85
78 for (i = 0; i < nb_clks; i++) { 86 for (i = 0; i < nb_clks; i++) {
79 pclk = clks + i; 87 pxa_clk = kzalloc(sizeof(*pxa_clk), GFP_KERNEL);
80 pclk->gate.lock = &lock; 88 pxa_clk->is_in_low_power = clks[i].is_in_low_power;
81 clk = clk_register_composite(NULL, pclk->name, 89 pxa_clk->lp = clks[i].lp;
82 pclk->parent_names, 2, 90 pxa_clk->hp = clks[i].hp;
83 &pclk->hw, &cken_mux_ops, 91 pxa_clk->gate = clks[i].gate;
84 &pclk->hw, &cken_rate_ops, 92 pxa_clk->gate.lock = &lock;
85 &pclk->gate.hw, &clk_gate_ops, 93 clk = clk_register_composite(NULL, clks[i].name,
86 pclk->flags); 94 clks[i].parent_names, 2,
87 clkdev_pxa_register(pclk->ckid, pclk->con_id, pclk->dev_id, 95 &pxa_clk->hw, &cken_mux_ops,
88 clk); 96 &pxa_clk->hw, &cken_rate_ops,
97 &pxa_clk->gate.hw, &clk_gate_ops,
98 clks[i].flags);
99 clkdev_pxa_register(clks[i].ckid, clks[i].con_id,
100 clks[i].dev_id, clk);
89 } 101 }
90 return 0; 102 return 0;
91} 103}
92 104
93static void __init pxa_dt_clocks_init(struct device_node *np) 105void __init clk_pxa_dt_common_init(struct device_node *np)
94{ 106{
95 of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data); 107 of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
96} 108}
97CLK_OF_DECLARE(pxa_clks, "marvell,pxa-clocks", pxa_dt_clocks_init);
diff --git a/drivers/clk/pxa/clk-pxa.h b/drivers/clk/pxa/clk-pxa.h
index 5fe219d06b49..323965430111 100644
--- a/drivers/clk/pxa/clk-pxa.h
+++ b/drivers/clk/pxa/clk-pxa.h
@@ -25,7 +25,7 @@
25 static struct clk_ops name ## _rate_ops = { \ 25 static struct clk_ops name ## _rate_ops = { \
26 .recalc_rate = name ## _get_rate, \ 26 .recalc_rate = name ## _get_rate, \
27 }; \ 27 }; \
28 static struct clk *clk_register_ ## name(void) \ 28 static struct clk * __init clk_register_ ## name(void) \
29 { \ 29 { \
30 return clk_register_composite(NULL, clk_name, \ 30 return clk_register_composite(NULL, clk_name, \
31 name ## _parents, \ 31 name ## _parents, \
@@ -40,7 +40,7 @@
40 static struct clk_ops name ## _rate_ops = { \ 40 static struct clk_ops name ## _rate_ops = { \
41 .recalc_rate = name ## _get_rate, \ 41 .recalc_rate = name ## _get_rate, \
42 }; \ 42 }; \
43 static struct clk *clk_register_ ## name(void) \ 43 static struct clk * __init clk_register_ ## name(void) \
44 { \ 44 { \
45 return clk_register_composite(NULL, clk_name, \ 45 return clk_register_composite(NULL, clk_name, \
46 name ## _parents, \ 46 name ## _parents, \
@@ -66,7 +66,7 @@
66 * | Clock | --- | / div_hp | 66 * | Clock | --- | / div_hp |
67 * +------------+ +-----------+ 67 * +------------+ +-----------+
68 */ 68 */
69struct pxa_clk_cken { 69struct desc_clk_cken {
70 struct clk_hw hw; 70 struct clk_hw hw;
71 int ckid; 71 int ckid;
72 const char *name; 72 const char *name;
@@ -102,6 +102,7 @@ static int dummy_clk_set_parent(struct clk_hw *hw, u8 index)
102 102
103extern void clkdev_pxa_register(int ckid, const char *con_id, 103extern void clkdev_pxa_register(int ckid, const char *con_id,
104 const char *dev_id, struct clk *clk); 104 const char *dev_id, struct clk *clk);
105extern int clk_pxa_cken_init(struct pxa_clk_cken *clks, int nb_clks); 105extern int clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks);
106void clk_pxa_dt_common_init(struct device_node *np);
106 107
107#endif 108#endif
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
new file mode 100644
index 000000000000..6cd88d963a7f
--- /dev/null
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -0,0 +1,273 @@
1/*
2 * Marvell PXA25x family clocks
3 *
4 * Copyright (C) 2014 Robert Jarzmik
5 *
6 * Heavily inspired from former arch/arm/mach-pxa/pxa25x.c.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; version 2 of the License.
11 *
12 * For non-devicetree platforms. Once pxa is fully converted to devicetree, this
13 * should go away.
14 */
15#include <linux/clk-provider.h>
16#include <linux/clk.h>
17#include <linux/clkdev.h>
18#include <linux/io.h>
19#include <linux/of.h>
20#include <mach/pxa25x.h>
21#include <mach/pxa2xx-regs.h>
22
23#include <dt-bindings/clock/pxa-clock.h>
24#include "clk-pxa.h"
25
26#define KHz 1000
27#define MHz (1000 * 1000)
28
29enum {
30 PXA_CORE_RUN = 0,
31 PXA_CORE_TURBO,
32};
33
34/*
35 * Various clock factors driven by the CCCR register.
36 */
37
38/* Crystal Frequency to Memory Frequency Multiplier (L) */
39static unsigned char L_clk_mult[32] = { 0, 27, 32, 36, 40, 45, 0, };
40
41/* Memory Frequency to Run Mode Frequency Multiplier (M) */
42static unsigned char M_clk_mult[4] = { 0, 1, 2, 4 };
43
44/* Run Mode Frequency to Turbo Mode Frequency Multiplier (N) */
45/* Note: we store the value N * 2 here. */
46static unsigned char N2_clk_mult[8] = { 0, 0, 2, 3, 4, 0, 6, 0 };
47
48static const char * const get_freq_khz[] = {
49 "core", "run", "cpll", "memory"
50};
51
52/*
53 * Get the clock frequency as reflected by CCCR and the turbo flag.
54 * We assume these values have been applied via a fcs.
55 * If info is not 0 we also display the current settings.
56 */
57unsigned int pxa25x_get_clk_frequency_khz(int info)
58{
59 struct clk *clk;
60 unsigned long clks[5];
61 int i;
62
63 for (i = 0; i < ARRAY_SIZE(get_freq_khz); i++) {
64 clk = clk_get(NULL, get_freq_khz[i]);
65 if (IS_ERR(clk)) {
66 clks[i] = 0;
67 } else {
68 clks[i] = clk_get_rate(clk);
69 clk_put(clk);
70 }
71 }
72
73 if (info) {
74 pr_info("Run Mode clock: %ld.%02ldMHz\n",
75 clks[1] / 1000000, (clks[1] % 1000000) / 10000);
76 pr_info("Turbo Mode clock: %ld.%02ldMHz\n",
77 clks[2] / 1000000, (clks[2] % 1000000) / 10000);
78 pr_info("Memory clock: %ld.%02ldMHz\n",
79 clks[3] / 1000000, (clks[3] % 1000000) / 10000);
80 }
81
82 return (unsigned int)clks[0];
83}
84
85static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
86 unsigned long parent_rate)
87{
88 unsigned long cccr = CCCR;
89 unsigned int m = M_clk_mult[(cccr >> 5) & 0x03];
90
91 return parent_rate / m;
92}
93PARENTS(clk_pxa25x_memory) = { "run" };
94RATE_RO_OPS(clk_pxa25x_memory, "memory");
95
96PARENTS(pxa25x_pbus95) = { "ppll_95_85mhz", "ppll_95_85mhz" };
97PARENTS(pxa25x_pbus147) = { "ppll_147_46mhz", "ppll_147_46mhz" };
98PARENTS(pxa25x_osc3) = { "osc_3_6864mhz", "osc_3_6864mhz" };
99
100#define PXA25X_CKEN(dev_id, con_id, parents, mult, div, \
101 bit, is_lp, flags) \
102 PXA_CKEN(dev_id, con_id, bit, parents, mult, div, mult, div, \
103 is_lp, &CKEN, CKEN_ ## bit, flags)
104#define PXA25X_PBUS95_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay) \
105 PXA25X_CKEN(dev_id, con_id, pxa25x_pbus95_parents, mult_hp, \
106 div_hp, bit, NULL, 0)
107#define PXA25X_PBUS147_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay)\
108 PXA25X_CKEN(dev_id, con_id, pxa25x_pbus147_parents, mult_hp, \
109 div_hp, bit, NULL, 0)
110#define PXA25X_OSC3_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay) \
111 PXA25X_CKEN(dev_id, con_id, pxa25x_osc3_parents, mult_hp, \
112 div_hp, bit, NULL, 0)
113
114#define PXA25X_CKEN_1RATE(dev_id, con_id, bit, parents, delay) \
115 PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
116 &CKEN, CKEN_ ## bit, 0)
117#define PXA25X_CKEN_1RATE_AO(dev_id, con_id, bit, parents, delay) \
118 PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
119 &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
120
121static struct desc_clk_cken pxa25x_clocks[] __initdata = {
122 PXA25X_PBUS95_CKEN("pxa2xx-mci.0", NULL, MMC, 1, 5, 0),
123 PXA25X_PBUS95_CKEN("pxa2xx-i2c.0", NULL, I2C, 1, 3, 0),
124 PXA25X_PBUS95_CKEN("pxa2xx-ir", "FICPCLK", FICP, 1, 2, 0),
125 PXA25X_PBUS95_CKEN("pxa25x-udc", NULL, USB, 1, 2, 5),
126 PXA25X_PBUS147_CKEN("pxa2xx-uart.0", NULL, FFUART, 1, 10, 1),
127 PXA25X_PBUS147_CKEN("pxa2xx-uart.1", NULL, BTUART, 1, 10, 1),
128 PXA25X_PBUS147_CKEN("pxa2xx-uart.2", NULL, STUART, 1, 10, 1),
129 PXA25X_PBUS147_CKEN("pxa2xx-uart.3", NULL, HWUART, 1, 10, 1),
130 PXA25X_PBUS147_CKEN("pxa2xx-i2s", NULL, I2S, 1, 10, 0),
131 PXA25X_PBUS147_CKEN(NULL, "AC97CLK", AC97, 1, 12, 0),
132 PXA25X_OSC3_CKEN("pxa25x-ssp.0", NULL, SSP, 1, 1, 0),
133 PXA25X_OSC3_CKEN("pxa25x-nssp.1", NULL, NSSP, 1, 1, 0),
134 PXA25X_OSC3_CKEN("pxa25x-nssp.2", NULL, ASSP, 1, 1, 0),
135 PXA25X_OSC3_CKEN("pxa25x-pwm.0", NULL, PWM0, 1, 1, 0),
136 PXA25X_OSC3_CKEN("pxa25x-pwm.1", NULL, PWM1, 1, 1, 0),
137
138 PXA25X_CKEN_1RATE("pxa2xx-fb", NULL, LCD, clk_pxa25x_memory_parents, 0),
139 PXA25X_CKEN_1RATE_AO("pxa2xx-pcmcia", NULL, MEMC,
140 clk_pxa25x_memory_parents, 0),
141};
142
143static u8 clk_pxa25x_core_get_parent(struct clk_hw *hw)
144{
145 unsigned long clkcfg;
146 unsigned int t;
147
148 asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
149 t = clkcfg & (1 << 0);
150 if (t)
151 return PXA_CORE_TURBO;
152 return PXA_CORE_RUN;
153}
154
155static unsigned long clk_pxa25x_core_get_rate(struct clk_hw *hw,
156 unsigned long parent_rate)
157{
158 return parent_rate;
159}
160PARENTS(clk_pxa25x_core) = { "run", "cpll" };
161MUX_RO_RATE_RO_OPS(clk_pxa25x_core, "core");
162
163static unsigned long clk_pxa25x_run_get_rate(struct clk_hw *hw,
164 unsigned long parent_rate)
165{
166 unsigned long cccr = CCCR;
167 unsigned int n2 = N2_clk_mult[(cccr >> 7) & 0x07];
168
169 return (parent_rate / n2) * 2;
170}
171PARENTS(clk_pxa25x_run) = { "cpll" };
172RATE_RO_OPS(clk_pxa25x_run, "run");
173
174static unsigned long clk_pxa25x_cpll_get_rate(struct clk_hw *hw,
175 unsigned long parent_rate)
176{
177 unsigned long clkcfg, cccr = CCCR;
178 unsigned int l, m, n2, t;
179
180 asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
181 t = clkcfg & (1 << 0);
182 l = L_clk_mult[(cccr >> 0) & 0x1f];
183 m = M_clk_mult[(cccr >> 5) & 0x03];
184 n2 = N2_clk_mult[(cccr >> 7) & 0x07];
185
186 if (t)
187 return m * l * n2 * parent_rate / 2;
188 return m * l * parent_rate;
189}
190PARENTS(clk_pxa25x_cpll) = { "osc_3_6864mhz" };
191RATE_RO_OPS(clk_pxa25x_cpll, "cpll");
192
193static void __init pxa25x_register_core(void)
194{
195 clk_register_clk_pxa25x_cpll();
196 clk_register_clk_pxa25x_run();
197 clkdev_pxa_register(CLK_CORE, "core", NULL,
198 clk_register_clk_pxa25x_core());
199}
200
201static void __init pxa25x_register_plls(void)
202{
203 clk_register_fixed_rate(NULL, "osc_3_6864mhz", NULL,
204 CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
205 3686400);
206 clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
207 CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
208 32768);
209 clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
210 clk_register_fixed_factor(NULL, "ppll_95_85mhz", "osc_3_6864mhz",
211 0, 26, 1);
212 clk_register_fixed_factor(NULL, "ppll_147_46mhz", "osc_3_6864mhz",
213 0, 40, 1);
214}
215
216static void __init pxa25x_base_clocks_init(void)
217{
218 pxa25x_register_plls();
219 pxa25x_register_core();
220 clk_register_clk_pxa25x_memory();
221}
222
223#define DUMMY_CLK(_con_id, _dev_id, _parent) \
224 { .con_id = _con_id, .dev_id = _dev_id, .parent = _parent }
225struct dummy_clk {
226 const char *con_id;
227 const char *dev_id;
228 const char *parent;
229};
230static struct dummy_clk dummy_clks[] __initdata = {
231 DUMMY_CLK(NULL, "pxa25x-gpio", "osc_32_768khz"),
232 DUMMY_CLK(NULL, "pxa26x-gpio", "osc_32_768khz"),
233 DUMMY_CLK("GPIO11_CLK", NULL, "osc_3_6864mhz"),
234 DUMMY_CLK("GPIO12_CLK", NULL, "osc_32_768khz"),
235 DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
236 DUMMY_CLK("OSTIMER0", NULL, "osc_32_768khz"),
237 DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
238};
239
240static void __init pxa25x_dummy_clocks_init(void)
241{
242 struct clk *clk;
243 struct dummy_clk *d;
244 const char *name;
245 int i;
246
247 /*
248 * All pinctrl logic has been wiped out of the clock driver, especially
249 * for gpio11 and gpio12 outputs. Machine code should ensure proper pin
250 * control (ie. pxa2xx_mfp_config() invocation).
251 */
252 for (i = 0; i < ARRAY_SIZE(dummy_clks); i++) {
253 d = &dummy_clks[i];
254 name = d->dev_id ? d->dev_id : d->con_id;
255 clk = clk_register_fixed_factor(NULL, name, d->parent, 0, 1, 1);
256 clk_register_clkdev(clk, d->con_id, d->dev_id);
257 }
258}
259
260int __init pxa25x_clocks_init(void)
261{
262 pxa25x_base_clocks_init();
263 pxa25x_dummy_clocks_init();
264 return clk_pxa_cken_init(pxa25x_clocks, ARRAY_SIZE(pxa25x_clocks));
265}
266
267static void __init pxa25x_dt_clocks_init(struct device_node *np)
268{
269 pxa25x_clocks_init();
270 clk_pxa_dt_common_init(np);
271}
272CLK_OF_DECLARE(pxa25x_clks, "marvell,pxa250-core-clocks",
273 pxa25x_dt_clocks_init);
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 88b9fe13fa44..5f9b54b024b9 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -111,7 +111,7 @@ PARENTS(pxa27x_membus) = { "lcd_base", "lcd_base" };
111 PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \ 111 PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \
112 &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED) 112 &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
113 113
114static struct pxa_clk_cken pxa27x_clocks[] = { 114static struct desc_clk_cken pxa27x_clocks[] __initdata = {
115 PXA27X_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 2, 42, 1), 115 PXA27X_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 2, 42, 1),
116 PXA27X_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 2, 42, 1), 116 PXA27X_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 2, 42, 1),
117 PXA27X_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 2, 42, 1), 117 PXA27X_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 2, 42, 1),
@@ -368,3 +368,10 @@ static int __init pxa27x_clocks_init(void)
368 return clk_pxa_cken_init(pxa27x_clocks, ARRAY_SIZE(pxa27x_clocks)); 368 return clk_pxa_cken_init(pxa27x_clocks, ARRAY_SIZE(pxa27x_clocks));
369} 369}
370postcore_initcall(pxa27x_clocks_init); 370postcore_initcall(pxa27x_clocks_init);
371
372static void __init pxa27x_dt_clocks_init(struct device_node *np)
373{
374 pxa27x_clocks_init();
375 clk_pxa_dt_common_init(np);
376}
377CLK_OF_DECLARE(pxa_clks, "marvell,pxa270-clocks", pxa27x_dt_clocks_init);
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index b823bc3b6250..60873a7f45d9 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -141,7 +141,7 @@ struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate)
141 141
142static long 142static long
143clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate, 143clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
144 unsigned long *p_rate, struct clk **p) 144 unsigned long *p_rate, struct clk_hw **p)
145{ 145{
146 struct clk_pll *pll = to_clk_pll(hw); 146 struct clk_pll *pll = to_clk_pll(hw);
147 const struct pll_freq_tbl *f; 147 const struct pll_freq_tbl *f;
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index b6e6959e89aa..0b93972c8807 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -368,16 +368,17 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
368 368
369static long _freq_tbl_determine_rate(struct clk_hw *hw, 369static long _freq_tbl_determine_rate(struct clk_hw *hw,
370 const struct freq_tbl *f, unsigned long rate, 370 const struct freq_tbl *f, unsigned long rate,
371 unsigned long *p_rate, struct clk **p) 371 unsigned long *p_rate, struct clk_hw **p_hw)
372{ 372{
373 unsigned long clk_flags; 373 unsigned long clk_flags;
374 struct clk *p;
374 375
375 f = qcom_find_freq(f, rate); 376 f = qcom_find_freq(f, rate);
376 if (!f) 377 if (!f)
377 return -EINVAL; 378 return -EINVAL;
378 379
379 clk_flags = __clk_get_flags(hw->clk); 380 clk_flags = __clk_get_flags(hw->clk);
380 *p = clk_get_parent_by_index(hw->clk, f->src); 381 p = clk_get_parent_by_index(hw->clk, f->src);
381 if (clk_flags & CLK_SET_RATE_PARENT) { 382 if (clk_flags & CLK_SET_RATE_PARENT) {
382 rate = rate * f->pre_div; 383 rate = rate * f->pre_div;
383 if (f->n) { 384 if (f->n) {
@@ -387,15 +388,16 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
387 rate = tmp; 388 rate = tmp;
388 } 389 }
389 } else { 390 } else {
390 rate = __clk_get_rate(*p); 391 rate = __clk_get_rate(p);
391 } 392 }
393 *p_hw = __clk_get_hw(p);
392 *p_rate = rate; 394 *p_rate = rate;
393 395
394 return f->freq; 396 return f->freq;
395} 397}
396 398
397static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate, 399static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
398 unsigned long *p_rate, struct clk **p) 400 unsigned long *p_rate, struct clk_hw **p)
399{ 401{
400 struct clk_rcg *rcg = to_clk_rcg(hw); 402 struct clk_rcg *rcg = to_clk_rcg(hw);
401 403
@@ -403,7 +405,7 @@ static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
403} 405}
404 406
405static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate, 407static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
406 unsigned long *p_rate, struct clk **p) 408 unsigned long *p_rate, struct clk_hw **p)
407{ 409{
408 struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw); 410 struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
409 411
@@ -411,13 +413,15 @@ static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
411} 413}
412 414
413static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate, 415static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
414 unsigned long *p_rate, struct clk **p) 416 unsigned long *p_rate, struct clk_hw **p_hw)
415{ 417{
416 struct clk_rcg *rcg = to_clk_rcg(hw); 418 struct clk_rcg *rcg = to_clk_rcg(hw);
417 const struct freq_tbl *f = rcg->freq_tbl; 419 const struct freq_tbl *f = rcg->freq_tbl;
420 struct clk *p;
418 421
419 *p = clk_get_parent_by_index(hw->clk, f->src); 422 p = clk_get_parent_by_index(hw->clk, f->src);
420 *p_rate = __clk_round_rate(*p, rate); 423 *p_hw = __clk_get_hw(p);
424 *p_rate = __clk_round_rate(p, rate);
421 425
422 return *p_rate; 426 return *p_rate;
423} 427}
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index cfa9eb4fe9ca..08b8b3729f53 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -175,16 +175,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
175 175
176static long _freq_tbl_determine_rate(struct clk_hw *hw, 176static long _freq_tbl_determine_rate(struct clk_hw *hw,
177 const struct freq_tbl *f, unsigned long rate, 177 const struct freq_tbl *f, unsigned long rate,
178 unsigned long *p_rate, struct clk **p) 178 unsigned long *p_rate, struct clk_hw **p_hw)
179{ 179{
180 unsigned long clk_flags; 180 unsigned long clk_flags;
181 struct clk *p;
181 182
182 f = qcom_find_freq(f, rate); 183 f = qcom_find_freq(f, rate);
183 if (!f) 184 if (!f)
184 return -EINVAL; 185 return -EINVAL;
185 186
186 clk_flags = __clk_get_flags(hw->clk); 187 clk_flags = __clk_get_flags(hw->clk);
187 *p = clk_get_parent_by_index(hw->clk, f->src); 188 p = clk_get_parent_by_index(hw->clk, f->src);
188 if (clk_flags & CLK_SET_RATE_PARENT) { 189 if (clk_flags & CLK_SET_RATE_PARENT) {
189 if (f->pre_div) { 190 if (f->pre_div) {
190 rate /= 2; 191 rate /= 2;
@@ -198,15 +199,16 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
198 rate = tmp; 199 rate = tmp;
199 } 200 }
200 } else { 201 } else {
201 rate = __clk_get_rate(*p); 202 rate = __clk_get_rate(p);
202 } 203 }
204 *p_hw = __clk_get_hw(p);
203 *p_rate = rate; 205 *p_rate = rate;
204 206
205 return f->freq; 207 return f->freq;
206} 208}
207 209
208static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate, 210static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
209 unsigned long *p_rate, struct clk **p) 211 unsigned long *p_rate, struct clk_hw **p)
210{ 212{
211 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 213 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
212 214
@@ -359,7 +361,7 @@ static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
359} 361}
360 362
361static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate, 363static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
362 unsigned long *p_rate, struct clk **p) 364 unsigned long *p_rate, struct clk_hw **p)
363{ 365{
364 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 366 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
365 const struct freq_tbl *f = rcg->freq_tbl; 367 const struct freq_tbl *f = rcg->freq_tbl;
@@ -371,7 +373,7 @@ static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
371 u32 hid_div; 373 u32 hid_div;
372 374
373 /* Force the correct parent */ 375 /* Force the correct parent */
374 *p = clk_get_parent_by_index(hw->clk, f->src); 376 *p = __clk_get_hw(clk_get_parent_by_index(hw->clk, f->src));
375 377
376 if (src_rate == 810000000) 378 if (src_rate == 810000000)
377 frac = frac_table_810m; 379 frac = frac_table_810m;
@@ -410,18 +412,20 @@ const struct clk_ops clk_edp_pixel_ops = {
410EXPORT_SYMBOL_GPL(clk_edp_pixel_ops); 412EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
411 413
412static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate, 414static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
413 unsigned long *p_rate, struct clk **p) 415 unsigned long *p_rate, struct clk_hw **p_hw)
414{ 416{
415 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 417 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
416 const struct freq_tbl *f = rcg->freq_tbl; 418 const struct freq_tbl *f = rcg->freq_tbl;
417 unsigned long parent_rate, div; 419 unsigned long parent_rate, div;
418 u32 mask = BIT(rcg->hid_width) - 1; 420 u32 mask = BIT(rcg->hid_width) - 1;
421 struct clk *p;
419 422
420 if (rate == 0) 423 if (rate == 0)
421 return -EINVAL; 424 return -EINVAL;
422 425
423 *p = clk_get_parent_by_index(hw->clk, f->src); 426 p = clk_get_parent_by_index(hw->clk, f->src);
424 *p_rate = parent_rate = __clk_round_rate(*p, rate); 427 *p_hw = __clk_get_hw(p);
428 *p_rate = parent_rate = __clk_round_rate(p, rate);
425 429
426 div = DIV_ROUND_UP((2 * parent_rate), rate) - 1; 430 div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
427 div = min_t(u32, div, mask); 431 div = min_t(u32, div, mask);
@@ -472,14 +476,16 @@ static const struct frac_entry frac_table_pixel[] = {
472}; 476};
473 477
474static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate, 478static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
475 unsigned long *p_rate, struct clk **p) 479 unsigned long *p_rate, struct clk_hw **p)
476{ 480{
477 struct clk_rcg2 *rcg = to_clk_rcg2(hw); 481 struct clk_rcg2 *rcg = to_clk_rcg2(hw);
478 unsigned long request, src_rate; 482 unsigned long request, src_rate;
479 int delta = 100000; 483 int delta = 100000;
480 const struct freq_tbl *f = rcg->freq_tbl; 484 const struct freq_tbl *f = rcg->freq_tbl;
481 const struct frac_entry *frac = frac_table_pixel; 485 const struct frac_entry *frac = frac_table_pixel;
482 struct clk *parent = *p = clk_get_parent_by_index(hw->clk, f->src); 486 struct clk *parent = clk_get_parent_by_index(hw->clk, f->src);
487
488 *p = __clk_get_hw(parent);
483 489
484 for (; frac->num; frac++) { 490 for (; frac->num; frac++) {
485 request = (rate * frac->den) / frac->num; 491 request = (rate * frac->den) / frac->num;
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index bd8514d63634..2714097f90db 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -6,6 +6,7 @@ obj-y += clk-rockchip.o
6obj-y += clk.o 6obj-y += clk.o
7obj-y += clk-pll.o 7obj-y += clk-pll.o
8obj-y += clk-cpu.o 8obj-y += clk-cpu.o
9obj-y += clk-mmc-phase.o
9obj-$(CONFIG_RESET_CONTROLLER) += softrst.o 10obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
10 11
11obj-y += clk-rk3188.o 12obj-y += clk-rk3188.o
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
new file mode 100644
index 000000000000..c842e3b60f21
--- /dev/null
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -0,0 +1,154 @@
1/*
2 * Copyright 2014 Google, Inc
3 * Author: Alexandru M Stan <amstan@chromium.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/slab.h>
17#include <linux/clk-provider.h>
18#include "clk.h"
19
/*
 * Per-clock state for a Rockchip MMC phase-shift clock.
 * @hw:    handle between the common clock framework and this clock
 * @reg:   MMIO address of the phase-control register
 * @id:    numeric identifier (NOTE(review): never written by
 *         rockchip_clk_register_mmc() below — confirm intended use)
 * @shift: bit offset of this clock's phase field within @reg
 */
20struct rockchip_mmc_clock {
21 struct clk_hw hw;
22 void __iomem *reg;
23 int id;
24 int shift;
25};
26
27#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)
28
/* The MMC sample/drive clocks run at half their parent's rate. */
29#define RK3288_MMC_CLKGEN_DIV 2
30
31static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
32 unsigned long parent_rate)
33{
34 return parent_rate / RK3288_MMC_CLKGEN_DIV;
35}
36
37#define ROCKCHIP_MMC_DELAY_SEL BIT(10)
38#define ROCKCHIP_MMC_DEGREE_MASK 0x3
39#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
40#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
41
42#define PSECS_PER_SEC 1000000000000LL
43
44/*
45 * Each fine delay is between 40ps-80ps. Assume each fine delay is 60ps to
46 * simplify calculations. So 45degs could be anywhere between 33deg and 66deg.
47 */
48#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
49
/*
 * Read back the current phase shift, in degrees (0..359), from the
 * phase-control register. The coarse part comes from the 2-bit degree
 * field (multiples of 90°); if the fine-delay path is enabled
 * (ROCKCHIP_MMC_DELAY_SEL), the 8-bit delay-element count is converted to
 * degrees using the nominal 60ps element delay at the current clock rate.
 */
50static int rockchip_mmc_get_phase(struct clk_hw *hw)
51{
52 struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
53 unsigned long rate = clk_get_rate(hw->clk);
54 u32 raw_value;
55 u16 degrees;
56 u32 delay_num = 0;
57
58 raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
59
/* Coarse phase: two-bit field encoding 0/90/180/270 degrees. */
60 degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
61
62 if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
63 /* degrees/delaynum * 10000 */
/*
 * factor = 360 * rate * 60ps scaled by 10^4 to keep the integer
 * division below reasonably precise.
 */
64 unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
65 36 * (rate / 1000000);
66
67 delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
68 delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
69 degrees += delay_num * factor / 10000;
70 }
71
72 return degrees % 360;
73}
74
/*
 * Program a phase shift of (approximately) @degrees. The requested value
 * is floored to the nearest 22.5° step: full 90° quarters are encoded in
 * the coarse degree field, and each residual 22.5° step is translated into
 * a count of nominal 60ps delay elements at the current clock rate
 * (capped at 255, the width of the delay-num field).
 *
 * Always returns 0.
 */
75static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
76{
77 struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
78 unsigned long rate = clk_get_rate(hw->clk);
79 u8 nineties, remainder;
80 u8 delay_num;
81 u32 raw_value;
82 u64 delay;
83
84 /* allow 22 to be 22.5 */
85 degrees++;
86 /* floor to 22.5 increment */
87 degrees -= ((degrees) * 10 % 225) / 10;
88
89 nineties = degrees / 90;
90 /* 22.5 multiples */
91 remainder = (degrees % 90) / 22;
92
/* delay = picoseconds of one 22.5° step, in 60ps delay elements. */
93 delay = PSECS_PER_SEC;
94 do_div(delay, rate);
95 /* / 360 / 22.5 */
96 do_div(delay, 16);
97 do_div(delay, ROCKCHIP_MMC_DELAY_ELEMENT_PSEC);
98
99 delay *= remainder;
100 delay_num = (u8) min(delay, 255ULL);
101
/* Enable the fine-delay mux only when a non-zero delay is needed. */
102 raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
103 raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
104 raw_value |= nineties;
/* HIWORD_UPDATE writes the field plus its write-enable mask bits. */
105 writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift), mmc_clock->reg);
106
107 pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
108 __clk_get_name(hw->clk), degrees, delay_num,
109 mmc_clock->reg, raw_value>>(mmc_clock->shift),
110 rockchip_mmc_get_phase(hw)
111 );
112
113 return 0;
114}
115
/* Clock framework ops: fixed-divider rate plus phase get/set support. */
116static const struct clk_ops rockchip_mmc_clk_ops = {
117 .recalc_rate = rockchip_mmc_recalc,
118 .get_phase = rockchip_mmc_get_phase,
119 .set_phase = rockchip_mmc_set_phase,
120};
121
122struct clk *rockchip_clk_register_mmc(const char *name,
123 const char **parent_names, u8 num_parents,
124 void __iomem *reg, int shift)
125{
126 struct clk_init_data init;
127 struct rockchip_mmc_clock *mmc_clock;
128 struct clk *clk;
129
130 mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
131 if (!mmc_clock)
132 return NULL;
133
134 init.num_parents = num_parents;
135 init.parent_names = parent_names;
136 init.ops = &rockchip_mmc_clk_ops;
137
138 mmc_clock->hw.init = &init;
139 mmc_clock->reg = reg;
140 mmc_clock->shift = shift;
141
142 if (name)
143 init.name = name;
144
145 clk = clk_register(NULL, &mmc_clock->hw);
146 if (IS_ERR(clk))
147 goto err_free;
148
149 return clk;
150
151err_free:
152 kfree(mmc_clock);
153 return NULL;
154}
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index a3e886a38480..f8d3baf275b2 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -39,6 +39,7 @@ struct rockchip_clk_pll {
39 int lock_offset; 39 int lock_offset;
40 unsigned int lock_shift; 40 unsigned int lock_shift;
41 enum rockchip_pll_type type; 41 enum rockchip_pll_type type;
42 u8 flags;
42 const struct rockchip_pll_rate_table *rate_table; 43 const struct rockchip_pll_rate_table *rate_table;
43 unsigned int rate_count; 44 unsigned int rate_count;
44 spinlock_t *lock; 45 spinlock_t *lock;
@@ -257,6 +258,55 @@ static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
257 return !(pllcon & RK3066_PLLCON3_PWRDOWN); 258 return !(pllcon & RK3066_PLLCON3_PWRDOWN);
258} 259}
259 260
/*
 * .init hook for rk3066-type PLLs flagged with ROCKCHIP_PLL_SYNC_RATE:
 * compare the PLL's current register settings (nr/no/nf/bwadj) against the
 * driver's rate table entry for the boot rate, and reprogram the PLL when
 * they differ — e.g. when firmware left parameters that don't match the
 * table. PLLs without a table entry for the current rate are left alone.
 */
261static void rockchip_rk3066_pll_init(struct clk_hw *hw)
262{
263 struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
264 const struct rockchip_pll_rate_table *rate;
265 unsigned int nf, nr, no, bwadj;
266 unsigned long drate;
267 u32 pllcon;
268
269 if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
270 return;
271
272 drate = __clk_get_rate(hw->clk);
273 rate = rockchip_get_pll_settings(pll, drate);
274
275 /* when no rate setting for the current rate, rely on clk_set_rate */
276 if (!rate)
277 return;
278
/* Read back the live divider values; register fields are value-1. */
279 pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
280 nr = ((pllcon >> RK3066_PLLCON0_NR_SHIFT) & RK3066_PLLCON0_NR_MASK) + 1;
281 no = ((pllcon >> RK3066_PLLCON0_OD_SHIFT) & RK3066_PLLCON0_OD_MASK) + 1;
282
283 pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
284 nf = ((pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK) + 1;
285
286 pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(2));
287 bwadj = (pllcon >> RK3066_PLLCON2_BWADJ_SHIFT) & RK3066_PLLCON2_BWADJ_MASK;
288
289 pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), bwadj(%d:%d)\n",
290 __func__, __clk_get_name(hw->clk), drate, rate->nr, nr,
291 rate->no, no, rate->nf, nf, rate->bwadj, bwadj);
292 if (rate->nr != nr || rate->no != no || rate->nf != nf
293 || rate->bwadj != bwadj) {
294 struct clk *parent = __clk_get_parent(hw->clk);
295 unsigned long prate;
296
297 if (!parent) {
298 pr_warn("%s: parent of %s not available\n",
299 __func__, __clk_get_name(hw->clk));
300 return;
301 }
302
303 pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
304 __func__, __clk_get_name(hw->clk));
305 prate = __clk_get_rate(parent);
306 rockchip_rk3066_pll_set_rate(hw, drate, prate);
307 }
308}
309
260static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = { 310static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
261 .recalc_rate = rockchip_rk3066_pll_recalc_rate, 311 .recalc_rate = rockchip_rk3066_pll_recalc_rate,
262 .enable = rockchip_rk3066_pll_enable, 312 .enable = rockchip_rk3066_pll_enable,
@@ -271,6 +321,7 @@ static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
271 .enable = rockchip_rk3066_pll_enable, 321 .enable = rockchip_rk3066_pll_enable,
272 .disable = rockchip_rk3066_pll_disable, 322 .disable = rockchip_rk3066_pll_disable,
273 .is_enabled = rockchip_rk3066_pll_is_enabled, 323 .is_enabled = rockchip_rk3066_pll_is_enabled,
324 .init = rockchip_rk3066_pll_init,
274}; 325};
275 326
276/* 327/*
@@ -282,7 +333,7 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
282 void __iomem *base, int con_offset, int grf_lock_offset, 333 void __iomem *base, int con_offset, int grf_lock_offset,
283 int lock_shift, int mode_offset, int mode_shift, 334 int lock_shift, int mode_offset, int mode_shift,
284 struct rockchip_pll_rate_table *rate_table, 335 struct rockchip_pll_rate_table *rate_table,
285 spinlock_t *lock) 336 u8 clk_pll_flags, spinlock_t *lock)
286{ 337{
287 const char *pll_parents[3]; 338 const char *pll_parents[3];
288 struct clk_init_data init; 339 struct clk_init_data init;
@@ -345,8 +396,22 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
345 pll->reg_base = base + con_offset; 396 pll->reg_base = base + con_offset;
346 pll->lock_offset = grf_lock_offset; 397 pll->lock_offset = grf_lock_offset;
347 pll->lock_shift = lock_shift; 398 pll->lock_shift = lock_shift;
399 pll->flags = clk_pll_flags;
348 pll->lock = lock; 400 pll->lock = lock;
349 401
402 /* create the mux on top of the real pll */
403 pll->pll_mux_ops = &clk_mux_ops;
404 pll_mux = &pll->pll_mux;
405 pll_mux->reg = base + mode_offset;
406 pll_mux->shift = mode_shift;
407 pll_mux->mask = PLL_MODE_MASK;
408 pll_mux->flags = 0;
409 pll_mux->lock = lock;
410 pll_mux->hw.init = &init;
411
412 if (pll_type == pll_rk3066)
413 pll_mux->flags |= CLK_MUX_HIWORD_MASK;
414
350 pll_clk = clk_register(NULL, &pll->hw); 415 pll_clk = clk_register(NULL, &pll->hw);
351 if (IS_ERR(pll_clk)) { 416 if (IS_ERR(pll_clk)) {
352 pr_err("%s: failed to register pll clock %s : %ld\n", 417 pr_err("%s: failed to register pll clock %s : %ld\n",
@@ -355,10 +420,6 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
355 goto err_pll; 420 goto err_pll;
356 } 421 }
357 422
358 /* create the mux on top of the real pll */
359 pll->pll_mux_ops = &clk_mux_ops;
360 pll_mux = &pll->pll_mux;
361
362 /* the actual muxing is xin24m, pll-output, xin32k */ 423 /* the actual muxing is xin24m, pll-output, xin32k */
363 pll_parents[0] = parent_names[0]; 424 pll_parents[0] = parent_names[0];
364 pll_parents[1] = pll_name; 425 pll_parents[1] = pll_name;
@@ -370,16 +431,6 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
370 init.parent_names = pll_parents; 431 init.parent_names = pll_parents;
371 init.num_parents = ARRAY_SIZE(pll_parents); 432 init.num_parents = ARRAY_SIZE(pll_parents);
372 433
373 pll_mux->reg = base + mode_offset;
374 pll_mux->shift = mode_shift;
375 pll_mux->mask = PLL_MODE_MASK;
376 pll_mux->flags = 0;
377 pll_mux->lock = lock;
378 pll_mux->hw.init = &init;
379
380 if (pll_type == pll_rk3066)
381 pll_mux->flags |= CLK_MUX_HIWORD_MASK;
382
383 mux_clk = clk_register(NULL, &pll_mux->hw); 434 mux_clk = clk_register(NULL, &pll_mux->hw);
384 if (IS_ERR(mux_clk)) 435 if (IS_ERR(mux_clk))
385 goto err_mux; 436 goto err_mux;
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index beed49c79126..c54078960847 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -212,13 +212,13 @@ PNAME(mux_sclk_macref_p) = { "mac_src", "ext_rmii" };
212 212
213static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = { 213static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
214 [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0), 214 [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
215 RK2928_MODE_CON, 0, 6, rk3188_pll_rates), 215 RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates),
216 [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4), 216 [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
217 RK2928_MODE_CON, 4, 5, NULL), 217 RK2928_MODE_CON, 4, 5, 0, NULL),
218 [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8), 218 [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
219 RK2928_MODE_CON, 8, 7, rk3188_pll_rates), 219 RK2928_MODE_CON, 8, 7, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
220 [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12), 220 [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
221 RK2928_MODE_CON, 12, 8, rk3188_pll_rates), 221 RK2928_MODE_CON, 12, 8, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
222}; 222};
223 223
224#define MFLAGS CLK_MUX_HIWORD_MASK 224#define MFLAGS CLK_MUX_HIWORD_MASK
@@ -257,9 +257,9 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
257 GATE(0, "hclk_vdpu", "aclk_vdpu", 0, 257 GATE(0, "hclk_vdpu", "aclk_vdpu", 0,
258 RK2928_CLKGATE_CON(3), 12, GFLAGS), 258 RK2928_CLKGATE_CON(3), 12, GFLAGS),
259 259
260 GATE(0, "gpll_ddr", "gpll", 0, 260 GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
261 RK2928_CLKGATE_CON(1), 7, GFLAGS), 261 RK2928_CLKGATE_CON(1), 7, GFLAGS),
262 COMPOSITE(0, "ddrphy", mux_ddrphy_p, 0, 262 COMPOSITE(0, "ddrphy", mux_ddrphy_p, CLK_IGNORE_UNUSED,
263 RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, 263 RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
264 RK2928_CLKGATE_CON(0), 2, GFLAGS), 264 RK2928_CLKGATE_CON(0), 2, GFLAGS),
265 265
@@ -270,10 +270,10 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
270 RK2928_CLKGATE_CON(0), 6, GFLAGS), 270 RK2928_CLKGATE_CON(0), 6, GFLAGS),
271 GATE(0, "pclk_cpu", "pclk_cpu_pre", 0, 271 GATE(0, "pclk_cpu", "pclk_cpu_pre", 0,
272 RK2928_CLKGATE_CON(0), 5, GFLAGS), 272 RK2928_CLKGATE_CON(0), 5, GFLAGS),
273 GATE(0, "hclk_cpu", "hclk_cpu_pre", 0, 273 GATE(0, "hclk_cpu", "hclk_cpu_pre", CLK_IGNORE_UNUSED,
274 RK2928_CLKGATE_CON(0), 4, GFLAGS), 274 RK2928_CLKGATE_CON(0), 4, GFLAGS),
275 275
276 COMPOSITE(0, "aclk_lcdc0_pre", mux_pll_src_cpll_gpll_p, 0, 276 COMPOSITE(0, "aclk_lcdc0_pre", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
277 RK2928_CLKSEL_CON(31), 7, 1, MFLAGS, 0, 5, DFLAGS, 277 RK2928_CLKSEL_CON(31), 7, 1, MFLAGS, 0, 5, DFLAGS,
278 RK2928_CLKGATE_CON(3), 0, GFLAGS), 278 RK2928_CLKGATE_CON(3), 0, GFLAGS),
279 COMPOSITE(0, "aclk_lcdc1_pre", mux_pll_src_cpll_gpll_p, 0, 279 COMPOSITE(0, "aclk_lcdc1_pre", mux_pll_src_cpll_gpll_p, 0,
@@ -304,9 +304,9 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
304 * the 480m are generated inside the usb block from these clocks, 304 * the 480m are generated inside the usb block from these clocks,
305 * but they are also a source for the hsicphy clock. 305 * but they are also a source for the hsicphy clock.
306 */ 306 */
307 GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", 0, 307 GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
308 RK2928_CLKGATE_CON(1), 5, GFLAGS), 308 RK2928_CLKGATE_CON(1), 5, GFLAGS),
309 GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", 0, 309 GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
310 RK2928_CLKGATE_CON(1), 6, GFLAGS), 310 RK2928_CLKGATE_CON(1), 6, GFLAGS),
311 311
312 COMPOSITE(0, "mac_src", mux_mac_p, 0, 312 COMPOSITE(0, "mac_src", mux_mac_p, 0,
@@ -320,9 +320,9 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
320 COMPOSITE(0, "hsadc_src", mux_pll_src_gpll_cpll_p, 0, 320 COMPOSITE(0, "hsadc_src", mux_pll_src_gpll_cpll_p, 0,
321 RK2928_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS, 321 RK2928_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
322 RK2928_CLKGATE_CON(2), 6, GFLAGS), 322 RK2928_CLKGATE_CON(2), 6, GFLAGS),
323 COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src", 323 COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src", 0,
324 RK2928_CLKSEL_CON(23), 0, 324 RK2928_CLKSEL_CON(23), 0,
325 RK2928_CLKGATE_CON(2), 7, 0, GFLAGS), 325 RK2928_CLKGATE_CON(2), 7, GFLAGS),
326 MUX(SCLK_HSADC, "sclk_hsadc", mux_sclk_hsadc_p, 0, 326 MUX(SCLK_HSADC, "sclk_hsadc", mux_sclk_hsadc_p, 0,
327 RK2928_CLKSEL_CON(22), 4, 2, MFLAGS), 327 RK2928_CLKSEL_CON(22), 4, 2, MFLAGS),
328 328
@@ -330,6 +330,15 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
330 RK2928_CLKSEL_CON(24), 8, 8, DFLAGS, 330 RK2928_CLKSEL_CON(24), 8, 8, DFLAGS,
331 RK2928_CLKGATE_CON(2), 8, GFLAGS), 331 RK2928_CLKGATE_CON(2), 8, GFLAGS),
332 332
333 COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
334 RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
335 RK2928_CLKGATE_CON(0), 13, GFLAGS),
336 COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
337 RK2928_CLKSEL_CON(9), 0,
338 RK2928_CLKGATE_CON(0), 14, GFLAGS),
339 MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
340 RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
341
333 /* 342 /*
334 * Clock-Architecture Diagram 4 343 * Clock-Architecture Diagram 4
335 */ 344 */
@@ -399,8 +408,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
399 408
400 /* aclk_cpu gates */ 409 /* aclk_cpu gates */
401 GATE(ACLK_DMA1, "aclk_dma1", "aclk_cpu", 0, RK2928_CLKGATE_CON(5), 0, GFLAGS), 410 GATE(ACLK_DMA1, "aclk_dma1", "aclk_cpu", 0, RK2928_CLKGATE_CON(5), 0, GFLAGS),
402 GATE(0, "aclk_intmem", "aclk_cpu", 0, RK2928_CLKGATE_CON(4), 12, GFLAGS), 411 GATE(0, "aclk_intmem", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 12, GFLAGS),
403 GATE(0, "aclk_strc_sys", "aclk_cpu", 0, RK2928_CLKGATE_CON(4), 10, GFLAGS), 412 GATE(0, "aclk_strc_sys", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 10, GFLAGS),
404 413
405 /* hclk_cpu gates */ 414 /* hclk_cpu gates */
406 GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK2928_CLKGATE_CON(5), 6, GFLAGS), 415 GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK2928_CLKGATE_CON(5), 6, GFLAGS),
@@ -410,14 +419,14 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
410 /* hclk_ahb2apb is part of a clk branch */ 419 /* hclk_ahb2apb is part of a clk branch */
411 GATE(0, "hclk_vio_bus", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 12, GFLAGS), 420 GATE(0, "hclk_vio_bus", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 12, GFLAGS),
412 GATE(HCLK_LCDC0, "hclk_lcdc0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 1, GFLAGS), 421 GATE(HCLK_LCDC0, "hclk_lcdc0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 1, GFLAGS),
413 GATE(HCLK_LCDC1, "hclk_lcdc1", "aclk_cpu", 0, RK2928_CLKGATE_CON(6), 2, GFLAGS), 422 GATE(HCLK_LCDC1, "hclk_lcdc1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 2, GFLAGS),
414 GATE(HCLK_CIF0, "hclk_cif0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 4, GFLAGS), 423 GATE(HCLK_CIF0, "hclk_cif0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 4, GFLAGS),
415 GATE(HCLK_IPP, "hclk_ipp", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 9, GFLAGS), 424 GATE(HCLK_IPP, "hclk_ipp", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 9, GFLAGS),
416 GATE(HCLK_RGA, "hclk_rga", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 10, GFLAGS), 425 GATE(HCLK_RGA, "hclk_rga", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 10, GFLAGS),
417 426
418 /* hclk_peri gates */ 427 /* hclk_peri gates */
419 GATE(0, "hclk_peri_axi_matrix", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 0, GFLAGS), 428 GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS),
420 GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 6, GFLAGS), 429 GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS),
421 GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS), 430 GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS),
422 GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS), 431 GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
423 GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS), 432 GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
@@ -457,18 +466,18 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
457 GATE(0, "pclk_ddrupctl", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 7, GFLAGS), 466 GATE(0, "pclk_ddrupctl", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 7, GFLAGS),
458 GATE(0, "pclk_ddrpubl", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS), 467 GATE(0, "pclk_ddrpubl", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS),
459 GATE(0, "pclk_dbg", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 1, GFLAGS), 468 GATE(0, "pclk_dbg", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 1, GFLAGS),
460 GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 4, GFLAGS), 469 GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
461 GATE(PCLK_PMU, "pclk_pmu", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 5, GFLAGS), 470 GATE(PCLK_PMU, "pclk_pmu", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 5, GFLAGS),
462 471
463 /* aclk_peri */ 472 /* aclk_peri */
464 GATE(ACLK_DMA2, "aclk_dma2", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 1, GFLAGS), 473 GATE(ACLK_DMA2, "aclk_dma2", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 1, GFLAGS),
465 GATE(ACLK_SMC, "aclk_smc", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 8, GFLAGS), 474 GATE(ACLK_SMC, "aclk_smc", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 8, GFLAGS),
466 GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 4, GFLAGS), 475 GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 4, GFLAGS),
467 GATE(0, "aclk_cpu_peri", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 2, GFLAGS), 476 GATE(0, "aclk_cpu_peri", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 2, GFLAGS),
468 GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 3, GFLAGS), 477 GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 3, GFLAGS),
469 478
470 /* pclk_peri gates */ 479 /* pclk_peri gates */
471 GATE(0, "pclk_peri_axi_matrix", "pclk_peri", 0, RK2928_CLKGATE_CON(4), 1, GFLAGS), 480 GATE(0, "pclk_peri_axi_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 1, GFLAGS),
472 GATE(PCLK_PWM23, "pclk_pwm23", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 11, GFLAGS), 481 GATE(PCLK_PWM23, "pclk_pwm23", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 11, GFLAGS),
473 GATE(PCLK_WDT, "pclk_wdt", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS), 482 GATE(PCLK_WDT, "pclk_wdt", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
474 GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 12, GFLAGS), 483 GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 12, GFLAGS),
@@ -511,7 +520,7 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
511 | CLK_DIVIDER_READ_ONLY, 520 | CLK_DIVIDER_READ_ONLY,
512 RK2928_CLKGATE_CON(4), 9, GFLAGS), 521 RK2928_CLKGATE_CON(4), 9, GFLAGS),
513 522
514 GATE(CORE_L2C, "core_l2c", "aclk_cpu", 0, 523 GATE(CORE_L2C, "core_l2c", "aclk_cpu", CLK_IGNORE_UNUSED,
515 RK2928_CLKGATE_CON(9), 4, GFLAGS), 524 RK2928_CLKGATE_CON(9), 4, GFLAGS),
516 525
517 COMPOSITE(0, "aclk_peri_pre", mux_pll_src_gpll_cpll_p, 0, 526 COMPOSITE(0, "aclk_peri_pre", mux_pll_src_gpll_cpll_p, 0,
@@ -577,14 +586,6 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
577 RK2928_CLKGATE_CON(0), 12, GFLAGS), 586 RK2928_CLKGATE_CON(0), 12, GFLAGS),
578 MUX(SCLK_I2S2, "sclk_i2s2", mux_sclk_i2s2_p, 0, 587 MUX(SCLK_I2S2, "sclk_i2s2", mux_sclk_i2s2_p, 0,
579 RK2928_CLKSEL_CON(4), 8, 2, MFLAGS), 588 RK2928_CLKSEL_CON(4), 8, 2, MFLAGS),
580 COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
581 RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
582 RK2928_CLKGATE_CON(0), 13, GFLAGS),
583 COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
584 RK2928_CLKSEL_CON(9), 0,
585 RK2928_CLKGATE_CON(0), 14, GFLAGS),
586 MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
587 RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
588 589
589 GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS), 590 GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
590 GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS), 591 GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
@@ -618,7 +619,7 @@ PNAME(mux_hsicphy_p) = { "sclk_otgphy0", "sclk_otgphy1",
618 "gpll", "cpll" }; 619 "gpll", "cpll" };
619 620
620static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = { 621static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
621 COMPOSITE_NOMUX_DIVTBL(0, "aclk_core", "armclk", 0, 622 COMPOSITE_NOMUX_DIVTBL(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED,
622 RK2928_CLKSEL_CON(1), 3, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, 623 RK2928_CLKSEL_CON(1), 3, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
623 div_rk3188_aclk_core_t, RK2928_CLKGATE_CON(0), 7, GFLAGS), 624 div_rk3188_aclk_core_t, RK2928_CLKGATE_CON(0), 7, GFLAGS),
624 625
@@ -633,7 +634,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
633 RK2928_CLKSEL_CON(1), 14, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, 634 RK2928_CLKSEL_CON(1), 14, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
634 RK2928_CLKGATE_CON(4), 9, GFLAGS), 635 RK2928_CLKGATE_CON(4), 9, GFLAGS),
635 636
636 GATE(CORE_L2C, "core_l2c", "armclk", 0, 637 GATE(CORE_L2C, "core_l2c", "armclk", CLK_IGNORE_UNUSED,
637 RK2928_CLKGATE_CON(9), 4, GFLAGS), 638 RK2928_CLKGATE_CON(9), 4, GFLAGS),
638 639
639 COMPOSITE(0, "aclk_peri_pre", mux_pll_src_cpll_gpll_p, 0, 640 COMPOSITE(0, "aclk_peri_pre", mux_pll_src_cpll_gpll_p, 0,
@@ -663,7 +664,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
663 RK2928_CLKSEL_CON(30), 0, 2, DFLAGS, 664 RK2928_CLKSEL_CON(30), 0, 2, DFLAGS,
664 RK2928_CLKGATE_CON(3), 6, GFLAGS), 665 RK2928_CLKGATE_CON(3), 6, GFLAGS),
665 DIV(0, "sclk_hsicphy_12m", "sclk_hsicphy_480m", 0, 666 DIV(0, "sclk_hsicphy_12m", "sclk_hsicphy_480m", 0,
666 RK2928_CLKGATE_CON(11), 8, 6, DFLAGS), 667 RK2928_CLKSEL_CON(11), 8, 6, DFLAGS),
667 668
668 MUX(0, "i2s_src", mux_pll_src_gpll_cpll_p, 0, 669 MUX(0, "i2s_src", mux_pll_src_gpll_cpll_p, 0,
669 RK2928_CLKSEL_CON(2), 15, 1, MFLAGS), 670 RK2928_CLKSEL_CON(2), 15, 1, MFLAGS),
@@ -675,14 +676,6 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
675 RK2928_CLKGATE_CON(0), 10, GFLAGS), 676 RK2928_CLKGATE_CON(0), 10, GFLAGS),
676 MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0, 677 MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
677 RK2928_CLKSEL_CON(3), 8, 2, MFLAGS), 678 RK2928_CLKSEL_CON(3), 8, 2, MFLAGS),
678 COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
679 RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
680 RK2928_CLKGATE_CON(13), 13, GFLAGS),
681 COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
682 RK2928_CLKSEL_CON(9), 0,
683 RK2928_CLKGATE_CON(0), 14, GFLAGS),
684 MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
685 RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
686 679
687 GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS), 680 GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
688 GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS), 681 GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 23278291da44..ac6be7c0132d 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -16,6 +16,7 @@
16#include <linux/clk-provider.h> 16#include <linux/clk-provider.h>
17#include <linux/of.h> 17#include <linux/of.h>
18#include <linux/of_address.h> 18#include <linux/of_address.h>
19#include <linux/syscore_ops.h>
19#include <dt-bindings/clock/rk3288-cru.h> 20#include <dt-bindings/clock/rk3288-cru.h>
20#include "clk.h" 21#include "clk.h"
21 22
@@ -83,11 +84,13 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = {
83 RK3066_PLL_RATE( 742500000, 8, 495, 2), 84 RK3066_PLL_RATE( 742500000, 8, 495, 2),
84 RK3066_PLL_RATE( 696000000, 1, 58, 2), 85 RK3066_PLL_RATE( 696000000, 1, 58, 2),
85 RK3066_PLL_RATE( 600000000, 1, 50, 2), 86 RK3066_PLL_RATE( 600000000, 1, 50, 2),
86 RK3066_PLL_RATE( 594000000, 2, 198, 4), 87 RK3066_PLL_RATE_BWADJ(594000000, 1, 198, 8, 1),
87 RK3066_PLL_RATE( 552000000, 1, 46, 2), 88 RK3066_PLL_RATE( 552000000, 1, 46, 2),
88 RK3066_PLL_RATE( 504000000, 1, 84, 4), 89 RK3066_PLL_RATE( 504000000, 1, 84, 4),
90 RK3066_PLL_RATE( 500000000, 3, 125, 2),
89 RK3066_PLL_RATE( 456000000, 1, 76, 4), 91 RK3066_PLL_RATE( 456000000, 1, 76, 4),
90 RK3066_PLL_RATE( 408000000, 1, 68, 4), 92 RK3066_PLL_RATE( 408000000, 1, 68, 4),
93 RK3066_PLL_RATE( 400000000, 3, 100, 2),
91 RK3066_PLL_RATE( 384000000, 2, 128, 4), 94 RK3066_PLL_RATE( 384000000, 2, 128, 4),
92 RK3066_PLL_RATE( 360000000, 1, 60, 4), 95 RK3066_PLL_RATE( 360000000, 1, 60, 4),
93 RK3066_PLL_RATE( 312000000, 1, 52, 4), 96 RK3066_PLL_RATE( 312000000, 1, 52, 4),
@@ -173,14 +176,14 @@ PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu" };
173PNAME(mux_pll_src_cpll_gpll_p) = { "cpll", "gpll" }; 176PNAME(mux_pll_src_cpll_gpll_p) = { "cpll", "gpll" };
174PNAME(mux_pll_src_npll_cpll_gpll_p) = { "npll", "cpll", "gpll" }; 177PNAME(mux_pll_src_npll_cpll_gpll_p) = { "npll", "cpll", "gpll" };
175PNAME(mux_pll_src_cpll_gpll_npll_p) = { "cpll", "gpll", "npll" }; 178PNAME(mux_pll_src_cpll_gpll_npll_p) = { "cpll", "gpll", "npll" };
176PNAME(mux_pll_src_cpll_gpll_usb480m_p) = { "cpll", "gpll", "usb480m" }; 179PNAME(mux_pll_src_cpll_gpll_usb480m_p) = { "cpll", "gpll", "usbphy480m_src" };
180PNAME(mux_pll_src_cpll_gll_usb_npll_p) = { "cpll", "gpll", "usbphy480m_src", "npll" };
177 181
178PNAME(mux_mmc_src_p) = { "cpll", "gpll", "xin24m", "xin24m" }; 182PNAME(mux_mmc_src_p) = { "cpll", "gpll", "xin24m", "xin24m" };
179PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" }; 183PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
180PNAME(mux_i2s_clkout_p) = { "i2s_pre", "xin12m" }; 184PNAME(mux_i2s_clkout_p) = { "i2s_pre", "xin12m" };
181PNAME(mux_spdif_p) = { "spdif_pre", "spdif_frac", "xin12m" }; 185PNAME(mux_spdif_p) = { "spdif_pre", "spdif_frac", "xin12m" };
182PNAME(mux_spdif_8ch_p) = { "spdif_8ch_pre", "spdif_8ch_frac", "xin12m" }; 186PNAME(mux_spdif_8ch_p) = { "spdif_8ch_pre", "spdif_8ch_frac", "xin12m" };
183PNAME(mux_uart0_pll_p) = { "cpll", "gpll", "usbphy_480m_src", "npll" };
184PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; 187PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
185PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; 188PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
186PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; 189PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
@@ -192,22 +195,22 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
192PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" }; 195PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
193PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" }; 196PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
194 197
195PNAME(mux_usbphy480m_p) = { "sclk_otgphy0", "sclk_otgphy1", 198PNAME(mux_usbphy480m_p) = { "sclk_otgphy1", "sclk_otgphy2",
196 "sclk_otgphy2" }; 199 "sclk_otgphy0" };
197PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" }; 200PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
198PNAME(mux_hsicphy12m_p) = { "hsicphy12m_xin12m", "hsicphy12m_usbphy" }; 201PNAME(mux_hsicphy12m_p) = { "hsicphy12m_xin12m", "hsicphy12m_usbphy" };
199 202
200static struct rockchip_pll_clock rk3288_pll_clks[] __initdata = { 203static struct rockchip_pll_clock rk3288_pll_clks[] __initdata = {
201 [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK3288_PLL_CON(0), 204 [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK3288_PLL_CON(0),
202 RK3288_MODE_CON, 0, 6, rk3288_pll_rates), 205 RK3288_MODE_CON, 0, 6, 0, rk3288_pll_rates),
203 [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK3288_PLL_CON(4), 206 [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK3288_PLL_CON(4),
204 RK3288_MODE_CON, 4, 5, NULL), 207 RK3288_MODE_CON, 4, 5, 0, NULL),
205 [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK3288_PLL_CON(8), 208 [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK3288_PLL_CON(8),
206 RK3288_MODE_CON, 8, 7, rk3288_pll_rates), 209 RK3288_MODE_CON, 8, 7, ROCKCHIP_PLL_SYNC_RATE, rk3288_pll_rates),
207 [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK3288_PLL_CON(12), 210 [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK3288_PLL_CON(12),
208 RK3288_MODE_CON, 12, 8, rk3288_pll_rates), 211 RK3288_MODE_CON, 12, 8, ROCKCHIP_PLL_SYNC_RATE, rk3288_pll_rates),
209 [npll] = PLL(pll_rk3066, PLL_NPLL, "npll", mux_pll_p, 0, RK3288_PLL_CON(16), 212 [npll] = PLL(pll_rk3066, PLL_NPLL, "npll", mux_pll_p, 0, RK3288_PLL_CON(16),
210 RK3288_MODE_CON, 14, 9, rk3288_pll_rates), 213 RK3288_MODE_CON, 14, 9, ROCKCHIP_PLL_SYNC_RATE, rk3288_pll_rates),
211}; 214};
212 215
213static struct clk_div_table div_hclk_cpu_t[] = { 216static struct clk_div_table div_hclk_cpu_t[] = {
@@ -226,67 +229,67 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
226 * Clock-Architecture Diagram 1 229 * Clock-Architecture Diagram 1
227 */ 230 */
228 231
229 GATE(0, "apll_core", "apll", 0, 232 GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED,
230 RK3288_CLKGATE_CON(0), 1, GFLAGS), 233 RK3288_CLKGATE_CON(0), 1, GFLAGS),
231 GATE(0, "gpll_core", "gpll", 0, 234 GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED,
232 RK3288_CLKGATE_CON(0), 2, GFLAGS), 235 RK3288_CLKGATE_CON(0), 2, GFLAGS),
233 236
234 COMPOSITE_NOMUX(0, "armcore0", "armclk", 0, 237 COMPOSITE_NOMUX(0, "armcore0", "armclk", CLK_IGNORE_UNUSED,
235 RK3288_CLKSEL_CON(36), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, 238 RK3288_CLKSEL_CON(36), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
236 RK3288_CLKGATE_CON(12), 0, GFLAGS), 239 RK3288_CLKGATE_CON(12), 0, GFLAGS),
237 COMPOSITE_NOMUX(0, "armcore1", "armclk", 0, 240 COMPOSITE_NOMUX(0, "armcore1", "armclk", CLK_IGNORE_UNUSED,
238 RK3288_CLKSEL_CON(36), 4, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, 241 RK3288_CLKSEL_CON(36), 4, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
239 RK3288_CLKGATE_CON(12), 1, GFLAGS), 242 RK3288_CLKGATE_CON(12), 1, GFLAGS),
240 COMPOSITE_NOMUX(0, "armcore2", "armclk", 0, 243 COMPOSITE_NOMUX(0, "armcore2", "armclk", CLK_IGNORE_UNUSED,
241 RK3288_CLKSEL_CON(36), 8, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, 244 RK3288_CLKSEL_CON(36), 8, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
242 RK3288_CLKGATE_CON(12), 2, GFLAGS), 245 RK3288_CLKGATE_CON(12), 2, GFLAGS),
243 COMPOSITE_NOMUX(0, "armcore3", "armclk", 0, 246 COMPOSITE_NOMUX(0, "armcore3", "armclk", CLK_IGNORE_UNUSED,
244 RK3288_CLKSEL_CON(36), 12, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, 247 RK3288_CLKSEL_CON(36), 12, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
245 RK3288_CLKGATE_CON(12), 3, GFLAGS), 248 RK3288_CLKGATE_CON(12), 3, GFLAGS),
246 COMPOSITE_NOMUX(0, "l2ram", "armclk", 0, 249 COMPOSITE_NOMUX(0, "l2ram", "armclk", CLK_IGNORE_UNUSED,
247 RK3288_CLKSEL_CON(37), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY, 250 RK3288_CLKSEL_CON(37), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
248 RK3288_CLKGATE_CON(12), 4, GFLAGS), 251 RK3288_CLKGATE_CON(12), 4, GFLAGS),
249 COMPOSITE_NOMUX(0, "aclk_core_m0", "armclk", 0, 252 COMPOSITE_NOMUX(0, "aclk_core_m0", "armclk", CLK_IGNORE_UNUSED,
250 RK3288_CLKSEL_CON(0), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY, 253 RK3288_CLKSEL_CON(0), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
251 RK3288_CLKGATE_CON(12), 5, GFLAGS), 254 RK3288_CLKGATE_CON(12), 5, GFLAGS),
252 COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", 0, 255 COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
253 RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY, 256 RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
254 RK3288_CLKGATE_CON(12), 6, GFLAGS), 257 RK3288_CLKGATE_CON(12), 6, GFLAGS),
255 COMPOSITE_NOMUX(0, "atclk", "armclk", 0, 258 COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
256 RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY, 259 RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
257 RK3288_CLKGATE_CON(12), 7, GFLAGS), 260 RK3288_CLKGATE_CON(12), 7, GFLAGS),
258 COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", 0, 261 COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
259 RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY, 262 RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
260 RK3288_CLKGATE_CON(12), 8, GFLAGS), 263 RK3288_CLKGATE_CON(12), 8, GFLAGS),
261 GATE(0, "pclk_dbg", "pclk_dbg_pre", 0, 264 GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
262 RK3288_CLKGATE_CON(12), 9, GFLAGS), 265 RK3288_CLKGATE_CON(12), 9, GFLAGS),
263 GATE(0, "cs_dbg", "pclk_dbg_pre", 0, 266 GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
264 RK3288_CLKGATE_CON(12), 10, GFLAGS), 267 RK3288_CLKGATE_CON(12), 10, GFLAGS),
265 GATE(0, "pclk_core_niu", "pclk_dbg_pre", 0, 268 GATE(0, "pclk_core_niu", "pclk_dbg_pre", 0,
266 RK3288_CLKGATE_CON(12), 11, GFLAGS), 269 RK3288_CLKGATE_CON(12), 11, GFLAGS),
267 270
268 GATE(0, "dpll_ddr", "dpll", 0, 271 GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED,
269 RK3288_CLKGATE_CON(0), 8, GFLAGS), 272 RK3288_CLKGATE_CON(0), 8, GFLAGS),
270 GATE(0, "gpll_ddr", "gpll", 0, 273 GATE(0, "gpll_ddr", "gpll", 0,
271 RK3288_CLKGATE_CON(0), 9, GFLAGS), 274 RK3288_CLKGATE_CON(0), 9, GFLAGS),
272 COMPOSITE_NOGATE(0, "ddrphy", mux_ddrphy_p, 0, 275 COMPOSITE_NOGATE(0, "ddrphy", mux_ddrphy_p, CLK_IGNORE_UNUSED,
273 RK3288_CLKSEL_CON(26), 2, 1, MFLAGS, 0, 2, 276 RK3288_CLKSEL_CON(26), 2, 1, MFLAGS, 0, 2,
274 DFLAGS | CLK_DIVIDER_POWER_OF_TWO), 277 DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
275 278
276 GATE(0, "gpll_aclk_cpu", "gpll", 0, 279 GATE(0, "gpll_aclk_cpu", "gpll", CLK_IGNORE_UNUSED,
277 RK3288_CLKGATE_CON(0), 10, GFLAGS), 280 RK3288_CLKGATE_CON(0), 10, GFLAGS),
278 GATE(0, "cpll_aclk_cpu", "cpll", 0, 281 GATE(0, "cpll_aclk_cpu", "cpll", CLK_IGNORE_UNUSED,
279 RK3288_CLKGATE_CON(0), 11, GFLAGS), 282 RK3288_CLKGATE_CON(0), 11, GFLAGS),
280 COMPOSITE_NOGATE(0, "aclk_cpu_src", mux_aclk_cpu_src_p, 0, 283 COMPOSITE_NOGATE(0, "aclk_cpu_src", mux_aclk_cpu_src_p, CLK_IGNORE_UNUSED,
281 RK3288_CLKSEL_CON(1), 15, 1, MFLAGS, 3, 5, DFLAGS), 284 RK3288_CLKSEL_CON(1), 15, 1, MFLAGS, 3, 5, DFLAGS),
282 DIV(0, "aclk_cpu_pre", "aclk_cpu_src", 0, 285 DIV(0, "aclk_cpu_pre", "aclk_cpu_src", CLK_SET_RATE_PARENT,
283 RK3288_CLKSEL_CON(1), 0, 3, DFLAGS), 286 RK3288_CLKSEL_CON(1), 0, 3, DFLAGS),
284 GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_pre", 0, 287 GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
285 RK3288_CLKGATE_CON(0), 3, GFLAGS), 288 RK3288_CLKGATE_CON(0), 3, GFLAGS),
286 COMPOSITE_NOMUX(PCLK_CPU, "pclk_cpu", "aclk_cpu_pre", 0, 289 COMPOSITE_NOMUX(PCLK_CPU, "pclk_cpu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
287 RK3288_CLKSEL_CON(1), 12, 3, DFLAGS, 290 RK3288_CLKSEL_CON(1), 12, 3, DFLAGS,
288 RK3288_CLKGATE_CON(0), 5, GFLAGS), 291 RK3288_CLKGATE_CON(0), 5, GFLAGS),
289 COMPOSITE_NOMUX_DIVTBL(HCLK_CPU, "hclk_cpu", "aclk_cpu_pre", 0, 292 COMPOSITE_NOMUX_DIVTBL(HCLK_CPU, "hclk_cpu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
290 RK3288_CLKSEL_CON(1), 8, 2, DFLAGS, div_hclk_cpu_t, 293 RK3288_CLKSEL_CON(1), 8, 2, DFLAGS, div_hclk_cpu_t,
291 RK3288_CLKGATE_CON(0), 4, GFLAGS), 294 RK3288_CLKGATE_CON(0), 4, GFLAGS),
292 GATE(0, "c2c_host", "aclk_cpu_src", 0, 295 GATE(0, "c2c_host", "aclk_cpu_src", 0,
@@ -294,7 +297,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
294 COMPOSITE_NOMUX(0, "crypto", "aclk_cpu_pre", 0, 297 COMPOSITE_NOMUX(0, "crypto", "aclk_cpu_pre", 0,
295 RK3288_CLKSEL_CON(26), 6, 2, DFLAGS, 298 RK3288_CLKSEL_CON(26), 6, 2, DFLAGS,
296 RK3288_CLKGATE_CON(5), 4, GFLAGS), 299 RK3288_CLKGATE_CON(5), 4, GFLAGS),
297 GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", 0, 300 GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
298 RK3288_CLKGATE_CON(0), 7, GFLAGS), 301 RK3288_CLKGATE_CON(0), 7, GFLAGS),
299 302
300 COMPOSITE(0, "i2s_src", mux_pll_src_cpll_gpll_p, 0, 303 COMPOSITE(0, "i2s_src", mux_pll_src_cpll_gpll_p, 0,
@@ -305,7 +308,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
305 RK3288_CLKGATE_CON(4), 2, GFLAGS), 308 RK3288_CLKGATE_CON(4), 2, GFLAGS),
306 MUX(0, "i2s_pre", mux_i2s_pre_p, CLK_SET_RATE_PARENT, 309 MUX(0, "i2s_pre", mux_i2s_pre_p, CLK_SET_RATE_PARENT,
307 RK3288_CLKSEL_CON(4), 8, 2, MFLAGS), 310 RK3288_CLKSEL_CON(4), 8, 2, MFLAGS),
308 COMPOSITE_NODIV(0, "i2s0_clkout", mux_i2s_clkout_p, CLK_SET_RATE_PARENT, 311 COMPOSITE_NODIV(SCLK_I2S0_OUT, "i2s0_clkout", mux_i2s_clkout_p, 0,
309 RK3288_CLKSEL_CON(4), 12, 1, MFLAGS, 312 RK3288_CLKSEL_CON(4), 12, 1, MFLAGS,
310 RK3288_CLKGATE_CON(4), 0, GFLAGS), 313 RK3288_CLKGATE_CON(4), 0, GFLAGS),
311 GATE(SCLK_I2S0, "sclk_i2s0", "i2s_pre", CLK_SET_RATE_PARENT, 314 GATE(SCLK_I2S0, "sclk_i2s0", "i2s_pre", CLK_SET_RATE_PARENT,
@@ -325,7 +328,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
325 COMPOSITE_NOMUX(0, "spdif_8ch_pre", "spdif_src", 0, 328 COMPOSITE_NOMUX(0, "spdif_8ch_pre", "spdif_src", 0,
326 RK3288_CLKSEL_CON(40), 0, 7, DFLAGS, 329 RK3288_CLKSEL_CON(40), 0, 7, DFLAGS,
327 RK3288_CLKGATE_CON(4), 7, GFLAGS), 330 RK3288_CLKGATE_CON(4), 7, GFLAGS),
328 COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_src", 0, 331 COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_pre", 0,
329 RK3288_CLKSEL_CON(41), 0, 332 RK3288_CLKSEL_CON(41), 0,
330 RK3288_CLKGATE_CON(4), 8, GFLAGS), 333 RK3288_CLKGATE_CON(4), 8, GFLAGS),
331 COMPOSITE_NODIV(SCLK_SPDIF8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0, 334 COMPOSITE_NODIV(SCLK_SPDIF8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
@@ -373,12 +376,12 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
373 GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0, 376 GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0,
374 RK3288_CLKGATE_CON(9), 1, GFLAGS), 377 RK3288_CLKGATE_CON(9), 1, GFLAGS),
375 378
376 COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, 0, 379 COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
377 RK3288_CLKSEL_CON(31), 6, 2, MFLAGS, 0, 5, DFLAGS, 380 RK3288_CLKSEL_CON(31), 6, 2, MFLAGS, 0, 5, DFLAGS,
378 RK3288_CLKGATE_CON(3), 0, GFLAGS), 381 RK3288_CLKGATE_CON(3), 0, GFLAGS),
379 DIV(0, "hclk_vio", "aclk_vio0", 0, 382 DIV(0, "hclk_vio", "aclk_vio0", 0,
380 RK3288_CLKSEL_CON(28), 8, 5, DFLAGS), 383 RK3288_CLKSEL_CON(28), 8, 5, DFLAGS),
381 COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, 0, 384 COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
382 RK3288_CLKSEL_CON(31), 14, 2, MFLAGS, 8, 5, DFLAGS, 385 RK3288_CLKSEL_CON(31), 14, 2, MFLAGS, 8, 5, DFLAGS,
383 RK3288_CLKGATE_CON(3), 2, GFLAGS), 386 RK3288_CLKGATE_CON(3), 2, GFLAGS),
384 387
@@ -436,24 +439,24 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
436 439
437 DIV(0, "pclk_pd_alive", "gpll", 0, 440 DIV(0, "pclk_pd_alive", "gpll", 0,
438 RK3288_CLKSEL_CON(33), 8, 5, DFLAGS), 441 RK3288_CLKSEL_CON(33), 8, 5, DFLAGS),
439 COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", 0, 442 COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", CLK_IGNORE_UNUSED,
440 RK3288_CLKSEL_CON(33), 0, 5, DFLAGS, 443 RK3288_CLKSEL_CON(33), 0, 5, DFLAGS,
441 RK3288_CLKGATE_CON(5), 8, GFLAGS), 444 RK3288_CLKGATE_CON(5), 8, GFLAGS),
442 445
443 COMPOSITE(SCLK_GPU, "sclk_gpu", mux_pll_src_cpll_gpll_usb480m_p, 0, 446 COMPOSITE(SCLK_GPU, "sclk_gpu", mux_pll_src_cpll_gll_usb_npll_p, 0,
444 RK3288_CLKSEL_CON(34), 6, 2, MFLAGS, 0, 5, DFLAGS, 447 RK3288_CLKSEL_CON(34), 6, 2, MFLAGS, 0, 5, DFLAGS,
445 RK3288_CLKGATE_CON(5), 7, GFLAGS), 448 RK3288_CLKGATE_CON(5), 7, GFLAGS),
446 449
447 COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, 0, 450 COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
448 RK3288_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 5, DFLAGS, 451 RK3288_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 5, DFLAGS,
449 RK3288_CLKGATE_CON(2), 0, GFLAGS), 452 RK3288_CLKGATE_CON(2), 0, GFLAGS),
450 COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0, 453 COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0,
451 RK3288_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, 454 RK3288_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
452 RK3288_CLKGATE_CON(2), 3, GFLAGS), 455 RK3288_CLKGATE_CON(2), 3, GFLAGS),
453 COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", 0, 456 COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
454 RK3288_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO, 457 RK3288_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
455 RK3288_CLKGATE_CON(2), 2, GFLAGS), 458 RK3288_CLKGATE_CON(2), 2, GFLAGS),
456 GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 0, 459 GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
457 RK3288_CLKGATE_CON(2), 1, GFLAGS), 460 RK3288_CLKGATE_CON(2), 1, GFLAGS),
458 461
459 /* 462 /*
@@ -483,6 +486,18 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
483 RK3288_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 6, DFLAGS, 486 RK3288_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 6, DFLAGS,
484 RK3288_CLKGATE_CON(13), 3, GFLAGS), 487 RK3288_CLKGATE_CON(13), 3, GFLAGS),
485 488
489 MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK3288_SDMMC_CON0, 1),
490 MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3288_SDMMC_CON1, 0),
491
492 MMC(SCLK_SDIO0_DRV, "sdio0_drv", "sclk_sdio0", RK3288_SDIO0_CON0, 1),
493 MMC(SCLK_SDIO0_SAMPLE, "sdio0_sample", "sclk_sdio0", RK3288_SDIO0_CON1, 0),
494
495 MMC(SCLK_SDIO1_DRV, "sdio1_drv", "sclk_sdio1", RK3288_SDIO1_CON0, 1),
496 MMC(SCLK_SDIO1_SAMPLE, "sdio1_sample", "sclk_sdio1", RK3288_SDIO1_CON1, 0),
497
498 MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK3288_EMMC_CON0, 1),
499 MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3288_EMMC_CON1, 0),
500
486 COMPOSITE(0, "sclk_tspout", mux_tspout_p, 0, 501 COMPOSITE(0, "sclk_tspout", mux_tspout_p, 0,
487 RK3288_CLKSEL_CON(35), 14, 2, MFLAGS, 8, 5, DFLAGS, 502 RK3288_CLKSEL_CON(35), 14, 2, MFLAGS, 8, 5, DFLAGS,
488 RK3288_CLKGATE_CON(4), 11, GFLAGS), 503 RK3288_CLKGATE_CON(4), 11, GFLAGS),
@@ -490,13 +505,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
490 RK3288_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS, 505 RK3288_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
491 RK3288_CLKGATE_CON(4), 10, GFLAGS), 506 RK3288_CLKGATE_CON(4), 10, GFLAGS),
492 507
493 GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", 0, 508 GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
494 RK3288_CLKGATE_CON(13), 4, GFLAGS), 509 RK3288_CLKGATE_CON(13), 4, GFLAGS),
495 GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", 0, 510 GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
496 RK3288_CLKGATE_CON(13), 5, GFLAGS), 511 RK3288_CLKGATE_CON(13), 5, GFLAGS),
497 GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", 0, 512 GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", CLK_IGNORE_UNUSED,
498 RK3288_CLKGATE_CON(13), 6, GFLAGS), 513 RK3288_CLKGATE_CON(13), 6, GFLAGS),
499 GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", 0, 514 GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", CLK_IGNORE_UNUSED,
500 RK3288_CLKGATE_CON(13), 7, GFLAGS), 515 RK3288_CLKGATE_CON(13), 7, GFLAGS),
501 516
502 COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin32k", 0, 517 COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin32k", 0,
@@ -517,7 +532,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
517 RK3288_CLKSEL_CON(38), 15, 1, MFLAGS, 8, 5, DFLAGS, 532 RK3288_CLKSEL_CON(38), 15, 1, MFLAGS, 8, 5, DFLAGS,
518 RK3288_CLKGATE_CON(5), 6, GFLAGS), 533 RK3288_CLKGATE_CON(5), 6, GFLAGS),
519 534
520 COMPOSITE(0, "uart0_src", mux_uart0_pll_p, 0, 535 COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gll_usb_npll_p, 0,
521 RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS, 536 RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS,
522 RK3288_CLKGATE_CON(1), 8, GFLAGS), 537 RK3288_CLKGATE_CON(1), 8, GFLAGS),
523 COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0, 538 COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0,
@@ -585,7 +600,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
585 600
586 COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0, 601 COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0,
587 RK3288_CLKSEL_CON(13), 11, 2, MFLAGS, 602 RK3288_CLKSEL_CON(13), 11, 2, MFLAGS,
588 RK3288_CLKGATE_CON(5), 15, GFLAGS), 603 RK3288_CLKGATE_CON(5), 14, GFLAGS),
589 COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0, 604 COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
590 RK3288_CLKSEL_CON(29), 0, 2, MFLAGS, 605 RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
591 RK3288_CLKGATE_CON(3), 6, GFLAGS), 606 RK3288_CLKGATE_CON(3), 6, GFLAGS),
@@ -601,19 +616,19 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
601 */ 616 */
602 617
603 /* aclk_cpu gates */ 618 /* aclk_cpu gates */
604 GATE(0, "sclk_intmem0", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 5, GFLAGS), 619 GATE(0, "sclk_intmem0", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 5, GFLAGS),
605 GATE(0, "sclk_intmem1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 6, GFLAGS), 620 GATE(0, "sclk_intmem1", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 6, GFLAGS),
606 GATE(0, "sclk_intmem2", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 7, GFLAGS), 621 GATE(0, "sclk_intmem2", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 7, GFLAGS),
607 GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 12, GFLAGS), 622 GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 12, GFLAGS),
608 GATE(0, "aclk_strc_sys", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 13, GFLAGS), 623 GATE(0, "aclk_strc_sys", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 13, GFLAGS),
609 GATE(0, "aclk_intmem", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 4, GFLAGS), 624 GATE(0, "aclk_intmem", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 4, GFLAGS),
610 GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 6, GFLAGS), 625 GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 6, GFLAGS),
611 GATE(0, "aclk_ccp", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 8, GFLAGS), 626 GATE(0, "aclk_ccp", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 8, GFLAGS),
612 627
613 /* hclk_cpu gates */ 628 /* hclk_cpu gates */
614 GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_cpu", 0, RK3288_CLKGATE_CON(11), 7, GFLAGS), 629 GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_cpu", 0, RK3288_CLKGATE_CON(11), 7, GFLAGS),
615 GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 8, GFLAGS), 630 GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 8, GFLAGS),
616 GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 9, GFLAGS), 631 GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 9, GFLAGS),
617 GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 10, GFLAGS), 632 GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 10, GFLAGS),
618 GATE(HCLK_SPDIF8CH, "hclk_spdif_8ch", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 11, GFLAGS), 633 GATE(HCLK_SPDIF8CH, "hclk_spdif_8ch", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 11, GFLAGS),
619 634
@@ -622,42 +637,42 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
622 GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS), 637 GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS),
623 GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS), 638 GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS),
624 GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS), 639 GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS),
625 GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS), 640 GATE(PCLK_DDRUPCTL0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS),
626 GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS), 641 GATE(PCLK_PUBL0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS),
627 GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS), 642 GATE(PCLK_DDRUPCTL1, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS),
628 GATE(0, "pclk_publ1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 1, GFLAGS), 643 GATE(PCLK_PUBL1, "pclk_publ1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 1, GFLAGS),
629 GATE(0, "pclk_efuse_1024", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 2, GFLAGS), 644 GATE(0, "pclk_efuse_1024", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 2, GFLAGS),
630 GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS), 645 GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
631 GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS), 646 GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
632 GATE(0, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS), 647 GATE(0, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
633 GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS), 648 GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
634 649
635 /* ddrctrl [DDR Controller PHY clock] gates */ 650 /* ddrctrl [DDR Controller PHY clock] gates */
636 GATE(0, "nclk_ddrupctl0", "ddrphy", 0, RK3288_CLKGATE_CON(11), 4, GFLAGS), 651 GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
637 GATE(0, "nclk_ddrupctl1", "ddrphy", 0, RK3288_CLKGATE_CON(11), 5, GFLAGS), 652 GATE(0, "nclk_ddrupctl1", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 5, GFLAGS),
638 653
639 /* ddrphy gates */ 654 /* ddrphy gates */
640 GATE(0, "sclk_ddrphy0", "ddrphy", 0, RK3288_CLKGATE_CON(4), 12, GFLAGS), 655 GATE(0, "sclk_ddrphy0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(4), 12, GFLAGS),
641 GATE(0, "sclk_ddrphy1", "ddrphy", 0, RK3288_CLKGATE_CON(4), 13, GFLAGS), 656 GATE(0, "sclk_ddrphy1", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(4), 13, GFLAGS),
642 657
643 /* aclk_peri gates */ 658 /* aclk_peri gates */
644 GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 2, GFLAGS), 659 GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 2, GFLAGS),
645 GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 3, GFLAGS), 660 GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 3, GFLAGS),
646 GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK3288_CLKGATE_CON(7), 11, GFLAGS), 661 GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 11, GFLAGS),
647 GATE(ACLK_MMU, "aclk_mmu", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 12, GFLAGS), 662 GATE(ACLK_MMU, "aclk_mmu", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(8), 12, GFLAGS),
648 GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 0, GFLAGS), 663 GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 0, GFLAGS),
649 GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 2, GFLAGS), 664 GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 2, GFLAGS),
650 665
651 /* hclk_peri gates */ 666 /* hclk_peri gates */
652 GATE(0, "hclk_peri_matrix", "hclk_peri", 0, RK3288_CLKGATE_CON(6), 0, GFLAGS), 667 GATE(0, "hclk_peri_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 0, GFLAGS),
653 GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 4, GFLAGS), 668 GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 4, GFLAGS),
654 GATE(HCLK_USBHOST0, "hclk_host0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 6, GFLAGS), 669 GATE(HCLK_USBHOST0, "hclk_host0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 6, GFLAGS),
655 GATE(HCLK_USBHOST1, "hclk_host1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 7, GFLAGS), 670 GATE(HCLK_USBHOST1, "hclk_host1", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 7, GFLAGS),
656 GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 8, GFLAGS), 671 GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 8, GFLAGS),
657 GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 9, GFLAGS), 672 GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 9, GFLAGS),
658 GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 10, GFLAGS), 673 GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 10, GFLAGS),
659 GATE(0, "hclk_emem", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 12, GFLAGS), 674 GATE(0, "hclk_emem", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 12, GFLAGS),
660 GATE(0, "hclk_mem", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 13, GFLAGS), 675 GATE(0, "hclk_mem", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 13, GFLAGS),
661 GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 14, GFLAGS), 676 GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 14, GFLAGS),
662 GATE(HCLK_NANDC1, "hclk_nandc1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 15, GFLAGS), 677 GATE(HCLK_NANDC1, "hclk_nandc1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 15, GFLAGS),
663 GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 8, GFLAGS), 678 GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 8, GFLAGS),
@@ -669,7 +684,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
669 GATE(0, "pmu_hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 5, GFLAGS), 684 GATE(0, "pmu_hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 5, GFLAGS),
670 685
671 /* pclk_peri gates */ 686 /* pclk_peri gates */
672 GATE(0, "pclk_peri_matrix", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 1, GFLAGS), 687 GATE(0, "pclk_peri_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 1, GFLAGS),
673 GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 4, GFLAGS), 688 GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 4, GFLAGS),
674 GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 5, GFLAGS), 689 GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 5, GFLAGS),
675 GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 6, GFLAGS), 690 GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 6, GFLAGS),
@@ -705,48 +720,48 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
705 GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 4, GFLAGS), 720 GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 4, GFLAGS),
706 GATE(PCLK_GPIO5, "pclk_gpio5", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 5, GFLAGS), 721 GATE(PCLK_GPIO5, "pclk_gpio5", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 5, GFLAGS),
707 GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 6, GFLAGS), 722 GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 6, GFLAGS),
708 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 11, GFLAGS), 723 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(14), 11, GFLAGS),
709 GATE(0, "pclk_alive_niu", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 12, GFLAGS), 724 GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(14), 12, GFLAGS),
710 725
711 /* pclk_pd_pmu gates */ 726 /* pclk_pd_pmu gates */
712 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 0, GFLAGS), 727 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 0, GFLAGS),
713 GATE(0, "pclk_intmem1", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 1, GFLAGS), 728 GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 1, GFLAGS),
714 GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 2, GFLAGS), 729 GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 2, GFLAGS),
715 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 3, GFLAGS), 730 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 3, GFLAGS),
716 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 4, GFLAGS), 731 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 4, GFLAGS),
717 732
718 /* hclk_vio gates */ 733 /* hclk_vio gates */
719 GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 1, GFLAGS), 734 GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 1, GFLAGS),
720 GATE(HCLK_VOP0, "hclk_vop0", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 6, GFLAGS), 735 GATE(HCLK_VOP0, "hclk_vop0", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 6, GFLAGS),
721 GATE(HCLK_VOP1, "hclk_vop1", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 8, GFLAGS), 736 GATE(HCLK_VOP1, "hclk_vop1", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 8, GFLAGS),
722 GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 9, GFLAGS), 737 GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 9, GFLAGS),
723 GATE(HCLK_VIO_NIU, "hclk_vio_niu", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 10, GFLAGS), 738 GATE(HCLK_VIO_NIU, "hclk_vio_niu", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 10, GFLAGS),
724 GATE(HCLK_VIP, "hclk_vip", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 15, GFLAGS), 739 GATE(HCLK_VIP, "hclk_vip", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 15, GFLAGS),
725 GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 3, GFLAGS), 740 GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 3, GFLAGS),
726 GATE(HCLK_ISP, "hclk_isp", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 1, GFLAGS), 741 GATE(HCLK_ISP, "hclk_isp", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 1, GFLAGS),
727 GATE(HCLK_VIO2_H2P, "hclk_vio2_h2p", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 10, GFLAGS), 742 GATE(HCLK_VIO2_H2P, "hclk_vio2_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(16), 10, GFLAGS),
728 GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 4, GFLAGS), 743 GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 4, GFLAGS),
729 GATE(PCLK_MIPI_DSI1, "pclk_mipi_dsi1", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 5, GFLAGS), 744 GATE(PCLK_MIPI_DSI1, "pclk_mipi_dsi1", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 5, GFLAGS),
730 GATE(PCLK_MIPI_CSI, "pclk_mipi_csi", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 6, GFLAGS), 745 GATE(PCLK_MIPI_CSI, "pclk_mipi_csi", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 6, GFLAGS),
731 GATE(PCLK_LVDS_PHY, "pclk_lvds_phy", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 7, GFLAGS), 746 GATE(PCLK_LVDS_PHY, "pclk_lvds_phy", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 7, GFLAGS),
732 GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 8, GFLAGS), 747 GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(16), 8, GFLAGS),
733 GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 9, GFLAGS), 748 GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 9, GFLAGS),
734 GATE(PCLK_VIO2_H2P, "pclk_vio2_h2p", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 11, GFLAGS), 749 GATE(PCLK_VIO2_H2P, "pclk_vio2_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(16), 11, GFLAGS),
735 750
736 /* aclk_vio0 gates */ 751 /* aclk_vio0 gates */
737 GATE(ACLK_VOP0, "aclk_vop0", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 5, GFLAGS), 752 GATE(ACLK_VOP0, "aclk_vop0", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 5, GFLAGS),
738 GATE(ACLK_IEP, "aclk_iep", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 2, GFLAGS), 753 GATE(ACLK_IEP, "aclk_iep", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 2, GFLAGS),
739 GATE(ACLK_VIO0_NIU, "aclk_vio0_niu", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 11, GFLAGS), 754 GATE(ACLK_VIO0_NIU, "aclk_vio0_niu", "aclk_vio0", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 11, GFLAGS),
740 GATE(ACLK_VIP, "aclk_vip", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 14, GFLAGS), 755 GATE(ACLK_VIP, "aclk_vip", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 14, GFLAGS),
741 756
742 /* aclk_vio1 gates */ 757 /* aclk_vio1 gates */
743 GATE(ACLK_VOP1, "aclk_vop1", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 7, GFLAGS), 758 GATE(ACLK_VOP1, "aclk_vop1", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 7, GFLAGS),
744 GATE(ACLK_ISP, "aclk_isp", "aclk_vio1", 0, RK3288_CLKGATE_CON(16), 2, GFLAGS), 759 GATE(ACLK_ISP, "aclk_isp", "aclk_vio1", 0, RK3288_CLKGATE_CON(16), 2, GFLAGS),
745 GATE(ACLK_VIO1_NIU, "aclk_vio1_niu", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 12, GFLAGS), 760 GATE(ACLK_VIO1_NIU, "aclk_vio1_niu", "aclk_vio1", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 12, GFLAGS),
746 761
747 /* aclk_rga_pre gates */ 762 /* aclk_rga_pre gates */
748 GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 0, GFLAGS), 763 GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 0, GFLAGS),
749 GATE(ACLK_RGA_NIU, "aclk_rga_niu", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 13, GFLAGS), 764 GATE(ACLK_RGA_NIU, "aclk_rga_niu", "aclk_rga_pre", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 13, GFLAGS),
750 765
751 /* 766 /*
752 * Other ungrouped clocks. 767 * Other ungrouped clocks.
@@ -762,6 +777,64 @@ static const char *rk3288_critical_clocks[] __initconst = {
762 "hclk_peri", 777 "hclk_peri",
763}; 778};
764 779
780#ifdef CONFIG_PM_SLEEP
781static void __iomem *rk3288_cru_base;
782
783/* Some CRU registers will be reset in maskrom when the system
784 * wakes up from fastboot.
785 * So save them before suspend, restore them after resume.
786 */
787static const int rk3288_saved_cru_reg_ids[] = {
788 RK3288_MODE_CON,
789 RK3288_CLKSEL_CON(0),
790 RK3288_CLKSEL_CON(1),
791 RK3288_CLKSEL_CON(10),
792 RK3288_CLKSEL_CON(33),
793 RK3288_CLKSEL_CON(37),
794};
795
796static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
797
798static int rk3288_clk_suspend(void)
799{
800 int i, reg_id;
801
802 for (i = 0; i < ARRAY_SIZE(rk3288_saved_cru_reg_ids); i++) {
803 reg_id = rk3288_saved_cru_reg_ids[i];
804
805 rk3288_saved_cru_regs[i] =
806 readl_relaxed(rk3288_cru_base + reg_id);
807 }
808 return 0;
809}
810
811static void rk3288_clk_resume(void)
812{
813 int i, reg_id;
814
815 for (i = ARRAY_SIZE(rk3288_saved_cru_reg_ids) - 1; i >= 0; i--) {
816 reg_id = rk3288_saved_cru_reg_ids[i];
817
818 writel_relaxed(rk3288_saved_cru_regs[i] | 0xffff0000,
819 rk3288_cru_base + reg_id);
820 }
821}
822
823static struct syscore_ops rk3288_clk_syscore_ops = {
824 .suspend = rk3288_clk_suspend,
825 .resume = rk3288_clk_resume,
826};
827
828static void rk3288_clk_sleep_init(void __iomem *reg_base)
829{
830 rk3288_cru_base = reg_base;
831 register_syscore_ops(&rk3288_clk_syscore_ops);
832}
833
834#else /* CONFIG_PM_SLEEP */
835static void rk3288_clk_sleep_init(void __iomem *reg_base) {}
836#endif
837
765static void __init rk3288_clk_init(struct device_node *np) 838static void __init rk3288_clk_init(struct device_node *np)
766{ 839{
767 void __iomem *reg_base; 840 void __iomem *reg_base;
@@ -810,5 +883,6 @@ static void __init rk3288_clk_init(struct device_node *np)
810 ROCKCHIP_SOFTRST_HIWORD_MASK); 883 ROCKCHIP_SOFTRST_HIWORD_MASK);
811 884
812 rockchip_register_restart_notifier(RK3288_GLB_SRST_FST); 885 rockchip_register_restart_notifier(RK3288_GLB_SRST_FST);
886 rk3288_clk_sleep_init(reg_base);
813} 887}
814CLK_OF_DECLARE(rk3288_cru, "rockchip,rk3288-cru", rk3288_clk_init); 888CLK_OF_DECLARE(rk3288_cru, "rockchip,rk3288-cru", rk3288_clk_init);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 880a266f0143..20e05bbb3a67 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -197,7 +197,8 @@ void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
197 list->parent_names, list->num_parents, 197 list->parent_names, list->num_parents,
198 reg_base, list->con_offset, grf_lock_offset, 198 reg_base, list->con_offset, grf_lock_offset,
199 list->lock_shift, list->mode_offset, 199 list->lock_shift, list->mode_offset,
200 list->mode_shift, list->rate_table, &clk_lock); 200 list->mode_shift, list->rate_table,
201 list->pll_flags, &clk_lock);
201 if (IS_ERR(clk)) { 202 if (IS_ERR(clk)) {
202 pr_err("%s: failed to register clock %s\n", __func__, 203 pr_err("%s: failed to register clock %s\n", __func__,
203 list->name); 204 list->name);
@@ -244,9 +245,6 @@ void __init rockchip_clk_register_branches(
244 list->div_flags, &clk_lock); 245 list->div_flags, &clk_lock);
245 break; 246 break;
246 case branch_fraction_divider: 247 case branch_fraction_divider:
247 /* keep all gates untouched for now */
248 flags |= CLK_IGNORE_UNUSED;
249
250 clk = rockchip_clk_register_frac_branch(list->name, 248 clk = rockchip_clk_register_frac_branch(list->name,
251 list->parent_names, list->num_parents, 249 list->parent_names, list->num_parents,
252 reg_base, list->muxdiv_offset, list->div_flags, 250 reg_base, list->muxdiv_offset, list->div_flags,
@@ -256,18 +254,12 @@ void __init rockchip_clk_register_branches(
256 case branch_gate: 254 case branch_gate:
257 flags |= CLK_SET_RATE_PARENT; 255 flags |= CLK_SET_RATE_PARENT;
258 256
259 /* keep all gates untouched for now */
260 flags |= CLK_IGNORE_UNUSED;
261
262 clk = clk_register_gate(NULL, list->name, 257 clk = clk_register_gate(NULL, list->name,
263 list->parent_names[0], flags, 258 list->parent_names[0], flags,
264 reg_base + list->gate_offset, 259 reg_base + list->gate_offset,
265 list->gate_shift, list->gate_flags, &clk_lock); 260 list->gate_shift, list->gate_flags, &clk_lock);
266 break; 261 break;
267 case branch_composite: 262 case branch_composite:
268 /* keep all gates untouched for now */
269 flags |= CLK_IGNORE_UNUSED;
270
271 clk = rockchip_clk_register_branch(list->name, 263 clk = rockchip_clk_register_branch(list->name,
272 list->parent_names, list->num_parents, 264 list->parent_names, list->num_parents,
273 reg_base, list->muxdiv_offset, list->mux_shift, 265 reg_base, list->muxdiv_offset, list->mux_shift,
@@ -277,6 +269,14 @@ void __init rockchip_clk_register_branches(
277 list->gate_offset, list->gate_shift, 269 list->gate_offset, list->gate_shift,
278 list->gate_flags, flags, &clk_lock); 270 list->gate_flags, flags, &clk_lock);
279 break; 271 break;
272 case branch_mmc:
273 clk = rockchip_clk_register_mmc(
274 list->name,
275 list->parent_names, list->num_parents,
276 reg_base + list->muxdiv_offset,
277 list->div_shift
278 );
279 break;
280 } 280 }
281 281
282 /* none of the cases above matched */ 282 /* none of the cases above matched */
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index ca009ab0a33a..58d2e3bdf22f 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -48,6 +48,14 @@
48#define RK3288_GLB_SRST_SND 0x1b4 48#define RK3288_GLB_SRST_SND 0x1b4
49#define RK3288_SOFTRST_CON(x) (x * 0x4 + 0x1b8) 49#define RK3288_SOFTRST_CON(x) (x * 0x4 + 0x1b8)
50#define RK3288_MISC_CON 0x1e8 50#define RK3288_MISC_CON 0x1e8
51#define RK3288_SDMMC_CON0 0x200
52#define RK3288_SDMMC_CON1 0x204
53#define RK3288_SDIO0_CON0 0x208
54#define RK3288_SDIO0_CON1 0x20c
55#define RK3288_SDIO1_CON0 0x210
56#define RK3288_SDIO1_CON1 0x214
57#define RK3288_EMMC_CON0 0x218
58#define RK3288_EMMC_CON1 0x21c
51 59
52enum rockchip_pll_type { 60enum rockchip_pll_type {
53 pll_rk3066, 61 pll_rk3066,
@@ -62,6 +70,15 @@ enum rockchip_pll_type {
62 .bwadj = (_nf >> 1), \ 70 .bwadj = (_nf >> 1), \
63} 71}
64 72
73#define RK3066_PLL_RATE_BWADJ(_rate, _nr, _nf, _no, _bw) \
74{ \
75 .rate = _rate##U, \
76 .nr = _nr, \
77 .nf = _nf, \
78 .no = _no, \
79 .bwadj = _bw, \
80}
81
65struct rockchip_pll_rate_table { 82struct rockchip_pll_rate_table {
66 unsigned long rate; 83 unsigned long rate;
67 unsigned int nr; 84 unsigned int nr;
@@ -81,7 +98,12 @@ struct rockchip_pll_rate_table {
81 * @mode_shift: offset inside the mode-register for the mode of this pll. 98 * @mode_shift: offset inside the mode-register for the mode of this pll.
82 * @lock_shift: offset inside the lock register for the lock status. 99 * @lock_shift: offset inside the lock register for the lock status.
83 * @type: Type of PLL to be registered. 100 * @type: Type of PLL to be registered.
101 * @pll_flags: hardware-specific flags
84 * @rate_table: Table of usable pll rates 102 * @rate_table: Table of usable pll rates
103 *
104 * Flags:
105 * ROCKCHIP_PLL_SYNC_RATE - check rate parameters to match against the
106 * rate_table parameters and ajust them if necessary.
85 */ 107 */
86struct rockchip_pll_clock { 108struct rockchip_pll_clock {
87 unsigned int id; 109 unsigned int id;
@@ -94,11 +116,14 @@ struct rockchip_pll_clock {
94 int mode_shift; 116 int mode_shift;
95 int lock_shift; 117 int lock_shift;
96 enum rockchip_pll_type type; 118 enum rockchip_pll_type type;
119 u8 pll_flags;
97 struct rockchip_pll_rate_table *rate_table; 120 struct rockchip_pll_rate_table *rate_table;
98}; 121};
99 122
123#define ROCKCHIP_PLL_SYNC_RATE BIT(0)
124
100#define PLL(_type, _id, _name, _pnames, _flags, _con, _mode, _mshift, \ 125#define PLL(_type, _id, _name, _pnames, _flags, _con, _mode, _mshift, \
101 _lshift, _rtable) \ 126 _lshift, _pflags, _rtable) \
102 { \ 127 { \
103 .id = _id, \ 128 .id = _id, \
104 .type = _type, \ 129 .type = _type, \
@@ -110,6 +135,7 @@ struct rockchip_pll_clock {
110 .mode_offset = _mode, \ 135 .mode_offset = _mode, \
111 .mode_shift = _mshift, \ 136 .mode_shift = _mshift, \
112 .lock_shift = _lshift, \ 137 .lock_shift = _lshift, \
138 .pll_flags = _pflags, \
113 .rate_table = _rtable, \ 139 .rate_table = _rtable, \
114 } 140 }
115 141
@@ -118,7 +144,7 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
118 void __iomem *base, int con_offset, int grf_lock_offset, 144 void __iomem *base, int con_offset, int grf_lock_offset,
119 int lock_shift, int reg_mode, int mode_shift, 145 int lock_shift, int reg_mode, int mode_shift,
120 struct rockchip_pll_rate_table *rate_table, 146 struct rockchip_pll_rate_table *rate_table,
121 spinlock_t *lock); 147 u8 clk_pll_flags, spinlock_t *lock);
122 148
123struct rockchip_cpuclk_clksel { 149struct rockchip_cpuclk_clksel {
124 int reg; 150 int reg;
@@ -152,6 +178,10 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
152 const struct rockchip_cpuclk_rate_table *rates, 178 const struct rockchip_cpuclk_rate_table *rates,
153 int nrates, void __iomem *reg_base, spinlock_t *lock); 179 int nrates, void __iomem *reg_base, spinlock_t *lock);
154 180
181struct clk *rockchip_clk_register_mmc(const char *name,
182 const char **parent_names, u8 num_parents,
183 void __iomem *reg, int shift);
184
155#define PNAME(x) static const char *x[] __initconst 185#define PNAME(x) static const char *x[] __initconst
156 186
157enum rockchip_clk_branch_type { 187enum rockchip_clk_branch_type {
@@ -160,6 +190,7 @@ enum rockchip_clk_branch_type {
160 branch_divider, 190 branch_divider,
161 branch_fraction_divider, 191 branch_fraction_divider,
162 branch_gate, 192 branch_gate,
193 branch_mmc,
163}; 194};
164 195
165struct rockchip_clk_branch { 196struct rockchip_clk_branch {
@@ -352,6 +383,16 @@ struct rockchip_clk_branch {
352 .gate_flags = gf, \ 383 .gate_flags = gf, \
353 } 384 }
354 385
386#define MMC(_id, cname, pname, offset, shift) \
387 { \
388 .id = _id, \
389 .branch_type = branch_mmc, \
390 .name = cname, \
391 .parent_names = (const char *[]){ pname }, \
392 .num_parents = 1, \
393 .muxdiv_offset = offset, \
394 .div_shift = shift, \
395 }
355 396
356void rockchip_clk_init(struct device_node *np, void __iomem *base, 397void rockchip_clk_init(struct device_node *np, void __iomem *base,
357 unsigned long nr_clks); 398 unsigned long nr_clks);
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 6fb4bc602e8a..006c6f294310 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -5,6 +5,7 @@
5obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o 5obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o
6obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o 6obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o
7obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o 7obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o
8obj-$(CONFIG_SOC_EXYNOS4415) += clk-exynos4415.o
8obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o 9obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
9obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o 10obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
10obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o 11obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
12obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o 13obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
13obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o 14obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
14obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o 15obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
16obj-$(CONFIG_ARCH_EXYNOS7) += clk-exynos7.o
15obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o 17obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
16obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o 18obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
17obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o 19obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index acce708ace18..f2c2ccce49bb 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -29,6 +29,13 @@ static DEFINE_SPINLOCK(lock);
29static struct clk **clk_table; 29static struct clk **clk_table;
30static void __iomem *reg_base; 30static void __iomem *reg_base;
31static struct clk_onecell_data clk_data; 31static struct clk_onecell_data clk_data;
32/*
33 * On Exynos5420 this will be a clock which has to be enabled before any
34 * access to audss registers. Typically a child of EPLL.
35 *
36 * On other platforms this will be -ENODEV.
37 */
38static struct clk *epll;
32 39
33#define ASS_CLK_SRC 0x0 40#define ASS_CLK_SRC 0x0
34#define ASS_CLK_DIV 0x4 41#define ASS_CLK_DIV 0x4
@@ -98,6 +105,8 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
98 dev_err(&pdev->dev, "failed to map audss registers\n"); 105 dev_err(&pdev->dev, "failed to map audss registers\n");
99 return PTR_ERR(reg_base); 106 return PTR_ERR(reg_base);
100 } 107 }
108 /* EPLL don't have to be enabled for boards other than Exynos5420 */
109 epll = ERR_PTR(-ENODEV);
101 110
102 clk_table = devm_kzalloc(&pdev->dev, 111 clk_table = devm_kzalloc(&pdev->dev,
103 sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS, 112 sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
@@ -115,8 +124,20 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
115 pll_in = devm_clk_get(&pdev->dev, "pll_in"); 124 pll_in = devm_clk_get(&pdev->dev, "pll_in");
116 if (!IS_ERR(pll_ref)) 125 if (!IS_ERR(pll_ref))
117 mout_audss_p[0] = __clk_get_name(pll_ref); 126 mout_audss_p[0] = __clk_get_name(pll_ref);
118 if (!IS_ERR(pll_in)) 127 if (!IS_ERR(pll_in)) {
119 mout_audss_p[1] = __clk_get_name(pll_in); 128 mout_audss_p[1] = __clk_get_name(pll_in);
129
130 if (variant == TYPE_EXYNOS5420) {
131 epll = pll_in;
132
133 ret = clk_prepare_enable(epll);
134 if (ret) {
135 dev_err(&pdev->dev,
136 "failed to prepare the epll clock\n");
137 return ret;
138 }
139 }
140 }
120 clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss", 141 clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
121 mout_audss_p, ARRAY_SIZE(mout_audss_p), 142 mout_audss_p, ARRAY_SIZE(mout_audss_p),
122 CLK_SET_RATE_NO_REPARENT, 143 CLK_SET_RATE_NO_REPARENT,
@@ -203,6 +224,9 @@ unregister:
203 clk_unregister(clk_table[i]); 224 clk_unregister(clk_table[i]);
204 } 225 }
205 226
227 if (!IS_ERR(epll))
228 clk_disable_unprepare(epll);
229
206 return ret; 230 return ret;
207} 231}
208 232
@@ -210,6 +234,10 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
210{ 234{
211 int i; 235 int i;
212 236
237#ifdef CONFIG_PM_SLEEP
238 unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
239#endif
240
213 of_clk_del_provider(pdev->dev.of_node); 241 of_clk_del_provider(pdev->dev.of_node);
214 242
215 for (i = 0; i < clk_data.clk_num; i++) { 243 for (i = 0; i < clk_data.clk_num; i++) {
@@ -217,6 +245,9 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
217 clk_unregister(clk_table[i]); 245 clk_unregister(clk_table[i]);
218 } 246 }
219 247
248 if (!IS_ERR(epll))
249 clk_disable_unprepare(epll);
250
220 return 0; 251 return 0;
221} 252}
222 253
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 940f02837b82..88e8c6bbd77f 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -505,7 +505,7 @@ static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata
505/* fixed rate clocks generated inside the soc */ 505/* fixed rate clocks generated inside the soc */
506static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = { 506static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
507 FRATE(0, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000), 507 FRATE(0, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
508 FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000), 508 FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", "hdmi", 0, 27000000),
509 FRATE(0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000), 509 FRATE(0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
510}; 510};
511 511
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
new file mode 100644
index 000000000000..2123fc251e0f
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos4415.c
@@ -0,0 +1,1144 @@
1/*
2 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
3 * Author: Chanwoo Choi <cw00.choi@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Common Clock Framework support for Exynos4415 SoC.
10 */
11
12#include <linux/clk.h>
13#include <linux/clkdev.h>
14#include <linux/clk-provider.h>
15#include <linux/of.h>
16#include <linux/of_address.h>
17#include <linux/platform_device.h>
18#include <linux/syscore_ops.h>
19
20#include <dt-bindings/clock/exynos4415.h>
21
22#include "clk.h"
23#include "clk-pll.h"
24
25#define SRC_LEFTBUS 0x4200
26#define DIV_LEFTBUS 0x4500
27#define GATE_IP_LEFTBUS 0x4800
28#define GATE_IP_IMAGE 0x4930
29#define SRC_RIGHTBUS 0x8200
30#define DIV_RIGHTBUS 0x8500
31#define GATE_IP_RIGHTBUS 0x8800
32#define GATE_IP_PERIR 0x8960
33#define EPLL_LOCK 0xc010
34#define G3D_PLL_LOCK 0xc020
35#define DISP_PLL_LOCK 0xc030
36#define ISP_PLL_LOCK 0xc040
37#define EPLL_CON0 0xc110
38#define EPLL_CON1 0xc114
39#define EPLL_CON2 0xc118
40#define G3D_PLL_CON0 0xc120
41#define G3D_PLL_CON1 0xc124
42#define G3D_PLL_CON2 0xc128
43#define ISP_PLL_CON0 0xc130
44#define ISP_PLL_CON1 0xc134
45#define ISP_PLL_CON2 0xc138
46#define DISP_PLL_CON0 0xc140
47#define DISP_PLL_CON1 0xc144
48#define DISP_PLL_CON2 0xc148
49#define SRC_TOP0 0xc210
50#define SRC_TOP1 0xc214
51#define SRC_CAM 0xc220
52#define SRC_TV 0xc224
53#define SRC_MFC 0xc228
54#define SRC_G3D 0xc22c
55#define SRC_LCD 0xc234
56#define SRC_ISP 0xc238
57#define SRC_MAUDIO 0xc23c
58#define SRC_FSYS 0xc240
59#define SRC_PERIL0 0xc250
60#define SRC_PERIL1 0xc254
61#define SRC_CAM1 0xc258
62#define SRC_TOP_ISP0 0xc25c
63#define SRC_TOP_ISP1 0xc260
64#define SRC_MASK_TOP 0xc310
65#define SRC_MASK_CAM 0xc320
66#define SRC_MASK_TV 0xc324
67#define SRC_MASK_LCD 0xc334
68#define SRC_MASK_ISP 0xc338
69#define SRC_MASK_MAUDIO 0xc33c
70#define SRC_MASK_FSYS 0xc340
71#define SRC_MASK_PERIL0 0xc350
72#define SRC_MASK_PERIL1 0xc354
73#define DIV_TOP 0xc510
74#define DIV_CAM 0xc520
75#define DIV_TV 0xc524
76#define DIV_MFC 0xc528
77#define DIV_G3D 0xc52c
78#define DIV_LCD 0xc534
79#define DIV_ISP 0xc538
80#define DIV_MAUDIO 0xc53c
81#define DIV_FSYS0 0xc540
82#define DIV_FSYS1 0xc544
83#define DIV_FSYS2 0xc548
84#define DIV_PERIL0 0xc550
85#define DIV_PERIL1 0xc554
86#define DIV_PERIL2 0xc558
87#define DIV_PERIL3 0xc55c
88#define DIV_PERIL4 0xc560
89#define DIV_PERIL5 0xc564
90#define DIV_CAM1 0xc568
91#define DIV_TOP_ISP1 0xc56c
92#define DIV_TOP_ISP0 0xc570
93#define CLKDIV2_RATIO 0xc580
94#define GATE_SCLK_CAM 0xc820
95#define GATE_SCLK_TV 0xc824
96#define GATE_SCLK_MFC 0xc828
97#define GATE_SCLK_G3D 0xc82c
98#define GATE_SCLK_LCD 0xc834
99#define GATE_SCLK_MAUDIO 0xc83c
100#define GATE_SCLK_FSYS 0xc840
101#define GATE_SCLK_PERIL 0xc850
102#define GATE_IP_CAM 0xc920
103#define GATE_IP_TV 0xc924
104#define GATE_IP_MFC 0xc928
105#define GATE_IP_G3D 0xc92c
106#define GATE_IP_LCD 0xc934
107#define GATE_IP_FSYS 0xc940
108#define GATE_IP_PERIL 0xc950
109#define GATE_BLOCK 0xc970
110#define APLL_LOCK 0x14000
111#define APLL_CON0 0x14100
112#define SRC_CPU 0x14200
113#define DIV_CPU0 0x14500
114#define DIV_CPU1 0x14504
115
116enum exynos4415_plls {
117 apll, epll, g3d_pll, isp_pll, disp_pll,
118 nr_plls,
119};
120
121static struct samsung_clk_provider *exynos4415_ctx;
122
123/*
124 * Support for CMU save/restore across system suspends
125 */
126#ifdef CONFIG_PM_SLEEP
127static struct samsung_clk_reg_dump *exynos4415_clk_regs;
128
129static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
130 SRC_LEFTBUS,
131 DIV_LEFTBUS,
132 GATE_IP_LEFTBUS,
133 GATE_IP_IMAGE,
134 SRC_RIGHTBUS,
135 DIV_RIGHTBUS,
136 GATE_IP_RIGHTBUS,
137 GATE_IP_PERIR,
138 EPLL_LOCK,
139 G3D_PLL_LOCK,
140 DISP_PLL_LOCK,
141 ISP_PLL_LOCK,
142 EPLL_CON0,
143 EPLL_CON1,
144 EPLL_CON2,
145 G3D_PLL_CON0,
146 G3D_PLL_CON1,
147 G3D_PLL_CON2,
148 ISP_PLL_CON0,
149 ISP_PLL_CON1,
150 ISP_PLL_CON2,
151 DISP_PLL_CON0,
152 DISP_PLL_CON1,
153 DISP_PLL_CON2,
154 SRC_TOP0,
155 SRC_TOP1,
156 SRC_CAM,
157 SRC_TV,
158 SRC_MFC,
159 SRC_G3D,
160 SRC_LCD,
161 SRC_ISP,
162 SRC_MAUDIO,
163 SRC_FSYS,
164 SRC_PERIL0,
165 SRC_PERIL1,
166 SRC_CAM1,
167 SRC_TOP_ISP0,
168 SRC_TOP_ISP1,
169 SRC_MASK_TOP,
170 SRC_MASK_CAM,
171 SRC_MASK_TV,
172 SRC_MASK_LCD,
173 SRC_MASK_ISP,
174 SRC_MASK_MAUDIO,
175 SRC_MASK_FSYS,
176 SRC_MASK_PERIL0,
177 SRC_MASK_PERIL1,
178 DIV_TOP,
179 DIV_CAM,
180 DIV_TV,
181 DIV_MFC,
182 DIV_G3D,
183 DIV_LCD,
184 DIV_ISP,
185 DIV_MAUDIO,
186 DIV_FSYS0,
187 DIV_FSYS1,
188 DIV_FSYS2,
189 DIV_PERIL0,
190 DIV_PERIL1,
191 DIV_PERIL2,
192 DIV_PERIL3,
193 DIV_PERIL4,
194 DIV_PERIL5,
195 DIV_CAM1,
196 DIV_TOP_ISP1,
197 DIV_TOP_ISP0,
198 CLKDIV2_RATIO,
199 GATE_SCLK_CAM,
200 GATE_SCLK_TV,
201 GATE_SCLK_MFC,
202 GATE_SCLK_G3D,
203 GATE_SCLK_LCD,
204 GATE_SCLK_MAUDIO,
205 GATE_SCLK_FSYS,
206 GATE_SCLK_PERIL,
207 GATE_IP_CAM,
208 GATE_IP_TV,
209 GATE_IP_MFC,
210 GATE_IP_G3D,
211 GATE_IP_LCD,
212 GATE_IP_FSYS,
213 GATE_IP_PERIL,
214 GATE_BLOCK,
215 APLL_LOCK,
216 APLL_CON0,
217 SRC_CPU,
218 DIV_CPU0,
219 DIV_CPU1,
220};
221
222static int exynos4415_clk_suspend(void)
223{
224 samsung_clk_save(exynos4415_ctx->reg_base, exynos4415_clk_regs,
225 ARRAY_SIZE(exynos4415_cmu_clk_regs));
226
227 return 0;
228}
229
230static void exynos4415_clk_resume(void)
231{
232 samsung_clk_restore(exynos4415_ctx->reg_base, exynos4415_clk_regs,
233 ARRAY_SIZE(exynos4415_cmu_clk_regs));
234}
235
236static struct syscore_ops exynos4415_clk_syscore_ops = {
237 .suspend = exynos4415_clk_suspend,
238 .resume = exynos4415_clk_resume,
239};
240
241static void exynos4415_clk_sleep_init(void)
242{
243 exynos4415_clk_regs =
244 samsung_clk_alloc_reg_dump(exynos4415_cmu_clk_regs,
245 ARRAY_SIZE(exynos4415_cmu_clk_regs));
246 if (!exynos4415_clk_regs) {
247 pr_warn("%s: Failed to allocate sleep save data\n", __func__);
248 return;
249 }
250
251 register_syscore_ops(&exynos4415_clk_syscore_ops);
252}
253#else
254static inline void exynos4415_clk_sleep_init(void) { }
255#endif
256
/*
 * Parent clock name lists for the mux clocks registered below.
 * PNAME() declares a static array of parent clock names; the position
 * in each list corresponds to the mux register field value.  "none"
 * marks selector values that are reserved/unused in hardware.
 */
PNAME(mout_g3d_pllsrc_p) = { "fin_pll", };

PNAME(mout_apll_p) = { "fin_pll", "fout_apll", };
PNAME(mout_g3d_pll_p) = { "fin_pll", "fout_g3d_pll", };
PNAME(mout_isp_pll_p) = { "fin_pll", "fout_isp_pll", };
PNAME(mout_disp_pll_p) = { "fin_pll", "fout_disp_pll", };

PNAME(mout_mpll_user_p) = { "fin_pll", "div_mpll_pre", };
PNAME(mout_epll_p) = { "fin_pll", "fout_epll", };
PNAME(mout_core_p) = { "mout_apll", "mout_mpll_user_c", };
PNAME(mout_hpm_p) = { "mout_apll", "mout_mpll_user_c", };

PNAME(mout_ebi_p) = { "div_aclk_200", "div_aclk_160", };
PNAME(mout_ebi_1_p) = { "mout_ebi", "mout_g3d_pll", };

PNAME(mout_gdl_p) = { "mout_mpll_user_l", };
PNAME(mout_gdr_p) = { "mout_mpll_user_r", };

PNAME(mout_aclk_266_p) = { "mout_mpll_user_t", "mout_g3d_pll", };

/* Shared parent groups reused by many peripheral muxes. */
PNAME(group_epll_g3dpll_p) = { "mout_epll", "mout_g3d_pll" };
PNAME(group_sclk_p) = { "xxti", "xusbxti",
			"none", "mout_isp_pll",
			"none", "none", "div_mpll_pre",
			"mout_epll", "mout_g3d_pll", };
PNAME(group_spdif_p) = { "mout_audio0", "mout_audio1",
			 "mout_audio2", "spdif_extclk", };
PNAME(group_sclk_audio2_p) = { "audiocdclk2", "none",
			       "none", "mout_isp_pll",
			       "mout_disp_pll", "xusbxti",
			       "div_mpll_pre", "mout_epll",
			       "mout_g3d_pll", };
PNAME(group_sclk_audio1_p) = { "audiocdclk1", "none",
			       "none", "mout_isp_pll",
			       "mout_disp_pll", "xusbxti",
			       "div_mpll_pre", "mout_epll",
			       "mout_g3d_pll", };
PNAME(group_sclk_audio0_p) = { "audiocdclk0", "none",
			       "none", "mout_isp_pll",
			       "mout_disp_pll", "xusbxti",
			       "div_mpll_pre", "mout_epll",
			       "mout_g3d_pll", };
PNAME(group_fimc_lclk_p) = { "xxti", "xusbxti",
			     "none", "mout_isp_pll",
			     "none", "mout_disp_pll",
			     "mout_mpll_user_t", "mout_epll",
			     "mout_g3d_pll", };
PNAME(group_sclk_fimd0_p) = { "xxti", "xusbxti",
			      "m_bitclkhsdiv4_4l", "mout_isp_pll",
			      "mout_disp_pll", "sclk_hdmiphy",
			      "div_mpll_pre", "mout_epll",
			      "mout_g3d_pll", };
PNAME(mout_hdmi_p) = { "sclk_pixel", "sclk_hdmiphy" };
PNAME(mout_mfc_p) = { "mout_mfc_0", "mout_mfc_1" };
PNAME(mout_g3d_p) = { "mout_g3d_0", "mout_g3d_1" };
PNAME(mout_jpeg_p) = { "mout_jpeg_0", "mout_jpeg_1" };
PNAME(mout_jpeg1_p) = { "mout_epll", "mout_g3d_pll" };
PNAME(group_aclk_isp0_300_p) = { "mout_isp_pll", "div_mpll_pre" };
PNAME(group_aclk_isp0_400_user_p) = { "fin_pll", "div_aclk_400_mcuisp" };
PNAME(group_aclk_isp0_300_user_p) = { "fin_pll", "mout_aclk_isp0_300" };
PNAME(group_aclk_isp1_300_user_p) = { "fin_pll", "mout_aclk_isp1_300" };
PNAME(group_mout_mpll_user_t_p) = { "mout_mpll_user_t" };
320
/* Fixed-factor clocks (1:1 aliases). */
static struct samsung_fixed_factor_clock exynos4415_fixed_factor_clks[] __initdata = {
	/* HACK: fin_pll hardcoded to xusbxti until detection is implemented. */
	FFACTOR(CLK_FIN_PLL, "fin_pll", "xusbxti", 1, 1, 0),
};
325
/* Fixed-rate root clocks: the HDMI PHY output is registered at 27 MHz. */
static struct samsung_fixed_rate_clock exynos4415_fixed_rate_clks[] __initdata = {
	FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
};
329
/*
 * Mux clocks of the main CMU.  Each entry is
 * MUX(id, name, parent_list, register, shift, width); parent_list indices
 * map directly to the register field values.
 */
static struct samsung_mux_clock exynos4415_mux_clks[] __initdata = {
	/*
	 * NOTE: Following table is sorted by register address in ascending
	 * order and then bitfield shift in descending order, as it is done
	 * in the User's Manual. When adding new entries, please make sure
	 * that the order is preserved, to avoid merge conflicts and make
	 * further work with defined data easier.
	 */

	/* SRC_LEFTBUS */
	MUX(CLK_MOUT_MPLL_USER_L, "mout_mpll_user_l", mout_mpll_user_p,
		SRC_LEFTBUS, 4, 1),
	MUX(CLK_MOUT_GDL, "mout_gdl", mout_gdl_p, SRC_LEFTBUS, 0, 1),

	/* SRC_RIGHTBUS */
	MUX(CLK_MOUT_MPLL_USER_R, "mout_mpll_user_r", mout_mpll_user_p,
		SRC_RIGHTBUS, 4, 1),
	MUX(CLK_MOUT_GDR, "mout_gdr", mout_gdr_p, SRC_RIGHTBUS, 0, 1),

	/* SRC_TOP0 */
	MUX(CLK_MOUT_EBI, "mout_ebi", mout_ebi_p, SRC_TOP0, 28, 1),
	MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_mout_mpll_user_t_p,
		SRC_TOP0, 24, 1),
	MUX(CLK_MOUT_ACLK_160, "mout_aclk_160", group_mout_mpll_user_t_p,
		SRC_TOP0, 20, 1),
	MUX(CLK_MOUT_ACLK_100, "mout_aclk_100", group_mout_mpll_user_t_p,
		SRC_TOP0, 16, 1),
	MUX(CLK_MOUT_ACLK_266, "mout_aclk_266", mout_aclk_266_p,
		SRC_TOP0, 12, 1),
	MUX(CLK_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p,
		SRC_TOP0, 8, 1),
	MUX(CLK_MOUT_EPLL, "mout_epll", mout_epll_p, SRC_TOP0, 4, 1),
	MUX(CLK_MOUT_EBI_1, "mout_ebi_1", mout_ebi_1_p, SRC_TOP0, 0, 1),

	/* SRC_TOP1 */
	MUX(CLK_MOUT_ISP_PLL, "mout_isp_pll", mout_isp_pll_p,
		SRC_TOP1, 28, 1),
	MUX(CLK_MOUT_DISP_PLL, "mout_disp_pll", mout_disp_pll_p,
		SRC_TOP1, 16, 1),
	MUX(CLK_MOUT_MPLL_USER_T, "mout_mpll_user_t", mout_mpll_user_p,
		SRC_TOP1, 12, 1),
	MUX(CLK_MOUT_ACLK_400_MCUISP, "mout_aclk_400_mcuisp",
		group_mout_mpll_user_t_p, SRC_TOP1, 8, 1),
	MUX(CLK_MOUT_G3D_PLLSRC, "mout_g3d_pllsrc", mout_g3d_pllsrc_p,
		SRC_TOP1, 0, 1),

	/* SRC_CAM */
	MUX(CLK_MOUT_CSIS1, "mout_csis1", group_fimc_lclk_p, SRC_CAM, 28, 4),
	MUX(CLK_MOUT_CSIS0, "mout_csis0", group_fimc_lclk_p, SRC_CAM, 24, 4),
	MUX(CLK_MOUT_CAM1, "mout_cam1", group_fimc_lclk_p, SRC_CAM, 20, 4),
	MUX(CLK_MOUT_FIMC3_LCLK, "mout_fimc3_lclk", group_fimc_lclk_p, SRC_CAM,
		12, 4),
	MUX(CLK_MOUT_FIMC2_LCLK, "mout_fimc2_lclk", group_fimc_lclk_p, SRC_CAM,
		8, 4),
	MUX(CLK_MOUT_FIMC1_LCLK, "mout_fimc1_lclk", group_fimc_lclk_p, SRC_CAM,
		4, 4),
	MUX(CLK_MOUT_FIMC0_LCLK, "mout_fimc0_lclk", group_fimc_lclk_p, SRC_CAM,
		0, 4),

	/* SRC_TV */
	MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),

	/* SRC_MFC */
	MUX(CLK_MOUT_MFC, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
	MUX(CLK_MOUT_MFC_1, "mout_mfc_1", group_epll_g3dpll_p, SRC_MFC, 4, 1),
	MUX(CLK_MOUT_MFC_0, "mout_mfc_0", group_mout_mpll_user_t_p, SRC_MFC, 0,
		1),

	/* SRC_G3D */
	MUX(CLK_MOUT_G3D, "mout_g3d", mout_g3d_p, SRC_G3D, 8, 1),
	MUX(CLK_MOUT_G3D_1, "mout_g3d_1", group_epll_g3dpll_p, SRC_G3D, 4, 1),
	MUX(CLK_MOUT_G3D_0, "mout_g3d_0", group_mout_mpll_user_t_p, SRC_G3D, 0,
		1),

	/* SRC_LCD */
	MUX(CLK_MOUT_MIPI0, "mout_mipi0", group_fimc_lclk_p, SRC_LCD, 12, 4),
	MUX(CLK_MOUT_FIMD0, "mout_fimd0", group_sclk_fimd0_p, SRC_LCD, 0, 4),

	/* SRC_ISP */
	MUX(CLK_MOUT_TSADC_ISP, "mout_tsadc_isp", group_fimc_lclk_p, SRC_ISP,
		16, 4),
	MUX(CLK_MOUT_UART_ISP, "mout_uart_isp", group_fimc_lclk_p, SRC_ISP,
		12, 4),
	MUX(CLK_MOUT_SPI1_ISP, "mout_spi1_isp", group_fimc_lclk_p, SRC_ISP,
		8, 4),
	MUX(CLK_MOUT_SPI0_ISP, "mout_spi0_isp", group_fimc_lclk_p, SRC_ISP,
		4, 4),
	MUX(CLK_MOUT_PWM_ISP, "mout_pwm_isp", group_fimc_lclk_p, SRC_ISP,
		0, 4),

	/* SRC_MAUDIO */
	MUX(CLK_MOUT_AUDIO0, "mout_audio0", group_sclk_audio0_p, SRC_MAUDIO,
		0, 4),

	/* SRC_FSYS */
	MUX(CLK_MOUT_TSADC, "mout_tsadc", group_sclk_p, SRC_FSYS, 28, 4),
	MUX(CLK_MOUT_MMC2, "mout_mmc2", group_sclk_p, SRC_FSYS, 8, 4),
	MUX(CLK_MOUT_MMC1, "mout_mmc1", group_sclk_p, SRC_FSYS, 4, 4),
	MUX(CLK_MOUT_MMC0, "mout_mmc0", group_sclk_p, SRC_FSYS, 0, 4),

	/* SRC_PERIL0 */
	MUX(CLK_MOUT_UART3, "mout_uart3", group_sclk_p, SRC_PERIL0, 12, 4),
	MUX(CLK_MOUT_UART2, "mout_uart2", group_sclk_p, SRC_PERIL0, 8, 4),
	MUX(CLK_MOUT_UART1, "mout_uart1", group_sclk_p, SRC_PERIL0, 4, 4),
	MUX(CLK_MOUT_UART0, "mout_uart0", group_sclk_p, SRC_PERIL0, 0, 4),

	/* SRC_PERIL1 */
	MUX(CLK_MOUT_SPI2, "mout_spi2", group_sclk_p, SRC_PERIL1, 24, 4),
	MUX(CLK_MOUT_SPI1, "mout_spi1", group_sclk_p, SRC_PERIL1, 20, 4),
	MUX(CLK_MOUT_SPI0, "mout_spi0", group_sclk_p, SRC_PERIL1, 16, 4),
	MUX(CLK_MOUT_SPDIF, "mout_spdif", group_spdif_p, SRC_PERIL1, 8, 4),
	MUX(CLK_MOUT_AUDIO2, "mout_audio2", group_sclk_audio2_p, SRC_PERIL1,
		4, 4),
	MUX(CLK_MOUT_AUDIO1, "mout_audio1", group_sclk_audio1_p, SRC_PERIL1,
		0, 4),

	/* SRC_CPU */
	MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p,
		SRC_CPU, 24, 1),
	MUX(CLK_MOUT_HPM, "mout_hpm", mout_hpm_p, SRC_CPU, 20, 1),
	/* CPU core mux is controlled by cpufreq code, hence read-only here. */
	MUX_F(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1, 0,
		CLK_MUX_READ_ONLY),
	MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
		CLK_SET_RATE_PARENT, 0),

	/* SRC_CAM1 */
	MUX(CLK_MOUT_PXLASYNC_CSIS1_FIMC, "mout_pxlasync_csis1",
		group_fimc_lclk_p, SRC_CAM1, 20, 1),
	MUX(CLK_MOUT_PXLASYNC_CSIS0_FIMC, "mout_pxlasync_csis0",
		group_fimc_lclk_p, SRC_CAM1, 16, 1),
	MUX(CLK_MOUT_JPEG, "mout_jpeg", mout_jpeg_p, SRC_CAM1, 8, 1),
	MUX(CLK_MOUT_JPEG1, "mout_jpeg_1", mout_jpeg1_p, SRC_CAM1, 4, 1),
	MUX(CLK_MOUT_JPEG0, "mout_jpeg_0", group_mout_mpll_user_t_p, SRC_CAM1,
		0, 1),

	/* SRC_TOP_ISP0 */
	MUX(CLK_MOUT_ACLK_ISP0_300, "mout_aclk_isp0_300",
		group_aclk_isp0_300_p, SRC_TOP_ISP0, 8, 1),
	MUX(CLK_MOUT_ACLK_ISP0_400, "mout_aclk_isp0_400_user",
		group_aclk_isp0_400_user_p, SRC_TOP_ISP0, 4, 1),
	MUX(CLK_MOUT_ACLK_ISP0_300_USER, "mout_aclk_isp0_300_user",
		group_aclk_isp0_300_user_p, SRC_TOP_ISP0, 0, 1),

	/* SRC_TOP_ISP1 */
	MUX(CLK_MOUT_ACLK_ISP1_300, "mout_aclk_isp1_300",
		group_aclk_isp0_300_p, SRC_TOP_ISP1, 4, 1),
	MUX(CLK_MOUT_ACLK_ISP1_300_USER, "mout_aclk_isp1_300_user",
		group_aclk_isp1_300_user_p, SRC_TOP_ISP1, 0, 1),
};
479
480static struct samsung_div_clock exynos4415_div_clks[] __initdata = {
481 /*
482 * NOTE: Following table is sorted by register address in ascending
483 * order and then bitfield shift in descending order, as it is done
484 * in the User's Manual. When adding new entries, please make sure
485 * that the order is preserved, to avoid merge conflicts and make
486 * further work with defined data easier.
487 */
488
489 /* DIV_LEFTBUS */
490 DIV(CLK_DIV_GPL, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
491 DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 4),
492
493 /* DIV_RIGHTBUS */
494 DIV(CLK_DIV_GPR, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
495 DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 4),
496
497 /* DIV_TOP */
498 DIV(CLK_DIV_ACLK_400_MCUISP, "div_aclk_400_mcuisp",
499 "mout_aclk_400_mcuisp", DIV_TOP, 24, 3),
500 DIV(CLK_DIV_EBI, "div_ebi", "mout_ebi_1", DIV_TOP, 16, 3),
501 DIV(CLK_DIV_ACLK_200, "div_aclk_200", "mout_aclk_200", DIV_TOP, 12, 3),
502 DIV(CLK_DIV_ACLK_160, "div_aclk_160", "mout_aclk_160", DIV_TOP, 8, 3),
503 DIV(CLK_DIV_ACLK_100, "div_aclk_100", "mout_aclk_100", DIV_TOP, 4, 4),
504 DIV(CLK_DIV_ACLK_266, "div_aclk_266", "mout_aclk_266", DIV_TOP, 0, 3),
505
506 /* DIV_CAM */
507 DIV(CLK_DIV_CSIS1, "div_csis1", "mout_csis1", DIV_CAM, 28, 4),
508 DIV(CLK_DIV_CSIS0, "div_csis0", "mout_csis0", DIV_CAM, 24, 4),
509 DIV(CLK_DIV_CAM1, "div_cam1", "mout_cam1", DIV_CAM, 20, 4),
510 DIV(CLK_DIV_FIMC3_LCLK, "div_fimc3_lclk", "mout_fimc3_lclk", DIV_CAM,
511 12, 4),
512 DIV(CLK_DIV_FIMC2_LCLK, "div_fimc2_lclk", "mout_fimc2_lclk", DIV_CAM,
513 8, 4),
514 DIV(CLK_DIV_FIMC1_LCLK, "div_fimc1_lclk", "mout_fimc1_lclk", DIV_CAM,
515 4, 4),
516 DIV(CLK_DIV_FIMC0_LCLK, "div_fimc0_lclk", "mout_fimc0_lclk", DIV_CAM,
517 0, 4),
518
519 /* DIV_TV */
520 DIV(CLK_DIV_TV_BLK, "div_tv_blk", "mout_g3d_pll", DIV_TV, 0, 4),
521
522 /* DIV_MFC */
523 DIV(CLK_DIV_MFC, "div_mfc", "mout_mfc", DIV_MFC, 0, 4),
524
525 /* DIV_G3D */
526 DIV(CLK_DIV_G3D, "div_g3d", "mout_g3d", DIV_G3D, 0, 4),
527
528 /* DIV_LCD */
529 DIV_F(CLK_DIV_MIPI0_PRE, "div_mipi0_pre", "div_mipi0", DIV_LCD, 20, 4,
530 CLK_SET_RATE_PARENT, 0),
531 DIV(CLK_DIV_MIPI0, "div_mipi0", "mout_mipi0", DIV_LCD, 16, 4),
532 DIV(CLK_DIV_FIMD0, "div_fimd0", "mout_fimd0", DIV_LCD, 0, 4),
533
534 /* DIV_ISP */
535 DIV(CLK_DIV_UART_ISP, "div_uart_isp", "mout_uart_isp", DIV_ISP, 28, 4),
536 DIV_F(CLK_DIV_SPI1_ISP_PRE, "div_spi1_isp_pre", "div_spi1_isp",
537 DIV_ISP, 20, 8, CLK_SET_RATE_PARENT, 0),
538 DIV(CLK_DIV_SPI1_ISP, "div_spi1_isp", "mout_spi1_isp", DIV_ISP, 16, 4),
539 DIV_F(CLK_DIV_SPI0_ISP_PRE, "div_spi0_isp_pre", "div_spi0_isp",
540 DIV_ISP, 8, 8, CLK_SET_RATE_PARENT, 0),
541 DIV(CLK_DIV_SPI0_ISP, "div_spi0_isp", "mout_spi0_isp", DIV_ISP, 4, 4),
542 DIV(CLK_DIV_PWM_ISP, "div_pwm_isp", "mout_pwm_isp", DIV_ISP, 0, 4),
543
544 /* DIV_MAUDIO */
545 DIV(CLK_DIV_PCM0, "div_pcm0", "div_audio0", DIV_MAUDIO, 4, 8),
546 DIV(CLK_DIV_AUDIO0, "div_audio0", "mout_audio0", DIV_MAUDIO, 0, 4),
547
548 /* DIV_FSYS0 */
549 DIV_F(CLK_DIV_TSADC_PRE, "div_tsadc_pre", "div_tsadc", DIV_FSYS0, 8, 8,
550 CLK_SET_RATE_PARENT, 0),
551 DIV(CLK_DIV_TSADC, "div_tsadc", "mout_tsadc", DIV_FSYS0, 0, 4),
552
553 /* DIV_FSYS1 */
554 DIV_F(CLK_DIV_MMC1_PRE, "div_mmc1_pre", "div_mmc1", DIV_FSYS1, 24, 8,
555 CLK_SET_RATE_PARENT, 0),
556 DIV(CLK_DIV_MMC1, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
557 DIV_F(CLK_DIV_MMC0_PRE, "div_mmc0_pre", "div_mmc0", DIV_FSYS1, 8, 8,
558 CLK_SET_RATE_PARENT, 0),
559 DIV(CLK_DIV_MMC0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
560
561 /* DIV_FSYS2 */
562 DIV_F(CLK_DIV_MMC2_PRE, "div_mmc2_pre", "div_mmc2", DIV_FSYS2, 8, 8,
563 CLK_SET_RATE_PARENT, 0),
564 DIV_F(CLK_DIV_MMC2_PRE, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4,
565 CLK_SET_RATE_PARENT, 0),
566
567 /* DIV_PERIL0 */
568 DIV(CLK_DIV_UART3, "div_uart3", "mout_uart3", DIV_PERIL0, 12, 4),
569 DIV(CLK_DIV_UART2, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4),
570 DIV(CLK_DIV_UART1, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4),
571 DIV(CLK_DIV_UART0, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4),
572
573 /* DIV_PERIL1 */
574 DIV_F(CLK_DIV_SPI1_PRE, "div_spi1_pre", "div_spi1", DIV_PERIL1, 24, 8,
575 CLK_SET_RATE_PARENT, 0),
576 DIV(CLK_DIV_SPI1, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4),
577 DIV_F(CLK_DIV_SPI0_PRE, "div_spi0_pre", "div_spi0", DIV_PERIL1, 8, 8,
578 CLK_SET_RATE_PARENT, 0),
579 DIV(CLK_DIV_SPI0, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4),
580
581 /* DIV_PERIL2 */
582 DIV_F(CLK_DIV_SPI2_PRE, "div_spi2_pre", "div_spi2", DIV_PERIL2, 8, 8,
583 CLK_SET_RATE_PARENT, 0),
584 DIV(CLK_DIV_SPI2, "div_spi2", "mout_spi2", DIV_PERIL2, 0, 4),
585
586 /* DIV_PERIL4 */
587 DIV(CLK_DIV_PCM2, "div_pcm2", "div_audio2", DIV_PERIL4, 20, 8),
588 DIV(CLK_DIV_AUDIO2, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
589 DIV(CLK_DIV_PCM1, "div_pcm1", "div_audio1", DIV_PERIL4, 20, 8),
590 DIV(CLK_DIV_AUDIO1, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
591
592 /* DIV_PERIL5 */
593 DIV(CLK_DIV_I2S1, "div_i2s1", "div_audio1", DIV_PERIL5, 0, 6),
594
595 /* DIV_CAM1 */
596 DIV(CLK_DIV_PXLASYNC_CSIS1_FIMC, "div_pxlasync_csis1_fimc",
597 "mout_pxlasync_csis1", DIV_CAM1, 24, 4),
598 DIV(CLK_DIV_PXLASYNC_CSIS0_FIMC, "div_pxlasync_csis0_fimc",
599 "mout_pxlasync_csis0", DIV_CAM1, 20, 4),
600 DIV(CLK_DIV_JPEG, "div_jpeg", "mout_jpeg", DIV_CAM1, 0, 4),
601
602 /* DIV_CPU0 */
603 DIV(CLK_DIV_CORE2, "div_core2", "div_core", DIV_CPU0, 28, 3),
604 DIV_F(CLK_DIV_APLL, "div_apll", "mout_apll", DIV_CPU0, 24, 3,
605 CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
606 DIV(CLK_DIV_PCLK_DBG, "div_pclk_dbg", "div_core2", DIV_CPU0, 20, 3),
607 DIV(CLK_DIV_ATB, "div_atb", "div_core2", DIV_CPU0, 16, 3),
608 DIV(CLK_DIV_PERIPH, "div_periph", "div_core2", DIV_CPU0, 12, 3),
609 DIV(CLK_DIV_COREM1, "div_corem1", "div_core2", DIV_CPU0, 8, 3),
610 DIV(CLK_DIV_COREM0, "div_corem0", "div_core2", DIV_CPU0, 4, 3),
611 DIV_F(CLK_DIV_CORE, "div_core", "mout_core", DIV_CPU0, 0, 3,
612 CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
613
614 /* DIV_CPU1 */
615 DIV(CLK_DIV_HPM, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
616 DIV(CLK_DIV_COPY, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
617};
618
/*
 * Gate clocks of the main CMU.  Each entry is
 * GATE(id, name, parent, register, bit, clk flags, gate flags).
 * CLK_IGNORE_UNUSED keeps gates for infrastructure blocks (bus async
 * bridges, PPMUs, TZPCs, ...) enabled even when no driver claims them.
 */
static struct samsung_gate_clock exynos4415_gate_clks[] __initdata = {
	/*
	 * NOTE: Following table is sorted by register address in ascending
	 * order and then bitfield shift in descending order, as it is done
	 * in the User's Manual. When adding new entries, please make sure
	 * that the order is preserved, to avoid merge conflicts and make
	 * further work with defined data easier.
	 */

	/* GATE_IP_LEFTBUS */
	GATE(CLK_ASYNC_G3D, "async_g3d", "div_aclk_100", GATE_IP_LEFTBUS, 6,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_ASYNC_MFCL, "async_mfcl", "div_aclk_100", GATE_IP_LEFTBUS, 4,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_ASYNC_TVX, "async_tvx", "div_aclk_100", GATE_IP_LEFTBUS, 3,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_PPMULEFT, "ppmuleft", "div_aclk_100", GATE_IP_LEFTBUS, 1,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_GPIO_LEFT, "gpio_left", "div_aclk_100", GATE_IP_LEFTBUS, 0,
		CLK_IGNORE_UNUSED, 0),

	/* GATE_IP_IMAGE */
	GATE(CLK_PPMUIMAGE, "ppmuimage", "div_aclk_100", GATE_IP_IMAGE,
		9, 0, 0),
	GATE(CLK_QEMDMA2, "qe_mdma2", "div_aclk_100", GATE_IP_IMAGE,
		8, 0, 0),
	GATE(CLK_QEROTATOR, "qe_rotator", "div_aclk_100", GATE_IP_IMAGE,
		7, 0, 0),
	/*
	 * NOTE(review): "smmu_mdam2" looks like a typo for "smmu_mdma2",
	 * but the name is part of the clock lookup ABI - do not rename
	 * without auditing consumers.
	 */
	GATE(CLK_SMMUMDMA2, "smmu_mdam2", "div_aclk_100", GATE_IP_IMAGE,
		5, 0, 0),
	GATE(CLK_SMMUROTATOR, "smmu_rotator", "div_aclk_100", GATE_IP_IMAGE,
		4, 0, 0),
	GATE(CLK_MDMA2, "mdma2", "div_aclk_100", GATE_IP_IMAGE, 2, 0, 0),
	GATE(CLK_ROTATOR, "rotator", "div_aclk_100", GATE_IP_IMAGE, 1, 0, 0),

	/* GATE_IP_RIGHTBUS */
	GATE(CLK_ASYNC_ISPMX, "async_ispmx", "div_aclk_100",
		GATE_IP_RIGHTBUS, 9, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_ASYNC_MAUDIOX, "async_maudiox", "div_aclk_100",
		GATE_IP_RIGHTBUS, 7, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_ASYNC_MFCR, "async_mfcr", "div_aclk_100",
		GATE_IP_RIGHTBUS, 6, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_ASYNC_FSYSD, "async_fsysd", "div_aclk_100",
		GATE_IP_RIGHTBUS, 5, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_ASYNC_LCD0X, "async_lcd0x", "div_aclk_100",
		GATE_IP_RIGHTBUS, 3, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_ASYNC_CAMX, "async_camx", "div_aclk_100",
		GATE_IP_RIGHTBUS, 2, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_PPMURIGHT, "ppmuright", "div_aclk_100",
		GATE_IP_RIGHTBUS, 1, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_GPIO_RIGHT, "gpio_right", "div_aclk_100",
		GATE_IP_RIGHTBUS, 0, CLK_IGNORE_UNUSED, 0),

	/* GATE_IP_PERIR */
	GATE(CLK_ANTIRBK_APBIF, "antirbk_apbif", "div_aclk_100",
		GATE_IP_PERIR, 24, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_EFUSE_WRITER_APBIF, "efuse_writer_apbif", "div_aclk_100",
		GATE_IP_PERIR, 23, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_MONOCNT, "monocnt", "div_aclk_100", GATE_IP_PERIR, 22,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_TZPC6, "tzpc6", "div_aclk_100", GATE_IP_PERIR, 21,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_PROVISIONKEY1, "provisionkey1", "div_aclk_100",
		GATE_IP_PERIR, 20, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_PROVISIONKEY0, "provisionkey0", "div_aclk_100",
		GATE_IP_PERIR, 19, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_CMU_ISPPART, "cmu_isppart", "div_aclk_100", GATE_IP_PERIR, 18,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_TMU_APBIF, "tmu_apbif", "div_aclk_100",
		GATE_IP_PERIR, 17, 0, 0),
	GATE(CLK_KEYIF, "keyif", "div_aclk_100", GATE_IP_PERIR, 16, 0, 0),
	GATE(CLK_RTC, "rtc", "div_aclk_100", GATE_IP_PERIR, 15, 0, 0),
	GATE(CLK_WDT, "wdt", "div_aclk_100", GATE_IP_PERIR, 14, 0, 0),
	GATE(CLK_MCT, "mct", "div_aclk_100", GATE_IP_PERIR, 13, 0, 0),
	GATE(CLK_SECKEY, "seckey", "div_aclk_100", GATE_IP_PERIR, 12,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_HDMI_CEC, "hdmi_cec", "div_aclk_100", GATE_IP_PERIR, 11,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_TZPC5, "tzpc5", "div_aclk_100", GATE_IP_PERIR, 10,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_TZPC4, "tzpc4", "div_aclk_100", GATE_IP_PERIR, 9,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_TZPC3, "tzpc3", "div_aclk_100", GATE_IP_PERIR, 8,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_TZPC2, "tzpc2", "div_aclk_100", GATE_IP_PERIR, 7,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_TZPC1, "tzpc1", "div_aclk_100", GATE_IP_PERIR, 6,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_TZPC0, "tzpc0", "div_aclk_100", GATE_IP_PERIR, 5,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_CMU_COREPART, "cmu_corepart", "div_aclk_100", GATE_IP_PERIR, 4,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_CMU_TOPPART, "cmu_toppart", "div_aclk_100", GATE_IP_PERIR, 3,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_PMU_APBIF, "pmu_apbif", "div_aclk_100", GATE_IP_PERIR, 2,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_SYSREG, "sysreg", "div_aclk_100", GATE_IP_PERIR, 1,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_CHIP_ID, "chip_id", "div_aclk_100", GATE_IP_PERIR, 0,
		CLK_IGNORE_UNUSED, 0),

	/* GATE_SCLK_CAM - non-completed */
	GATE(CLK_SCLK_PXLAYSNC_CSIS1_FIMC, "sclk_pxlasync_csis1_fimc",
		"div_pxlasync_csis1_fimc", GATE_SCLK_CAM, 11,
		CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_PXLAYSNC_CSIS0_FIMC, "sclk_pxlasync_csis0_fimc",
		"div_pxlasync_csis0_fimc", GATE_SCLK_CAM,
		10, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_JPEG, "sclk_jpeg", "div_jpeg",
		GATE_SCLK_CAM, 8, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_CSIS1, "sclk_csis1", "div_csis1",
		GATE_SCLK_CAM, 7, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_CSIS0, "sclk_csis0", "div_csis0",
		GATE_SCLK_CAM, 6, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_CAM1, "sclk_cam1", "div_cam1",
		GATE_SCLK_CAM, 5, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_FIMC3_LCLK, "sclk_fimc3_lclk", "div_fimc3_lclk",
		GATE_SCLK_CAM, 3, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_FIMC2_LCLK, "sclk_fimc2_lclk", "div_fimc2_lclk",
		GATE_SCLK_CAM, 2, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_FIMC1_LCLK, "sclk_fimc1_lclk", "div_fimc1_lclk",
		GATE_SCLK_CAM, 1, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_FIMC0_LCLK, "sclk_fimc0_lclk", "div_fimc0_lclk",
		GATE_SCLK_CAM, 0, CLK_SET_RATE_PARENT, 0),

	/* GATE_SCLK_TV */
	GATE(CLK_SCLK_PIXEL, "sclk_pixel", "div_tv_blk",
		GATE_SCLK_TV, 3, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi",
		GATE_SCLK_TV, 2, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_MIXER, "sclk_mixer", "div_tv_blk",
		GATE_SCLK_TV, 0, CLK_SET_RATE_PARENT, 0),

	/* GATE_SCLK_MFC */
	GATE(CLK_SCLK_MFC, "sclk_mfc", "div_mfc",
		GATE_SCLK_MFC, 0, CLK_SET_RATE_PARENT, 0),

	/* GATE_SCLK_G3D */
	GATE(CLK_SCLK_G3D, "sclk_g3d", "div_g3d",
		GATE_SCLK_G3D, 0, CLK_SET_RATE_PARENT, 0),

	/* GATE_SCLK_LCD */
	GATE(CLK_SCLK_MIPIDPHY4L, "sclk_mipidphy4l", "div_mipi0",
		GATE_SCLK_LCD, 4, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_MIPI0, "sclk_mipi0", "div_mipi0_pre",
		GATE_SCLK_LCD, 3, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_MDNIE0, "sclk_mdnie0", "div_fimd0",
		GATE_SCLK_LCD, 1, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_FIMD0, "sclk_fimd0", "div_fimd0",
		GATE_SCLK_LCD, 0, CLK_SET_RATE_PARENT, 0),

	/* GATE_SCLK_MAUDIO */
	GATE(CLK_SCLK_PCM0, "sclk_pcm0", "div_pcm0",
		GATE_SCLK_MAUDIO, 1, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_AUDIO0, "sclk_audio0", "div_audio0",
		GATE_SCLK_MAUDIO, 0, CLK_SET_RATE_PARENT, 0),

	/* GATE_SCLK_FSYS */
	GATE(CLK_SCLK_TSADC, "sclk_tsadc", "div_tsadc_pre",
		GATE_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_EBI, "sclk_ebi", "div_ebi",
		GATE_SCLK_FSYS, 6, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc2_pre",
		GATE_SCLK_FSYS, 2, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc1_pre",
		GATE_SCLK_FSYS, 1, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc0_pre",
		GATE_SCLK_FSYS, 0, CLK_SET_RATE_PARENT, 0),

	/* GATE_SCLK_PERIL */
	GATE(CLK_SCLK_I2S, "sclk_i2s1", "div_i2s1",
		GATE_SCLK_PERIL, 18, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_PCM2, "sclk_pcm2", "div_pcm2",
		GATE_SCLK_PERIL, 16, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_PCM1, "sclk_pcm1", "div_pcm1",
		GATE_SCLK_PERIL, 15, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_AUDIO2, "sclk_audio2", "div_audio2",
		GATE_SCLK_PERIL, 14, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_AUDIO1, "sclk_audio1", "div_audio1",
		GATE_SCLK_PERIL, 13, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif",
		GATE_SCLK_PERIL, 10, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_SPI2, "sclk_spi2", "div_spi2_pre",
		GATE_SCLK_PERIL, 8, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_SPI1, "sclk_spi1", "div_spi1_pre",
		GATE_SCLK_PERIL, 7, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi0_pre",
		GATE_SCLK_PERIL, 6, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_UART3, "sclk_uart3", "div_uart3",
		GATE_SCLK_PERIL, 3, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2",
		GATE_SCLK_PERIL, 2, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1",
		GATE_SCLK_PERIL, 1, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
		GATE_SCLK_PERIL, 0, CLK_SET_RATE_PARENT, 0),

	/* GATE_IP_CAM */
	GATE(CLK_SMMUFIMC_LITE2, "smmufimc_lite2", "div_aclk_160", GATE_IP_CAM,
		22, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_FIMC_LITE2, "fimc_lite2", "div_aclk_160", GATE_IP_CAM,
		20, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_PIXELASYNCM1, "pixelasyncm1", "div_aclk_160", GATE_IP_CAM,
		18, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_PIXELASYNCM0, "pixelasyncm0", "div_aclk_160", GATE_IP_CAM,
		17, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_PPMUCAMIF, "ppmucamif", "div_aclk_160", GATE_IP_CAM,
		16, CLK_IGNORE_UNUSED, 0),
	GATE(CLK_SMMUJPEG, "smmujpeg", "div_aclk_160", GATE_IP_CAM, 11, 0, 0),
	GATE(CLK_SMMUFIMC3, "smmufimc3", "div_aclk_160", GATE_IP_CAM, 10, 0, 0),
	GATE(CLK_SMMUFIMC2, "smmufimc2", "div_aclk_160", GATE_IP_CAM, 9, 0, 0),
	GATE(CLK_SMMUFIMC1, "smmufimc1", "div_aclk_160", GATE_IP_CAM, 8, 0, 0),
	GATE(CLK_SMMUFIMC0, "smmufimc0", "div_aclk_160", GATE_IP_CAM, 7, 0, 0),
	GATE(CLK_JPEG, "jpeg", "div_aclk_160", GATE_IP_CAM, 6, 0, 0),
	GATE(CLK_CSIS1, "csis1", "div_aclk_160", GATE_IP_CAM, 5, 0, 0),
	GATE(CLK_CSIS0, "csis0", "div_aclk_160", GATE_IP_CAM, 4, 0, 0),
	GATE(CLK_FIMC3, "fimc3", "div_aclk_160", GATE_IP_CAM, 3, 0, 0),
	GATE(CLK_FIMC2, "fimc2", "div_aclk_160", GATE_IP_CAM, 2, 0, 0),
	GATE(CLK_FIMC1, "fimc1", "div_aclk_160", GATE_IP_CAM, 1, 0, 0),
	GATE(CLK_FIMC0, "fimc0", "div_aclk_160", GATE_IP_CAM, 0, 0, 0),

	/* GATE_IP_TV */
	GATE(CLK_PPMUTV, "ppmutv", "div_aclk_100", GATE_IP_TV, 5, 0, 0),
	GATE(CLK_SMMUTV, "smmutv", "div_aclk_100", GATE_IP_TV, 4, 0, 0),
	GATE(CLK_HDMI, "hdmi", "div_aclk_100", GATE_IP_TV, 3, 0, 0),
	GATE(CLK_MIXER, "mixer", "div_aclk_100", GATE_IP_TV, 1, 0, 0),
	GATE(CLK_VP, "vp", "div_aclk_100", GATE_IP_TV, 0, 0, 0),

	/* GATE_IP_MFC */
	GATE(CLK_PPMUMFC_R, "ppmumfc_r", "div_aclk_200", GATE_IP_MFC, 4,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_PPMUMFC_L, "ppmumfc_l", "div_aclk_200", GATE_IP_MFC, 3,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_SMMUMFC_R, "smmumfc_r", "div_aclk_200", GATE_IP_MFC, 2, 0, 0),
	GATE(CLK_SMMUMFC_L, "smmumfc_l", "div_aclk_200", GATE_IP_MFC, 1, 0, 0),
	GATE(CLK_MFC, "mfc", "div_aclk_200", GATE_IP_MFC, 0, 0, 0),

	/* GATE_IP_G3D */
	GATE(CLK_PPMUG3D, "ppmug3d", "div_aclk_200", GATE_IP_G3D, 1,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_G3D, "g3d", "div_aclk_200", GATE_IP_G3D, 0, 0, 0),

	/* GATE_IP_LCD */
	GATE(CLK_PPMULCD0, "ppmulcd0", "div_aclk_160", GATE_IP_LCD, 5,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_SMMUFIMD0, "smmufimd0", "div_aclk_160", GATE_IP_LCD, 4, 0, 0),
	GATE(CLK_DSIM0, "dsim0", "div_aclk_160", GATE_IP_LCD, 3, 0, 0),
	GATE(CLK_SMIES, "smies", "div_aclk_160", GATE_IP_LCD, 2, 0, 0),
	GATE(CLK_MIE0, "mie0", "div_aclk_160", GATE_IP_LCD, 1, 0, 0),
	GATE(CLK_FIMD0, "fimd0", "div_aclk_160", GATE_IP_LCD, 0, 0, 0),

	/* GATE_IP_FSYS */
	GATE(CLK_TSADC, "tsadc", "div_aclk_200", GATE_IP_FSYS, 20, 0, 0),
	GATE(CLK_PPMUFILE, "ppmufile", "div_aclk_200", GATE_IP_FSYS, 17,
		CLK_IGNORE_UNUSED, 0),
	GATE(CLK_NFCON, "nfcon", "div_aclk_200", GATE_IP_FSYS, 16, 0, 0),
	GATE(CLK_USBDEVICE, "usbdevice", "div_aclk_200", GATE_IP_FSYS, 13,
		0, 0),
	GATE(CLK_USBHOST, "usbhost", "div_aclk_200", GATE_IP_FSYS, 12, 0, 0),
	GATE(CLK_SROMC, "sromc", "div_aclk_200", GATE_IP_FSYS, 11, 0, 0),
	GATE(CLK_SDMMC2, "sdmmc2", "div_aclk_200", GATE_IP_FSYS, 7, 0, 0),
	GATE(CLK_SDMMC1, "sdmmc1", "div_aclk_200", GATE_IP_FSYS, 6, 0, 0),
	GATE(CLK_SDMMC0, "sdmmc0", "div_aclk_200", GATE_IP_FSYS, 5, 0, 0),
	GATE(CLK_PDMA1, "pdma1", "div_aclk_200", GATE_IP_FSYS, 1, 0, 0),
	GATE(CLK_PDMA0, "pdma0", "div_aclk_200", GATE_IP_FSYS, 0, 0, 0),

	/* GATE_IP_PERIL */
	GATE(CLK_SPDIF, "spdif", "div_aclk_100", GATE_IP_PERIL, 26, 0, 0),
	GATE(CLK_PWM, "pwm", "div_aclk_100", GATE_IP_PERIL, 24, 0, 0),
	GATE(CLK_PCM2, "pcm2", "div_aclk_100", GATE_IP_PERIL, 23, 0, 0),
	GATE(CLK_PCM1, "pcm1", "div_aclk_100", GATE_IP_PERIL, 22, 0, 0),
	GATE(CLK_I2S1, "i2s1", "div_aclk_100", GATE_IP_PERIL, 20, 0, 0),
	GATE(CLK_SPI2, "spi2", "div_aclk_100", GATE_IP_PERIL, 18, 0, 0),
	GATE(CLK_SPI1, "spi1", "div_aclk_100", GATE_IP_PERIL, 17, 0, 0),
	GATE(CLK_SPI0, "spi0", "div_aclk_100", GATE_IP_PERIL, 16, 0, 0),
	GATE(CLK_I2CHDMI, "i2chdmi", "div_aclk_100", GATE_IP_PERIL, 14, 0, 0),
	GATE(CLK_I2C7, "i2c7", "div_aclk_100", GATE_IP_PERIL, 13, 0, 0),
	GATE(CLK_I2C6, "i2c6", "div_aclk_100", GATE_IP_PERIL, 12, 0, 0),
	GATE(CLK_I2C5, "i2c5", "div_aclk_100", GATE_IP_PERIL, 11, 0, 0),
	GATE(CLK_I2C4, "i2c4", "div_aclk_100", GATE_IP_PERIL, 10, 0, 0),
	GATE(CLK_I2C3, "i2c3", "div_aclk_100", GATE_IP_PERIL, 9, 0, 0),
	GATE(CLK_I2C2, "i2c2", "div_aclk_100", GATE_IP_PERIL, 8, 0, 0),
	GATE(CLK_I2C1, "i2c1", "div_aclk_100", GATE_IP_PERIL, 7, 0, 0),
	GATE(CLK_I2C0, "i2c0", "div_aclk_100", GATE_IP_PERIL, 6, 0, 0),
	GATE(CLK_UART3, "uart3", "div_aclk_100", GATE_IP_PERIL, 3, 0, 0),
	GATE(CLK_UART2, "uart2", "div_aclk_100", GATE_IP_PERIL, 2, 0, 0),
	GATE(CLK_UART1, "uart1", "div_aclk_100", GATE_IP_PERIL, 1, 0, 0),
	GATE(CLK_UART0, "uart0", "div_aclk_100", GATE_IP_PERIL, 0, 0, 0),
};
908
/*
 * APLL & MPLL & BPLL & ISP_PLL & DISP_PLL & G3D_PLL
 *
 * PLL_35XX_RATE(rate, m, p, s): rate = fin * m / (p << s).
 * Table assumes a 24 MHz fin_pll - TODO confirm once input clock
 * detection replaces the fin_pll/xusbxti hack above.
 */
static struct samsung_pll_rate_table exynos4415_pll_rates[] = {
	PLL_35XX_RATE(1600000000, 400, 3, 1),
	PLL_35XX_RATE(1500000000, 250, 2, 1),
	PLL_35XX_RATE(1400000000, 175, 3, 0),
	PLL_35XX_RATE(1300000000, 325, 3, 1),
	PLL_35XX_RATE(1200000000, 400, 4, 1),
	PLL_35XX_RATE(1100000000, 275, 3, 1),
	PLL_35XX_RATE(1066000000, 533, 6, 1),
	PLL_35XX_RATE(1000000000, 250, 3, 1),
	PLL_35XX_RATE(960000000, 320, 4, 1),
	PLL_35XX_RATE(900000000, 300, 4, 1),
	PLL_35XX_RATE(850000000, 425, 6, 1),
	PLL_35XX_RATE(800000000, 200, 3, 1),
	PLL_35XX_RATE(700000000, 175, 3, 1),
	PLL_35XX_RATE(667000000, 667, 12, 1),
	PLL_35XX_RATE(600000000, 400, 4, 2),
	PLL_35XX_RATE(550000000, 275, 3, 2),
	PLL_35XX_RATE(533000000, 533, 6, 2),
	PLL_35XX_RATE(520000000, 260, 3, 2),
	PLL_35XX_RATE(500000000, 250, 3, 2),
	PLL_35XX_RATE(440000000, 220, 3, 2),
	PLL_35XX_RATE(400000000, 200, 3, 2),
	PLL_35XX_RATE(350000000, 175, 3, 2),
	PLL_35XX_RATE(300000000, 300, 3, 3),
	PLL_35XX_RATE(266000000, 266, 3, 3),
	PLL_35XX_RATE(200000000, 200, 3, 3),
	PLL_35XX_RATE(160000000, 160, 3, 3),
	PLL_35XX_RATE(100000000, 200, 3, 4),
	{ /* sentinel */ }
};
942
/*
 * EPLL (audio) rate table.
 * PLL_36XX_RATE(rate, m, p, s, k): pll_36xx adds the fractional
 * divider k for non-integer audio rates.
 */
static struct samsung_pll_rate_table exynos4415_epll_rates[] = {
	PLL_36XX_RATE(800000000, 200, 3, 1, 0),
	PLL_36XX_RATE(288000000, 96, 2, 2, 0),
	PLL_36XX_RATE(192000000, 128, 2, 3, 0),
	PLL_36XX_RATE(144000000, 96, 2, 3, 0),
	PLL_36XX_RATE(96000000, 128, 2, 4, 0),
	PLL_36XX_RATE(84000000, 112, 2, 4, 0),
	PLL_36XX_RATE(80750011, 107, 2, 4, 43691),
	PLL_36XX_RATE(73728004, 98, 2, 4, 19923),
	PLL_36XX_RATE(67987602, 271, 3, 5, 62285),
	PLL_36XX_RATE(65911004, 175, 2, 5, 49982),
	PLL_36XX_RATE(50000000, 200, 3, 5, 0),
	PLL_36XX_RATE(49152003, 131, 2, 5, 4719),
	PLL_36XX_RATE(48000000, 128, 2, 5, 0),
	PLL_36XX_RATE(45250000, 181, 3, 5, 0),
	{ /* sentinel */ }
};
961
/*
 * PLLs of the main CMU.  Rate tables are filled in at init time in
 * exynos4415_cmu_init() (they are not compile-time constants because
 * the same table is shared between several PLLs).
 */
static struct samsung_pll_clock exynos4415_plls[nr_plls] __initdata = {
	[apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
		APLL_LOCK, APLL_CON0, NULL),
	[epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
		EPLL_LOCK, EPLL_CON0, NULL),
	/* G3D PLL is fed through a source mux rather than fin_pll directly. */
	[g3d_pll] = PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll",
		"mout_g3d_pllsrc", G3D_PLL_LOCK, G3D_PLL_CON0, NULL),
	[isp_pll] = PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
		ISP_PLL_LOCK, ISP_PLL_CON0, NULL),
	[disp_pll] = PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
		"fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, NULL),
};
974
/*
 * Probe/init for the main "samsung,exynos4415-cmu" node.
 *
 * Registration order matters: the fixed-factor fin_pll alias must exist
 * before the PLLs that use it as parent, and all leaf clocks are
 * registered before the provider is published to DT consumers.
 * Failures here are fatal (panic) since the system cannot run without
 * its clock controller.
 */
static void __init exynos4415_cmu_init(struct device_node *np)
{
	void __iomem *reg_base;

	reg_base = of_iomap(np, 0);
	if (!reg_base)
		panic("%s: failed to map registers\n", __func__);

	exynos4415_ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
	if (!exynos4415_ctx)
		panic("%s: unable to allocate context.\n", __func__);

	/* Attach the shared rate tables before registering the PLLs. */
	exynos4415_plls[apll].rate_table = exynos4415_pll_rates;
	exynos4415_plls[epll].rate_table = exynos4415_epll_rates;
	exynos4415_plls[g3d_pll].rate_table = exynos4415_pll_rates;
	exynos4415_plls[isp_pll].rate_table = exynos4415_pll_rates;
	exynos4415_plls[disp_pll].rate_table = exynos4415_pll_rates;

	samsung_clk_register_fixed_factor(exynos4415_ctx,
				exynos4415_fixed_factor_clks,
				ARRAY_SIZE(exynos4415_fixed_factor_clks));
	samsung_clk_register_fixed_rate(exynos4415_ctx,
				exynos4415_fixed_rate_clks,
				ARRAY_SIZE(exynos4415_fixed_rate_clks));

	samsung_clk_register_pll(exynos4415_ctx, exynos4415_plls,
				ARRAY_SIZE(exynos4415_plls), reg_base);
	samsung_clk_register_mux(exynos4415_ctx, exynos4415_mux_clks,
				ARRAY_SIZE(exynos4415_mux_clks));
	samsung_clk_register_div(exynos4415_ctx, exynos4415_div_clks,
				ARRAY_SIZE(exynos4415_div_clks));
	samsung_clk_register_gate(exynos4415_ctx, exynos4415_gate_clks,
				ARRAY_SIZE(exynos4415_gate_clks));

	exynos4415_clk_sleep_init();

	samsung_clk_of_add_provider(np, exynos4415_ctx);
}
CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
1014
/*
 * CMU DMC: clock controller block for the DRAM memory controller.
 */

/* Register offsets within the CMU_DMC register bank (base mapped from DT) */
#define MPLL_LOCK		0x008
#define MPLL_CON0		0x108
#define MPLL_CON1		0x10c
#define MPLL_CON2		0x110
#define BPLL_LOCK		0x118
#define BPLL_CON0		0x218
#define BPLL_CON1		0x21c
#define BPLL_CON2		0x220
#define SRC_DMC			0x300
#define DIV_DMC1		0x504

/* Indices into exynos4415_dmc_plls[]; nr_dmc_plls doubles as array size */
enum exynos4415_dmc_plls {
	mpll, bpll,
	nr_dmc_plls,
};

/* Provider context for CMU_DMC, set up in exynos4415_cmu_dmc_init() */
static struct samsung_clk_provider *exynos4415_dmc_ctx;
1036
#ifdef CONFIG_PM_SLEEP
/* Runtime snapshot of the CMU_DMC registers, taken at suspend time */
static struct samsung_clk_reg_dump *exynos4415_dmc_clk_regs;

/* Offsets of the CMU_DMC registers that must survive suspend/resume */
static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
	MPLL_LOCK,
	MPLL_CON0,
	MPLL_CON1,
	MPLL_CON2,
	BPLL_LOCK,
	BPLL_CON0,
	BPLL_CON1,
	BPLL_CON2,
	SRC_DMC,
	DIV_DMC1,
};

/* syscore suspend hook: save the CMU_DMC register contents */
static int exynos4415_dmc_clk_suspend(void)
{
	samsung_clk_save(exynos4415_dmc_ctx->reg_base,
				exynos4415_dmc_clk_regs,
				ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
	return 0;
}

/* syscore resume hook: write the saved register contents back */
static void exynos4415_dmc_clk_resume(void)
{
	samsung_clk_restore(exynos4415_dmc_ctx->reg_base,
				exynos4415_dmc_clk_regs,
				ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
}

static struct syscore_ops exynos4415_dmc_clk_syscore_ops = {
	.suspend = exynos4415_dmc_clk_suspend,
	.resume = exynos4415_dmc_clk_resume,
};

/*
 * Allocate the register-dump storage and hook the syscore ops.  On
 * allocation failure only a warning is emitted: the system still works,
 * it just loses CMU_DMC state across suspend.
 */
static void exynos4415_dmc_clk_sleep_init(void)
{
	exynos4415_dmc_clk_regs =
		samsung_clk_alloc_reg_dump(exynos4415_cmu_dmc_clk_regs,
					ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
	if (!exynos4415_dmc_clk_regs) {
		pr_warn("%s: Failed to allocate sleep save data\n", __func__);
		return;
	}

	register_syscore_ops(&exynos4415_dmc_clk_syscore_ops);
}
#else
/* No PM_SLEEP support configured: nothing to save/restore */
static inline void exynos4415_dmc_clk_sleep_init(void) { }
#endif /* CONFIG_PM_SLEEP */
1088
/* Parent lists for the CMU_DMC muxes */
PNAME(mout_mpll_p)	= { "fin_pll", "fout_mpll", };
PNAME(mout_bpll_p)	= { "fin_pll", "fout_bpll", };
PNAME(mbpll_p)		= { "mout_mpll", "mout_bpll", };

/* Muxes of CMU_DMC; all are 1-bit selects within the SRC_DMC register */
static struct samsung_mux_clock exynos4415_dmc_mux_clks[] __initdata = {
	MUX(CLK_DMC_MOUT_MPLL, "mout_mpll", mout_mpll_p, SRC_DMC, 12, 1),
	MUX(CLK_DMC_MOUT_BPLL, "mout_bpll", mout_bpll_p, SRC_DMC, 10, 1),
	MUX(CLK_DMC_MOUT_DPHY, "mout_dphy", mbpll_p, SRC_DMC, 8, 1),
	MUX(CLK_DMC_MOUT_DMC_BUS, "mout_dmc_bus", mbpll_p, SRC_DMC, 4, 1),
};
1099
/* Dividers of CMU_DMC; all bit fields live in the DIV_DMC1 register */
static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = {
	DIV(CLK_DMC_DIV_DMC, "div_dmc", "div_dmc_pre", DIV_DMC1, 27, 3),
	DIV(CLK_DMC_DIV_DPHY, "div_dphy", "mout_dphy", DIV_DMC1, 23, 3),
	DIV(CLK_DMC_DIV_DMC_PRE, "div_dmc_pre", "mout_dmc_bus",
		DIV_DMC1, 19, 2),
	DIV(CLK_DMC_DIV_DMCP, "div_dmcp", "div_dmcd", DIV_DMC1, 15, 3),
	DIV(CLK_DMC_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3),
	DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2),
};

/*
 * MPLL and BPLL of CMU_DMC.  Rate tables are attached in
 * exynos4415_cmu_dmc_init() before registration, hence NULL here.
 */
static struct samsung_pll_clock exynos4415_dmc_plls[nr_dmc_plls] __initdata = {
	[mpll] = PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
		MPLL_LOCK, MPLL_CON0, NULL),
	[bpll] = PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
		BPLL_LOCK, BPLL_CON0, NULL),
};
1116
1117static void __init exynos4415_cmu_dmc_init(struct device_node *np)
1118{
1119 void __iomem *reg_base;
1120
1121 reg_base = of_iomap(np, 0);
1122 if (!reg_base)
1123 panic("%s: failed to map registers\n", __func__);
1124
1125 exynos4415_dmc_ctx = samsung_clk_init(np, reg_base, NR_CLKS_DMC);
1126 if (!exynos4415_dmc_ctx)
1127 panic("%s: unable to allocate context.\n", __func__);
1128
1129 exynos4415_dmc_plls[mpll].rate_table = exynos4415_pll_rates;
1130 exynos4415_dmc_plls[bpll].rate_table = exynos4415_pll_rates;
1131
1132 samsung_clk_register_pll(exynos4415_dmc_ctx, exynos4415_dmc_plls,
1133 ARRAY_SIZE(exynos4415_dmc_plls), reg_base);
1134 samsung_clk_register_mux(exynos4415_dmc_ctx, exynos4415_dmc_mux_clks,
1135 ARRAY_SIZE(exynos4415_dmc_mux_clks));
1136 samsung_clk_register_div(exynos4415_dmc_ctx, exynos4415_dmc_div_clks,
1137 ARRAY_SIZE(exynos4415_dmc_div_clks));
1138
1139 exynos4415_dmc_clk_sleep_init();
1140
1141 samsung_clk_of_add_provider(np, exynos4415_dmc_ctx);
1142}
1143CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc",
1144 exynos4415_cmu_dmc_init);
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index 2527e39aadcf..e2e5193d1049 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -11,10 +11,8 @@
11 11
12#include <linux/clk.h> 12#include <linux/clk.h>
13#include <linux/clkdev.h> 13#include <linux/clkdev.h>
14#include <linux/clk-provider.h>
15#include <linux/of.h> 14#include <linux/of.h>
16#include <linux/of_address.h> 15#include <linux/of_address.h>
17#include <linux/syscore_ops.h>
18 16
19#include "clk-exynos5260.h" 17#include "clk-exynos5260.h"
20#include "clk.h" 18#include "clk.h"
@@ -22,39 +20,6 @@
22 20
23#include <dt-bindings/clock/exynos5260-clk.h> 21#include <dt-bindings/clock/exynos5260-clk.h>
24 22
25static LIST_HEAD(clock_reg_cache_list);
26
27struct exynos5260_clock_reg_cache {
28 struct list_head node;
29 void __iomem *reg_base;
30 struct samsung_clk_reg_dump *rdump;
31 unsigned int rd_num;
32};
33
34struct exynos5260_cmu_info {
35 /* list of pll clocks and respective count */
36 struct samsung_pll_clock *pll_clks;
37 unsigned int nr_pll_clks;
38 /* list of mux clocks and respective count */
39 struct samsung_mux_clock *mux_clks;
40 unsigned int nr_mux_clks;
41 /* list of div clocks and respective count */
42 struct samsung_div_clock *div_clks;
43 unsigned int nr_div_clks;
44 /* list of gate clocks and respective count */
45 struct samsung_gate_clock *gate_clks;
46 unsigned int nr_gate_clks;
47 /* list of fixed clocks and respective count */
48 struct samsung_fixed_rate_clock *fixed_clks;
49 unsigned int nr_fixed_clks;
50 /* total number of clocks with IDs assigned*/
51 unsigned int nr_clk_ids;
52
53 /* list and number of clocks registers */
54 unsigned long *clk_regs;
55 unsigned int nr_clk_regs;
56};
57
58/* 23/*
59 * Applicable for all 2550 Type PLLS for Exynos5260, listed below 24 * Applicable for all 2550 Type PLLS for Exynos5260, listed below
60 * DISP_PLL, EGL_PLL, KFC_PLL, MEM_PLL, BUS_PLL, MEDIA_PLL, G3D_PLL. 25 * DISP_PLL, EGL_PLL, KFC_PLL, MEM_PLL, BUS_PLL, MEDIA_PLL, G3D_PLL.
@@ -113,104 +78,6 @@ static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = {
113 PLL_36XX_RATE(66000000, 176, 2, 5, 0), 78 PLL_36XX_RATE(66000000, 176, 2, 5, 0),
114}; 79};
115 80
116#ifdef CONFIG_PM_SLEEP
117
118static int exynos5260_clk_suspend(void)
119{
120 struct exynos5260_clock_reg_cache *cache;
121
122 list_for_each_entry(cache, &clock_reg_cache_list, node)
123 samsung_clk_save(cache->reg_base, cache->rdump,
124 cache->rd_num);
125
126 return 0;
127}
128
129static void exynos5260_clk_resume(void)
130{
131 struct exynos5260_clock_reg_cache *cache;
132
133 list_for_each_entry(cache, &clock_reg_cache_list, node)
134 samsung_clk_restore(cache->reg_base, cache->rdump,
135 cache->rd_num);
136}
137
138static struct syscore_ops exynos5260_clk_syscore_ops = {
139 .suspend = exynos5260_clk_suspend,
140 .resume = exynos5260_clk_resume,
141};
142
143static void exynos5260_clk_sleep_init(void __iomem *reg_base,
144 unsigned long *rdump,
145 unsigned long nr_rdump)
146{
147 struct exynos5260_clock_reg_cache *reg_cache;
148
149 reg_cache = kzalloc(sizeof(struct exynos5260_clock_reg_cache),
150 GFP_KERNEL);
151 if (!reg_cache)
152 panic("could not allocate register cache.\n");
153
154 reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);
155
156 if (!reg_cache->rdump)
157 panic("could not allocate register dump storage.\n");
158
159 if (list_empty(&clock_reg_cache_list))
160 register_syscore_ops(&exynos5260_clk_syscore_ops);
161
162 reg_cache->rd_num = nr_rdump;
163 reg_cache->reg_base = reg_base;
164 list_add_tail(&reg_cache->node, &clock_reg_cache_list);
165}
166
167#else
168static void exynos5260_clk_sleep_init(void __iomem *reg_base,
169 unsigned long *rdump,
170 unsigned long nr_rdump){}
171#endif
172
173/*
174 * Common function which registers plls, muxes, dividers and gates
175 * for each CMU. It also add CMU register list to register cache.
176 */
177
178void __init exynos5260_cmu_register_one(struct device_node *np,
179 struct exynos5260_cmu_info *cmu)
180{
181 void __iomem *reg_base;
182 struct samsung_clk_provider *ctx;
183
184 reg_base = of_iomap(np, 0);
185 if (!reg_base)
186 panic("%s: failed to map registers\n", __func__);
187
188 ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
189 if (!ctx)
190 panic("%s: unable to alllocate ctx\n", __func__);
191
192 if (cmu->pll_clks)
193 samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
194 reg_base);
195 if (cmu->mux_clks)
196 samsung_clk_register_mux(ctx, cmu->mux_clks,
197 cmu->nr_mux_clks);
198 if (cmu->div_clks)
199 samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
200 if (cmu->gate_clks)
201 samsung_clk_register_gate(ctx, cmu->gate_clks,
202 cmu->nr_gate_clks);
203 if (cmu->fixed_clks)
204 samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
205 cmu->nr_fixed_clks);
206 if (cmu->clk_regs)
207 exynos5260_clk_sleep_init(reg_base, cmu->clk_regs,
208 cmu->nr_clk_regs);
209
210 samsung_clk_of_add_provider(np, ctx);
211}
212
213
214/* CMU_AUD */ 81/* CMU_AUD */
215 82
216static unsigned long aud_clk_regs[] __initdata = { 83static unsigned long aud_clk_regs[] __initdata = {
@@ -268,7 +135,7 @@ struct samsung_gate_clock aud_gate_clks[] __initdata = {
268 135
269static void __init exynos5260_clk_aud_init(struct device_node *np) 136static void __init exynos5260_clk_aud_init(struct device_node *np)
270{ 137{
271 struct exynos5260_cmu_info cmu = {0}; 138 struct samsung_cmu_info cmu = {0};
272 139
273 cmu.mux_clks = aud_mux_clks; 140 cmu.mux_clks = aud_mux_clks;
274 cmu.nr_mux_clks = ARRAY_SIZE(aud_mux_clks); 141 cmu.nr_mux_clks = ARRAY_SIZE(aud_mux_clks);
@@ -280,7 +147,7 @@ static void __init exynos5260_clk_aud_init(struct device_node *np)
280 cmu.clk_regs = aud_clk_regs; 147 cmu.clk_regs = aud_clk_regs;
281 cmu.nr_clk_regs = ARRAY_SIZE(aud_clk_regs); 148 cmu.nr_clk_regs = ARRAY_SIZE(aud_clk_regs);
282 149
283 exynos5260_cmu_register_one(np, &cmu); 150 samsung_cmu_register_one(np, &cmu);
284} 151}
285 152
286CLK_OF_DECLARE(exynos5260_clk_aud, "samsung,exynos5260-clock-aud", 153CLK_OF_DECLARE(exynos5260_clk_aud, "samsung,exynos5260-clock-aud",
@@ -458,7 +325,7 @@ struct samsung_gate_clock disp_gate_clks[] __initdata = {
458 325
459static void __init exynos5260_clk_disp_init(struct device_node *np) 326static void __init exynos5260_clk_disp_init(struct device_node *np)
460{ 327{
461 struct exynos5260_cmu_info cmu = {0}; 328 struct samsung_cmu_info cmu = {0};
462 329
463 cmu.mux_clks = disp_mux_clks; 330 cmu.mux_clks = disp_mux_clks;
464 cmu.nr_mux_clks = ARRAY_SIZE(disp_mux_clks); 331 cmu.nr_mux_clks = ARRAY_SIZE(disp_mux_clks);
@@ -470,7 +337,7 @@ static void __init exynos5260_clk_disp_init(struct device_node *np)
470 cmu.clk_regs = disp_clk_regs; 337 cmu.clk_regs = disp_clk_regs;
471 cmu.nr_clk_regs = ARRAY_SIZE(disp_clk_regs); 338 cmu.nr_clk_regs = ARRAY_SIZE(disp_clk_regs);
472 339
473 exynos5260_cmu_register_one(np, &cmu); 340 samsung_cmu_register_one(np, &cmu);
474} 341}
475 342
476CLK_OF_DECLARE(exynos5260_clk_disp, "samsung,exynos5260-clock-disp", 343CLK_OF_DECLARE(exynos5260_clk_disp, "samsung,exynos5260-clock-disp",
@@ -522,7 +389,7 @@ static struct samsung_pll_clock egl_pll_clks[] __initdata = {
522 389
523static void __init exynos5260_clk_egl_init(struct device_node *np) 390static void __init exynos5260_clk_egl_init(struct device_node *np)
524{ 391{
525 struct exynos5260_cmu_info cmu = {0}; 392 struct samsung_cmu_info cmu = {0};
526 393
527 cmu.pll_clks = egl_pll_clks; 394 cmu.pll_clks = egl_pll_clks;
528 cmu.nr_pll_clks = ARRAY_SIZE(egl_pll_clks); 395 cmu.nr_pll_clks = ARRAY_SIZE(egl_pll_clks);
@@ -534,7 +401,7 @@ static void __init exynos5260_clk_egl_init(struct device_node *np)
534 cmu.clk_regs = egl_clk_regs; 401 cmu.clk_regs = egl_clk_regs;
535 cmu.nr_clk_regs = ARRAY_SIZE(egl_clk_regs); 402 cmu.nr_clk_regs = ARRAY_SIZE(egl_clk_regs);
536 403
537 exynos5260_cmu_register_one(np, &cmu); 404 samsung_cmu_register_one(np, &cmu);
538} 405}
539 406
540CLK_OF_DECLARE(exynos5260_clk_egl, "samsung,exynos5260-clock-egl", 407CLK_OF_DECLARE(exynos5260_clk_egl, "samsung,exynos5260-clock-egl",
@@ -624,7 +491,7 @@ struct samsung_gate_clock fsys_gate_clks[] __initdata = {
624 491
625static void __init exynos5260_clk_fsys_init(struct device_node *np) 492static void __init exynos5260_clk_fsys_init(struct device_node *np)
626{ 493{
627 struct exynos5260_cmu_info cmu = {0}; 494 struct samsung_cmu_info cmu = {0};
628 495
629 cmu.mux_clks = fsys_mux_clks; 496 cmu.mux_clks = fsys_mux_clks;
630 cmu.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks); 497 cmu.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks);
@@ -634,7 +501,7 @@ static void __init exynos5260_clk_fsys_init(struct device_node *np)
634 cmu.clk_regs = fsys_clk_regs; 501 cmu.clk_regs = fsys_clk_regs;
635 cmu.nr_clk_regs = ARRAY_SIZE(fsys_clk_regs); 502 cmu.nr_clk_regs = ARRAY_SIZE(fsys_clk_regs);
636 503
637 exynos5260_cmu_register_one(np, &cmu); 504 samsung_cmu_register_one(np, &cmu);
638} 505}
639 506
640CLK_OF_DECLARE(exynos5260_clk_fsys, "samsung,exynos5260-clock-fsys", 507CLK_OF_DECLARE(exynos5260_clk_fsys, "samsung,exynos5260-clock-fsys",
@@ -713,7 +580,7 @@ struct samsung_gate_clock g2d_gate_clks[] __initdata = {
713 580
714static void __init exynos5260_clk_g2d_init(struct device_node *np) 581static void __init exynos5260_clk_g2d_init(struct device_node *np)
715{ 582{
716 struct exynos5260_cmu_info cmu = {0}; 583 struct samsung_cmu_info cmu = {0};
717 584
718 cmu.mux_clks = g2d_mux_clks; 585 cmu.mux_clks = g2d_mux_clks;
719 cmu.nr_mux_clks = ARRAY_SIZE(g2d_mux_clks); 586 cmu.nr_mux_clks = ARRAY_SIZE(g2d_mux_clks);
@@ -725,7 +592,7 @@ static void __init exynos5260_clk_g2d_init(struct device_node *np)
725 cmu.clk_regs = g2d_clk_regs; 592 cmu.clk_regs = g2d_clk_regs;
726 cmu.nr_clk_regs = ARRAY_SIZE(g2d_clk_regs); 593 cmu.nr_clk_regs = ARRAY_SIZE(g2d_clk_regs);
727 594
728 exynos5260_cmu_register_one(np, &cmu); 595 samsung_cmu_register_one(np, &cmu);
729} 596}
730 597
731CLK_OF_DECLARE(exynos5260_clk_g2d, "samsung,exynos5260-clock-g2d", 598CLK_OF_DECLARE(exynos5260_clk_g2d, "samsung,exynos5260-clock-g2d",
@@ -774,7 +641,7 @@ static struct samsung_pll_clock g3d_pll_clks[] __initdata = {
774 641
775static void __init exynos5260_clk_g3d_init(struct device_node *np) 642static void __init exynos5260_clk_g3d_init(struct device_node *np)
776{ 643{
777 struct exynos5260_cmu_info cmu = {0}; 644 struct samsung_cmu_info cmu = {0};
778 645
779 cmu.pll_clks = g3d_pll_clks; 646 cmu.pll_clks = g3d_pll_clks;
780 cmu.nr_pll_clks = ARRAY_SIZE(g3d_pll_clks); 647 cmu.nr_pll_clks = ARRAY_SIZE(g3d_pll_clks);
@@ -788,7 +655,7 @@ static void __init exynos5260_clk_g3d_init(struct device_node *np)
788 cmu.clk_regs = g3d_clk_regs; 655 cmu.clk_regs = g3d_clk_regs;
789 cmu.nr_clk_regs = ARRAY_SIZE(g3d_clk_regs); 656 cmu.nr_clk_regs = ARRAY_SIZE(g3d_clk_regs);
790 657
791 exynos5260_cmu_register_one(np, &cmu); 658 samsung_cmu_register_one(np, &cmu);
792} 659}
793 660
794CLK_OF_DECLARE(exynos5260_clk_g3d, "samsung,exynos5260-clock-g3d", 661CLK_OF_DECLARE(exynos5260_clk_g3d, "samsung,exynos5260-clock-g3d",
@@ -909,7 +776,7 @@ struct samsung_gate_clock gscl_gate_clks[] __initdata = {
909 776
910static void __init exynos5260_clk_gscl_init(struct device_node *np) 777static void __init exynos5260_clk_gscl_init(struct device_node *np)
911{ 778{
912 struct exynos5260_cmu_info cmu = {0}; 779 struct samsung_cmu_info cmu = {0};
913 780
914 cmu.mux_clks = gscl_mux_clks; 781 cmu.mux_clks = gscl_mux_clks;
915 cmu.nr_mux_clks = ARRAY_SIZE(gscl_mux_clks); 782 cmu.nr_mux_clks = ARRAY_SIZE(gscl_mux_clks);
@@ -921,7 +788,7 @@ static void __init exynos5260_clk_gscl_init(struct device_node *np)
921 cmu.clk_regs = gscl_clk_regs; 788 cmu.clk_regs = gscl_clk_regs;
922 cmu.nr_clk_regs = ARRAY_SIZE(gscl_clk_regs); 789 cmu.nr_clk_regs = ARRAY_SIZE(gscl_clk_regs);
923 790
924 exynos5260_cmu_register_one(np, &cmu); 791 samsung_cmu_register_one(np, &cmu);
925} 792}
926 793
927CLK_OF_DECLARE(exynos5260_clk_gscl, "samsung,exynos5260-clock-gscl", 794CLK_OF_DECLARE(exynos5260_clk_gscl, "samsung,exynos5260-clock-gscl",
@@ -1028,7 +895,7 @@ struct samsung_gate_clock isp_gate_clks[] __initdata = {
1028 895
1029static void __init exynos5260_clk_isp_init(struct device_node *np) 896static void __init exynos5260_clk_isp_init(struct device_node *np)
1030{ 897{
1031 struct exynos5260_cmu_info cmu = {0}; 898 struct samsung_cmu_info cmu = {0};
1032 899
1033 cmu.mux_clks = isp_mux_clks; 900 cmu.mux_clks = isp_mux_clks;
1034 cmu.nr_mux_clks = ARRAY_SIZE(isp_mux_clks); 901 cmu.nr_mux_clks = ARRAY_SIZE(isp_mux_clks);
@@ -1040,7 +907,7 @@ static void __init exynos5260_clk_isp_init(struct device_node *np)
1040 cmu.clk_regs = isp_clk_regs; 907 cmu.clk_regs = isp_clk_regs;
1041 cmu.nr_clk_regs = ARRAY_SIZE(isp_clk_regs); 908 cmu.nr_clk_regs = ARRAY_SIZE(isp_clk_regs);
1042 909
1043 exynos5260_cmu_register_one(np, &cmu); 910 samsung_cmu_register_one(np, &cmu);
1044} 911}
1045 912
1046CLK_OF_DECLARE(exynos5260_clk_isp, "samsung,exynos5260-clock-isp", 913CLK_OF_DECLARE(exynos5260_clk_isp, "samsung,exynos5260-clock-isp",
@@ -1092,7 +959,7 @@ static struct samsung_pll_clock kfc_pll_clks[] __initdata = {
1092 959
1093static void __init exynos5260_clk_kfc_init(struct device_node *np) 960static void __init exynos5260_clk_kfc_init(struct device_node *np)
1094{ 961{
1095 struct exynos5260_cmu_info cmu = {0}; 962 struct samsung_cmu_info cmu = {0};
1096 963
1097 cmu.pll_clks = kfc_pll_clks; 964 cmu.pll_clks = kfc_pll_clks;
1098 cmu.nr_pll_clks = ARRAY_SIZE(kfc_pll_clks); 965 cmu.nr_pll_clks = ARRAY_SIZE(kfc_pll_clks);
@@ -1104,7 +971,7 @@ static void __init exynos5260_clk_kfc_init(struct device_node *np)
1104 cmu.clk_regs = kfc_clk_regs; 971 cmu.clk_regs = kfc_clk_regs;
1105 cmu.nr_clk_regs = ARRAY_SIZE(kfc_clk_regs); 972 cmu.nr_clk_regs = ARRAY_SIZE(kfc_clk_regs);
1106 973
1107 exynos5260_cmu_register_one(np, &cmu); 974 samsung_cmu_register_one(np, &cmu);
1108} 975}
1109 976
1110CLK_OF_DECLARE(exynos5260_clk_kfc, "samsung,exynos5260-clock-kfc", 977CLK_OF_DECLARE(exynos5260_clk_kfc, "samsung,exynos5260-clock-kfc",
@@ -1148,7 +1015,7 @@ struct samsung_gate_clock mfc_gate_clks[] __initdata = {
1148 1015
1149static void __init exynos5260_clk_mfc_init(struct device_node *np) 1016static void __init exynos5260_clk_mfc_init(struct device_node *np)
1150{ 1017{
1151 struct exynos5260_cmu_info cmu = {0}; 1018 struct samsung_cmu_info cmu = {0};
1152 1019
1153 cmu.mux_clks = mfc_mux_clks; 1020 cmu.mux_clks = mfc_mux_clks;
1154 cmu.nr_mux_clks = ARRAY_SIZE(mfc_mux_clks); 1021 cmu.nr_mux_clks = ARRAY_SIZE(mfc_mux_clks);
@@ -1160,7 +1027,7 @@ static void __init exynos5260_clk_mfc_init(struct device_node *np)
1160 cmu.clk_regs = mfc_clk_regs; 1027 cmu.clk_regs = mfc_clk_regs;
1161 cmu.nr_clk_regs = ARRAY_SIZE(mfc_clk_regs); 1028 cmu.nr_clk_regs = ARRAY_SIZE(mfc_clk_regs);
1162 1029
1163 exynos5260_cmu_register_one(np, &cmu); 1030 samsung_cmu_register_one(np, &cmu);
1164} 1031}
1165 1032
1166CLK_OF_DECLARE(exynos5260_clk_mfc, "samsung,exynos5260-clock-mfc", 1033CLK_OF_DECLARE(exynos5260_clk_mfc, "samsung,exynos5260-clock-mfc",
@@ -1295,7 +1162,7 @@ static struct samsung_pll_clock mif_pll_clks[] __initdata = {
1295 1162
1296static void __init exynos5260_clk_mif_init(struct device_node *np) 1163static void __init exynos5260_clk_mif_init(struct device_node *np)
1297{ 1164{
1298 struct exynos5260_cmu_info cmu = {0}; 1165 struct samsung_cmu_info cmu = {0};
1299 1166
1300 cmu.pll_clks = mif_pll_clks; 1167 cmu.pll_clks = mif_pll_clks;
1301 cmu.nr_pll_clks = ARRAY_SIZE(mif_pll_clks); 1168 cmu.nr_pll_clks = ARRAY_SIZE(mif_pll_clks);
@@ -1309,7 +1176,7 @@ static void __init exynos5260_clk_mif_init(struct device_node *np)
1309 cmu.clk_regs = mif_clk_regs; 1176 cmu.clk_regs = mif_clk_regs;
1310 cmu.nr_clk_regs = ARRAY_SIZE(mif_clk_regs); 1177 cmu.nr_clk_regs = ARRAY_SIZE(mif_clk_regs);
1311 1178
1312 exynos5260_cmu_register_one(np, &cmu); 1179 samsung_cmu_register_one(np, &cmu);
1313} 1180}
1314 1181
1315CLK_OF_DECLARE(exynos5260_clk_mif, "samsung,exynos5260-clock-mif", 1182CLK_OF_DECLARE(exynos5260_clk_mif, "samsung,exynos5260-clock-mif",
@@ -1503,7 +1370,7 @@ struct samsung_gate_clock peri_gate_clks[] __initdata = {
1503 1370
1504static void __init exynos5260_clk_peri_init(struct device_node *np) 1371static void __init exynos5260_clk_peri_init(struct device_node *np)
1505{ 1372{
1506 struct exynos5260_cmu_info cmu = {0}; 1373 struct samsung_cmu_info cmu = {0};
1507 1374
1508 cmu.mux_clks = peri_mux_clks; 1375 cmu.mux_clks = peri_mux_clks;
1509 cmu.nr_mux_clks = ARRAY_SIZE(peri_mux_clks); 1376 cmu.nr_mux_clks = ARRAY_SIZE(peri_mux_clks);
@@ -1515,7 +1382,7 @@ static void __init exynos5260_clk_peri_init(struct device_node *np)
1515 cmu.clk_regs = peri_clk_regs; 1382 cmu.clk_regs = peri_clk_regs;
1516 cmu.nr_clk_regs = ARRAY_SIZE(peri_clk_regs); 1383 cmu.nr_clk_regs = ARRAY_SIZE(peri_clk_regs);
1517 1384
1518 exynos5260_cmu_register_one(np, &cmu); 1385 samsung_cmu_register_one(np, &cmu);
1519} 1386}
1520 1387
1521CLK_OF_DECLARE(exynos5260_clk_peri, "samsung,exynos5260-clock-peri", 1388CLK_OF_DECLARE(exynos5260_clk_peri, "samsung,exynos5260-clock-peri",
@@ -1959,7 +1826,7 @@ static struct samsung_pll_clock top_pll_clks[] __initdata = {
1959 1826
1960static void __init exynos5260_clk_top_init(struct device_node *np) 1827static void __init exynos5260_clk_top_init(struct device_node *np)
1961{ 1828{
1962 struct exynos5260_cmu_info cmu = {0}; 1829 struct samsung_cmu_info cmu = {0};
1963 1830
1964 cmu.pll_clks = top_pll_clks; 1831 cmu.pll_clks = top_pll_clks;
1965 cmu.nr_pll_clks = ARRAY_SIZE(top_pll_clks); 1832 cmu.nr_pll_clks = ARRAY_SIZE(top_pll_clks);
@@ -1975,7 +1842,7 @@ static void __init exynos5260_clk_top_init(struct device_node *np)
1975 cmu.clk_regs = top_clk_regs; 1842 cmu.clk_regs = top_clk_regs;
1976 cmu.nr_clk_regs = ARRAY_SIZE(top_clk_regs); 1843 cmu.nr_clk_regs = ARRAY_SIZE(top_clk_regs);
1977 1844
1978 exynos5260_cmu_register_one(np, &cmu); 1845 samsung_cmu_register_one(np, &cmu);
1979} 1846}
1980 1847
1981CLK_OF_DECLARE(exynos5260_clk_top, "samsung,exynos5260-clock-top", 1848CLK_OF_DECLARE(exynos5260_clk_top, "samsung,exynos5260-clock-top",
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
new file mode 100644
index 000000000000..ea4483b8d62e
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -0,0 +1,743 @@
1/*
2 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
3 * Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9*/
10
11#include <linux/clk.h>
12#include <linux/clkdev.h>
13#include <linux/clk-provider.h>
14#include <linux/of.h>
15
16#include "clk.h"
17#include <dt-bindings/clock/exynos7-clk.h>
18
/* Register Offset definitions for CMU_TOPC (0x10570000) */
/* PLL lock status registers */
#define CC_PLL_LOCK		0x0000
#define BUS0_PLL_LOCK		0x0004
#define BUS1_DPLL_LOCK		0x0008
#define MFC_PLL_LOCK		0x000C
#define AUD_PLL_LOCK		0x0010
/* PLL control registers */
#define CC_PLL_CON0		0x0100
#define BUS0_PLL_CON0		0x0110
#define BUS1_DPLL_CON0		0x0120
#define MFC_PLL_CON0		0x0130
#define AUD_PLL_CON0		0x0140
/* Mux select registers */
#define MUX_SEL_TOPC0		0x0200
#define MUX_SEL_TOPC1		0x0204
#define MUX_SEL_TOPC2		0x0208
#define MUX_SEL_TOPC3		0x020C
/* Divider control registers */
#define DIV_TOPC0		0x0600
#define DIV_TOPC1		0x0604
#define DIV_TOPC3		0x060C
37
/*
 * Fixed 1/2 (and chained 1/4) post-dividers on the CMU_TOPC PLL outputs;
 * they provide the half-rate inputs of the *_cmuc muxes below.
 */
static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
	FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0),
	/* div4 is implemented as div2 of the div2 output */
	FFACTOR(0, "ffac_topc_bus0_pll_div4",
		"ffac_topc_bus0_pll_div2", 1, 2, 0),
	FFACTOR(0, "ffac_topc_bus1_pll_div2", "mout_bus1_pll_ctrl", 1, 2, 0),
	FFACTOR(0, "ffac_topc_cc_pll_div2", "mout_cc_pll_ctrl", 1, 2, 0),
	FFACTOR(0, "ffac_topc_mfc_pll_div2", "mout_mfc_pll_ctrl", 1, 2, 0),
};

/* List of parent clocks for Muxes in CMU_TOPC */
PNAME(mout_bus0_pll_ctrl_p)	= { "fin_pll", "fout_bus0_pll" };
PNAME(mout_bus1_pll_ctrl_p)	= { "fin_pll", "fout_bus1_pll" };
PNAME(mout_cc_pll_ctrl_p)	= { "fin_pll", "fout_cc_pll" };
PNAME(mout_mfc_pll_ctrl_p)	= { "fin_pll", "fout_mfc_pll" };

/* Shared parent group: the four sclk_*_pll_cmuc mux outputs */
PNAME(mout_topc_group2) = { "mout_sclk_bus0_pll_cmuc",
	"mout_sclk_bus1_pll_cmuc", "mout_sclk_cc_pll_cmuc",
	"mout_sclk_mfc_pll_cmuc" };

/* Full-rate vs half-rate (vs quarter-rate for bus0) PLL selections */
PNAME(mout_sclk_bus0_pll_cmuc_p) = { "mout_bus0_pll_ctrl",
	"ffac_topc_bus0_pll_div2", "ffac_topc_bus0_pll_div4"};
PNAME(mout_sclk_bus1_pll_cmuc_p) = { "mout_bus1_pll_ctrl",
	"ffac_topc_bus1_pll_div2"};
PNAME(mout_sclk_cc_pll_cmuc_p) = { "mout_cc_pll_ctrl",
	"ffac_topc_cc_pll_div2"};
PNAME(mout_sclk_mfc_pll_cmuc_p) = { "mout_mfc_pll_ctrl",
	"ffac_topc_mfc_pll_div2"};

PNAME(mout_sclk_bus0_pll_out_p) = {"mout_bus0_pll_ctrl",
	"ffac_topc_bus0_pll_div2"};
69
/*
 * CMU_TOPC registers handed to the common CMU registration code via
 * samsung_cmu_info.clk_regs — presumably saved/restored across
 * suspend/resume by the framework (same role as the clk_regs lists in
 * the other Samsung clock drivers).
 */
static unsigned long topc_clk_regs[] __initdata = {
	CC_PLL_LOCK,
	BUS0_PLL_LOCK,
	BUS1_DPLL_LOCK,
	MFC_PLL_LOCK,
	AUD_PLL_LOCK,
	CC_PLL_CON0,
	BUS0_PLL_CON0,
	BUS1_DPLL_CON0,
	MFC_PLL_CON0,
	AUD_PLL_CON0,
	MUX_SEL_TOPC0,
	MUX_SEL_TOPC1,
	MUX_SEL_TOPC2,
	MUX_SEL_TOPC3,
	DIV_TOPC0,
	DIV_TOPC1,
	DIV_TOPC3,
};
89
/* Muxes of CMU_TOPC */
static struct samsung_mux_clock topc_mux_clks[] __initdata = {
	/* fin_pll vs PLL output selects */
	MUX(0, "mout_bus0_pll_ctrl", mout_bus0_pll_ctrl_p, MUX_SEL_TOPC0, 0, 1),
	MUX(0, "mout_bus1_pll_ctrl", mout_bus1_pll_ctrl_p, MUX_SEL_TOPC0, 4, 1),
	MUX(0, "mout_cc_pll_ctrl", mout_cc_pll_ctrl_p, MUX_SEL_TOPC0, 8, 1),
	MUX(0, "mout_mfc_pll_ctrl", mout_mfc_pll_ctrl_p, MUX_SEL_TOPC0, 12, 1),

	/* full-rate vs half-rate (bus0 also quarter-rate) selects */
	MUX(0, "mout_sclk_bus0_pll_cmuc", mout_sclk_bus0_pll_cmuc_p,
		MUX_SEL_TOPC0, 16, 2),
	MUX(0, "mout_sclk_bus1_pll_cmuc", mout_sclk_bus1_pll_cmuc_p,
		MUX_SEL_TOPC0, 20, 1),
	MUX(0, "mout_sclk_cc_pll_cmuc", mout_sclk_cc_pll_cmuc_p,
		MUX_SEL_TOPC0, 24, 1),
	MUX(0, "mout_sclk_mfc_pll_cmuc", mout_sclk_mfc_pll_cmuc_p,
		MUX_SEL_TOPC0, 28, 1),

	MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p,
		MUX_SEL_TOPC1, 16, 1),

	MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2),

	MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2),
};

/* Dividers of CMU_TOPC */
static struct samsung_div_clock topc_div_clks[] __initdata = {
	DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133",
		DIV_TOPC0, 4, 4),

	DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66",
		DIV_TOPC1, 24, 4),

	/* per-PLL output dividers feeding the other CMU blocks */
	DIV(DOUT_SCLK_BUS0_PLL, "dout_sclk_bus0_pll", "mout_sclk_bus0_pll_out",
		DIV_TOPC3, 0, 3),
	DIV(DOUT_SCLK_BUS1_PLL, "dout_sclk_bus1_pll", "mout_bus1_pll_ctrl",
		DIV_TOPC3, 8, 3),
	DIV(DOUT_SCLK_CC_PLL, "dout_sclk_cc_pll", "mout_cc_pll_ctrl",
		DIV_TOPC3, 12, 3),
	DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl",
		DIV_TOPC3, 16, 3),
};
129
/* PLLs of CMU_TOPC; no rate tables supplied (trailing NULL) */
static struct samsung_pll_clock topc_pll_clks[] __initdata = {
	PLL(pll_1451x, 0, "fout_bus0_pll", "fin_pll", BUS0_PLL_LOCK,
		BUS0_PLL_CON0, NULL),
	PLL(pll_1452x, 0, "fout_cc_pll", "fin_pll", CC_PLL_LOCK,
		CC_PLL_CON0, NULL),
	PLL(pll_1452x, 0, "fout_bus1_pll", "fin_pll", BUS1_DPLL_LOCK,
		BUS1_DPLL_CON0, NULL),
	PLL(pll_1452x, 0, "fout_mfc_pll", "fin_pll", MFC_PLL_LOCK,
		MFC_PLL_CON0, NULL),
	PLL(pll_1460x, 0, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
		AUD_PLL_CON0, NULL),
};

/* Everything CMU_TOPC registers, consumed by samsung_cmu_register_one() */
static struct samsung_cmu_info topc_cmu_info __initdata = {
	.pll_clks = topc_pll_clks,
	.nr_pll_clks = ARRAY_SIZE(topc_pll_clks),
	.mux_clks = topc_mux_clks,
	.nr_mux_clks = ARRAY_SIZE(topc_mux_clks),
	.div_clks = topc_div_clks,
	.nr_div_clks = ARRAY_SIZE(topc_div_clks),
	.fixed_factor_clks = topc_fixed_factor_clks,
	.nr_fixed_factor_clks = ARRAY_SIZE(topc_fixed_factor_clks),
	.nr_clk_ids = TOPC_NR_CLK,
	.clk_regs = topc_clk_regs,
	.nr_clk_regs = ARRAY_SIZE(topc_clk_regs),
};
156
/* Init callback for CMU_TOPC: single-shot registration via the helper */
static void __init exynos7_clk_topc_init(struct device_node *np)
{
	samsung_cmu_register_one(np, &topc_cmu_info);
}

CLK_OF_DECLARE(exynos7_clk_topc, "samsung,exynos7-clock-topc",
	exynos7_clk_topc_init);
164
/* Register Offset definitions for CMU_TOP0 (0x105D0000) */
#define MUX_SEL_TOP00			0x0200
#define MUX_SEL_TOP01			0x0204
#define MUX_SEL_TOP03			0x020C
#define MUX_SEL_TOP0_PERIC3		0x023C
#define DIV_TOP03			0x060C
#define DIV_TOP0_PERIC3			0x063C
#define ENABLE_SCLK_TOP0_PERIC3		0x0A3C

/* List of parent clocks for Muxes in CMU_TOP0 */
/* fin_pll vs the divided PLL outputs coming from CMU_TOPC */
PNAME(mout_bus0_pll_p)	= { "fin_pll", "dout_sclk_bus0_pll" };
PNAME(mout_bus1_pll_p)	= { "fin_pll", "dout_sclk_bus1_pll" };
PNAME(mout_cc_pll_p)	= { "fin_pll", "dout_sclk_cc_pll" };
PNAME(mout_mfc_pll_p)	= { "fin_pll", "dout_sclk_mfc_pll" };

/* full-rate vs local div2 fixed-factor outputs */
PNAME(mout_top0_half_bus0_pll_p) = {"mout_top0_bus0_pll",
	"ffac_top0_bus0_pll_div2"};
PNAME(mout_top0_half_bus1_pll_p) = {"mout_top0_bus1_pll",
	"ffac_top0_bus1_pll_div2"};
PNAME(mout_top0_half_cc_pll_p) = {"mout_top0_cc_pll",
	"ffac_top0_cc_pll_div2"};
PNAME(mout_top0_half_mfc_pll_p) = {"mout_top0_mfc_pll",
	"ffac_top0_mfc_pll_div2"};

/* shared 4-way parent group used by the peripheral clock muxes */
PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll",
	"mout_top0_half_bus1_pll", "mout_top0_half_cc_pll",
	"mout_top0_half_mfc_pll"};

/*
 * CMU_TOP0 registers handed to samsung_cmu_info.clk_regs — presumably
 * preserved across suspend/resume by the common CMU code.
 */
static unsigned long top0_clk_regs[] __initdata = {
	MUX_SEL_TOP00,
	MUX_SEL_TOP01,
	MUX_SEL_TOP03,
	MUX_SEL_TOP0_PERIC3,
	DIV_TOP03,
	DIV_TOP0_PERIC3,
	ENABLE_SCLK_TOP0_PERIC3,
};
202
/* Muxes of CMU_TOP0 */
static struct samsung_mux_clock top0_mux_clks[] __initdata = {
	MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1),
	MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1),
	MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1),
	MUX(0, "mout_top0_bus0_pll", mout_bus0_pll_p, MUX_SEL_TOP00, 16, 1),

	MUX(0, "mout_top0_half_mfc_pll", mout_top0_half_mfc_pll_p,
		MUX_SEL_TOP01, 4, 1),
	MUX(0, "mout_top0_half_cc_pll", mout_top0_half_cc_pll_p,
		MUX_SEL_TOP01, 8, 1),
	MUX(0, "mout_top0_half_bus1_pll", mout_top0_half_bus1_pll_p,
		MUX_SEL_TOP01, 12, 1),
	MUX(0, "mout_top0_half_bus0_pll", mout_top0_half_bus0_pll_p,
		MUX_SEL_TOP01, 16, 1),

	/* bus clocks for the PERIC0/PERIC1 blocks */
	MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2),
	MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2),

	/* UART special clocks */
	MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2),
	MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2),
	MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2),
	MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2),
};

/* Dividers of CMU_TOP0 */
static struct samsung_div_clock top0_div_clks[] __initdata = {
	DIV(DOUT_ACLK_PERIC1, "dout_aclk_peric1_66", "mout_aclk_peric1_66",
		DIV_TOP03, 12, 6),
	DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66",
		DIV_TOP03, 20, 6),

	DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4),
	DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4),
	DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4),
	DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4),
};
238
239static struct samsung_gate_clock top0_gate_clks[] __initdata = {
240 GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3",
241 ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0),
242 GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2",
243 ENABLE_SCLK_TOP0_PERIC3, 8, 0, 0),
244 GATE(CLK_SCLK_UART1, "sclk_uart1", "dout_sclk_uart1",
245 ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0),
246 GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0",
247 ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0),
248};
249
250static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
251 FFACTOR(0, "ffac_top0_bus0_pll_div2", "mout_top0_bus0_pll", 1, 2, 0),
252 FFACTOR(0, "ffac_top0_bus1_pll_div2", "mout_top0_bus1_pll", 1, 2, 0),
253 FFACTOR(0, "ffac_top0_cc_pll_div2", "mout_top0_cc_pll", 1, 2, 0),
254 FFACTOR(0, "ffac_top0_mfc_pll_div2", "mout_top0_mfc_pll", 1, 2, 0),
255};
256
/* Aggregate description of CMU_TOP0 for samsung_cmu_register_one(). */
static struct samsung_cmu_info top0_cmu_info __initdata = {
	.mux_clks = top0_mux_clks,
	.nr_mux_clks = ARRAY_SIZE(top0_mux_clks),
	.div_clks = top0_div_clks,
	.nr_div_clks = ARRAY_SIZE(top0_div_clks),
	.gate_clks = top0_gate_clks,
	.nr_gate_clks = ARRAY_SIZE(top0_gate_clks),
	.fixed_factor_clks = top0_fixed_factor_clks,
	.nr_fixed_factor_clks = ARRAY_SIZE(top0_fixed_factor_clks),
	/* number of clock IDs exported by this provider (from dt-bindings) */
	.nr_clk_ids = TOP0_NR_CLK,
	.clk_regs = top0_clk_regs,
	.nr_clk_regs = ARRAY_SIZE(top0_clk_regs),
};

/* Map registers and register all CMU_TOP0 clocks for the matching DT node. */
static void __init exynos7_clk_top0_init(struct device_node *np)
{
	samsung_cmu_register_one(np, &top0_cmu_info);
}

CLK_OF_DECLARE(exynos7_clk_top0, "samsung,exynos7-clock-top0",
	exynos7_clk_top0_init);
278
/* Register Offset definitions for CMU_TOP1 (0x105E0000) */
#define MUX_SEL_TOP10			0x0200
#define MUX_SEL_TOP11			0x0204
#define MUX_SEL_TOP13			0x020C
#define MUX_SEL_TOP1_FSYS0		0x0224
#define MUX_SEL_TOP1_FSYS1		0x0228
#define DIV_TOP13			0x060C
#define DIV_TOP1_FSYS0			0x0624
#define DIV_TOP1_FSYS1			0x0628
#define ENABLE_ACLK_TOP13		0x080C
#define ENABLE_SCLK_TOP1_FSYS0		0x0A24
#define ENABLE_SCLK_TOP1_FSYS1		0x0A28

/* List of parent clocks for Muxes in CMU_TOP1 */
PNAME(mout_top1_bus0_pll_p) = { "fin_pll", "dout_sclk_bus0_pll" };
PNAME(mout_top1_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll_b" };
PNAME(mout_top1_cc_pll_p) = { "fin_pll", "dout_sclk_cc_pll_b" };
PNAME(mout_top1_mfc_pll_p) = { "fin_pll", "dout_sclk_mfc_pll_b" };

/* Full-rate PLL output vs its local divide-by-2 fixed-factor clock. */
PNAME(mout_top1_half_bus0_pll_p) = {"mout_top1_bus0_pll",
	"ffac_top1_bus0_pll_div2"};
PNAME(mout_top1_half_bus1_pll_p) = {"mout_top1_bus1_pll",
	"ffac_top1_bus1_pll_div2"};
PNAME(mout_top1_half_cc_pll_p) = {"mout_top1_cc_pll",
	"ffac_top1_cc_pll_div2"};
PNAME(mout_top1_half_mfc_pll_p) = {"mout_top1_mfc_pll",
	"ffac_top1_mfc_pll_div2"};

/* Common 4-way parent group used by the TOP1 bus/FSYS source muxes. */
PNAME(mout_top1_group1) = {"mout_top1_half_bus0_pll",
	"mout_top1_half_bus1_pll", "mout_top1_half_cc_pll",
	"mout_top1_half_mfc_pll"};
310
/* CMU_TOP1 registers saved over suspend/resume by the common CMU code. */
static unsigned long top1_clk_regs[] __initdata = {
	MUX_SEL_TOP10,
	MUX_SEL_TOP11,
	MUX_SEL_TOP13,
	MUX_SEL_TOP1_FSYS0,
	MUX_SEL_TOP1_FSYS1,
	DIV_TOP13,
	DIV_TOP1_FSYS0,
	DIV_TOP1_FSYS1,
	ENABLE_ACLK_TOP13,
	ENABLE_SCLK_TOP1_FSYS0,
	ENABLE_SCLK_TOP1_FSYS1,
};

/*
 * Mux clocks of CMU_TOP1: PLL/half-PLL selection plus source selection
 * for the FSYS bus clocks and MMC special clocks.  ID 0 = not exported
 * to DT consumers.
 */
static struct samsung_mux_clock top1_mux_clks[] __initdata = {
	MUX(0, "mout_top1_mfc_pll", mout_top1_mfc_pll_p, MUX_SEL_TOP10, 4, 1),
	MUX(0, "mout_top1_cc_pll", mout_top1_cc_pll_p, MUX_SEL_TOP10, 8, 1),
	MUX(0, "mout_top1_bus1_pll", mout_top1_bus1_pll_p,
		MUX_SEL_TOP10, 12, 1),
	MUX(0, "mout_top1_bus0_pll", mout_top1_bus0_pll_p,
		MUX_SEL_TOP10, 16, 1),

	MUX(0, "mout_top1_half_mfc_pll", mout_top1_half_mfc_pll_p,
		MUX_SEL_TOP11, 4, 1),
	MUX(0, "mout_top1_half_cc_pll", mout_top1_half_cc_pll_p,
		MUX_SEL_TOP11, 8, 1),
	MUX(0, "mout_top1_half_bus1_pll", mout_top1_half_bus1_pll_p,
		MUX_SEL_TOP11, 12, 1),
	MUX(0, "mout_top1_half_bus0_pll", mout_top1_half_bus0_pll_p,
		MUX_SEL_TOP11, 16, 1),

	MUX(0, "mout_aclk_fsys1_200", mout_top1_group1, MUX_SEL_TOP13, 24, 2),
	MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2),

	MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2),

	MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2),
	MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2),
};

/* Dividers of CMU_TOP1 for the FSYS bus clocks and MMC source clocks. */
static struct samsung_div_clock top1_div_clks[] __initdata = {
	DIV(DOUT_ACLK_FSYS1_200, "dout_aclk_fsys1_200", "mout_aclk_fsys1_200",
		DIV_TOP13, 24, 4),
	DIV(DOUT_ACLK_FSYS0_200, "dout_aclk_fsys0_200", "mout_aclk_fsys0_200",
		DIV_TOP13, 28, 4),

	DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2",
		DIV_TOP1_FSYS0, 24, 4),

	DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1",
		DIV_TOP1_FSYS1, 24, 4),
	DIV(DOUT_SCLK_MMC0, "dout_sclk_mmc0", "mout_sclk_mmc0",
		DIV_TOP1_FSYS1, 28, 4),
};

/*
 * Gates for the MMC special clocks.  CLK_SET_RATE_PARENT lets an MMC
 * rate request propagate up to the divider/mux chain above.
 */
static struct samsung_gate_clock top1_gate_clks[] __initdata = {
	GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2",
		ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0),

	GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1",
		ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0),
	GATE(CLK_SCLK_MMC0, "sclk_mmc0", "dout_sclk_mmc0",
		ENABLE_SCLK_TOP1_FSYS1, 28, CLK_SET_RATE_PARENT, 0),
};

/* Divide-by-2 clocks providing the "half" PLL rates for CMU_TOP1. */
static struct samsung_fixed_factor_clock top1_fixed_factor_clks[] __initdata = {
	FFACTOR(0, "ffac_top1_bus0_pll_div2", "mout_top1_bus0_pll", 1, 2, 0),
	FFACTOR(0, "ffac_top1_bus1_pll_div2", "mout_top1_bus1_pll", 1, 2, 0),
	FFACTOR(0, "ffac_top1_cc_pll_div2", "mout_top1_cc_pll", 1, 2, 0),
	FFACTOR(0, "ffac_top1_mfc_pll_div2", "mout_top1_mfc_pll", 1, 2, 0),
};
382
/* Aggregate description of CMU_TOP1 for samsung_cmu_register_one(). */
static struct samsung_cmu_info top1_cmu_info __initdata = {
	.mux_clks = top1_mux_clks,
	.nr_mux_clks = ARRAY_SIZE(top1_mux_clks),
	.div_clks = top1_div_clks,
	.nr_div_clks = ARRAY_SIZE(top1_div_clks),
	.gate_clks = top1_gate_clks,
	.nr_gate_clks = ARRAY_SIZE(top1_gate_clks),
	.fixed_factor_clks = top1_fixed_factor_clks,
	.nr_fixed_factor_clks = ARRAY_SIZE(top1_fixed_factor_clks),
	/* number of clock IDs exported by this provider (from dt-bindings) */
	.nr_clk_ids = TOP1_NR_CLK,
	.clk_regs = top1_clk_regs,
	.nr_clk_regs = ARRAY_SIZE(top1_clk_regs),
};

/* Map registers and register all CMU_TOP1 clocks for the matching DT node. */
static void __init exynos7_clk_top1_init(struct device_node *np)
{
	samsung_cmu_register_one(np, &top1_cmu_info);
}

CLK_OF_DECLARE(exynos7_clk_top1, "samsung,exynos7-clock-top1",
	exynos7_clk_top1_init);
404
/* Register Offset definitions for CMU_CCORE (0x105B0000) */
#define MUX_SEL_CCORE			0x0200
#define DIV_CCORE			0x0600
#define ENABLE_ACLK_CCORE0		0x0800
#define ENABLE_ACLK_CCORE1		0x0804
#define ENABLE_PCLK_CCORE		0x0900

/*
 * List of parent clocks for Muxes in CMU_CCORE
 */
PNAME(mout_aclk_ccore_133_p)	= { "fin_pll", "dout_aclk_ccore_133" };

/*
 * CMU_CCORE registers saved over suspend/resume.  Only the registers
 * actually touched by the clocks below are listed.
 */
static unsigned long ccore_clk_regs[] __initdata = {
	MUX_SEL_CCORE,
	ENABLE_PCLK_CCORE,
};

/* Bus-clock user mux: fin_pll vs the ACLK_CCORE_133 fed from CMU_TOP. */
static struct samsung_mux_clock ccore_mux_clks[] __initdata = {
	MUX(0, "mout_aclk_ccore_133_user", mout_aclk_ccore_133_p,
		MUX_SEL_CCORE, 1, 1),
};

/* Peripheral gate: RTC bus clock. */
static struct samsung_gate_clock ccore_gate_clks[] __initdata = {
	GATE(PCLK_RTC, "pclk_rtc", "mout_aclk_ccore_133_user",
		ENABLE_PCLK_CCORE, 8, 0, 0),
};
431
/* Aggregate description of CMU_CCORE for samsung_cmu_register_one(). */
static struct samsung_cmu_info ccore_cmu_info __initdata = {
	.mux_clks = ccore_mux_clks,
	.nr_mux_clks = ARRAY_SIZE(ccore_mux_clks),
	.gate_clks = ccore_gate_clks,
	.nr_gate_clks = ARRAY_SIZE(ccore_gate_clks),
	/* number of clock IDs exported by this provider (from dt-bindings) */
	.nr_clk_ids = CCORE_NR_CLK,
	.clk_regs = ccore_clk_regs,
	.nr_clk_regs = ARRAY_SIZE(ccore_clk_regs),
};

/* Map registers and register all CMU_CCORE clocks for the matching DT node. */
static void __init exynos7_clk_ccore_init(struct device_node *np)
{
	samsung_cmu_register_one(np, &ccore_cmu_info);
}

CLK_OF_DECLARE(exynos7_clk_ccore, "samsung,exynos7-clock-ccore",
	exynos7_clk_ccore_init);
449
/* Register Offset definitions for CMU_PERIC0 (0x13610000) */
#define MUX_SEL_PERIC0			0x0200
#define ENABLE_PCLK_PERIC0		0x0900
#define ENABLE_SCLK_PERIC0		0x0A00

/*
 * List of parent clocks for Muxes in CMU_PERIC0.  The second parents
 * ("dout_aclk_peric0_66", "sclk_uart0") are supplied by CMU_TOP0.
 */
PNAME(mout_aclk_peric0_66_p)	= { "fin_pll", "dout_aclk_peric0_66" };
PNAME(mout_sclk_uart0_p)	= { "fin_pll", "sclk_uart0" };

/* CMU_PERIC0 registers saved over suspend/resume. */
static unsigned long peric0_clk_regs[] __initdata = {
	MUX_SEL_PERIC0,
	ENABLE_PCLK_PERIC0,
	ENABLE_SCLK_PERIC0,
};
464
/* User muxes for the bus clock and UART0 special clock from CMU_TOP0. */
static struct samsung_mux_clock peric0_mux_clks[] __initdata = {
	MUX(0, "mout_aclk_peric0_66_user", mout_aclk_peric0_66_p,
		MUX_SEL_PERIC0, 0, 1),
	MUX(0, "mout_sclk_uart0_user", mout_sclk_uart0_p,
		MUX_SEL_PERIC0, 16, 1),
};

/*
 * Peripheral gates of CMU_PERIC0: bus clocks (PCLK) for the HSI2C
 * controllers, UART0, ADC and PWM, plus the UART0/PWM special clocks.
 */
static struct samsung_gate_clock peric0_gate_clks[] __initdata = {
	GATE(PCLK_HSI2C0, "pclk_hsi2c0", "mout_aclk_peric0_66_user",
		ENABLE_PCLK_PERIC0, 8, 0, 0),
	GATE(PCLK_HSI2C1, "pclk_hsi2c1", "mout_aclk_peric0_66_user",
		ENABLE_PCLK_PERIC0, 9, 0, 0),
	GATE(PCLK_HSI2C4, "pclk_hsi2c4", "mout_aclk_peric0_66_user",
		ENABLE_PCLK_PERIC0, 10, 0, 0),
	GATE(PCLK_HSI2C5, "pclk_hsi2c5", "mout_aclk_peric0_66_user",
		ENABLE_PCLK_PERIC0, 11, 0, 0),
	GATE(PCLK_HSI2C9, "pclk_hsi2c9", "mout_aclk_peric0_66_user",
		ENABLE_PCLK_PERIC0, 12, 0, 0),
	GATE(PCLK_HSI2C10, "pclk_hsi2c10", "mout_aclk_peric0_66_user",
		ENABLE_PCLK_PERIC0, 13, 0, 0),
	GATE(PCLK_HSI2C11, "pclk_hsi2c11", "mout_aclk_peric0_66_user",
		ENABLE_PCLK_PERIC0, 14, 0, 0),
	GATE(PCLK_UART0, "pclk_uart0", "mout_aclk_peric0_66_user",
		ENABLE_PCLK_PERIC0, 16, 0, 0),
	GATE(PCLK_ADCIF, "pclk_adcif", "mout_aclk_peric0_66_user",
		ENABLE_PCLK_PERIC0, 20, 0, 0),
	GATE(PCLK_PWM, "pclk_pwm", "mout_aclk_peric0_66_user",
		ENABLE_PCLK_PERIC0, 21, 0, 0),

	GATE(SCLK_UART0, "sclk_uart0_user", "mout_sclk_uart0_user",
		ENABLE_SCLK_PERIC0, 16, 0, 0),
	GATE(SCLK_PWM, "sclk_pwm", "fin_pll", ENABLE_SCLK_PERIC0, 21, 0, 0),
};
498
/* Aggregate description of CMU_PERIC0 for samsung_cmu_register_one(). */
static struct samsung_cmu_info peric0_cmu_info __initdata = {
	.mux_clks = peric0_mux_clks,
	.nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
	.gate_clks = peric0_gate_clks,
	.nr_gate_clks = ARRAY_SIZE(peric0_gate_clks),
	/* number of clock IDs exported by this provider (from dt-bindings) */
	.nr_clk_ids = PERIC0_NR_CLK,
	.clk_regs = peric0_clk_regs,
	.nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
};

/* Map registers and register all CMU_PERIC0 clocks for the matching DT node. */
static void __init exynos7_clk_peric0_init(struct device_node *np)
{
	samsung_cmu_register_one(np, &peric0_cmu_info);
}

/* Register Offset definitions for CMU_PERIC1 (0x14C80000) */
#define MUX_SEL_PERIC10			0x0200
#define MUX_SEL_PERIC11			0x0204
#define ENABLE_PCLK_PERIC1		0x0900
#define ENABLE_SCLK_PERIC10		0x0A00

/*
 * NOTE(review): this CLK_OF_DECLARE for PERIC0 sits after the PERIC1
 * register offsets above; harmless (macros expand at file scope), but
 * it might be clearer placed right after exynos7_clk_peric0_init().
 */
CLK_OF_DECLARE(exynos7_clk_peric0, "samsung,exynos7-clock-peric0",
	exynos7_clk_peric0_init);
522
/*
 * List of parent clocks for Muxes in CMU_PERIC1.  The second parents
 * ("dout_aclk_peric1_66", "sclk_uart1..3") are supplied by CMU_TOP0.
 */
PNAME(mout_aclk_peric1_66_p)	= { "fin_pll", "dout_aclk_peric1_66" };
PNAME(mout_sclk_uart1_p)	= { "fin_pll", "sclk_uart1" };
PNAME(mout_sclk_uart2_p)	= { "fin_pll", "sclk_uart2" };
PNAME(mout_sclk_uart3_p)	= { "fin_pll", "sclk_uart3" };

/* CMU_PERIC1 registers saved over suspend/resume. */
static unsigned long peric1_clk_regs[] __initdata = {
	MUX_SEL_PERIC10,
	MUX_SEL_PERIC11,
	ENABLE_PCLK_PERIC1,
	ENABLE_SCLK_PERIC10,
};
535
/* User muxes for the bus clock and UART1-3 special clocks from CMU_TOP0. */
static struct samsung_mux_clock peric1_mux_clks[] __initdata = {
	MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p,
		MUX_SEL_PERIC10, 0, 1),

	MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p,
		MUX_SEL_PERIC11, 20, 1),
	MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p,
		MUX_SEL_PERIC11, 24, 1),
	MUX(0, "mout_sclk_uart3_user", mout_sclk_uart3_p,
		MUX_SEL_PERIC11, 28, 1),
};

/*
 * Peripheral gates of CMU_PERIC1: bus clocks (PCLK) for the HSI2C
 * controllers and UART1-3, plus the UART1-3 special clocks.
 */
static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
	GATE(PCLK_HSI2C2, "pclk_hsi2c2", "mout_aclk_peric1_66_user",
		ENABLE_PCLK_PERIC1, 4, 0, 0),
	GATE(PCLK_HSI2C3, "pclk_hsi2c3", "mout_aclk_peric1_66_user",
		ENABLE_PCLK_PERIC1, 5, 0, 0),
	GATE(PCLK_HSI2C6, "pclk_hsi2c6", "mout_aclk_peric1_66_user",
		ENABLE_PCLK_PERIC1, 6, 0, 0),
	GATE(PCLK_HSI2C7, "pclk_hsi2c7", "mout_aclk_peric1_66_user",
		ENABLE_PCLK_PERIC1, 7, 0, 0),
	GATE(PCLK_HSI2C8, "pclk_hsi2c8", "mout_aclk_peric1_66_user",
		ENABLE_PCLK_PERIC1, 8, 0, 0),
	GATE(PCLK_UART1, "pclk_uart1", "mout_aclk_peric1_66_user",
		ENABLE_PCLK_PERIC1, 9, 0, 0),
	GATE(PCLK_UART2, "pclk_uart2", "mout_aclk_peric1_66_user",
		ENABLE_PCLK_PERIC1, 10, 0, 0),
	GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user",
		ENABLE_PCLK_PERIC1, 11, 0, 0),

	GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user",
		ENABLE_SCLK_PERIC10, 9, 0, 0),
	GATE(SCLK_UART2, "sclk_uart2_user", "mout_sclk_uart2_user",
		ENABLE_SCLK_PERIC10, 10, 0, 0),
	GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user",
		ENABLE_SCLK_PERIC10, 11, 0, 0),
};
573
/* Aggregate description of CMU_PERIC1 for samsung_cmu_register_one(). */
static struct samsung_cmu_info peric1_cmu_info __initdata = {
	.mux_clks = peric1_mux_clks,
	.nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
	.gate_clks = peric1_gate_clks,
	.nr_gate_clks = ARRAY_SIZE(peric1_gate_clks),
	/* number of clock IDs exported by this provider (from dt-bindings) */
	.nr_clk_ids = PERIC1_NR_CLK,
	.clk_regs = peric1_clk_regs,
	.nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
};

/* Map registers and register all CMU_PERIC1 clocks for the matching DT node. */
static void __init exynos7_clk_peric1_init(struct device_node *np)
{
	samsung_cmu_register_one(np, &peric1_cmu_info);
}

CLK_OF_DECLARE(exynos7_clk_peric1, "samsung,exynos7-clock-peric1",
	exynos7_clk_peric1_init);
591
/* Register Offset definitions for CMU_PERIS (0x10040000) */
#define MUX_SEL_PERIS			0x0200
#define ENABLE_PCLK_PERIS		0x0900
#define ENABLE_PCLK_PERIS_SECURE_CHIPID	0x0910
#define ENABLE_SCLK_PERIS		0x0A00
#define ENABLE_SCLK_PERIS_SECURE_CHIPID	0x0A10

/* List of parent clocks for Muxes in CMU_PERIS */
PNAME(mout_aclk_peris_66_p)	= { "fin_pll", "dout_aclk_peris_66" };

/* CMU_PERIS registers saved over suspend/resume. */
static unsigned long peris_clk_regs[] __initdata = {
	MUX_SEL_PERIS,
	ENABLE_PCLK_PERIS,
	ENABLE_PCLK_PERIS_SECURE_CHIPID,
	ENABLE_SCLK_PERIS,
	ENABLE_SCLK_PERIS_SECURE_CHIPID,
};
609
/* User mux for the PERIS bus clock delivered from CMU_TOP0. */
static struct samsung_mux_clock peris_mux_clks[] __initdata = {
	MUX(0, "mout_aclk_peris_66_user",
		mout_aclk_peris_66_p, MUX_SEL_PERIS, 0, 1),
};

/*
 * Gates of CMU_PERIS: watchdog and TMU bus clocks, the CHIPID block
 * (in the SECURE register bank) and the TMU special clock.
 */
static struct samsung_gate_clock peris_gate_clks[] __initdata = {
	GATE(PCLK_WDT, "pclk_wdt", "mout_aclk_peris_66_user",
		ENABLE_PCLK_PERIS, 6, 0, 0),
	GATE(PCLK_TMU, "pclk_tmu_apbif", "mout_aclk_peris_66_user",
		ENABLE_PCLK_PERIS, 10, 0, 0),

	GATE(PCLK_CHIPID, "pclk_chipid", "mout_aclk_peris_66_user",
		ENABLE_PCLK_PERIS_SECURE_CHIPID, 0, 0, 0),
	GATE(SCLK_CHIPID, "sclk_chipid", "fin_pll",
		ENABLE_SCLK_PERIS_SECURE_CHIPID, 0, 0, 0),

	GATE(SCLK_TMU, "sclk_tmu", "fin_pll", ENABLE_SCLK_PERIS, 10, 0, 0),
};
628
/* Aggregate description of CMU_PERIS for samsung_cmu_register_one(). */
static struct samsung_cmu_info peris_cmu_info __initdata = {
	.mux_clks = peris_mux_clks,
	.nr_mux_clks = ARRAY_SIZE(peris_mux_clks),
	.gate_clks = peris_gate_clks,
	.nr_gate_clks = ARRAY_SIZE(peris_gate_clks),
	/* number of clock IDs exported by this provider (from dt-bindings) */
	.nr_clk_ids = PERIS_NR_CLK,
	.clk_regs = peris_clk_regs,
	.nr_clk_regs = ARRAY_SIZE(peris_clk_regs),
};

/* Map registers and register all CMU_PERIS clocks for the matching DT node. */
static void __init exynos7_clk_peris_init(struct device_node *np)
{
	samsung_cmu_register_one(np, &peris_cmu_info);
}

CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
	exynos7_clk_peris_init);
646
/* Register Offset definitions for CMU_FSYS0 (0x10E90000) */
#define MUX_SEL_FSYS00			0x0200
#define MUX_SEL_FSYS01			0x0204
#define ENABLE_ACLK_FSYS01		0x0804

/*
 * List of parent clocks for Muxes in CMU_FSYS0.  The second parents
 * ("dout_aclk_fsys0_200", "sclk_mmc2") are supplied by CMU_TOP1.
 */
PNAME(mout_aclk_fsys0_200_p)	= { "fin_pll", "dout_aclk_fsys0_200" };
PNAME(mout_sclk_mmc2_p)		= { "fin_pll", "sclk_mmc2" };

/* CMU_FSYS0 registers saved over suspend/resume. */
static unsigned long fsys0_clk_regs[] __initdata = {
	MUX_SEL_FSYS00,
	MUX_SEL_FSYS01,
	ENABLE_ACLK_FSYS01,
};
663
/* User muxes for the FSYS0 bus clock and MMC2 special clock from CMU_TOP1. */
static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
	MUX(0, "mout_aclk_fsys0_200_user", mout_aclk_fsys0_200_p,
		MUX_SEL_FSYS00, 24, 1),

	MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1),
};

/* Bus-clock gate for the MMC2 controller. */
static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
	GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user",
		ENABLE_ACLK_FSYS01, 31, 0, 0),
};
675
676static struct samsung_cmu_info fsys0_cmu_info __initdata = {
677 .mux_clks = fsys0_mux_clks,
678 .nr_mux_clks = ARRAY_SIZE(fsys0_mux_clks),
679 .gate_clks = fsys0_gate_clks,
680 .nr_gate_clks = ARRAY_SIZE(fsys0_gate_clks),
681 .nr_clk_ids = TOP1_NR_CLK,
682 .clk_regs = fsys0_clk_regs,
683 .nr_clk_regs = ARRAY_SIZE(fsys0_clk_regs),
684};
685
/* Map registers and register all CMU_FSYS0 clocks for the matching DT node. */
static void __init exynos7_clk_fsys0_init(struct device_node *np)
{
	samsung_cmu_register_one(np, &fsys0_cmu_info);
}

CLK_OF_DECLARE(exynos7_clk_fsys0, "samsung,exynos7-clock-fsys0",
	exynos7_clk_fsys0_init);
693
/* Register Offset definitions for CMU_FSYS1 (0x156E0000) */
#define MUX_SEL_FSYS10			0x0200
#define MUX_SEL_FSYS11			0x0204
#define ENABLE_ACLK_FSYS1		0x0800

/*
 * List of parent clocks for Muxes in CMU_FSYS1.  The second parents
 * ("dout_aclk_fsys1_200", "sclk_mmc0/1") are supplied by CMU_TOP1.
 */
PNAME(mout_aclk_fsys1_200_p)	= { "fin_pll", "dout_aclk_fsys1_200" };
PNAME(mout_sclk_mmc0_p)		= { "fin_pll", "sclk_mmc0" };
PNAME(mout_sclk_mmc1_p)		= { "fin_pll", "sclk_mmc1" };

/* CMU_FSYS1 registers saved over suspend/resume. */
static unsigned long fsys1_clk_regs[] __initdata = {
	MUX_SEL_FSYS10,
	MUX_SEL_FSYS11,
	ENABLE_ACLK_FSYS1,
};
711
/* User muxes for the FSYS1 bus clock and MMC0/1 special clocks from CMU_TOP1. */
static struct samsung_mux_clock fsys1_mux_clks[] __initdata = {
	MUX(0, "mout_aclk_fsys1_200_user", mout_aclk_fsys1_200_p,
		MUX_SEL_FSYS10, 28, 1),

	MUX(0, "mout_sclk_mmc1_user", mout_sclk_mmc1_p, MUX_SEL_FSYS11, 24, 1),
	MUX(0, "mout_sclk_mmc0_user", mout_sclk_mmc0_p, MUX_SEL_FSYS11, 28, 1),
};

/* Bus-clock gates for the MMC0 and MMC1 controllers. */
static struct samsung_gate_clock fsys1_gate_clks[] __initdata = {
	GATE(ACLK_MMC1, "aclk_mmc1", "mout_aclk_fsys1_200_user",
		ENABLE_ACLK_FSYS1, 29, 0, 0),
	GATE(ACLK_MMC0, "aclk_mmc0", "mout_aclk_fsys1_200_user",
		ENABLE_ACLK_FSYS1, 30, 0, 0),
};
726
727static struct samsung_cmu_info fsys1_cmu_info __initdata = {
728 .mux_clks = fsys1_mux_clks,
729 .nr_mux_clks = ARRAY_SIZE(fsys1_mux_clks),
730 .gate_clks = fsys1_gate_clks,
731 .nr_gate_clks = ARRAY_SIZE(fsys1_gate_clks),
732 .nr_clk_ids = TOP1_NR_CLK,
733 .clk_regs = fsys1_clk_regs,
734 .nr_clk_regs = ARRAY_SIZE(fsys1_clk_regs),
735};
736
/* Map registers and register all CMU_FSYS1 clocks for the matching DT node. */
static void __init exynos7_clk_fsys1_init(struct device_node *np)
{
	samsung_cmu_register_one(np, &fsys1_cmu_info);
}

CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1",
	exynos7_clk_fsys1_init);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index b07fad2a9167..9d70e5c03804 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -482,6 +482,8 @@ static const struct clk_ops samsung_pll45xx_clk_min_ops = {
482 482
483#define PLL46XX_VSEL_MASK (1) 483#define PLL46XX_VSEL_MASK (1)
484#define PLL46XX_MDIV_MASK (0x1FF) 484#define PLL46XX_MDIV_MASK (0x1FF)
485#define PLL1460X_MDIV_MASK (0x3FF)
486
485#define PLL46XX_PDIV_MASK (0x3F) 487#define PLL46XX_PDIV_MASK (0x3F)
486#define PLL46XX_SDIV_MASK (0x7) 488#define PLL46XX_SDIV_MASK (0x7)
487#define PLL46XX_VSEL_SHIFT (27) 489#define PLL46XX_VSEL_SHIFT (27)
@@ -511,13 +513,15 @@ static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw,
511 513
512 pll_con0 = __raw_readl(pll->con_reg); 514 pll_con0 = __raw_readl(pll->con_reg);
513 pll_con1 = __raw_readl(pll->con_reg + 4); 515 pll_con1 = __raw_readl(pll->con_reg + 4);
514 mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & PLL46XX_MDIV_MASK; 516 mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & ((pll->type == pll_1460x) ?
517 PLL1460X_MDIV_MASK : PLL46XX_MDIV_MASK);
515 pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK; 518 pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK;
516 sdiv = (pll_con0 >> PLL46XX_SDIV_SHIFT) & PLL46XX_SDIV_MASK; 519 sdiv = (pll_con0 >> PLL46XX_SDIV_SHIFT) & PLL46XX_SDIV_MASK;
517 kdiv = pll->type == pll_4650c ? pll_con1 & PLL4650C_KDIV_MASK : 520 kdiv = pll->type == pll_4650c ? pll_con1 & PLL4650C_KDIV_MASK :
518 pll_con1 & PLL46XX_KDIV_MASK; 521 pll_con1 & PLL46XX_KDIV_MASK;
519 522
520 shift = pll->type == pll_4600 ? 16 : 10; 523 shift = ((pll->type == pll_4600) || (pll->type == pll_1460x)) ? 16 : 10;
524
521 fvco *= (mdiv << shift) + kdiv; 525 fvco *= (mdiv << shift) + kdiv;
522 do_div(fvco, (pdiv << sdiv)); 526 do_div(fvco, (pdiv << sdiv));
523 fvco >>= shift; 527 fvco >>= shift;
@@ -573,14 +577,21 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
573 lock = 0xffff; 577 lock = 0xffff;
574 578
575 /* Set PLL PMS and VSEL values. */ 579 /* Set PLL PMS and VSEL values. */
576 con0 &= ~((PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT) | 580 if (pll->type == pll_1460x) {
581 con0 &= ~((PLL1460X_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
582 (PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) |
583 (PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT));
584 } else {
585 con0 &= ~((PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
577 (PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) | 586 (PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) |
578 (PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT) | 587 (PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT) |
579 (PLL46XX_VSEL_MASK << PLL46XX_VSEL_SHIFT)); 588 (PLL46XX_VSEL_MASK << PLL46XX_VSEL_SHIFT));
589 con0 |= rate->vsel << PLL46XX_VSEL_SHIFT;
590 }
591
580 con0 |= (rate->mdiv << PLL46XX_MDIV_SHIFT) | 592 con0 |= (rate->mdiv << PLL46XX_MDIV_SHIFT) |
581 (rate->pdiv << PLL46XX_PDIV_SHIFT) | 593 (rate->pdiv << PLL46XX_PDIV_SHIFT) |
582 (rate->sdiv << PLL46XX_SDIV_SHIFT) | 594 (rate->sdiv << PLL46XX_SDIV_SHIFT);
583 (rate->vsel << PLL46XX_VSEL_SHIFT);
584 595
585 /* Set PLL K, MFR and MRR values. */ 596 /* Set PLL K, MFR and MRR values. */
586 con1 = __raw_readl(pll->con_reg + 0x4); 597 con1 = __raw_readl(pll->con_reg + 0x4);
@@ -1190,6 +1201,9 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
1190 /* clk_ops for 35xx and 2550 are similar */ 1201 /* clk_ops for 35xx and 2550 are similar */
1191 case pll_35xx: 1202 case pll_35xx:
1192 case pll_2550: 1203 case pll_2550:
1204 case pll_1450x:
1205 case pll_1451x:
1206 case pll_1452x:
1193 if (!pll->rate_table) 1207 if (!pll->rate_table)
1194 init.ops = &samsung_pll35xx_clk_min_ops; 1208 init.ops = &samsung_pll35xx_clk_min_ops;
1195 else 1209 else
@@ -1223,6 +1237,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
1223 case pll_4600: 1237 case pll_4600:
1224 case pll_4650: 1238 case pll_4650:
1225 case pll_4650c: 1239 case pll_4650c:
1240 case pll_1460x:
1226 if (!pll->rate_table) 1241 if (!pll->rate_table)
1227 init.ops = &samsung_pll46xx_clk_min_ops; 1242 init.ops = &samsung_pll46xx_clk_min_ops;
1228 else 1243 else
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index c0ed4d41fd90..213de9af8b4f 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -33,6 +33,10 @@ enum samsung_pll_type {
33 pll_s3c2440_mpll, 33 pll_s3c2440_mpll,
34 pll_2550xx, 34 pll_2550xx,
35 pll_2650xx, 35 pll_2650xx,
36 pll_1450x,
37 pll_1451x,
38 pll_1452x,
39 pll_1460x,
36}; 40};
37 41
38#define PLL_35XX_RATE(_rate, _m, _p, _s) \ 42#define PLL_35XX_RATE(_rate, _m, _p, _s) \
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index deab84d9f37d..4bda54095a16 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -11,9 +11,13 @@
11 * clock framework for Samsung platforms. 11 * clock framework for Samsung platforms.
12*/ 12*/
13 13
14#include <linux/of_address.h>
14#include <linux/syscore_ops.h> 15#include <linux/syscore_ops.h>
16
15#include "clk.h" 17#include "clk.h"
16 18
19static LIST_HEAD(clock_reg_cache_list);
20
17void samsung_clk_save(void __iomem *base, 21void samsung_clk_save(void __iomem *base,
18 struct samsung_clk_reg_dump *rd, 22 struct samsung_clk_reg_dump *rd,
19 unsigned int num_regs) 23 unsigned int num_regs)
@@ -281,7 +285,6 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
281 * obtain the clock speed of all external fixed clock sources from device 285 * obtain the clock speed of all external fixed clock sources from device
282 * tree and register it 286 * tree and register it
283 */ 287 */
284#ifdef CONFIG_OF
285void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx, 288void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
286 struct samsung_fixed_rate_clock *fixed_rate_clk, 289 struct samsung_fixed_rate_clock *fixed_rate_clk,
287 unsigned int nr_fixed_rate_clk, 290 unsigned int nr_fixed_rate_clk,
@@ -298,7 +301,6 @@ void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
298 } 301 }
299 samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk); 302 samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk);
300} 303}
301#endif
302 304
303/* utility function to get the rate of a specified clock */ 305/* utility function to get the rate of a specified clock */
304unsigned long _get_rate(const char *clk_name) 306unsigned long _get_rate(const char *clk_name)
@@ -313,3 +315,99 @@ unsigned long _get_rate(const char *clk_name)
313 315
314 return clk_get_rate(clk); 316 return clk_get_rate(clk);
315} 317}
318
319#ifdef CONFIG_PM_SLEEP
320static int samsung_clk_suspend(void)
321{
322 struct samsung_clock_reg_cache *reg_cache;
323
324 list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
325 samsung_clk_save(reg_cache->reg_base, reg_cache->rdump,
326 reg_cache->rd_num);
327 return 0;
328}
329
330static void samsung_clk_resume(void)
331{
332 struct samsung_clock_reg_cache *reg_cache;
333
334 list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
335 samsung_clk_restore(reg_cache->reg_base, reg_cache->rdump,
336 reg_cache->rd_num);
337}
338
339static struct syscore_ops samsung_clk_syscore_ops = {
340 .suspend = samsung_clk_suspend,
341 .resume = samsung_clk_resume,
342};
343
344static void samsung_clk_sleep_init(void __iomem *reg_base,
345 const unsigned long *rdump,
346 unsigned long nr_rdump)
347{
348 struct samsung_clock_reg_cache *reg_cache;
349
350 reg_cache = kzalloc(sizeof(struct samsung_clock_reg_cache),
351 GFP_KERNEL);
352 if (!reg_cache)
353 panic("could not allocate register reg_cache.\n");
354 reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);
355
356 if (!reg_cache->rdump)
357 panic("could not allocate register dump storage.\n");
358
359 if (list_empty(&clock_reg_cache_list))
360 register_syscore_ops(&samsung_clk_syscore_ops);
361
362 reg_cache->reg_base = reg_base;
363 reg_cache->rd_num = nr_rdump;
364 list_add_tail(&reg_cache->node, &clock_reg_cache_list);
365}
366
367#else
368static void samsung_clk_sleep_init(void __iomem *reg_base,
369 const unsigned long *rdump,
370 unsigned long nr_rdump) {}
371#endif
372
373/*
374 * Common function which registers plls, muxes, dividers and gates
375 * for each CMU. It also add CMU register list to register cache.
376 */
377void __init samsung_cmu_register_one(struct device_node *np,
378 struct samsung_cmu_info *cmu)
379{
380 void __iomem *reg_base;
381 struct samsung_clk_provider *ctx;
382
383 reg_base = of_iomap(np, 0);
384 if (!reg_base)
385 panic("%s: failed to map registers\n", __func__);
386
387 ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
388 if (!ctx)
389 panic("%s: unable to alllocate ctx\n", __func__);
390
391 if (cmu->pll_clks)
392 samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
393 reg_base);
394 if (cmu->mux_clks)
395 samsung_clk_register_mux(ctx, cmu->mux_clks,
396 cmu->nr_mux_clks);
397 if (cmu->div_clks)
398 samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
399 if (cmu->gate_clks)
400 samsung_clk_register_gate(ctx, cmu->gate_clks,
401 cmu->nr_gate_clks);
402 if (cmu->fixed_clks)
403 samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
404 cmu->nr_fixed_clks);
405 if (cmu->fixed_factor_clks)
406 samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks,
407 cmu->nr_fixed_factor_clks);
408 if (cmu->clk_regs)
409 samsung_clk_sleep_init(reg_base, cmu->clk_regs,
410 cmu->nr_clk_regs);
411
412 samsung_clk_of_add_provider(np, ctx);
413}
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 66ab36b5cef1..8acabe1f32c4 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -13,19 +13,15 @@
13#ifndef __SAMSUNG_CLK_H 13#ifndef __SAMSUNG_CLK_H
14#define __SAMSUNG_CLK_H 14#define __SAMSUNG_CLK_H
15 15
16#include <linux/clk.h>
17#include <linux/clkdev.h> 16#include <linux/clkdev.h>
18#include <linux/io.h>
19#include <linux/clk-provider.h> 17#include <linux/clk-provider.h>
20#include <linux/of.h>
21#include <linux/of_address.h>
22#include "clk-pll.h" 18#include "clk-pll.h"
23 19
24/** 20/**
25 * struct samsung_clk_provider: information about clock provider 21 * struct samsung_clk_provider: information about clock provider
26 * @reg_base: virtual address for the register base. 22 * @reg_base: virtual address for the register base.
27 * @clk_data: holds clock related data like clk* and number of clocks. 23 * @clk_data: holds clock related data like clk* and number of clocks.
28 * @lock: maintains exclusion bwtween callbacks for a given clock-provider. 24 * @lock: maintains exclusion between callbacks for a given clock-provider.
29 */ 25 */
30struct samsung_clk_provider { 26struct samsung_clk_provider {
31 void __iomem *reg_base; 27 void __iomem *reg_base;
@@ -324,6 +320,40 @@ struct samsung_pll_clock {
324 __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \ 320 __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \
325 _lock, _con, _rtable, _alias) 321 _lock, _con, _rtable, _alias)
326 322
323struct samsung_clock_reg_cache {
324 struct list_head node;
325 void __iomem *reg_base;
326 struct samsung_clk_reg_dump *rdump;
327 unsigned int rd_num;
328};
329
330struct samsung_cmu_info {
331 /* list of pll clocks and respective count */
332 struct samsung_pll_clock *pll_clks;
333 unsigned int nr_pll_clks;
334 /* list of mux clocks and respective count */
335 struct samsung_mux_clock *mux_clks;
336 unsigned int nr_mux_clks;
337 /* list of div clocks and respective count */
338 struct samsung_div_clock *div_clks;
339 unsigned int nr_div_clks;
340 /* list of gate clocks and respective count */
341 struct samsung_gate_clock *gate_clks;
342 unsigned int nr_gate_clks;
343 /* list of fixed clocks and respective count */
344 struct samsung_fixed_rate_clock *fixed_clks;
345 unsigned int nr_fixed_clks;
346 /* list of fixed factor clocks and respective count */
347 struct samsung_fixed_factor_clock *fixed_factor_clks;
348 unsigned int nr_fixed_factor_clks;
349 /* total number of clocks with IDs assigned*/
350 unsigned int nr_clk_ids;
351
352 /* list and number of clocks registers */
353 unsigned long *clk_regs;
354 unsigned int nr_clk_regs;
355};
356
327extern struct samsung_clk_provider *__init samsung_clk_init( 357extern struct samsung_clk_provider *__init samsung_clk_init(
328 struct device_node *np, void __iomem *base, 358 struct device_node *np, void __iomem *base,
329 unsigned long nr_clks); 359 unsigned long nr_clks);
@@ -362,6 +392,9 @@ extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
362 struct samsung_pll_clock *pll_list, 392 struct samsung_pll_clock *pll_list,
363 unsigned int nr_clk, void __iomem *base); 393 unsigned int nr_clk, void __iomem *base);
364 394
395extern void __init samsung_cmu_register_one(struct device_node *,
396 struct samsung_cmu_info *);
397
365extern unsigned long _get_rate(const char *clk_name); 398extern unsigned long _get_rate(const char *clk_name);
366 399
367extern void samsung_clk_save(void __iomem *base, 400extern void samsung_clk_save(void __iomem *base,
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
index f065f694cb65..639241e31e03 100644
--- a/drivers/clk/shmobile/clk-div6.c
+++ b/drivers/clk/shmobile/clk-div6.c
@@ -32,6 +32,9 @@ struct div6_clock {
32 struct clk_hw hw; 32 struct clk_hw hw;
33 void __iomem *reg; 33 void __iomem *reg;
34 unsigned int div; 34 unsigned int div;
35 u32 src_shift;
36 u32 src_width;
37 u8 *parents;
35}; 38};
36 39
37#define to_div6_clock(_hw) container_of(_hw, struct div6_clock, hw) 40#define to_div6_clock(_hw) container_of(_hw, struct div6_clock, hw)
@@ -39,8 +42,11 @@ struct div6_clock {
39static int cpg_div6_clock_enable(struct clk_hw *hw) 42static int cpg_div6_clock_enable(struct clk_hw *hw)
40{ 43{
41 struct div6_clock *clock = to_div6_clock(hw); 44 struct div6_clock *clock = to_div6_clock(hw);
45 u32 val;
42 46
43 clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg); 47 val = (clk_readl(clock->reg) & ~(CPG_DIV6_DIV_MASK | CPG_DIV6_CKSTP))
48 | CPG_DIV6_DIV(clock->div - 1);
49 clk_writel(val, clock->reg);
44 50
45 return 0; 51 return 0;
46} 52}
@@ -52,7 +58,7 @@ static void cpg_div6_clock_disable(struct clk_hw *hw)
52 /* DIV6 clocks require the divisor field to be non-zero when stopping 58 /* DIV6 clocks require the divisor field to be non-zero when stopping
53 * the clock. 59 * the clock.
54 */ 60 */
55 clk_writel(CPG_DIV6_CKSTP | CPG_DIV6_DIV(CPG_DIV6_DIV_MASK), 61 clk_writel(clk_readl(clock->reg) | CPG_DIV6_CKSTP | CPG_DIV6_DIV_MASK,
56 clock->reg); 62 clock->reg);
57} 63}
58 64
@@ -94,12 +100,53 @@ static int cpg_div6_clock_set_rate(struct clk_hw *hw, unsigned long rate,
94{ 100{
95 struct div6_clock *clock = to_div6_clock(hw); 101 struct div6_clock *clock = to_div6_clock(hw);
96 unsigned int div = cpg_div6_clock_calc_div(rate, parent_rate); 102 unsigned int div = cpg_div6_clock_calc_div(rate, parent_rate);
103 u32 val;
97 104
98 clock->div = div; 105 clock->div = div;
99 106
107 val = clk_readl(clock->reg) & ~CPG_DIV6_DIV_MASK;
100 /* Only program the new divisor if the clock isn't stopped. */ 108 /* Only program the new divisor if the clock isn't stopped. */
101 if (!(clk_readl(clock->reg) & CPG_DIV6_CKSTP)) 109 if (!(val & CPG_DIV6_CKSTP))
102 clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg); 110 clk_writel(val | CPG_DIV6_DIV(clock->div - 1), clock->reg);
111
112 return 0;
113}
114
115static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
116{
117 struct div6_clock *clock = to_div6_clock(hw);
118 unsigned int i;
119 u8 hw_index;
120
121 if (clock->src_width == 0)
122 return 0;
123
124 hw_index = (clk_readl(clock->reg) >> clock->src_shift) &
125 (BIT(clock->src_width) - 1);
126 for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
127 if (clock->parents[i] == hw_index)
128 return i;
129 }
130
131 pr_err("%s: %s DIV6 clock set to invalid parent %u\n",
132 __func__, __clk_get_name(hw->clk), hw_index);
133 return 0;
134}
135
136static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
137{
138 struct div6_clock *clock = to_div6_clock(hw);
139 u8 hw_index;
140 u32 mask;
141
142 if (index >= __clk_get_num_parents(hw->clk))
143 return -EINVAL;
144
145 mask = ~((BIT(clock->src_width) - 1) << clock->src_shift);
146 hw_index = clock->parents[index];
147
148 clk_writel((clk_readl(clock->reg) & mask) |
149 (hw_index << clock->src_shift), clock->reg);
103 150
104 return 0; 151 return 0;
105} 152}
@@ -108,6 +155,8 @@ static const struct clk_ops cpg_div6_clock_ops = {
108 .enable = cpg_div6_clock_enable, 155 .enable = cpg_div6_clock_enable,
109 .disable = cpg_div6_clock_disable, 156 .disable = cpg_div6_clock_disable,
110 .is_enabled = cpg_div6_clock_is_enabled, 157 .is_enabled = cpg_div6_clock_is_enabled,
158 .get_parent = cpg_div6_clock_get_parent,
159 .set_parent = cpg_div6_clock_set_parent,
111 .recalc_rate = cpg_div6_clock_recalc_rate, 160 .recalc_rate = cpg_div6_clock_recalc_rate,
112 .round_rate = cpg_div6_clock_round_rate, 161 .round_rate = cpg_div6_clock_round_rate,
113 .set_rate = cpg_div6_clock_set_rate, 162 .set_rate = cpg_div6_clock_set_rate,
@@ -115,20 +164,33 @@ static const struct clk_ops cpg_div6_clock_ops = {
115 164
116static void __init cpg_div6_clock_init(struct device_node *np) 165static void __init cpg_div6_clock_init(struct device_node *np)
117{ 166{
167 unsigned int num_parents, valid_parents;
168 const char **parent_names;
118 struct clk_init_data init; 169 struct clk_init_data init;
119 struct div6_clock *clock; 170 struct div6_clock *clock;
120 const char *parent_name;
121 const char *name; 171 const char *name;
122 struct clk *clk; 172 struct clk *clk;
173 unsigned int i;
123 int ret; 174 int ret;
124 175
125 clock = kzalloc(sizeof(*clock), GFP_KERNEL); 176 clock = kzalloc(sizeof(*clock), GFP_KERNEL);
126 if (!clock) { 177 if (!clock)
127 pr_err("%s: failed to allocate %s DIV6 clock\n", 178 return;
179
180 num_parents = of_clk_get_parent_count(np);
181 if (num_parents < 1) {
182 pr_err("%s: no parent found for %s DIV6 clock\n",
128 __func__, np->name); 183 __func__, np->name);
129 return; 184 return;
130 } 185 }
131 186
187 clock->parents = kmalloc_array(num_parents, sizeof(*clock->parents),
188 GFP_KERNEL);
189 parent_names = kmalloc_array(num_parents, sizeof(*parent_names),
190 GFP_KERNEL);
191 if (!parent_names)
192 return;
193
132 /* Remap the clock register and read the divisor. Disabling the 194 /* Remap the clock register and read the divisor. Disabling the
133 * clock overwrites the divisor, so we need to cache its value for the 195 * clock overwrites the divisor, so we need to cache its value for the
134 * enable operation. 196 * enable operation.
@@ -150,9 +212,34 @@ static void __init cpg_div6_clock_init(struct device_node *np)
150 goto error; 212 goto error;
151 } 213 }
152 214
153 parent_name = of_clk_get_parent_name(np, 0); 215
154 if (parent_name == NULL) { 216 for (i = 0, valid_parents = 0; i < num_parents; i++) {
155 pr_err("%s: failed to get %s DIV6 clock parent name\n", 217 const char *name = of_clk_get_parent_name(np, i);
218
219 if (name) {
220 parent_names[valid_parents] = name;
221 clock->parents[valid_parents] = i;
222 valid_parents++;
223 }
224 }
225
226 switch (num_parents) {
227 case 1:
228 /* fixed parent clock */
229 clock->src_shift = clock->src_width = 0;
230 break;
231 case 4:
232 /* clock with EXSRC bits 6-7 */
233 clock->src_shift = 6;
234 clock->src_width = 2;
235 break;
236 case 8:
237 /* VCLK with EXSRC bits 12-14 */
238 clock->src_shift = 12;
239 clock->src_width = 3;
240 break;
241 default:
242 pr_err("%s: invalid number of parents for DIV6 clock %s\n",
156 __func__, np->name); 243 __func__, np->name);
157 goto error; 244 goto error;
158 } 245 }
@@ -161,8 +248,8 @@ static void __init cpg_div6_clock_init(struct device_node *np)
161 init.name = name; 248 init.name = name;
162 init.ops = &cpg_div6_clock_ops; 249 init.ops = &cpg_div6_clock_ops;
163 init.flags = CLK_IS_BASIC; 250 init.flags = CLK_IS_BASIC;
164 init.parent_names = &parent_name; 251 init.parent_names = parent_names;
165 init.num_parents = 1; 252 init.num_parents = valid_parents;
166 253
167 clock->hw.init = &init; 254 clock->hw.init = &init;
168 255
@@ -175,11 +262,13 @@ static void __init cpg_div6_clock_init(struct device_node *np)
175 262
176 of_clk_add_provider(np, of_clk_src_simple_get, clk); 263 of_clk_add_provider(np, of_clk_src_simple_get, clk);
177 264
265 kfree(parent_names);
178 return; 266 return;
179 267
180error: 268error:
181 if (clock->reg) 269 if (clock->reg)
182 iounmap(clock->reg); 270 iounmap(clock->reg);
271 kfree(parent_names);
183 kfree(clock); 272 kfree(clock);
184} 273}
185CLK_OF_DECLARE(cpg_div6_clk, "renesas,cpg-div6-clock", cpg_div6_clock_init); 274CLK_OF_DECLARE(cpg_div6_clk, "renesas,cpg-div6-clock", cpg_div6_clock_init);
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index 7ddc2b553846..a66953c0f430 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -7,6 +7,7 @@ obj-y += clk-a10-hosc.o
7obj-y += clk-a20-gmac.o 7obj-y += clk-a20-gmac.o
8obj-y += clk-mod0.o 8obj-y += clk-mod0.o
9obj-y += clk-sun8i-mbus.o 9obj-y += clk-sun8i-mbus.o
10obj-y += clk-sun9i-core.o
10 11
11obj-$(CONFIG_MFD_SUN6I_PRCM) += \ 12obj-$(CONFIG_MFD_SUN6I_PRCM) += \
12 clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \ 13 clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 5296fd6dd7b3..0dcf4f205fb8 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -53,6 +53,11 @@ static DEFINE_SPINLOCK(gmac_lock);
53#define SUN7I_A20_GMAC_MASK 0x3 53#define SUN7I_A20_GMAC_MASK 0x3
54#define SUN7I_A20_GMAC_PARENTS 2 54#define SUN7I_A20_GMAC_PARENTS 2
55 55
56static u32 sun7i_a20_gmac_mux_table[SUN7I_A20_GMAC_PARENTS] = {
57 0x00, /* Select mii_phy_tx_clk */
58 0x02, /* Select gmac_int_tx_clk */
59};
60
56static void __init sun7i_a20_gmac_clk_setup(struct device_node *node) 61static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
57{ 62{
58 struct clk *clk; 63 struct clk *clk;
@@ -90,7 +95,7 @@ static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
90 gate->lock = &gmac_lock; 95 gate->lock = &gmac_lock;
91 mux->reg = reg; 96 mux->reg = reg;
92 mux->mask = SUN7I_A20_GMAC_MASK; 97 mux->mask = SUN7I_A20_GMAC_MASK;
93 mux->flags = CLK_MUX_INDEX_BIT; 98 mux->table = sun7i_a20_gmac_mux_table;
94 mux->lock = &gmac_lock; 99 mux->lock = &gmac_lock;
95 100
96 clk = clk_register_composite(NULL, clk_name, 101 clk = clk_register_composite(NULL, clk_name,
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index f83ba097126c..62e08fb58554 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -81,7 +81,7 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
81 81
82static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate, 82static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
83 unsigned long *best_parent_rate, 83 unsigned long *best_parent_rate,
84 struct clk **best_parent_p) 84 struct clk_hw **best_parent_p)
85{ 85{
86 struct clk *clk = hw->clk, *parent, *best_parent = NULL; 86 struct clk *clk = hw->clk, *parent, *best_parent = NULL;
87 int i, num_parents; 87 int i, num_parents;
@@ -108,7 +108,7 @@ static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
108 } 108 }
109 109
110 if (best_parent) 110 if (best_parent)
111 *best_parent_p = best_parent; 111 *best_parent_p = __clk_get_hw(best_parent);
112 *best_parent_rate = best; 112 *best_parent_rate = best;
113 113
114 return best_child_rate; 114 return best_child_rate;
@@ -224,7 +224,7 @@ struct clk * __init sunxi_factors_register(struct device_node *node,
224 /* set up gate properties */ 224 /* set up gate properties */
225 mux->reg = reg; 225 mux->reg = reg;
226 mux->shift = data->mux; 226 mux->shift = data->mux;
227 mux->mask = SUNXI_FACTORS_MUX_MASK; 227 mux->mask = data->muxmask;
228 mux->lock = factors->lock; 228 mux->lock = factors->lock;
229 mux_hw = &mux->hw; 229 mux_hw = &mux->hw;
230 } 230 }
diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h
index 9913840018d3..912238fde132 100644
--- a/drivers/clk/sunxi/clk-factors.h
+++ b/drivers/clk/sunxi/clk-factors.h
@@ -7,8 +7,6 @@
7 7
8#define SUNXI_FACTORS_NOT_APPLICABLE (0) 8#define SUNXI_FACTORS_NOT_APPLICABLE (0)
9 9
10#define SUNXI_FACTORS_MUX_MASK 0x3
11
12struct clk_factors_config { 10struct clk_factors_config {
13 u8 nshift; 11 u8 nshift;
14 u8 nwidth; 12 u8 nwidth;
@@ -24,6 +22,7 @@ struct clk_factors_config {
24struct factors_data { 22struct factors_data {
25 int enable; 23 int enable;
26 int mux; 24 int mux;
25 int muxmask;
27 struct clk_factors_config *table; 26 struct clk_factors_config *table;
28 void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p); 27 void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
29 const char *name; 28 const char *name;
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index 4a563850ee6e..da0524eaee94 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -70,6 +70,7 @@ static struct clk_factors_config sun4i_a10_mod0_config = {
70static const struct factors_data sun4i_a10_mod0_data __initconst = { 70static const struct factors_data sun4i_a10_mod0_data __initconst = {
71 .enable = 31, 71 .enable = 31,
72 .mux = 24, 72 .mux = 24,
73 .muxmask = BIT(1) | BIT(0),
73 .table = &sun4i_a10_mod0_config, 74 .table = &sun4i_a10_mod0_config,
74 .getter = sun4i_a10_get_mod0_factors, 75 .getter = sun4i_a10_get_mod0_factors,
75}; 76};
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index acca53290be2..3d282fb8f85c 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -46,7 +46,7 @@ static unsigned long ar100_recalc_rate(struct clk_hw *hw,
46 46
47static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate, 47static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
48 unsigned long *best_parent_rate, 48 unsigned long *best_parent_rate,
49 struct clk **best_parent_clk) 49 struct clk_hw **best_parent_clk)
50{ 50{
51 int nparents = __clk_get_num_parents(hw->clk); 51 int nparents = __clk_get_num_parents(hw->clk);
52 long best_rate = -EINVAL; 52 long best_rate = -EINVAL;
@@ -100,7 +100,7 @@ static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
100 100
101 tmp_rate = (parent_rate >> shift) / div; 101 tmp_rate = (parent_rate >> shift) / div;
102 if (!*best_parent_clk || tmp_rate > best_rate) { 102 if (!*best_parent_clk || tmp_rate > best_rate) {
103 *best_parent_clk = parent; 103 *best_parent_clk = __clk_get_hw(parent);
104 *best_parent_rate = parent_rate; 104 *best_parent_rate = parent_rate;
105 best_rate = tmp_rate; 105 best_rate = tmp_rate;
106 } 106 }
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 8e49b44cee41..ef49786eefd3 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -60,6 +60,7 @@ static struct clk_factors_config sun8i_a23_mbus_config = {
60static const struct factors_data sun8i_a23_mbus_data __initconst = { 60static const struct factors_data sun8i_a23_mbus_data __initconst = {
61 .enable = 31, 61 .enable = 31,
62 .mux = 24, 62 .mux = 24,
63 .muxmask = BIT(1) | BIT(0),
63 .table = &sun8i_a23_mbus_config, 64 .table = &sun8i_a23_mbus_config,
64 .getter = sun8i_a23_get_mbus_factors, 65 .getter = sun8i_a23_get_mbus_factors,
65}; 66};
diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c
new file mode 100644
index 000000000000..3cb9036d91bb
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun9i-core.c
@@ -0,0 +1,271 @@
1/*
2 * Copyright 2014 Chen-Yu Tsai
3 *
4 * Chen-Yu Tsai <wens@csie.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/clk-provider.h>
18#include <linux/clkdev.h>
19#include <linux/of.h>
20#include <linux/of_address.h>
21#include <linux/log2.h>
22
23#include "clk-factors.h"
24
25
26/**
27 * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL1
28 * PLL4 rate is calculated as follows
29 * rate = (parent_rate * n >> p) / (m + 1);
30 * parent_rate is always 24Mhz
31 *
32 * p and m are named div1 and div2 in Allwinner's SDK
33 */
34
35static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
36 u8 *n, u8 *k, u8 *m, u8 *p)
37{
38 int div;
39
40 /* Normalize value to a 6M multiple */
41 div = DIV_ROUND_UP(*freq, 6000000);
42
43 /* divs above 256 cannot be odd */
44 if (div > 256)
45 div = round_up(div, 2);
46
47 /* divs above 512 must be a multiple of 4 */
48 if (div > 512)
49 div = round_up(div, 4);
50
51 *freq = 6000000 * div;
52
53 /* we were called to round the frequency, we can now return */
54 if (n == NULL)
55 return;
56
57 /* p will be 1 for divs under 512 */
58 if (div < 512)
59 *p = 1;
60 else
61 *p = 0;
62
63 /* m will be 1 if div is odd */
64 if (div & 1)
65 *m = 1;
66 else
67 *m = 0;
68
69 /* calculate a suitable n based on m and p */
70 *n = div / (*p + 1) / (*m + 1);
71}
72
73static struct clk_factors_config sun9i_a80_pll4_config = {
74 .mshift = 18,
75 .mwidth = 1,
76 .nshift = 8,
77 .nwidth = 8,
78 .pshift = 16,
79 .pwidth = 1,
80};
81
82static const struct factors_data sun9i_a80_pll4_data __initconst = {
83 .enable = 31,
84 .table = &sun9i_a80_pll4_config,
85 .getter = sun9i_a80_get_pll4_factors,
86};
87
88static DEFINE_SPINLOCK(sun9i_a80_pll4_lock);
89
90static void __init sun9i_a80_pll4_setup(struct device_node *node)
91{
92 sunxi_factors_register(node, &sun9i_a80_pll4_data, &sun9i_a80_pll4_lock);
93}
94CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup);
95
96
97/**
98 * sun9i_a80_get_gt_factors() - calculates m factor for GT
99 * GT rate is calculated as follows
100 * rate = parent_rate / (m + 1);
101 */
102
103static void sun9i_a80_get_gt_factors(u32 *freq, u32 parent_rate,
104 u8 *n, u8 *k, u8 *m, u8 *p)
105{
106 u32 div;
107
108 if (parent_rate < *freq)
109 *freq = parent_rate;
110
111 div = DIV_ROUND_UP(parent_rate, *freq);
112
113 /* maximum divider is 4 */
114 if (div > 4)
115 div = 4;
116
117 *freq = parent_rate / div;
118
119 /* we were called to round the frequency, we can now return */
120 if (!m)
121 return;
122
123 *m = div;
124}
125
126static struct clk_factors_config sun9i_a80_gt_config = {
127 .mshift = 0,
128 .mwidth = 2,
129};
130
131static const struct factors_data sun9i_a80_gt_data __initconst = {
132 .mux = 24,
133 .muxmask = BIT(1) | BIT(0),
134 .table = &sun9i_a80_gt_config,
135 .getter = sun9i_a80_get_gt_factors,
136};
137
138static DEFINE_SPINLOCK(sun9i_a80_gt_lock);
139
140static void __init sun9i_a80_gt_setup(struct device_node *node)
141{
142 struct clk *gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
143 &sun9i_a80_gt_lock);
144
145 /* The GT bus clock needs to be always enabled */
146 __clk_get(gt);
147 clk_prepare_enable(gt);
148}
149CLK_OF_DECLARE(sun9i_a80_gt, "allwinner,sun9i-a80-gt-clk", sun9i_a80_gt_setup);
150
151
152/**
153 * sun9i_a80_get_ahb_factors() - calculates p factor for AHB0/1/2
154 * AHB rate is calculated as follows
155 * rate = parent_rate >> p;
156 */
157
158static void sun9i_a80_get_ahb_factors(u32 *freq, u32 parent_rate,
159 u8 *n, u8 *k, u8 *m, u8 *p)
160{
161 u32 _p;
162
163 if (parent_rate < *freq)
164 *freq = parent_rate;
165
166 _p = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
167
168 /* maximum p is 3 */
169 if (_p > 3)
170 _p = 3;
171
172 *freq = parent_rate >> _p;
173
174 /* we were called to round the frequency, we can now return */
175 if (!p)
176 return;
177
178 *p = _p;
179}
180
181static struct clk_factors_config sun9i_a80_ahb_config = {
182 .pshift = 0,
183 .pwidth = 2,
184};
185
186static const struct factors_data sun9i_a80_ahb_data __initconst = {
187 .mux = 24,
188 .muxmask = BIT(1) | BIT(0),
189 .table = &sun9i_a80_ahb_config,
190 .getter = sun9i_a80_get_ahb_factors,
191};
192
193static DEFINE_SPINLOCK(sun9i_a80_ahb_lock);
194
195static void __init sun9i_a80_ahb_setup(struct device_node *node)
196{
197 sunxi_factors_register(node, &sun9i_a80_ahb_data, &sun9i_a80_ahb_lock);
198}
199CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup);
200
201
202static const struct factors_data sun9i_a80_apb0_data __initconst = {
203 .mux = 24,
204 .muxmask = BIT(0),
205 .table = &sun9i_a80_ahb_config,
206 .getter = sun9i_a80_get_ahb_factors,
207};
208
209static DEFINE_SPINLOCK(sun9i_a80_apb0_lock);
210
211static void __init sun9i_a80_apb0_setup(struct device_node *node)
212{
213 sunxi_factors_register(node, &sun9i_a80_apb0_data, &sun9i_a80_apb0_lock);
214}
215CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup);
216
217
218/**
219 * sun9i_a80_get_apb1_factors() - calculates m, p factors for APB1
220 * APB1 rate is calculated as follows
221 * rate = (parent_rate >> p) / (m + 1);
222 */
223
224static void sun9i_a80_get_apb1_factors(u32 *freq, u32 parent_rate,
225 u8 *n, u8 *k, u8 *m, u8 *p)
226{
227 u32 div;
228 u8 calcm, calcp;
229
230 if (parent_rate < *freq)
231 *freq = parent_rate;
232
233 div = DIV_ROUND_UP(parent_rate, *freq);
234
235 /* Highest possible divider is 256 (p = 3, m = 31) */
236 if (div > 256)
237 div = 256;
238
239 calcp = order_base_2(div);
240 calcm = (parent_rate >> calcp) - 1;
241 *freq = (parent_rate >> calcp) / (calcm + 1);
242
243 /* we were called to round the frequency, we can now return */
244 if (n == NULL)
245 return;
246
247 *m = calcm;
248 *p = calcp;
249}
250
251static struct clk_factors_config sun9i_a80_apb1_config = {
252 .mshift = 0,
253 .mwidth = 5,
254 .pshift = 16,
255 .pwidth = 2,
256};
257
258static const struct factors_data sun9i_a80_apb1_data __initconst = {
259 .mux = 24,
260 .muxmask = BIT(0),
261 .table = &sun9i_a80_apb1_config,
262 .getter = sun9i_a80_get_apb1_factors,
263};
264
265static DEFINE_SPINLOCK(sun9i_a80_apb1_lock);
266
267static void __init sun9i_a80_apb1_setup(struct device_node *node)
268{
269 sunxi_factors_register(node, &sun9i_a80_apb1_data, &sun9i_a80_apb1_lock);
270}
271CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup);
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index d5dc951264ca..570202582dcf 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -245,9 +245,9 @@ static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
245} 245}
246 246
247/** 247/**
248 * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6 248 * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6x2
249 * PLL6 rate is calculated as follows 249 * PLL6x2 rate is calculated as follows
250 * rate = parent_rate * n * (k + 1) / 2 250 * rate = parent_rate * (n + 1) * (k + 1)
251 * parent_rate is always 24Mhz 251 * parent_rate is always 24Mhz
252 */ 252 */
253 253
@@ -256,13 +256,7 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
256{ 256{
257 u8 div; 257 u8 div;
258 258
259 /* 259 /* Normalize value to a parent_rate multiple (24M) */
260 * We always have 24MHz / 2, so we can just say that our
261 * parent clock is 12MHz.
262 */
263 parent_rate = parent_rate / 2;
264
265 /* Normalize value to a parent_rate multiple (24M / 2) */
266 div = *freq / parent_rate; 260 div = *freq / parent_rate;
267 *freq = parent_rate * div; 261 *freq = parent_rate * div;
268 262
@@ -274,7 +268,7 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
274 if (*k > 3) 268 if (*k > 3)
275 *k = 3; 269 *k = 3;
276 270
277 *n = DIV_ROUND_UP(div, (*k+1)); 271 *n = DIV_ROUND_UP(div, (*k+1)) - 1;
278} 272}
279 273
280/** 274/**
@@ -445,6 +439,7 @@ static struct clk_factors_config sun6i_a31_pll6_config = {
445 .nwidth = 5, 439 .nwidth = 5,
446 .kshift = 4, 440 .kshift = 4,
447 .kwidth = 2, 441 .kwidth = 2,
442 .n_start = 1,
448}; 443};
449 444
450static struct clk_factors_config sun4i_apb1_config = { 445static struct clk_factors_config sun4i_apb1_config = {
@@ -504,9 +499,12 @@ static const struct factors_data sun6i_a31_pll6_data __initconst = {
504 .enable = 31, 499 .enable = 31,
505 .table = &sun6i_a31_pll6_config, 500 .table = &sun6i_a31_pll6_config,
506 .getter = sun6i_a31_get_pll6_factors, 501 .getter = sun6i_a31_get_pll6_factors,
502 .name = "pll6x2",
507}; 503};
508 504
509static const struct factors_data sun4i_apb1_data __initconst = { 505static const struct factors_data sun4i_apb1_data __initconst = {
506 .mux = 24,
507 .muxmask = BIT(1) | BIT(0),
510 .table = &sun4i_apb1_config, 508 .table = &sun4i_apb1_config,
511 .getter = sun4i_get_apb1_factors, 509 .getter = sun4i_get_apb1_factors,
512}; 510};
@@ -514,6 +512,7 @@ static const struct factors_data sun4i_apb1_data __initconst = {
514static const struct factors_data sun7i_a20_out_data __initconst = { 512static const struct factors_data sun7i_a20_out_data __initconst = {
515 .enable = 31, 513 .enable = 31,
516 .mux = 24, 514 .mux = 24,
515 .muxmask = BIT(1) | BIT(0),
517 .table = &sun7i_a20_out_config, 516 .table = &sun7i_a20_out_config,
518 .getter = sun7i_a20_get_out_factors, 517 .getter = sun7i_a20_get_out_factors,
519}; 518};
@@ -544,10 +543,6 @@ static const struct mux_data sun6i_a31_ahb1_mux_data __initconst = {
544 .shift = 12, 543 .shift = 12,
545}; 544};
546 545
547static const struct mux_data sun4i_apb1_mux_data __initconst = {
548 .shift = 24,
549};
550
551static void __init sunxi_mux_clk_setup(struct device_node *node, 546static void __init sunxi_mux_clk_setup(struct device_node *node,
552 struct mux_data *data) 547 struct mux_data *data)
553{ 548{
@@ -633,12 +628,6 @@ static const struct div_data sun4i_apb0_data __initconst = {
633 .table = sun4i_apb0_table, 628 .table = sun4i_apb0_table,
634}; 629};
635 630
636static const struct div_data sun6i_a31_apb2_div_data __initconst = {
637 .shift = 0,
638 .pow = 0,
639 .width = 4,
640};
641
642static void __init sunxi_divider_clk_setup(struct device_node *node, 631static void __init sunxi_divider_clk_setup(struct device_node *node,
643 struct div_data *data) 632 struct div_data *data)
644{ 633{
@@ -757,6 +746,18 @@ static const struct gates_data sun8i_a23_ahb1_gates_data __initconst = {
757 .mask = {0x25386742, 0x2505111}, 746 .mask = {0x25386742, 0x2505111},
758}; 747};
759 748
749static const struct gates_data sun9i_a80_ahb0_gates_data __initconst = {
750 .mask = {0xF5F12B},
751};
752
753static const struct gates_data sun9i_a80_ahb1_gates_data __initconst = {
754 .mask = {0x1E20003},
755};
756
757static const struct gates_data sun9i_a80_ahb2_gates_data __initconst = {
758 .mask = {0x9B7},
759};
760
760static const struct gates_data sun4i_apb0_gates_data __initconst = { 761static const struct gates_data sun4i_apb0_gates_data __initconst = {
761 .mask = {0x4EF}, 762 .mask = {0x4EF},
762}; 763};
@@ -773,6 +774,10 @@ static const struct gates_data sun7i_a20_apb0_gates_data __initconst = {
773 .mask = { 0x4ff }, 774 .mask = { 0x4ff },
774}; 775};
775 776
777static const struct gates_data sun9i_a80_apb0_gates_data __initconst = {
778 .mask = {0xEB822},
779};
780
776static const struct gates_data sun4i_apb1_gates_data __initconst = { 781static const struct gates_data sun4i_apb1_gates_data __initconst = {
777 .mask = {0xFF00F7}, 782 .mask = {0xFF00F7},
778}; 783};
@@ -801,6 +806,10 @@ static const struct gates_data sun7i_a20_apb1_gates_data __initconst = {
801 .mask = { 0xff80ff }, 806 .mask = { 0xff80ff },
802}; 807};
803 808
809static const struct gates_data sun9i_a80_apb1_gates_data __initconst = {
810 .mask = {0x3F001F},
811};
812
804static const struct gates_data sun8i_a23_apb2_gates_data __initconst = { 813static const struct gates_data sun8i_a23_apb2_gates_data __initconst = {
805 .mask = {0x1F0007}, 814 .mask = {0x1F0007},
806}; 815};
@@ -893,6 +902,7 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
893 902
894struct divs_data { 903struct divs_data {
895 const struct factors_data *factors; /* data for the factor clock */ 904 const struct factors_data *factors; /* data for the factor clock */
905 int ndivs; /* number of children */
896 struct { 906 struct {
897 u8 fixed; /* is it a fixed divisor? if not... */ 907 u8 fixed; /* is it a fixed divisor? if not... */
898 struct clk_div_table *table; /* is it a table based divisor? */ 908 struct clk_div_table *table; /* is it a table based divisor? */
@@ -912,6 +922,7 @@ static struct clk_div_table pll6_sata_tbl[] = {
912 922
913static const struct divs_data pll5_divs_data __initconst = { 923static const struct divs_data pll5_divs_data __initconst = {
914 .factors = &sun4i_pll5_data, 924 .factors = &sun4i_pll5_data,
925 .ndivs = 2,
915 .div = { 926 .div = {
916 { .shift = 0, .pow = 0, }, /* M, DDR */ 927 { .shift = 0, .pow = 0, }, /* M, DDR */
917 { .shift = 16, .pow = 1, }, /* P, other */ 928 { .shift = 16, .pow = 1, }, /* P, other */
@@ -920,12 +931,21 @@ static const struct divs_data pll5_divs_data __initconst = {
920 931
921static const struct divs_data pll6_divs_data __initconst = { 932static const struct divs_data pll6_divs_data __initconst = {
922 .factors = &sun4i_pll6_data, 933 .factors = &sun4i_pll6_data,
934 .ndivs = 2,
923 .div = { 935 .div = {
924 { .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */ 936 { .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */
925 { .fixed = 2 }, /* P, other */ 937 { .fixed = 2 }, /* P, other */
926 } 938 }
927}; 939};
928 940
941static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
942 .factors = &sun6i_a31_pll6_data,
943 .ndivs = 1,
944 .div = {
945 { .fixed = 2 }, /* normal output */
946 }
947};
948
929/** 949/**
930 * sunxi_divs_clk_setup() - Setup function for leaf divisors on clocks 950 * sunxi_divs_clk_setup() - Setup function for leaf divisors on clocks
931 * 951 *
@@ -950,7 +970,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
950 struct clk_fixed_factor *fix_factor; 970 struct clk_fixed_factor *fix_factor;
951 struct clk_divider *divider; 971 struct clk_divider *divider;
952 void __iomem *reg; 972 void __iomem *reg;
953 int i = 0; 973 int ndivs = SUNXI_DIVS_MAX_QTY, i = 0;
954 int flags, clkflags; 974 int flags, clkflags;
955 975
956 /* Set up factor clock that we will be dividing */ 976 /* Set up factor clock that we will be dividing */
@@ -973,7 +993,11 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
973 * our RAM clock! */ 993 * our RAM clock! */
974 clkflags = !strcmp("pll5", parent) ? 0 : CLK_SET_RATE_PARENT; 994 clkflags = !strcmp("pll5", parent) ? 0 : CLK_SET_RATE_PARENT;
975 995
976 for (i = 0; i < SUNXI_DIVS_MAX_QTY; i++) { 996 /* if number of children known, use it */
997 if (data->ndivs)
998 ndivs = data->ndivs;
999
1000 for (i = 0; i < ndivs; i++) {
977 if (of_property_read_string_index(node, "clock-output-names", 1001 if (of_property_read_string_index(node, "clock-output-names",
978 i, &clk_name) != 0) 1002 i, &clk_name) != 0)
979 break; 1003 break;
@@ -1062,7 +1086,6 @@ static const struct of_device_id clk_factors_match[] __initconst = {
1062 {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,}, 1086 {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
1063 {.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,}, 1087 {.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,},
1064 {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,}, 1088 {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
1065 {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_data,},
1066 {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,}, 1089 {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
1067 {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,}, 1090 {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
1068 {} 1091 {}
@@ -1074,7 +1097,6 @@ static const struct of_device_id clk_div_match[] __initconst = {
1074 {.compatible = "allwinner,sun8i-a23-axi-clk", .data = &sun8i_a23_axi_data,}, 1097 {.compatible = "allwinner,sun8i-a23-axi-clk", .data = &sun8i_a23_axi_data,},
1075 {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,}, 1098 {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,},
1076 {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,}, 1099 {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,},
1077 {.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,},
1078 {} 1100 {}
1079}; 1101};
1080 1102
@@ -1082,13 +1104,13 @@ static const struct of_device_id clk_div_match[] __initconst = {
1082static const struct of_device_id clk_divs_match[] __initconst = { 1104static const struct of_device_id clk_divs_match[] __initconst = {
1083 {.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,}, 1105 {.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,},
1084 {.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,}, 1106 {.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,},
1107 {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_divs_data,},
1085 {} 1108 {}
1086}; 1109};
1087 1110
1088/* Matches for mux clocks */ 1111/* Matches for mux clocks */
1089static const struct of_device_id clk_mux_match[] __initconst = { 1112static const struct of_device_id clk_mux_match[] __initconst = {
1090 {.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,}, 1113 {.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,},
1091 {.compatible = "allwinner,sun4i-a10-apb1-mux-clk", .data = &sun4i_apb1_mux_data,},
1092 {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,}, 1114 {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
1093 {} 1115 {}
1094}; 1116};
@@ -1102,16 +1124,21 @@ static const struct of_device_id clk_gates_match[] __initconst = {
1102 {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,}, 1124 {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
1103 {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,}, 1125 {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
1104 {.compatible = "allwinner,sun8i-a23-ahb1-gates-clk", .data = &sun8i_a23_ahb1_gates_data,}, 1126 {.compatible = "allwinner,sun8i-a23-ahb1-gates-clk", .data = &sun8i_a23_ahb1_gates_data,},
1127 {.compatible = "allwinner,sun9i-a80-ahb0-gates-clk", .data = &sun9i_a80_ahb0_gates_data,},
1128 {.compatible = "allwinner,sun9i-a80-ahb1-gates-clk", .data = &sun9i_a80_ahb1_gates_data,},
1129 {.compatible = "allwinner,sun9i-a80-ahb2-gates-clk", .data = &sun9i_a80_ahb2_gates_data,},
1105 {.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,}, 1130 {.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
1106 {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,}, 1131 {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
1107 {.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,}, 1132 {.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
1108 {.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,}, 1133 {.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,},
1134 {.compatible = "allwinner,sun9i-a80-apb0-gates-clk", .data = &sun9i_a80_apb0_gates_data,},
1109 {.compatible = "allwinner,sun4i-a10-apb1-gates-clk", .data = &sun4i_apb1_gates_data,}, 1135 {.compatible = "allwinner,sun4i-a10-apb1-gates-clk", .data = &sun4i_apb1_gates_data,},
1110 {.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,}, 1136 {.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,},
1111 {.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,}, 1137 {.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
1112 {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,}, 1138 {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
1113 {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,}, 1139 {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
1114 {.compatible = "allwinner,sun8i-a23-apb1-gates-clk", .data = &sun8i_a23_apb1_gates_data,}, 1140 {.compatible = "allwinner,sun8i-a23-apb1-gates-clk", .data = &sun8i_a23_apb1_gates_data,},
1141 {.compatible = "allwinner,sun9i-a80-apb1-gates-clk", .data = &sun9i_a80_apb1_gates_data,},
1115 {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,}, 1142 {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
1116 {.compatible = "allwinner,sun8i-a23-apb2-gates-clk", .data = &sun8i_a23_apb2_gates_data,}, 1143 {.compatible = "allwinner,sun8i-a23-apb2-gates-clk", .data = &sun8i_a23_apb2_gates_data,},
1117 {.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,}, 1144 {.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,},
@@ -1200,3 +1227,9 @@ static void __init sun6i_init_clocks(struct device_node *node)
1200} 1227}
1201CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks); 1228CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
1202CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks); 1229CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
1230
1231static void __init sun9i_init_clocks(struct device_node *node)
1232{
1233 sunxi_init_clocks(NULL, 0);
1234}
1235CLK_OF_DECLARE(sun9i_a80_clk_init, "allwinner,sun9i-a80", sun9i_init_clocks);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 91a488c7cc44..31e8308ba899 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -753,6 +753,7 @@ config I2C_SH7760
753 753
754config I2C_SH_MOBILE 754config I2C_SH_MOBILE
755 tristate "SuperH Mobile I2C Controller" 755 tristate "SuperH Mobile I2C Controller"
756 depends on HAS_DMA
756 depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST 757 depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
757 help 758 help
758 If you say yes to this option, support will be included for the 759 If you say yes to this option, support will be included for the
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 373f6d4e4080..30059c1df2a3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -30,12 +30,12 @@
30#define MV64XXX_I2C_BAUD_DIV_N(val) (val & 0x7) 30#define MV64XXX_I2C_BAUD_DIV_N(val) (val & 0x7)
31#define MV64XXX_I2C_BAUD_DIV_M(val) ((val & 0xf) << 3) 31#define MV64XXX_I2C_BAUD_DIV_M(val) ((val & 0xf) << 3)
32 32
33#define MV64XXX_I2C_REG_CONTROL_ACK 0x00000004 33#define MV64XXX_I2C_REG_CONTROL_ACK BIT(2)
34#define MV64XXX_I2C_REG_CONTROL_IFLG 0x00000008 34#define MV64XXX_I2C_REG_CONTROL_IFLG BIT(3)
35#define MV64XXX_I2C_REG_CONTROL_STOP 0x00000010 35#define MV64XXX_I2C_REG_CONTROL_STOP BIT(4)
36#define MV64XXX_I2C_REG_CONTROL_START 0x00000020 36#define MV64XXX_I2C_REG_CONTROL_START BIT(5)
37#define MV64XXX_I2C_REG_CONTROL_TWSIEN 0x00000040 37#define MV64XXX_I2C_REG_CONTROL_TWSIEN BIT(6)
38#define MV64XXX_I2C_REG_CONTROL_INTEN 0x00000080 38#define MV64XXX_I2C_REG_CONTROL_INTEN BIT(7)
39 39
40/* Ctlr status values */ 40/* Ctlr status values */
41#define MV64XXX_I2C_STATUS_BUS_ERR 0x00 41#define MV64XXX_I2C_STATUS_BUS_ERR 0x00
@@ -68,19 +68,17 @@
68#define MV64XXX_I2C_REG_BRIDGE_TIMING 0xe0 68#define MV64XXX_I2C_REG_BRIDGE_TIMING 0xe0
69 69
70/* Bridge Control values */ 70/* Bridge Control values */
71#define MV64XXX_I2C_BRIDGE_CONTROL_WR 0x00000001 71#define MV64XXX_I2C_BRIDGE_CONTROL_WR BIT(0)
72#define MV64XXX_I2C_BRIDGE_CONTROL_RD 0x00000002 72#define MV64XXX_I2C_BRIDGE_CONTROL_RD BIT(1)
73#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT 2 73#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT 2
74#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT 0x00001000 74#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT BIT(12)
75#define MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT 13 75#define MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT 13
76#define MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT 16 76#define MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT 16
77#define MV64XXX_I2C_BRIDGE_CONTROL_ENABLE 0x00080000 77#define MV64XXX_I2C_BRIDGE_CONTROL_ENABLE BIT(19)
78#define MV64XXX_I2C_BRIDGE_CONTROL_REPEATED_START BIT(20)
78 79
79/* Bridge Status values */ 80/* Bridge Status values */
80#define MV64XXX_I2C_BRIDGE_STATUS_ERROR 0x00000001 81#define MV64XXX_I2C_BRIDGE_STATUS_ERROR BIT(0)
81#define MV64XXX_I2C_STATUS_OFFLOAD_ERROR 0xf0000001
82#define MV64XXX_I2C_STATUS_OFFLOAD_OK 0xf0000000
83
84 82
85/* Driver states */ 83/* Driver states */
86enum { 84enum {
@@ -99,14 +97,12 @@ enum {
99 MV64XXX_I2C_ACTION_INVALID, 97 MV64XXX_I2C_ACTION_INVALID,
100 MV64XXX_I2C_ACTION_CONTINUE, 98 MV64XXX_I2C_ACTION_CONTINUE,
101 MV64XXX_I2C_ACTION_SEND_RESTART, 99 MV64XXX_I2C_ACTION_SEND_RESTART,
102 MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
103 MV64XXX_I2C_ACTION_SEND_ADDR_1, 100 MV64XXX_I2C_ACTION_SEND_ADDR_1,
104 MV64XXX_I2C_ACTION_SEND_ADDR_2, 101 MV64XXX_I2C_ACTION_SEND_ADDR_2,
105 MV64XXX_I2C_ACTION_SEND_DATA, 102 MV64XXX_I2C_ACTION_SEND_DATA,
106 MV64XXX_I2C_ACTION_RCV_DATA, 103 MV64XXX_I2C_ACTION_RCV_DATA,
107 MV64XXX_I2C_ACTION_RCV_DATA_STOP, 104 MV64XXX_I2C_ACTION_RCV_DATA_STOP,
108 MV64XXX_I2C_ACTION_SEND_STOP, 105 MV64XXX_I2C_ACTION_SEND_STOP,
109 MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP,
110}; 106};
111 107
112struct mv64xxx_i2c_regs { 108struct mv64xxx_i2c_regs {
@@ -193,75 +189,6 @@ mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data,
193 } 189 }
194} 190}
195 191
196static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
197{
198 unsigned long data_reg_hi = 0;
199 unsigned long data_reg_lo = 0;
200 unsigned long ctrl_reg;
201 struct i2c_msg *msg = drv_data->msgs;
202
203 if (!drv_data->offload_enabled)
204 return -EOPNOTSUPP;
205
206 /* Only regular transactions can be offloaded */
207 if ((msg->flags & ~(I2C_M_TEN | I2C_M_RD)) != 0)
208 return -EINVAL;
209
210 /* Only 1-8 byte transfers can be offloaded */
211 if (msg->len < 1 || msg->len > 8)
212 return -EINVAL;
213
214 /* Build transaction */
215 ctrl_reg = MV64XXX_I2C_BRIDGE_CONTROL_ENABLE |
216 (msg->addr << MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT);
217
218 if ((msg->flags & I2C_M_TEN) != 0)
219 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT;
220
221 if ((msg->flags & I2C_M_RD) == 0) {
222 u8 local_buf[8] = { 0 };
223
224 memcpy(local_buf, msg->buf, msg->len);
225 data_reg_lo = cpu_to_le32(*((u32 *)local_buf));
226 data_reg_hi = cpu_to_le32(*((u32 *)(local_buf+4)));
227
228 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
229 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
230
231 writel(data_reg_lo,
232 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
233 writel(data_reg_hi,
234 drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
235
236 } else {
237 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_RD |
238 (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT;
239 }
240
241 /* Execute transaction */
242 writel(ctrl_reg, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
243
244 return 0;
245}
246
247static void
248mv64xxx_i2c_update_offload_data(struct mv64xxx_i2c_data *drv_data)
249{
250 struct i2c_msg *msg = drv_data->msg;
251
252 if (msg->flags & I2C_M_RD) {
253 u32 data_reg_lo = readl(drv_data->reg_base +
254 MV64XXX_I2C_REG_RX_DATA_LO);
255 u32 data_reg_hi = readl(drv_data->reg_base +
256 MV64XXX_I2C_REG_RX_DATA_HI);
257 u8 local_buf[8] = { 0 };
258
259 *((u32 *)local_buf) = le32_to_cpu(data_reg_lo);
260 *((u32 *)(local_buf+4)) = le32_to_cpu(data_reg_hi);
261 memcpy(msg->buf, local_buf, msg->len);
262 }
263
264}
265/* 192/*
266 ***************************************************************************** 193 *****************************************************************************
267 * 194 *
@@ -389,16 +316,6 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
389 drv_data->rc = -ENXIO; 316 drv_data->rc = -ENXIO;
390 break; 317 break;
391 318
392 case MV64XXX_I2C_STATUS_OFFLOAD_OK:
393 if (drv_data->send_stop || drv_data->aborting) {
394 drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP;
395 drv_data->state = MV64XXX_I2C_STATE_IDLE;
396 } else {
397 drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_RESTART;
398 drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_RESTART;
399 }
400 break;
401
402 default: 319 default:
403 dev_err(&drv_data->adapter.dev, 320 dev_err(&drv_data->adapter.dev,
404 "mv64xxx_i2c_fsm: Ctlr Error -- state: 0x%x, " 321 "mv64xxx_i2c_fsm: Ctlr Error -- state: 0x%x, "
@@ -419,25 +336,15 @@ static void mv64xxx_i2c_send_start(struct mv64xxx_i2c_data *drv_data)
419 drv_data->aborting = 0; 336 drv_data->aborting = 0;
420 drv_data->rc = 0; 337 drv_data->rc = 0;
421 338
422 /* Can we offload this msg ? */ 339 mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
423 if (mv64xxx_i2c_offload_msg(drv_data) < 0) { 340 writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
424 /* No, switch to standard path */ 341 drv_data->reg_base + drv_data->reg_offsets.control);
425 mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
426 writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
427 drv_data->reg_base + drv_data->reg_offsets.control);
428 }
429} 342}
430 343
431static void 344static void
432mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) 345mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
433{ 346{
434 switch(drv_data->action) { 347 switch(drv_data->action) {
435 case MV64XXX_I2C_ACTION_OFFLOAD_RESTART:
436 mv64xxx_i2c_update_offload_data(drv_data);
437 writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
438 writel(0, drv_data->reg_base +
439 MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
440 /* FALLTHRU */
441 case MV64XXX_I2C_ACTION_SEND_RESTART: 348 case MV64XXX_I2C_ACTION_SEND_RESTART:
442 /* We should only get here if we have further messages */ 349 /* We should only get here if we have further messages */
443 BUG_ON(drv_data->num_msgs == 0); 350 BUG_ON(drv_data->num_msgs == 0);
@@ -518,16 +425,71 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
518 drv_data->block = 0; 425 drv_data->block = 0;
519 wake_up(&drv_data->waitq); 426 wake_up(&drv_data->waitq);
520 break; 427 break;
428 }
429}
521 430
522 case MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP: 431static void
523 mv64xxx_i2c_update_offload_data(drv_data); 432mv64xxx_i2c_read_offload_rx_data(struct mv64xxx_i2c_data *drv_data,
524 writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL); 433 struct i2c_msg *msg)
525 writel(0, drv_data->reg_base + 434{
526 MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE); 435 u32 buf[2];
527 drv_data->block = 0; 436
528 wake_up(&drv_data->waitq); 437 buf[0] = readl(drv_data->reg_base + MV64XXX_I2C_REG_RX_DATA_LO);
529 break; 438 buf[1] = readl(drv_data->reg_base + MV64XXX_I2C_REG_RX_DATA_HI);
439
440 memcpy(msg->buf, buf, msg->len);
441}
442
443static int
444mv64xxx_i2c_intr_offload(struct mv64xxx_i2c_data *drv_data)
445{
446 u32 cause, status;
447
448 cause = readl(drv_data->reg_base +
449 MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
450 if (!cause)
451 return IRQ_NONE;
452
453 status = readl(drv_data->reg_base +
454 MV64XXX_I2C_REG_BRIDGE_STATUS);
455
456 if (status & MV64XXX_I2C_BRIDGE_STATUS_ERROR) {
457 drv_data->rc = -EIO;
458 goto out;
459 }
460
461 drv_data->rc = 0;
462
463 /*
464 * Transaction is a one message read transaction, read data
465 * for this message.
466 */
467 if (drv_data->num_msgs == 1 && drv_data->msgs[0].flags & I2C_M_RD) {
468 mv64xxx_i2c_read_offload_rx_data(drv_data, drv_data->msgs);
469 drv_data->msgs++;
470 drv_data->num_msgs--;
471 }
472 /*
473 * Transaction is a two messages write/read transaction, read
474 * data for the second (read) message.
475 */
476 else if (drv_data->num_msgs == 2 &&
477 !(drv_data->msgs[0].flags & I2C_M_RD) &&
478 drv_data->msgs[1].flags & I2C_M_RD) {
479 mv64xxx_i2c_read_offload_rx_data(drv_data, drv_data->msgs + 1);
480 drv_data->msgs += 2;
481 drv_data->num_msgs -= 2;
530 } 482 }
483
484out:
485 writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
486 writel(0, drv_data->reg_base +
487 MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
488 drv_data->block = 0;
489
490 wake_up(&drv_data->waitq);
491
492 return IRQ_HANDLED;
531} 493}
532 494
533static irqreturn_t 495static irqreturn_t
@@ -540,20 +502,9 @@ mv64xxx_i2c_intr(int irq, void *dev_id)
540 502
541 spin_lock_irqsave(&drv_data->lock, flags); 503 spin_lock_irqsave(&drv_data->lock, flags);
542 504
543 if (drv_data->offload_enabled) { 505 if (drv_data->offload_enabled)
544 while (readl(drv_data->reg_base + 506 rc = mv64xxx_i2c_intr_offload(drv_data);
545 MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE)) { 507
546 int reg_status = readl(drv_data->reg_base +
547 MV64XXX_I2C_REG_BRIDGE_STATUS);
548 if (reg_status & MV64XXX_I2C_BRIDGE_STATUS_ERROR)
549 status = MV64XXX_I2C_STATUS_OFFLOAD_ERROR;
550 else
551 status = MV64XXX_I2C_STATUS_OFFLOAD_OK;
552 mv64xxx_i2c_fsm(drv_data, status);
553 mv64xxx_i2c_do_action(drv_data);
554 rc = IRQ_HANDLED;
555 }
556 }
557 while (readl(drv_data->reg_base + drv_data->reg_offsets.control) & 508 while (readl(drv_data->reg_base + drv_data->reg_offsets.control) &
558 MV64XXX_I2C_REG_CONTROL_IFLG) { 509 MV64XXX_I2C_REG_CONTROL_IFLG) {
559 status = readl(drv_data->reg_base + drv_data->reg_offsets.status); 510 status = readl(drv_data->reg_base + drv_data->reg_offsets.status);
@@ -635,6 +586,117 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
635 return drv_data->rc; 586 return drv_data->rc;
636} 587}
637 588
589static void
590mv64xxx_i2c_prepare_tx(struct mv64xxx_i2c_data *drv_data)
591{
592 struct i2c_msg *msg = drv_data->msgs;
593 u32 buf[2];
594
595 memcpy(buf, msg->buf, msg->len);
596
597 writel(buf[0], drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
598 writel(buf[1], drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
599}
600
601static int
602mv64xxx_i2c_offload_xfer(struct mv64xxx_i2c_data *drv_data)
603{
604 struct i2c_msg *msgs = drv_data->msgs;
605 int num = drv_data->num_msgs;
606 unsigned long ctrl_reg;
607 unsigned long flags;
608
609 spin_lock_irqsave(&drv_data->lock, flags);
610
611 /* Build transaction */
612 ctrl_reg = MV64XXX_I2C_BRIDGE_CONTROL_ENABLE |
613 (msgs[0].addr << MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT);
614
615 if (msgs[0].flags & I2C_M_TEN)
616 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT;
617
618 /* Single write message transaction */
619 if (num == 1 && !(msgs[0].flags & I2C_M_RD)) {
620 size_t len = msgs[0].len - 1;
621
622 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
623 (len << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT);
624 mv64xxx_i2c_prepare_tx(drv_data);
625 }
626 /* Single read message transaction */
627 else if (num == 1 && msgs[0].flags & I2C_M_RD) {
628 size_t len = msgs[0].len - 1;
629
630 ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_RD |
631 (len << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT);
632 }
633 /*
634 * Transaction with one write and one read message. This is
635 * guaranteed by the mv64xx_i2c_can_offload() checks.
636 */
637 else if (num == 2) {
638 size_t lentx = msgs[0].len - 1;
639 size_t lenrx = msgs[1].len - 1;
640
641 ctrl_reg |=
642 MV64XXX_I2C_BRIDGE_CONTROL_RD |
643 MV64XXX_I2C_BRIDGE_CONTROL_WR |
644 (lentx << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT) |
645 (lenrx << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT) |
646 MV64XXX_I2C_BRIDGE_CONTROL_REPEATED_START;
647 mv64xxx_i2c_prepare_tx(drv_data);
648 }
649
650 /* Execute transaction */
651 drv_data->block = 1;
652 writel(ctrl_reg, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
653 spin_unlock_irqrestore(&drv_data->lock, flags);
654
655 mv64xxx_i2c_wait_for_completion(drv_data);
656
657 return drv_data->rc;
658}
659
660static bool
661mv64xxx_i2c_valid_offload_sz(struct i2c_msg *msg)
662{
663 return msg->len <= 8 && msg->len >= 1;
664}
665
666static bool
667mv64xxx_i2c_can_offload(struct mv64xxx_i2c_data *drv_data)
668{
669 struct i2c_msg *msgs = drv_data->msgs;
670 int num = drv_data->num_msgs;
671
672 return false;
673
674 if (!drv_data->offload_enabled)
675 return false;
676
677 /*
678 * We can offload a transaction consisting of a single
679 * message, as long as the message has a length between 1 and
680 * 8 bytes.
681 */
682 if (num == 1 && mv64xxx_i2c_valid_offload_sz(msgs))
683 return true;
684
685 /*
686 * We can offload a transaction consisting of two messages, if
687 * the first is a write and a second is a read, and both have
688 * a length between 1 and 8 bytes.
689 */
690 if (num == 2 &&
691 mv64xxx_i2c_valid_offload_sz(msgs) &&
692 mv64xxx_i2c_valid_offload_sz(msgs + 1) &&
693 !(msgs[0].flags & I2C_M_RD) &&
694 msgs[1].flags & I2C_M_RD)
695 return true;
696
697 return false;
698}
699
638/* 700/*
639 ***************************************************************************** 701 *****************************************************************************
640 * 702 *
@@ -658,7 +720,11 @@ mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
658 drv_data->msgs = msgs; 720 drv_data->msgs = msgs;
659 drv_data->num_msgs = num; 721 drv_data->num_msgs = num;
660 722
661 rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[0], num == 1); 723 if (mv64xxx_i2c_can_offload(drv_data))
724 rc = mv64xxx_i2c_offload_xfer(drv_data);
725 else
726 rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[0], num == 1);
727
662 if (rc < 0) 728 if (rc < 0)
663 ret = rc; 729 ret = rc;
664 730
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index d7efaf44868b..440d5dbc8b5f 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -140,6 +140,7 @@ struct sh_mobile_i2c_data {
140 int sr; 140 int sr;
141 bool send_stop; 141 bool send_stop;
142 142
143 struct resource *res;
143 struct dma_chan *dma_tx; 144 struct dma_chan *dma_tx;
144 struct dma_chan *dma_rx; 145 struct dma_chan *dma_rx;
145 struct scatterlist sg; 146 struct scatterlist sg;
@@ -539,6 +540,42 @@ static void sh_mobile_i2c_dma_callback(void *data)
539 iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE); 540 iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
540} 541}
541 542
543static struct dma_chan *sh_mobile_i2c_request_dma_chan(struct device *dev,
544 enum dma_transfer_direction dir, dma_addr_t port_addr)
545{
546 struct dma_chan *chan;
547 struct dma_slave_config cfg;
548 char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
549 int ret;
550
551 chan = dma_request_slave_channel_reason(dev, chan_name);
552 if (IS_ERR(chan)) {
553 ret = PTR_ERR(chan);
554 dev_dbg(dev, "request_channel failed for %s (%d)\n", chan_name, ret);
555 return chan;
556 }
557
558 memset(&cfg, 0, sizeof(cfg));
559 cfg.direction = dir;
560 if (dir == DMA_MEM_TO_DEV) {
561 cfg.dst_addr = port_addr;
562 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
563 } else {
564 cfg.src_addr = port_addr;
565 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
566 }
567
568 ret = dmaengine_slave_config(chan, &cfg);
569 if (ret) {
570 dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret);
571 dma_release_channel(chan);
572 return ERR_PTR(ret);
573 }
574
575 dev_dbg(dev, "got DMA channel for %s\n", chan_name);
576 return chan;
577}
578
542static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd) 579static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
543{ 580{
544 bool read = pd->msg->flags & I2C_M_RD; 581 bool read = pd->msg->flags & I2C_M_RD;
@@ -548,7 +585,16 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
548 dma_addr_t dma_addr; 585 dma_addr_t dma_addr;
549 dma_cookie_t cookie; 586 dma_cookie_t cookie;
550 587
551 if (!chan) 588 if (PTR_ERR(chan) == -EPROBE_DEFER) {
589 if (read)
590 chan = pd->dma_rx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM,
591 pd->res->start + ICDR);
592 else
593 chan = pd->dma_tx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV,
594 pd->res->start + ICDR);
595 }
596
597 if (IS_ERR(chan))
552 return; 598 return;
553 599
554 dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir); 600 dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir);
@@ -747,56 +793,16 @@ static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
747}; 793};
748MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids); 794MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
749 795
750static int sh_mobile_i2c_request_dma_chan(struct device *dev, enum dma_transfer_direction dir,
751 dma_addr_t port_addr, struct dma_chan **chan_ptr)
752{
753 struct dma_chan *chan;
754 struct dma_slave_config cfg;
755 char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
756 int ret;
757
758 *chan_ptr = NULL;
759
760 chan = dma_request_slave_channel_reason(dev, chan_name);
761 if (IS_ERR(chan)) {
762 ret = PTR_ERR(chan);
763 dev_dbg(dev, "request_channel failed for %s (%d)\n", chan_name, ret);
764 return ret;
765 }
766
767 memset(&cfg, 0, sizeof(cfg));
768 cfg.direction = dir;
769 if (dir == DMA_MEM_TO_DEV) {
770 cfg.dst_addr = port_addr;
771 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
772 } else {
773 cfg.src_addr = port_addr;
774 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
775 }
776
777 ret = dmaengine_slave_config(chan, &cfg);
778 if (ret) {
779 dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret);
780 dma_release_channel(chan);
781 return ret;
782 }
783
784 *chan_ptr = chan;
785
786 dev_dbg(dev, "got DMA channel for %s\n", chan_name);
787 return 0;
788}
789
790static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd) 796static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd)
791{ 797{
792 if (pd->dma_tx) { 798 if (!IS_ERR(pd->dma_tx)) {
793 dma_release_channel(pd->dma_tx); 799 dma_release_channel(pd->dma_tx);
794 pd->dma_tx = NULL; 800 pd->dma_tx = ERR_PTR(-EPROBE_DEFER);
795 } 801 }
796 802
797 if (pd->dma_rx) { 803 if (!IS_ERR(pd->dma_rx)) {
798 dma_release_channel(pd->dma_rx); 804 dma_release_channel(pd->dma_rx);
799 pd->dma_rx = NULL; 805 pd->dma_rx = ERR_PTR(-EPROBE_DEFER);
800 } 806 }
801} 807}
802 808
@@ -849,6 +855,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
849 855
850 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 856 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
851 857
858 pd->res = res;
852 pd->reg = devm_ioremap_resource(&dev->dev, res); 859 pd->reg = devm_ioremap_resource(&dev->dev, res);
853 if (IS_ERR(pd->reg)) 860 if (IS_ERR(pd->reg))
854 return PTR_ERR(pd->reg); 861 return PTR_ERR(pd->reg);
@@ -889,17 +896,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
889 /* Init DMA */ 896 /* Init DMA */
890 sg_init_table(&pd->sg, 1); 897 sg_init_table(&pd->sg, 1);
891 pd->dma_direction = DMA_NONE; 898 pd->dma_direction = DMA_NONE;
892 ret = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM, 899 pd->dma_rx = pd->dma_tx = ERR_PTR(-EPROBE_DEFER);
893 res->start + ICDR, &pd->dma_rx);
894 if (ret == -EPROBE_DEFER)
895 return ret;
896
897 ret = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV,
898 res->start + ICDR, &pd->dma_tx);
899 if (ret == -EPROBE_DEFER) {
900 sh_mobile_i2c_release_dma(pd);
901 return ret;
902 }
903 900
904 /* Enable Runtime PM for this device. 901 /* Enable Runtime PM for this device.
905 * 902 *
@@ -937,8 +934,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
937 return ret; 934 return ret;
938 } 935 }
939 936
940 dev_info(&dev->dev, "I2C adapter %d, bus speed %lu Hz, DMA=%c\n", 937 dev_info(&dev->dev, "I2C adapter %d, bus speed %lu Hz\n", adap->nr, pd->bus_speed);
941 adap->nr, pd->bus_speed, (pd->dma_rx || pd->dma_tx) ? 'y' : 'n');
942 938
943 return 0; 939 return 0;
944} 940}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 10641b7816f4..dafb3c531f96 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -22,7 +22,6 @@
22#include <linux/socket.h> 22#include <linux/socket.h>
23#include <linux/in.h> 23#include <linux/in.h>
24#include <linux/in6.h> 24#include <linux/in6.h>
25#include <linux/llist.h>
26#include <rdma/ib_verbs.h> 25#include <rdma/ib_verbs.h>
27#include <rdma/rdma_cm.h> 26#include <rdma/rdma_cm.h>
28#include <target/target_core_base.h> 27#include <target/target_core_base.h>
@@ -36,11 +35,17 @@
36#define ISERT_MAX_CONN 8 35#define ISERT_MAX_CONN 8
37#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN) 36#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
38#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN) 37#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
38#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
39 ISERT_MAX_CONN)
40
41int isert_debug_level = 0;
42module_param_named(debug_level, isert_debug_level, int, 0644);
43MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
39 44
40static DEFINE_MUTEX(device_list_mutex); 45static DEFINE_MUTEX(device_list_mutex);
41static LIST_HEAD(device_list); 46static LIST_HEAD(device_list);
42static struct workqueue_struct *isert_rx_wq;
43static struct workqueue_struct *isert_comp_wq; 47static struct workqueue_struct *isert_comp_wq;
48static struct workqueue_struct *isert_release_wq;
44 49
45static void 50static void
46isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn); 51isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
@@ -54,19 +59,32 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
54 struct isert_rdma_wr *wr); 59 struct isert_rdma_wr *wr);
55static int 60static int
56isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd); 61isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
62static int
63isert_rdma_post_recvl(struct isert_conn *isert_conn);
64static int
65isert_rdma_accept(struct isert_conn *isert_conn);
66struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
67
68static inline bool
69isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
70{
71 return (conn->pi_support &&
72 cmd->prot_op != TARGET_PROT_NORMAL);
73}
74
57 75
58static void 76static void
59isert_qp_event_callback(struct ib_event *e, void *context) 77isert_qp_event_callback(struct ib_event *e, void *context)
60{ 78{
61 struct isert_conn *isert_conn = (struct isert_conn *)context; 79 struct isert_conn *isert_conn = (struct isert_conn *)context;
62 80
63 pr_err("isert_qp_event_callback event: %d\n", e->event); 81 isert_err("conn %p event: %d\n", isert_conn, e->event);
64 switch (e->event) { 82 switch (e->event) {
65 case IB_EVENT_COMM_EST: 83 case IB_EVENT_COMM_EST:
66 rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST); 84 rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
67 break; 85 break;
68 case IB_EVENT_QP_LAST_WQE_REACHED: 86 case IB_EVENT_QP_LAST_WQE_REACHED:
69 pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n"); 87 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
70 break; 88 break;
71 default: 89 default:
72 break; 90 break;
@@ -80,39 +98,41 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
80 98
81 ret = ib_query_device(ib_dev, devattr); 99 ret = ib_query_device(ib_dev, devattr);
82 if (ret) { 100 if (ret) {
83 pr_err("ib_query_device() failed: %d\n", ret); 101 isert_err("ib_query_device() failed: %d\n", ret);
84 return ret; 102 return ret;
85 } 103 }
86 pr_debug("devattr->max_sge: %d\n", devattr->max_sge); 104 isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
87 pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd); 105 isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
88 106
89 return 0; 107 return 0;
90} 108}
91 109
92static int 110static int
93isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id, 111isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
94 u8 protection)
95{ 112{
96 struct isert_device *device = isert_conn->conn_device; 113 struct isert_device *device = isert_conn->conn_device;
97 struct ib_qp_init_attr attr; 114 struct ib_qp_init_attr attr;
98 int ret, index, min_index = 0; 115 struct isert_comp *comp;
116 int ret, i, min = 0;
99 117
100 mutex_lock(&device_list_mutex); 118 mutex_lock(&device_list_mutex);
101 for (index = 0; index < device->cqs_used; index++) 119 for (i = 0; i < device->comps_used; i++)
102 if (device->cq_active_qps[index] < 120 if (device->comps[i].active_qps <
103 device->cq_active_qps[min_index]) 121 device->comps[min].active_qps)
104 min_index = index; 122 min = i;
105 device->cq_active_qps[min_index]++; 123 comp = &device->comps[min];
106 pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index); 124 comp->active_qps++;
125 isert_info("conn %p, using comp %p min_index: %d\n",
126 isert_conn, comp, min);
107 mutex_unlock(&device_list_mutex); 127 mutex_unlock(&device_list_mutex);
108 128
109 memset(&attr, 0, sizeof(struct ib_qp_init_attr)); 129 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
110 attr.event_handler = isert_qp_event_callback; 130 attr.event_handler = isert_qp_event_callback;
111 attr.qp_context = isert_conn; 131 attr.qp_context = isert_conn;
112 attr.send_cq = device->dev_tx_cq[min_index]; 132 attr.send_cq = comp->cq;
113 attr.recv_cq = device->dev_rx_cq[min_index]; 133 attr.recv_cq = comp->cq;
114 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS; 134 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
115 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; 135 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
116 /* 136 /*
117 * FIXME: Use devattr.max_sge - 2 for max_send_sge as 137 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
118 * work-around for RDMA_READs with ConnectX-2. 138 * work-around for RDMA_READs with ConnectX-2.
@@ -126,29 +146,29 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
126 attr.cap.max_recv_sge = 1; 146 attr.cap.max_recv_sge = 1;
127 attr.sq_sig_type = IB_SIGNAL_REQ_WR; 147 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
128 attr.qp_type = IB_QPT_RC; 148 attr.qp_type = IB_QPT_RC;
129 if (protection) 149 if (device->pi_capable)
130 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; 150 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
131 151
132 pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
133 cma_id->device);
134 pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
135 isert_conn->conn_pd->device);
136
137 ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr); 152 ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
138 if (ret) { 153 if (ret) {
139 pr_err("rdma_create_qp failed for cma_id %d\n", ret); 154 isert_err("rdma_create_qp failed for cma_id %d\n", ret);
140 return ret; 155 goto err;
141 } 156 }
142 isert_conn->conn_qp = cma_id->qp; 157 isert_conn->conn_qp = cma_id->qp;
143 pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
144 158
145 return 0; 159 return 0;
160err:
161 mutex_lock(&device_list_mutex);
162 comp->active_qps--;
163 mutex_unlock(&device_list_mutex);
164
165 return ret;
146} 166}
147 167
148static void 168static void
149isert_cq_event_callback(struct ib_event *e, void *context) 169isert_cq_event_callback(struct ib_event *e, void *context)
150{ 170{
151 pr_debug("isert_cq_event_callback event: %d\n", e->event); 171 isert_dbg("event: %d\n", e->event);
152} 172}
153 173
154static int 174static int
@@ -182,6 +202,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
182 } 202 }
183 203
184 isert_conn->conn_rx_desc_head = 0; 204 isert_conn->conn_rx_desc_head = 0;
205
185 return 0; 206 return 0;
186 207
187dma_map_fail: 208dma_map_fail:
@@ -193,6 +214,8 @@ dma_map_fail:
193 kfree(isert_conn->conn_rx_descs); 214 kfree(isert_conn->conn_rx_descs);
194 isert_conn->conn_rx_descs = NULL; 215 isert_conn->conn_rx_descs = NULL;
195fail: 216fail:
217 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
218
196 return -ENOMEM; 219 return -ENOMEM;
197} 220}
198 221
@@ -216,27 +239,23 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
216 isert_conn->conn_rx_descs = NULL; 239 isert_conn->conn_rx_descs = NULL;
217} 240}
218 241
219static void isert_cq_tx_work(struct work_struct *); 242static void isert_cq_work(struct work_struct *);
220static void isert_cq_tx_callback(struct ib_cq *, void *); 243static void isert_cq_callback(struct ib_cq *, void *);
221static void isert_cq_rx_work(struct work_struct *);
222static void isert_cq_rx_callback(struct ib_cq *, void *);
223 244
224static int 245static int
225isert_create_device_ib_res(struct isert_device *device) 246isert_create_device_ib_res(struct isert_device *device)
226{ 247{
227 struct ib_device *ib_dev = device->ib_device; 248 struct ib_device *ib_dev = device->ib_device;
228 struct isert_cq_desc *cq_desc;
229 struct ib_device_attr *dev_attr; 249 struct ib_device_attr *dev_attr;
230 int ret = 0, i, j; 250 int ret = 0, i;
231 int max_rx_cqe, max_tx_cqe; 251 int max_cqe;
232 252
233 dev_attr = &device->dev_attr; 253 dev_attr = &device->dev_attr;
234 ret = isert_query_device(ib_dev, dev_attr); 254 ret = isert_query_device(ib_dev, dev_attr);
235 if (ret) 255 if (ret)
236 return ret; 256 return ret;
237 257
238 max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe); 258 max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
239 max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
240 259
241 /* asign function handlers */ 260 /* asign function handlers */
242 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && 261 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
@@ -254,55 +273,38 @@ isert_create_device_ib_res(struct isert_device *device)
254 device->pi_capable = dev_attr->device_cap_flags & 273 device->pi_capable = dev_attr->device_cap_flags &
255 IB_DEVICE_SIGNATURE_HANDOVER ? true : false; 274 IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
256 275
257 device->cqs_used = min_t(int, num_online_cpus(), 276 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
258 device->ib_device->num_comp_vectors); 277 device->ib_device->num_comp_vectors));
259 device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used); 278 isert_info("Using %d CQs, %s supports %d vectors support "
260 pr_debug("Using %d CQs, device %s supports %d vectors support " 279 "Fast registration %d pi_capable %d\n",
261 "Fast registration %d pi_capable %d\n", 280 device->comps_used, device->ib_device->name,
262 device->cqs_used, device->ib_device->name, 281 device->ib_device->num_comp_vectors, device->use_fastreg,
263 device->ib_device->num_comp_vectors, device->use_fastreg, 282 device->pi_capable);
264 device->pi_capable); 283
265 device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) * 284 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
266 device->cqs_used, GFP_KERNEL); 285 GFP_KERNEL);
267 if (!device->cq_desc) { 286 if (!device->comps) {
268 pr_err("Unable to allocate device->cq_desc\n"); 287 isert_err("Unable to allocate completion contexts\n");
269 return -ENOMEM; 288 return -ENOMEM;
270 } 289 }
271 cq_desc = device->cq_desc;
272
273 for (i = 0; i < device->cqs_used; i++) {
274 cq_desc[i].device = device;
275 cq_desc[i].cq_index = i;
276
277 INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
278 device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
279 isert_cq_rx_callback,
280 isert_cq_event_callback,
281 (void *)&cq_desc[i],
282 max_rx_cqe, i);
283 if (IS_ERR(device->dev_rx_cq[i])) {
284 ret = PTR_ERR(device->dev_rx_cq[i]);
285 device->dev_rx_cq[i] = NULL;
286 goto out_cq;
287 }
288 290
289 INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work); 291 for (i = 0; i < device->comps_used; i++) {
290 device->dev_tx_cq[i] = ib_create_cq(device->ib_device, 292 struct isert_comp *comp = &device->comps[i];
291 isert_cq_tx_callback,
292 isert_cq_event_callback,
293 (void *)&cq_desc[i],
294 max_tx_cqe, i);
295 if (IS_ERR(device->dev_tx_cq[i])) {
296 ret = PTR_ERR(device->dev_tx_cq[i]);
297 device->dev_tx_cq[i] = NULL;
298 goto out_cq;
299 }
300 293
301 ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP); 294 comp->device = device;
302 if (ret) 295 INIT_WORK(&comp->work, isert_cq_work);
296 comp->cq = ib_create_cq(device->ib_device,
297 isert_cq_callback,
298 isert_cq_event_callback,
299 (void *)comp,
300 max_cqe, i);
301 if (IS_ERR(comp->cq)) {
302 ret = PTR_ERR(comp->cq);
303 comp->cq = NULL;
303 goto out_cq; 304 goto out_cq;
305 }
304 306
305 ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP); 307 ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
306 if (ret) 308 if (ret)
307 goto out_cq; 309 goto out_cq;
308 } 310 }
@@ -310,19 +312,15 @@ isert_create_device_ib_res(struct isert_device *device)
310 return 0; 312 return 0;
311 313
312out_cq: 314out_cq:
313 for (j = 0; j < i; j++) { 315 for (i = 0; i < device->comps_used; i++) {
314 cq_desc = &device->cq_desc[j]; 316 struct isert_comp *comp = &device->comps[i];
315 317
316 if (device->dev_rx_cq[j]) { 318 if (comp->cq) {
317 cancel_work_sync(&cq_desc->cq_rx_work); 319 cancel_work_sync(&comp->work);
318 ib_destroy_cq(device->dev_rx_cq[j]); 320 ib_destroy_cq(comp->cq);
319 }
320 if (device->dev_tx_cq[j]) {
321 cancel_work_sync(&cq_desc->cq_tx_work);
322 ib_destroy_cq(device->dev_tx_cq[j]);
323 } 321 }
324 } 322 }
325 kfree(device->cq_desc); 323 kfree(device->comps);
326 324
327 return ret; 325 return ret;
328} 326}
@@ -330,21 +328,18 @@ out_cq:
330static void 328static void
331isert_free_device_ib_res(struct isert_device *device) 329isert_free_device_ib_res(struct isert_device *device)
332{ 330{
333 struct isert_cq_desc *cq_desc;
334 int i; 331 int i;
335 332
336 for (i = 0; i < device->cqs_used; i++) { 333 isert_info("device %p\n", device);
337 cq_desc = &device->cq_desc[i];
338 334
339 cancel_work_sync(&cq_desc->cq_rx_work); 335 for (i = 0; i < device->comps_used; i++) {
340 cancel_work_sync(&cq_desc->cq_tx_work); 336 struct isert_comp *comp = &device->comps[i];
341 ib_destroy_cq(device->dev_rx_cq[i]);
342 ib_destroy_cq(device->dev_tx_cq[i]);
343 device->dev_rx_cq[i] = NULL;
344 device->dev_tx_cq[i] = NULL;
345 }
346 337
347 kfree(device->cq_desc); 338 cancel_work_sync(&comp->work);
339 ib_destroy_cq(comp->cq);
340 comp->cq = NULL;
341 }
342 kfree(device->comps);
348} 343}
349 344
350static void 345static void
@@ -352,6 +347,7 @@ isert_device_try_release(struct isert_device *device)
352{ 347{
353 mutex_lock(&device_list_mutex); 348 mutex_lock(&device_list_mutex);
354 device->refcount--; 349 device->refcount--;
350 isert_info("device %p refcount %d\n", device, device->refcount);
355 if (!device->refcount) { 351 if (!device->refcount) {
356 isert_free_device_ib_res(device); 352 isert_free_device_ib_res(device);
357 list_del(&device->dev_node); 353 list_del(&device->dev_node);
@@ -370,6 +366,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
370 list_for_each_entry(device, &device_list, dev_node) { 366 list_for_each_entry(device, &device_list, dev_node) {
371 if (device->ib_device->node_guid == cma_id->device->node_guid) { 367 if (device->ib_device->node_guid == cma_id->device->node_guid) {
372 device->refcount++; 368 device->refcount++;
369 isert_info("Found iser device %p refcount %d\n",
370 device, device->refcount);
373 mutex_unlock(&device_list_mutex); 371 mutex_unlock(&device_list_mutex);
374 return device; 372 return device;
375 } 373 }
@@ -393,6 +391,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
393 391
394 device->refcount++; 392 device->refcount++;
395 list_add_tail(&device->dev_node, &device_list); 393 list_add_tail(&device->dev_node, &device_list);
394 isert_info("Created a new iser device %p refcount %d\n",
395 device, device->refcount);
396 mutex_unlock(&device_list_mutex); 396 mutex_unlock(&device_list_mutex);
397 397
398 return device; 398 return device;
@@ -407,7 +407,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
407 if (list_empty(&isert_conn->conn_fr_pool)) 407 if (list_empty(&isert_conn->conn_fr_pool))
408 return; 408 return;
409 409
410 pr_debug("Freeing conn %p fastreg pool", isert_conn); 410 isert_info("Freeing conn %p fastreg pool", isert_conn);
411 411
412 list_for_each_entry_safe(fr_desc, tmp, 412 list_for_each_entry_safe(fr_desc, tmp,
413 &isert_conn->conn_fr_pool, list) { 413 &isert_conn->conn_fr_pool, list) {
@@ -425,87 +425,97 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
425 } 425 }
426 426
427 if (i < isert_conn->conn_fr_pool_size) 427 if (i < isert_conn->conn_fr_pool_size)
428 pr_warn("Pool still has %d regions registered\n", 428 isert_warn("Pool still has %d regions registered\n",
429 isert_conn->conn_fr_pool_size - i); 429 isert_conn->conn_fr_pool_size - i);
430} 430}
431 431
432static int 432static int
433isert_create_pi_ctx(struct fast_reg_descriptor *desc,
434 struct ib_device *device,
435 struct ib_pd *pd)
436{
437 struct ib_mr_init_attr mr_init_attr;
438 struct pi_context *pi_ctx;
439 int ret;
440
441 pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
442 if (!pi_ctx) {
443 isert_err("Failed to allocate pi context\n");
444 return -ENOMEM;
445 }
446
447 pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
448 ISCSI_ISER_SG_TABLESIZE);
449 if (IS_ERR(pi_ctx->prot_frpl)) {
450 isert_err("Failed to allocate prot frpl err=%ld\n",
451 PTR_ERR(pi_ctx->prot_frpl));
452 ret = PTR_ERR(pi_ctx->prot_frpl);
453 goto err_pi_ctx;
454 }
455
456 pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
457 if (IS_ERR(pi_ctx->prot_mr)) {
458 isert_err("Failed to allocate prot frmr err=%ld\n",
459 PTR_ERR(pi_ctx->prot_mr));
460 ret = PTR_ERR(pi_ctx->prot_mr);
461 goto err_prot_frpl;
462 }
463 desc->ind |= ISERT_PROT_KEY_VALID;
464
465 memset(&mr_init_attr, 0, sizeof(mr_init_attr));
466 mr_init_attr.max_reg_descriptors = 2;
467 mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
468 pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
469 if (IS_ERR(pi_ctx->sig_mr)) {
470 isert_err("Failed to allocate signature enabled mr err=%ld\n",
471 PTR_ERR(pi_ctx->sig_mr));
472 ret = PTR_ERR(pi_ctx->sig_mr);
473 goto err_prot_mr;
474 }
475
476 desc->pi_ctx = pi_ctx;
477 desc->ind |= ISERT_SIG_KEY_VALID;
478 desc->ind &= ~ISERT_PROTECTED;
479
480 return 0;
481
482err_prot_mr:
483 ib_dereg_mr(desc->pi_ctx->prot_mr);
484err_prot_frpl:
485 ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
486err_pi_ctx:
487 kfree(desc->pi_ctx);
488
489 return ret;
490}
491
492static int
433isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd, 493isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
434 struct fast_reg_descriptor *fr_desc, u8 protection) 494 struct fast_reg_descriptor *fr_desc)
435{ 495{
436 int ret; 496 int ret;
437 497
438 fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, 498 fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
439 ISCSI_ISER_SG_TABLESIZE); 499 ISCSI_ISER_SG_TABLESIZE);
440 if (IS_ERR(fr_desc->data_frpl)) { 500 if (IS_ERR(fr_desc->data_frpl)) {
441 pr_err("Failed to allocate data frpl err=%ld\n", 501 isert_err("Failed to allocate data frpl err=%ld\n",
442 PTR_ERR(fr_desc->data_frpl)); 502 PTR_ERR(fr_desc->data_frpl));
443 return PTR_ERR(fr_desc->data_frpl); 503 return PTR_ERR(fr_desc->data_frpl);
444 } 504 }
445 505
446 fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE); 506 fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
447 if (IS_ERR(fr_desc->data_mr)) { 507 if (IS_ERR(fr_desc->data_mr)) {
448 pr_err("Failed to allocate data frmr err=%ld\n", 508 isert_err("Failed to allocate data frmr err=%ld\n",
449 PTR_ERR(fr_desc->data_mr)); 509 PTR_ERR(fr_desc->data_mr));
450 ret = PTR_ERR(fr_desc->data_mr); 510 ret = PTR_ERR(fr_desc->data_mr);
451 goto err_data_frpl; 511 goto err_data_frpl;
452 } 512 }
453 pr_debug("Create fr_desc %p page_list %p\n",
454 fr_desc, fr_desc->data_frpl->page_list);
455 fr_desc->ind |= ISERT_DATA_KEY_VALID; 513 fr_desc->ind |= ISERT_DATA_KEY_VALID;
456 514
457 if (protection) { 515 isert_dbg("Created fr_desc %p\n", fr_desc);
458 struct ib_mr_init_attr mr_init_attr = {0};
459 struct pi_context *pi_ctx;
460
461 fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
462 if (!fr_desc->pi_ctx) {
463 pr_err("Failed to allocate pi context\n");
464 ret = -ENOMEM;
465 goto err_data_mr;
466 }
467 pi_ctx = fr_desc->pi_ctx;
468
469 pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
470 ISCSI_ISER_SG_TABLESIZE);
471 if (IS_ERR(pi_ctx->prot_frpl)) {
472 pr_err("Failed to allocate prot frpl err=%ld\n",
473 PTR_ERR(pi_ctx->prot_frpl));
474 ret = PTR_ERR(pi_ctx->prot_frpl);
475 goto err_pi_ctx;
476 }
477
478 pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
479 if (IS_ERR(pi_ctx->prot_mr)) {
480 pr_err("Failed to allocate prot frmr err=%ld\n",
481 PTR_ERR(pi_ctx->prot_mr));
482 ret = PTR_ERR(pi_ctx->prot_mr);
483 goto err_prot_frpl;
484 }
485 fr_desc->ind |= ISERT_PROT_KEY_VALID;
486
487 mr_init_attr.max_reg_descriptors = 2;
488 mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
489 pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
490 if (IS_ERR(pi_ctx->sig_mr)) {
491 pr_err("Failed to allocate signature enabled mr err=%ld\n",
492 PTR_ERR(pi_ctx->sig_mr));
493 ret = PTR_ERR(pi_ctx->sig_mr);
494 goto err_prot_mr;
495 }
496 fr_desc->ind |= ISERT_SIG_KEY_VALID;
497 }
498 fr_desc->ind &= ~ISERT_PROTECTED;
499 516
500 return 0; 517 return 0;
501err_prot_mr: 518
502 ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
503err_prot_frpl:
504 ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
505err_pi_ctx:
506 kfree(fr_desc->pi_ctx);
507err_data_mr:
508 ib_dereg_mr(fr_desc->data_mr);
509err_data_frpl: 519err_data_frpl:
510 ib_free_fast_reg_page_list(fr_desc->data_frpl); 520 ib_free_fast_reg_page_list(fr_desc->data_frpl);
511 521
@@ -513,7 +523,7 @@ err_data_frpl:
513} 523}
514 524
515static int 525static int
516isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support) 526isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
517{ 527{
518 struct fast_reg_descriptor *fr_desc; 528 struct fast_reg_descriptor *fr_desc;
519 struct isert_device *device = isert_conn->conn_device; 529 struct isert_device *device = isert_conn->conn_device;
@@ -531,16 +541,15 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
531 for (i = 0; i < tag_num; i++) { 541 for (i = 0; i < tag_num; i++) {
532 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL); 542 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
533 if (!fr_desc) { 543 if (!fr_desc) {
534 pr_err("Failed to allocate fast_reg descriptor\n"); 544 isert_err("Failed to allocate fast_reg descriptor\n");
535 ret = -ENOMEM; 545 ret = -ENOMEM;
536 goto err; 546 goto err;
537 } 547 }
538 548
539 ret = isert_create_fr_desc(device->ib_device, 549 ret = isert_create_fr_desc(device->ib_device,
540 isert_conn->conn_pd, fr_desc, 550 isert_conn->conn_pd, fr_desc);
541 pi_support);
542 if (ret) { 551 if (ret) {
543 pr_err("Failed to create fastreg descriptor err=%d\n", 552 isert_err("Failed to create fastreg descriptor err=%d\n",
544 ret); 553 ret);
545 kfree(fr_desc); 554 kfree(fr_desc);
546 goto err; 555 goto err;
@@ -550,7 +559,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
550 isert_conn->conn_fr_pool_size++; 559 isert_conn->conn_fr_pool_size++;
551 } 560 }
552 561
553 pr_debug("Creating conn %p fastreg pool size=%d", 562 isert_dbg("Creating conn %p fastreg pool size=%d",
554 isert_conn, isert_conn->conn_fr_pool_size); 563 isert_conn, isert_conn->conn_fr_pool_size);
555 564
556 return 0; 565 return 0;
@@ -563,47 +572,45 @@ err:
563static int 572static int
564isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 573isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
565{ 574{
566 struct iscsi_np *np = cma_id->context; 575 struct isert_np *isert_np = cma_id->context;
567 struct isert_np *isert_np = np->np_context; 576 struct iscsi_np *np = isert_np->np;
568 struct isert_conn *isert_conn; 577 struct isert_conn *isert_conn;
569 struct isert_device *device; 578 struct isert_device *device;
570 struct ib_device *ib_dev = cma_id->device; 579 struct ib_device *ib_dev = cma_id->device;
571 int ret = 0; 580 int ret = 0;
572 u8 pi_support;
573 581
574 spin_lock_bh(&np->np_thread_lock); 582 spin_lock_bh(&np->np_thread_lock);
575 if (!np->enabled) { 583 if (!np->enabled) {
576 spin_unlock_bh(&np->np_thread_lock); 584 spin_unlock_bh(&np->np_thread_lock);
577 pr_debug("iscsi_np is not enabled, reject connect request\n"); 585 isert_dbg("iscsi_np is not enabled, reject connect request\n");
578 return rdma_reject(cma_id, NULL, 0); 586 return rdma_reject(cma_id, NULL, 0);
579 } 587 }
580 spin_unlock_bh(&np->np_thread_lock); 588 spin_unlock_bh(&np->np_thread_lock);
581 589
582 pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", 590 isert_dbg("cma_id: %p, portal: %p\n",
583 cma_id, cma_id->context); 591 cma_id, cma_id->context);
584 592
585 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL); 593 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
586 if (!isert_conn) { 594 if (!isert_conn) {
587 pr_err("Unable to allocate isert_conn\n"); 595 isert_err("Unable to allocate isert_conn\n");
588 return -ENOMEM; 596 return -ENOMEM;
589 } 597 }
590 isert_conn->state = ISER_CONN_INIT; 598 isert_conn->state = ISER_CONN_INIT;
591 INIT_LIST_HEAD(&isert_conn->conn_accept_node); 599 INIT_LIST_HEAD(&isert_conn->conn_accept_node);
592 init_completion(&isert_conn->conn_login_comp); 600 init_completion(&isert_conn->conn_login_comp);
601 init_completion(&isert_conn->login_req_comp);
593 init_completion(&isert_conn->conn_wait); 602 init_completion(&isert_conn->conn_wait);
594 init_completion(&isert_conn->conn_wait_comp_err);
595 kref_init(&isert_conn->conn_kref); 603 kref_init(&isert_conn->conn_kref);
596 mutex_init(&isert_conn->conn_mutex); 604 mutex_init(&isert_conn->conn_mutex);
597 spin_lock_init(&isert_conn->conn_lock); 605 spin_lock_init(&isert_conn->conn_lock);
598 INIT_LIST_HEAD(&isert_conn->conn_fr_pool); 606 INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
599 607
600 cma_id->context = isert_conn;
601 isert_conn->conn_cm_id = cma_id; 608 isert_conn->conn_cm_id = cma_id;
602 609
603 isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + 610 isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
604 ISER_RX_LOGIN_SIZE, GFP_KERNEL); 611 ISER_RX_LOGIN_SIZE, GFP_KERNEL);
605 if (!isert_conn->login_buf) { 612 if (!isert_conn->login_buf) {
606 pr_err("Unable to allocate isert_conn->login_buf\n"); 613 isert_err("Unable to allocate isert_conn->login_buf\n");
607 ret = -ENOMEM; 614 ret = -ENOMEM;
608 goto out; 615 goto out;
609 } 616 }
@@ -611,7 +618,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
611 isert_conn->login_req_buf = isert_conn->login_buf; 618 isert_conn->login_req_buf = isert_conn->login_buf;
612 isert_conn->login_rsp_buf = isert_conn->login_buf + 619 isert_conn->login_rsp_buf = isert_conn->login_buf +
613 ISCSI_DEF_MAX_RECV_SEG_LEN; 620 ISCSI_DEF_MAX_RECV_SEG_LEN;
614 pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n", 621 isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
615 isert_conn->login_buf, isert_conn->login_req_buf, 622 isert_conn->login_buf, isert_conn->login_req_buf,
616 isert_conn->login_rsp_buf); 623 isert_conn->login_rsp_buf);
617 624
@@ -621,7 +628,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
621 628
622 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma); 629 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
623 if (ret) { 630 if (ret) {
624 pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n", 631 isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
625 ret); 632 ret);
626 isert_conn->login_req_dma = 0; 633 isert_conn->login_req_dma = 0;
627 goto out_login_buf; 634 goto out_login_buf;
@@ -633,7 +640,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
633 640
634 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma); 641 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
635 if (ret) { 642 if (ret) {
636 pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n", 643 isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
637 ret); 644 ret);
638 isert_conn->login_rsp_dma = 0; 645 isert_conn->login_rsp_dma = 0;
639 goto out_req_dma_map; 646 goto out_req_dma_map;
@@ -649,13 +656,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
649 isert_conn->initiator_depth = min_t(u8, 656 isert_conn->initiator_depth = min_t(u8,
650 event->param.conn.initiator_depth, 657 event->param.conn.initiator_depth,
651 device->dev_attr.max_qp_init_rd_atom); 658 device->dev_attr.max_qp_init_rd_atom);
652 pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth); 659 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
653 660
654 isert_conn->conn_device = device; 661 isert_conn->conn_device = device;
655 isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device); 662 isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
656 if (IS_ERR(isert_conn->conn_pd)) { 663 if (IS_ERR(isert_conn->conn_pd)) {
657 ret = PTR_ERR(isert_conn->conn_pd); 664 ret = PTR_ERR(isert_conn->conn_pd);
658 pr_err("ib_alloc_pd failed for conn %p: ret=%d\n", 665 isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
659 isert_conn, ret); 666 isert_conn, ret);
660 goto out_pd; 667 goto out_pd;
661 } 668 }
@@ -664,20 +671,20 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
664 IB_ACCESS_LOCAL_WRITE); 671 IB_ACCESS_LOCAL_WRITE);
665 if (IS_ERR(isert_conn->conn_mr)) { 672 if (IS_ERR(isert_conn->conn_mr)) {
666 ret = PTR_ERR(isert_conn->conn_mr); 673 ret = PTR_ERR(isert_conn->conn_mr);
667 pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n", 674 isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
668 isert_conn, ret); 675 isert_conn, ret);
669 goto out_mr; 676 goto out_mr;
670 } 677 }
671 678
672 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; 679 ret = isert_conn_setup_qp(isert_conn, cma_id);
673 if (pi_support && !device->pi_capable) { 680 if (ret)
674 pr_err("Protection information requested but not supported, " 681 goto out_conn_dev;
675 "rejecting connect request\n");
676 ret = rdma_reject(cma_id, NULL, 0);
677 goto out_mr;
678 }
679 682
680 ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support); 683 ret = isert_rdma_post_recvl(isert_conn);
684 if (ret)
685 goto out_conn_dev;
686
687 ret = isert_rdma_accept(isert_conn);
681 if (ret) 688 if (ret)
682 goto out_conn_dev; 689 goto out_conn_dev;
683 690
@@ -685,7 +692,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
685 list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list); 692 list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
686 mutex_unlock(&isert_np->np_accept_mutex); 693 mutex_unlock(&isert_np->np_accept_mutex);
687 694
688 pr_debug("isert_connect_request() up np_sem np: %p\n", np); 695 isert_info("np %p: Allow accept_np to continue\n", np);
689 up(&isert_np->np_sem); 696 up(&isert_np->np_sem);
690 return 0; 697 return 0;
691 698
@@ -705,6 +712,7 @@ out_login_buf:
705 kfree(isert_conn->login_buf); 712 kfree(isert_conn->login_buf);
706out: 713out:
707 kfree(isert_conn); 714 kfree(isert_conn);
715 rdma_reject(cma_id, NULL, 0);
708 return ret; 716 return ret;
709} 717}
710 718
@@ -713,24 +721,25 @@ isert_connect_release(struct isert_conn *isert_conn)
713{ 721{
714 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 722 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
715 struct isert_device *device = isert_conn->conn_device; 723 struct isert_device *device = isert_conn->conn_device;
716 int cq_index;
717 724
718 pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 725 isert_dbg("conn %p\n", isert_conn);
719 726
720 if (device && device->use_fastreg) 727 if (device && device->use_fastreg)
721 isert_conn_free_fastreg_pool(isert_conn); 728 isert_conn_free_fastreg_pool(isert_conn);
722 729
730 isert_free_rx_descriptors(isert_conn);
731 rdma_destroy_id(isert_conn->conn_cm_id);
732
723 if (isert_conn->conn_qp) { 733 if (isert_conn->conn_qp) {
724 cq_index = ((struct isert_cq_desc *) 734 struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
725 isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
726 pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
727 isert_conn->conn_device->cq_active_qps[cq_index]--;
728 735
729 rdma_destroy_qp(isert_conn->conn_cm_id); 736 isert_dbg("dec completion context %p active_qps\n", comp);
730 } 737 mutex_lock(&device_list_mutex);
738 comp->active_qps--;
739 mutex_unlock(&device_list_mutex);
731 740
732 isert_free_rx_descriptors(isert_conn); 741 ib_destroy_qp(isert_conn->conn_qp);
733 rdma_destroy_id(isert_conn->conn_cm_id); 742 }
734 743
735 ib_dereg_mr(isert_conn->conn_mr); 744 ib_dereg_mr(isert_conn->conn_mr);
736 ib_dealloc_pd(isert_conn->conn_pd); 745 ib_dealloc_pd(isert_conn->conn_pd);
@@ -747,16 +756,24 @@ isert_connect_release(struct isert_conn *isert_conn)
747 756
748 if (device) 757 if (device)
749 isert_device_try_release(device); 758 isert_device_try_release(device);
750
751 pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
752} 759}
753 760
754static void 761static void
755isert_connected_handler(struct rdma_cm_id *cma_id) 762isert_connected_handler(struct rdma_cm_id *cma_id)
756{ 763{
757 struct isert_conn *isert_conn = cma_id->context; 764 struct isert_conn *isert_conn = cma_id->qp->qp_context;
758 765
759 kref_get(&isert_conn->conn_kref); 766 isert_info("conn %p\n", isert_conn);
767
768 if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
769 isert_warn("conn %p connect_release is running\n", isert_conn);
770 return;
771 }
772
773 mutex_lock(&isert_conn->conn_mutex);
774 if (isert_conn->state != ISER_CONN_FULL_FEATURE)
775 isert_conn->state = ISER_CONN_UP;
776 mutex_unlock(&isert_conn->conn_mutex);
760} 777}
761 778
762static void 779static void
@@ -765,8 +782,8 @@ isert_release_conn_kref(struct kref *kref)
765 struct isert_conn *isert_conn = container_of(kref, 782 struct isert_conn *isert_conn = container_of(kref,
766 struct isert_conn, conn_kref); 783 struct isert_conn, conn_kref);
767 784
768 pr_debug("Calling isert_connect_release for final kref %s/%d\n", 785 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
769 current->comm, current->pid); 786 current->pid);
770 787
771 isert_connect_release(isert_conn); 788 isert_connect_release(isert_conn);
772} 789}
@@ -777,75 +794,111 @@ isert_put_conn(struct isert_conn *isert_conn)
777 kref_put(&isert_conn->conn_kref, isert_release_conn_kref); 794 kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
778} 795}
779 796
797/**
798 * isert_conn_terminate() - Initiate connection termination
799 * @isert_conn: isert connection struct
800 *
801 * Notes:
802 * In case the connection state is FULL_FEATURE, move state
803 * to TEMINATING and start teardown sequence (rdma_disconnect).
804 * In case the connection state is UP, complete flush as well.
805 *
806 * This routine must be called with conn_mutex held. Thus it is
807 * safe to call multiple times.
808 */
780static void 809static void
781isert_disconnect_work(struct work_struct *work) 810isert_conn_terminate(struct isert_conn *isert_conn)
782{ 811{
783 struct isert_conn *isert_conn = container_of(work, 812 int err;
784 struct isert_conn, conn_logout_work);
785 813
786 pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); 814 switch (isert_conn->state) {
787 mutex_lock(&isert_conn->conn_mutex); 815 case ISER_CONN_TERMINATING:
788 if (isert_conn->state == ISER_CONN_UP) 816 break;
817 case ISER_CONN_UP:
818 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
819 isert_info("Terminating conn %p state %d\n",
820 isert_conn, isert_conn->state);
789 isert_conn->state = ISER_CONN_TERMINATING; 821 isert_conn->state = ISER_CONN_TERMINATING;
790 822 err = rdma_disconnect(isert_conn->conn_cm_id);
791 if (isert_conn->post_recv_buf_count == 0 && 823 if (err)
792 atomic_read(&isert_conn->post_send_buf_count) == 0) { 824 isert_warn("Failed rdma_disconnect isert_conn %p\n",
793 mutex_unlock(&isert_conn->conn_mutex); 825 isert_conn);
794 goto wake_up; 826 break;
795 } 827 default:
796 if (!isert_conn->conn_cm_id) { 828 isert_warn("conn %p teminating in state %d\n",
797 mutex_unlock(&isert_conn->conn_mutex); 829 isert_conn, isert_conn->state);
798 isert_put_conn(isert_conn);
799 return;
800 } 830 }
831}
801 832
802 if (isert_conn->disconnect) { 833static int
803 /* Send DREQ/DREP towards our initiator */ 834isert_np_cma_handler(struct isert_np *isert_np,
804 rdma_disconnect(isert_conn->conn_cm_id); 835 enum rdma_cm_event_type event)
805 } 836{
837 isert_dbg("isert np %p, handling event %d\n", isert_np, event);
806 838
807 mutex_unlock(&isert_conn->conn_mutex); 839 switch (event) {
840 case RDMA_CM_EVENT_DEVICE_REMOVAL:
841 isert_np->np_cm_id = NULL;
842 break;
843 case RDMA_CM_EVENT_ADDR_CHANGE:
844 isert_np->np_cm_id = isert_setup_id(isert_np);
845 if (IS_ERR(isert_np->np_cm_id)) {
846 isert_err("isert np %p setup id failed: %ld\n",
847 isert_np, PTR_ERR(isert_np->np_cm_id));
848 isert_np->np_cm_id = NULL;
849 }
850 break;
851 default:
852 isert_err("isert np %p Unexpected event %d\n",
853 isert_np, event);
854 }
808 855
809wake_up: 856 return -1;
810 complete(&isert_conn->conn_wait);
811} 857}
812 858
813static int 859static int
814isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect) 860isert_disconnected_handler(struct rdma_cm_id *cma_id,
861 enum rdma_cm_event_type event)
815{ 862{
863 struct isert_np *isert_np = cma_id->context;
816 struct isert_conn *isert_conn; 864 struct isert_conn *isert_conn;
817 865
818 if (!cma_id->qp) { 866 if (isert_np->np_cm_id == cma_id)
819 struct isert_np *isert_np = cma_id->context; 867 return isert_np_cma_handler(cma_id->context, event);
820 868
821 isert_np->np_cm_id = NULL; 869 isert_conn = cma_id->qp->qp_context;
822 return -1;
823 }
824 870
825 isert_conn = (struct isert_conn *)cma_id->context; 871 mutex_lock(&isert_conn->conn_mutex);
872 isert_conn_terminate(isert_conn);
873 mutex_unlock(&isert_conn->conn_mutex);
826 874
827 isert_conn->disconnect = disconnect; 875 isert_info("conn %p completing conn_wait\n", isert_conn);
828 INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); 876 complete(&isert_conn->conn_wait);
829 schedule_work(&isert_conn->conn_logout_work);
830 877
831 return 0; 878 return 0;
832} 879}
833 880
881static void
882isert_connect_error(struct rdma_cm_id *cma_id)
883{
884 struct isert_conn *isert_conn = cma_id->qp->qp_context;
885
886 isert_put_conn(isert_conn);
887}
888
834static int 889static int
835isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) 890isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
836{ 891{
837 int ret = 0; 892 int ret = 0;
838 bool disconnect = false;
839 893
840 pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n", 894 isert_info("event %d status %d id %p np %p\n", event->event,
841 event->event, event->status, cma_id->context, cma_id); 895 event->status, cma_id, cma_id->context);
842 896
843 switch (event->event) { 897 switch (event->event) {
844 case RDMA_CM_EVENT_CONNECT_REQUEST: 898 case RDMA_CM_EVENT_CONNECT_REQUEST:
845 ret = isert_connect_request(cma_id, event); 899 ret = isert_connect_request(cma_id, event);
846 if (ret) 900 if (ret)
847 pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", 901 isert_err("failed handle connect request %d\n", ret);
848 event->event, ret);
849 break; 902 break;
850 case RDMA_CM_EVENT_ESTABLISHED: 903 case RDMA_CM_EVENT_ESTABLISHED:
851 isert_connected_handler(cma_id); 904 isert_connected_handler(cma_id);
@@ -853,13 +906,16 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
853 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */ 906 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
854 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */ 907 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
855 case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ 908 case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
856 disconnect = true;
857 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ 909 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
858 ret = isert_disconnected_handler(cma_id, disconnect); 910 ret = isert_disconnected_handler(cma_id, event->event);
859 break; 911 break;
912 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
913 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
860 case RDMA_CM_EVENT_CONNECT_ERROR: 914 case RDMA_CM_EVENT_CONNECT_ERROR:
915 isert_connect_error(cma_id);
916 break;
861 default: 917 default:
862 pr_err("Unhandled RDMA CMA event: %d\n", event->event); 918 isert_err("Unhandled RDMA CMA event: %d\n", event->event);
863 break; 919 break;
864 } 920 }
865 921
@@ -876,7 +932,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
876 932
877 for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) { 933 for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
878 rx_desc = &isert_conn->conn_rx_descs[rx_head]; 934 rx_desc = &isert_conn->conn_rx_descs[rx_head];
879 rx_wr->wr_id = (unsigned long)rx_desc; 935 rx_wr->wr_id = (uintptr_t)rx_desc;
880 rx_wr->sg_list = &rx_desc->rx_sg; 936 rx_wr->sg_list = &rx_desc->rx_sg;
881 rx_wr->num_sge = 1; 937 rx_wr->num_sge = 1;
882 rx_wr->next = rx_wr + 1; 938 rx_wr->next = rx_wr + 1;
@@ -890,10 +946,10 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
890 ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr, 946 ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
891 &rx_wr_failed); 947 &rx_wr_failed);
892 if (ret) { 948 if (ret) {
893 pr_err("ib_post_recv() failed with ret: %d\n", ret); 949 isert_err("ib_post_recv() failed with ret: %d\n", ret);
894 isert_conn->post_recv_buf_count -= count; 950 isert_conn->post_recv_buf_count -= count;
895 } else { 951 } else {
896 pr_debug("isert_post_recv(): Posted %d RX buffers\n", count); 952 isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
897 isert_conn->conn_rx_desc_head = rx_head; 953 isert_conn->conn_rx_desc_head = rx_head;
898 } 954 }
899 return ret; 955 return ret;
@@ -910,19 +966,15 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
910 ISER_HEADERS_LEN, DMA_TO_DEVICE); 966 ISER_HEADERS_LEN, DMA_TO_DEVICE);
911 967
912 send_wr.next = NULL; 968 send_wr.next = NULL;
913 send_wr.wr_id = (unsigned long)tx_desc; 969 send_wr.wr_id = (uintptr_t)tx_desc;
914 send_wr.sg_list = tx_desc->tx_sg; 970 send_wr.sg_list = tx_desc->tx_sg;
915 send_wr.num_sge = tx_desc->num_sge; 971 send_wr.num_sge = tx_desc->num_sge;
916 send_wr.opcode = IB_WR_SEND; 972 send_wr.opcode = IB_WR_SEND;
917 send_wr.send_flags = IB_SEND_SIGNALED; 973 send_wr.send_flags = IB_SEND_SIGNALED;
918 974
919 atomic_inc(&isert_conn->post_send_buf_count);
920
921 ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed); 975 ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
922 if (ret) { 976 if (ret)
923 pr_err("ib_post_send() failed, ret: %d\n", ret); 977 isert_err("ib_post_send() failed, ret: %d\n", ret);
924 atomic_dec(&isert_conn->post_send_buf_count);
925 }
926 978
927 return ret; 979 return ret;
928} 980}
@@ -945,7 +997,7 @@ isert_create_send_desc(struct isert_conn *isert_conn,
945 997
946 if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) { 998 if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
947 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; 999 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
948 pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc); 1000 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
949 } 1001 }
950} 1002}
951 1003
@@ -959,7 +1011,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
959 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc, 1011 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
960 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1012 ISER_HEADERS_LEN, DMA_TO_DEVICE);
961 if (ib_dma_mapping_error(ib_dev, dma_addr)) { 1013 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
962 pr_err("ib_dma_mapping_error() failed\n"); 1014 isert_err("ib_dma_mapping_error() failed\n");
963 return -ENOMEM; 1015 return -ENOMEM;
964 } 1016 }
965 1017
@@ -968,40 +1020,24 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
968 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 1020 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
969 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey; 1021 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
970 1022
971 pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u" 1023 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
972 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr, 1024 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
973 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey); 1025 tx_desc->tx_sg[0].lkey);
974 1026
975 return 0; 1027 return 0;
976} 1028}
977 1029
978static void 1030static void
979isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, 1031isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
980 struct ib_send_wr *send_wr, bool coalesce) 1032 struct ib_send_wr *send_wr)
981{ 1033{
982 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc; 1034 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
983 1035
984 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND; 1036 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
985 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; 1037 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
986 send_wr->opcode = IB_WR_SEND; 1038 send_wr->opcode = IB_WR_SEND;
987 send_wr->sg_list = &tx_desc->tx_sg[0]; 1039 send_wr->sg_list = &tx_desc->tx_sg[0];
988 send_wr->num_sge = isert_cmd->tx_desc.num_sge; 1040 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
989 /*
990 * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
991 * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
992 */
993 mutex_lock(&isert_conn->conn_mutex);
994 if (coalesce && isert_conn->state == ISER_CONN_UP &&
995 ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
996 tx_desc->llnode_active = true;
997 llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
998 mutex_unlock(&isert_conn->conn_mutex);
999 return;
1000 }
1001 isert_conn->conn_comp_batch = 0;
1002 tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
1003 mutex_unlock(&isert_conn->conn_mutex);
1004
1005 send_wr->send_flags = IB_SEND_SIGNALED; 1041 send_wr->send_flags = IB_SEND_SIGNALED;
1006} 1042}
1007 1043
@@ -1017,22 +1053,21 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
1017 sge.length = ISER_RX_LOGIN_SIZE; 1053 sge.length = ISER_RX_LOGIN_SIZE;
1018 sge.lkey = isert_conn->conn_mr->lkey; 1054 sge.lkey = isert_conn->conn_mr->lkey;
1019 1055
1020 pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n", 1056 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
1021 sge.addr, sge.length, sge.lkey); 1057 sge.addr, sge.length, sge.lkey);
1022 1058
1023 memset(&rx_wr, 0, sizeof(struct ib_recv_wr)); 1059 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
1024 rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf; 1060 rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
1025 rx_wr.sg_list = &sge; 1061 rx_wr.sg_list = &sge;
1026 rx_wr.num_sge = 1; 1062 rx_wr.num_sge = 1;
1027 1063
1028 isert_conn->post_recv_buf_count++; 1064 isert_conn->post_recv_buf_count++;
1029 ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail); 1065 ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
1030 if (ret) { 1066 if (ret) {
1031 pr_err("ib_post_recv() failed: %d\n", ret); 1067 isert_err("ib_post_recv() failed: %d\n", ret);
1032 isert_conn->post_recv_buf_count--; 1068 isert_conn->post_recv_buf_count--;
1033 } 1069 }
1034 1070
1035 pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
1036 return ret; 1071 return ret;
1037} 1072}
1038 1073
@@ -1072,13 +1107,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
1072 if (login->login_complete) { 1107 if (login->login_complete) {
1073 if (!conn->sess->sess_ops->SessionType && 1108 if (!conn->sess->sess_ops->SessionType &&
1074 isert_conn->conn_device->use_fastreg) { 1109 isert_conn->conn_device->use_fastreg) {
1075 /* Normal Session and fastreg is used */ 1110 ret = isert_conn_create_fastreg_pool(isert_conn);
1076 u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
1077
1078 ret = isert_conn_create_fastreg_pool(isert_conn,
1079 pi_support);
1080 if (ret) { 1111 if (ret) {
1081 pr_err("Conn: %p failed to create" 1112 isert_err("Conn: %p failed to create"
1082 " fastreg pool\n", isert_conn); 1113 " fastreg pool\n", isert_conn);
1083 return ret; 1114 return ret;
1084 } 1115 }
@@ -1092,7 +1123,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
1092 if (ret) 1123 if (ret)
1093 return ret; 1124 return ret;
1094 1125
1095 isert_conn->state = ISER_CONN_UP; 1126 /* Now we are in FULL_FEATURE phase */
1127 mutex_lock(&isert_conn->conn_mutex);
1128 isert_conn->state = ISER_CONN_FULL_FEATURE;
1129 mutex_unlock(&isert_conn->conn_mutex);
1096 goto post_send; 1130 goto post_send;
1097 } 1131 }
1098 1132
@@ -1109,18 +1143,17 @@ post_send:
1109} 1143}
1110 1144
1111static void 1145static void
1112isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen, 1146isert_rx_login_req(struct isert_conn *isert_conn)
1113 struct isert_conn *isert_conn)
1114{ 1147{
1148 struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
1149 int rx_buflen = isert_conn->login_req_len;
1115 struct iscsi_conn *conn = isert_conn->conn; 1150 struct iscsi_conn *conn = isert_conn->conn;
1116 struct iscsi_login *login = conn->conn_login; 1151 struct iscsi_login *login = conn->conn_login;
1117 int size; 1152 int size;
1118 1153
1119 if (!login) { 1154 isert_info("conn %p\n", isert_conn);
1120 pr_err("conn->conn_login is NULL\n"); 1155
1121 dump_stack(); 1156 WARN_ON_ONCE(!login);
1122 return;
1123 }
1124 1157
1125 if (login->first_request) { 1158 if (login->first_request) {
1126 struct iscsi_login_req *login_req = 1159 struct iscsi_login_req *login_req =
@@ -1146,8 +1179,9 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
1146 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN); 1179 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
1147 1180
1148 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS); 1181 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
1149 pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n", 1182 isert_dbg("Using login payload size: %d, rx_buflen: %d "
1150 size, rx_buflen, MAX_KEY_VALUE_PAIRS); 1183 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
1184 MAX_KEY_VALUE_PAIRS);
1151 memcpy(login->req_buf, &rx_desc->data[0], size); 1185 memcpy(login->req_buf, &rx_desc->data[0], size);
1152 1186
1153 if (login->first_request) { 1187 if (login->first_request) {
@@ -1166,7 +1200,7 @@ static struct iscsi_cmd
1166 1200
1167 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); 1201 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
1168 if (!cmd) { 1202 if (!cmd) {
1169 pr_err("Unable to allocate iscsi_cmd + isert_cmd\n"); 1203 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1170 return NULL; 1204 return NULL;
1171 } 1205 }
1172 isert_cmd = iscsit_priv_cmd(cmd); 1206 isert_cmd = iscsit_priv_cmd(cmd);
@@ -1209,8 +1243,8 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1209 sg = &cmd->se_cmd.t_data_sg[0]; 1243 sg = &cmd->se_cmd.t_data_sg[0];
1210 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE)); 1244 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1211 1245
1212 pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n", 1246 isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
1213 sg, sg_nents, &rx_desc->data[0], imm_data_len); 1247 sg, sg_nents, &rx_desc->data[0], imm_data_len);
1214 1248
1215 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len); 1249 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
1216 1250
@@ -1254,13 +1288,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1254 * FIXME: Unexpected unsolicited_data out 1288 * FIXME: Unexpected unsolicited_data out
1255 */ 1289 */
1256 if (!cmd->unsolicited_data) { 1290 if (!cmd->unsolicited_data) {
1257 pr_err("Received unexpected solicited data payload\n"); 1291 isert_err("Received unexpected solicited data payload\n");
1258 dump_stack(); 1292 dump_stack();
1259 return -1; 1293 return -1;
1260 } 1294 }
1261 1295
1262 pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n", 1296 isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
1263 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length); 1297 "write_data_done: %u, data_length: %u\n",
1298 unsol_data_len, cmd->write_data_done,
1299 cmd->se_cmd.data_length);
1264 1300
1265 sg_off = cmd->write_data_done / PAGE_SIZE; 1301 sg_off = cmd->write_data_done / PAGE_SIZE;
1266 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1302 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
@@ -1270,12 +1306,13 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1270 * FIXME: Non page-aligned unsolicited_data out 1306 * FIXME: Non page-aligned unsolicited_data out
1271 */ 1307 */
1272 if (page_off) { 1308 if (page_off) {
1273 pr_err("Received unexpected non-page aligned data payload\n"); 1309 isert_err("unexpected non-page aligned data payload\n");
1274 dump_stack(); 1310 dump_stack();
1275 return -1; 1311 return -1;
1276 } 1312 }
1277 pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n", 1313 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
1278 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len); 1314 "sg_nents: %u from %p %u\n", sg_start, sg_off,
1315 sg_nents, &rx_desc->data[0], unsol_data_len);
1279 1316
1280 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0], 1317 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
1281 unsol_data_len); 1318 unsol_data_len);
@@ -1322,8 +1359,8 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
1322 1359
1323 text_in = kzalloc(payload_length, GFP_KERNEL); 1360 text_in = kzalloc(payload_length, GFP_KERNEL);
1324 if (!text_in) { 1361 if (!text_in) {
1325 pr_err("Unable to allocate text_in of payload_length: %u\n", 1362 isert_err("Unable to allocate text_in of payload_length: %u\n",
1326 payload_length); 1363 payload_length);
1327 return -ENOMEM; 1364 return -ENOMEM;
1328 } 1365 }
1329 cmd->text_in_ptr = text_in; 1366 cmd->text_in_ptr = text_in;
@@ -1348,8 +1385,8 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1348 1385
1349 if (sess->sess_ops->SessionType && 1386 if (sess->sess_ops->SessionType &&
1350 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) { 1387 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
1351 pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery," 1388 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1352 " ignoring\n", opcode); 1389 " ignoring\n", opcode);
1353 return 0; 1390 return 0;
1354 } 1391 }
1355 1392
@@ -1395,10 +1432,6 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1395 break; 1432 break;
1396 1433
1397 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); 1434 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1398 if (ret > 0)
1399 wait_for_completion_timeout(&conn->conn_logout_comp,
1400 SECONDS_FOR_LOGOUT_COMP *
1401 HZ);
1402 break; 1435 break;
1403 case ISCSI_OP_TEXT: 1436 case ISCSI_OP_TEXT:
1404 cmd = isert_allocate_cmd(conn); 1437 cmd = isert_allocate_cmd(conn);
@@ -1410,7 +1443,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1410 rx_desc, (struct iscsi_text *)hdr); 1443 rx_desc, (struct iscsi_text *)hdr);
1411 break; 1444 break;
1412 default: 1445 default:
1413 pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode); 1446 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
1414 dump_stack(); 1447 dump_stack();
1415 break; 1448 break;
1416 } 1449 }
@@ -1431,23 +1464,23 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1431 if (iser_hdr->flags & ISER_RSV) { 1464 if (iser_hdr->flags & ISER_RSV) {
1432 read_stag = be32_to_cpu(iser_hdr->read_stag); 1465 read_stag = be32_to_cpu(iser_hdr->read_stag);
1433 read_va = be64_to_cpu(iser_hdr->read_va); 1466 read_va = be64_to_cpu(iser_hdr->read_va);
1434 pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n", 1467 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
1435 read_stag, (unsigned long long)read_va); 1468 read_stag, (unsigned long long)read_va);
1436 } 1469 }
1437 if (iser_hdr->flags & ISER_WSV) { 1470 if (iser_hdr->flags & ISER_WSV) {
1438 write_stag = be32_to_cpu(iser_hdr->write_stag); 1471 write_stag = be32_to_cpu(iser_hdr->write_stag);
1439 write_va = be64_to_cpu(iser_hdr->write_va); 1472 write_va = be64_to_cpu(iser_hdr->write_va);
1440 pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n", 1473 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
1441 write_stag, (unsigned long long)write_va); 1474 write_stag, (unsigned long long)write_va);
1442 } 1475 }
1443 1476
1444 pr_debug("ISER ISCSI_CTRL PDU\n"); 1477 isert_dbg("ISER ISCSI_CTRL PDU\n");
1445 break; 1478 break;
1446 case ISER_HELLO: 1479 case ISER_HELLO:
1447 pr_err("iSER Hello message\n"); 1480 isert_err("iSER Hello message\n");
1448 break; 1481 break;
1449 default: 1482 default:
1450 pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags); 1483 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1451 break; 1484 break;
1452 } 1485 }
1453 1486
@@ -1457,7 +1490,7 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1457 1490
1458static void 1491static void
1459isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn, 1492isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
1460 unsigned long xfer_len) 1493 u32 xfer_len)
1461{ 1494{
1462 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1495 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1463 struct iscsi_hdr *hdr; 1496 struct iscsi_hdr *hdr;
@@ -1467,34 +1500,43 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
1467 if ((char *)desc == isert_conn->login_req_buf) { 1500 if ((char *)desc == isert_conn->login_req_buf) {
1468 rx_dma = isert_conn->login_req_dma; 1501 rx_dma = isert_conn->login_req_dma;
1469 rx_buflen = ISER_RX_LOGIN_SIZE; 1502 rx_buflen = ISER_RX_LOGIN_SIZE;
1470 pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", 1503 isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1471 rx_dma, rx_buflen); 1504 rx_dma, rx_buflen);
1472 } else { 1505 } else {
1473 rx_dma = desc->dma_addr; 1506 rx_dma = desc->dma_addr;
1474 rx_buflen = ISER_RX_PAYLOAD_SIZE; 1507 rx_buflen = ISER_RX_PAYLOAD_SIZE;
1475 pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n", 1508 isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1476 rx_dma, rx_buflen); 1509 rx_dma, rx_buflen);
1477 } 1510 }
1478 1511
1479 ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE); 1512 ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
1480 1513
1481 hdr = &desc->iscsi_header; 1514 hdr = &desc->iscsi_header;
1482 pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n", 1515 isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1483 hdr->opcode, hdr->itt, hdr->flags, 1516 hdr->opcode, hdr->itt, hdr->flags,
1484 (int)(xfer_len - ISER_HEADERS_LEN)); 1517 (int)(xfer_len - ISER_HEADERS_LEN));
1485 1518
1486 if ((char *)desc == isert_conn->login_req_buf) 1519 if ((char *)desc == isert_conn->login_req_buf) {
1487 isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN, 1520 isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
1488 isert_conn); 1521 if (isert_conn->conn) {
1489 else 1522 struct iscsi_login *login = isert_conn->conn->conn_login;
1523
1524 if (login && !login->first_request)
1525 isert_rx_login_req(isert_conn);
1526 }
1527 mutex_lock(&isert_conn->conn_mutex);
1528 complete(&isert_conn->login_req_comp);
1529 mutex_unlock(&isert_conn->conn_mutex);
1530 } else {
1490 isert_rx_do_work(desc, isert_conn); 1531 isert_rx_do_work(desc, isert_conn);
1532 }
1491 1533
1492 ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen, 1534 ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
1493 DMA_FROM_DEVICE); 1535 DMA_FROM_DEVICE);
1494 1536
1495 isert_conn->post_recv_buf_count--; 1537 isert_conn->post_recv_buf_count--;
1496 pr_debug("iSERT: Decremented post_recv_buf_count: %d\n", 1538 isert_dbg("Decremented post_recv_buf_count: %d\n",
1497 isert_conn->post_recv_buf_count); 1539 isert_conn->post_recv_buf_count);
1498 1540
1499 if ((char *)desc == isert_conn->login_req_buf) 1541 if ((char *)desc == isert_conn->login_req_buf)
1500 return; 1542 return;
@@ -1505,7 +1547,7 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
1505 ISERT_MIN_POSTED_RX); 1547 ISERT_MIN_POSTED_RX);
1506 err = isert_post_recv(isert_conn, count); 1548 err = isert_post_recv(isert_conn, count);
1507 if (err) { 1549 if (err) {
1508 pr_err("isert_post_recv() count: %d failed, %d\n", 1550 isert_err("isert_post_recv() count: %d failed, %d\n",
1509 count, err); 1551 count, err);
1510 } 1552 }
1511 } 1553 }
@@ -1534,12 +1576,12 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1534 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents, 1576 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
1535 data->dma_dir); 1577 data->dma_dir);
1536 if (unlikely(!data->dma_nents)) { 1578 if (unlikely(!data->dma_nents)) {
1537 pr_err("Cmd: unable to dma map SGs %p\n", sg); 1579 isert_err("Cmd: unable to dma map SGs %p\n", sg);
1538 return -EINVAL; 1580 return -EINVAL;
1539 } 1581 }
1540 1582
1541 pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n", 1583 isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
1542 isert_cmd, data->dma_nents, data->sg, data->nents, data->len); 1584 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
1543 1585
1544 return 0; 1586 return 0;
1545} 1587}
@@ -1560,21 +1602,21 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1560{ 1602{
1561 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1603 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1562 1604
1563 pr_debug("isert_unmap_cmd: %p\n", isert_cmd); 1605 isert_dbg("Cmd %p\n", isert_cmd);
1564 1606
1565 if (wr->data.sg) { 1607 if (wr->data.sg) {
1566 pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd); 1608 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
1567 isert_unmap_data_buf(isert_conn, &wr->data); 1609 isert_unmap_data_buf(isert_conn, &wr->data);
1568 } 1610 }
1569 1611
1570 if (wr->send_wr) { 1612 if (wr->send_wr) {
1571 pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd); 1613 isert_dbg("Cmd %p free send_wr\n", isert_cmd);
1572 kfree(wr->send_wr); 1614 kfree(wr->send_wr);
1573 wr->send_wr = NULL; 1615 wr->send_wr = NULL;
1574 } 1616 }
1575 1617
1576 if (wr->ib_sge) { 1618 if (wr->ib_sge) {
1577 pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd); 1619 isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
1578 kfree(wr->ib_sge); 1620 kfree(wr->ib_sge);
1579 wr->ib_sge = NULL; 1621 wr->ib_sge = NULL;
1580 } 1622 }
@@ -1586,11 +1628,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1586 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; 1628 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1587 LIST_HEAD(unmap_list); 1629 LIST_HEAD(unmap_list);
1588 1630
1589 pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd); 1631 isert_dbg("Cmd %p\n", isert_cmd);
1590 1632
1591 if (wr->fr_desc) { 1633 if (wr->fr_desc) {
1592 pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n", 1634 isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
1593 isert_cmd, wr->fr_desc);
1594 if (wr->fr_desc->ind & ISERT_PROTECTED) { 1635 if (wr->fr_desc->ind & ISERT_PROTECTED) {
1595 isert_unmap_data_buf(isert_conn, &wr->prot); 1636 isert_unmap_data_buf(isert_conn, &wr->prot);
1596 wr->fr_desc->ind &= ~ISERT_PROTECTED; 1637 wr->fr_desc->ind &= ~ISERT_PROTECTED;
@@ -1602,7 +1643,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1602 } 1643 }
1603 1644
1604 if (wr->data.sg) { 1645 if (wr->data.sg) {
1605 pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd); 1646 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
1606 isert_unmap_data_buf(isert_conn, &wr->data); 1647 isert_unmap_data_buf(isert_conn, &wr->data);
1607 } 1648 }
1608 1649
@@ -1618,7 +1659,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1618 struct iscsi_conn *conn = isert_conn->conn; 1659 struct iscsi_conn *conn = isert_conn->conn;
1619 struct isert_device *device = isert_conn->conn_device; 1660 struct isert_device *device = isert_conn->conn_device;
1620 1661
1621 pr_debug("Entering isert_put_cmd: %p\n", isert_cmd); 1662 isert_dbg("Cmd %p\n", isert_cmd);
1622 1663
1623 switch (cmd->iscsi_opcode) { 1664 switch (cmd->iscsi_opcode) {
1624 case ISCSI_OP_SCSI_CMD: 1665 case ISCSI_OP_SCSI_CMD:
@@ -1668,7 +1709,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1668 * associated cmd->se_cmd needs to be released. 1709 * associated cmd->se_cmd needs to be released.
1669 */ 1710 */
1670 if (cmd->se_cmd.se_tfo != NULL) { 1711 if (cmd->se_cmd.se_tfo != NULL) {
1671 pr_debug("Calling transport_generic_free_cmd from" 1712 isert_dbg("Calling transport_generic_free_cmd from"
1672 " isert_put_cmd for 0x%02x\n", 1713 " isert_put_cmd for 0x%02x\n",
1673 cmd->iscsi_opcode); 1714 cmd->iscsi_opcode);
1674 transport_generic_free_cmd(&cmd->se_cmd, 0); 1715 transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1687,7 +1728,7 @@ static void
1687isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) 1728isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1688{ 1729{
1689 if (tx_desc->dma_addr != 0) { 1730 if (tx_desc->dma_addr != 0) {
1690 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n"); 1731 isert_dbg("unmap single for tx_desc->dma_addr\n");
1691 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr, 1732 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1692 ISER_HEADERS_LEN, DMA_TO_DEVICE); 1733 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1693 tx_desc->dma_addr = 0; 1734 tx_desc->dma_addr = 0;
@@ -1699,7 +1740,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1699 struct ib_device *ib_dev, bool comp_err) 1740 struct ib_device *ib_dev, bool comp_err)
1700{ 1741{
1701 if (isert_cmd->pdu_buf_dma != 0) { 1742 if (isert_cmd->pdu_buf_dma != 0) {
1702 pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n"); 1743 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
1703 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma, 1744 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1704 isert_cmd->pdu_buf_len, DMA_TO_DEVICE); 1745 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1705 isert_cmd->pdu_buf_dma = 0; 1746 isert_cmd->pdu_buf_dma = 0;
@@ -1717,7 +1758,7 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1717 1758
1718 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status); 1759 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1719 if (ret) { 1760 if (ret) {
1720 pr_err("ib_check_mr_status failed, ret %d\n", ret); 1761 isert_err("ib_check_mr_status failed, ret %d\n", ret);
1721 goto fail_mr_status; 1762 goto fail_mr_status;
1722 } 1763 }
1723 1764
@@ -1740,12 +1781,12 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1740 do_div(sec_offset_err, block_size); 1781 do_div(sec_offset_err, block_size);
1741 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba; 1782 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1742 1783
1743 pr_err("isert: PI error found type %d at sector 0x%llx " 1784 isert_err("PI error found type %d at sector 0x%llx "
1744 "expected 0x%x vs actual 0x%x\n", 1785 "expected 0x%x vs actual 0x%x\n",
1745 mr_status.sig_err.err_type, 1786 mr_status.sig_err.err_type,
1746 (unsigned long long)se_cmd->bad_sector, 1787 (unsigned long long)se_cmd->bad_sector,
1747 mr_status.sig_err.expected, 1788 mr_status.sig_err.expected,
1748 mr_status.sig_err.actual); 1789 mr_status.sig_err.actual);
1749 ret = 1; 1790 ret = 1;
1750 } 1791 }
1751 1792
@@ -1801,7 +1842,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1801 cmd->write_data_done = wr->data.len; 1842 cmd->write_data_done = wr->data.len;
1802 wr->send_wr_num = 0; 1843 wr->send_wr_num = 0;
1803 1844
1804 pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); 1845 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1805 spin_lock_bh(&cmd->istate_lock); 1846 spin_lock_bh(&cmd->istate_lock);
1806 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT; 1847 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1807 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT; 1848 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -1823,36 +1864,22 @@ isert_do_control_comp(struct work_struct *work)
1823 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1864 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1824 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1865 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1825 1866
1867 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
1868
1826 switch (cmd->i_state) { 1869 switch (cmd->i_state) {
1827 case ISTATE_SEND_TASKMGTRSP: 1870 case ISTATE_SEND_TASKMGTRSP:
1828 pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
1829
1830 atomic_dec(&isert_conn->post_send_buf_count);
1831 iscsit_tmr_post_handler(cmd, cmd->conn); 1871 iscsit_tmr_post_handler(cmd, cmd->conn);
1832 1872 case ISTATE_SEND_REJECT: /* FALLTHRU */
1833 cmd->i_state = ISTATE_SENT_STATUS; 1873 case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
1834 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
1835 break;
1836 case ISTATE_SEND_REJECT:
1837 pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
1838 atomic_dec(&isert_conn->post_send_buf_count);
1839
1840 cmd->i_state = ISTATE_SENT_STATUS; 1874 cmd->i_state = ISTATE_SENT_STATUS;
1841 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false); 1875 isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
1876 ib_dev, false);
1842 break; 1877 break;
1843 case ISTATE_SEND_LOGOUTRSP: 1878 case ISTATE_SEND_LOGOUTRSP:
1844 pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1845
1846 atomic_dec(&isert_conn->post_send_buf_count);
1847 iscsit_logout_post_handler(cmd, cmd->conn); 1879 iscsit_logout_post_handler(cmd, cmd->conn);
1848 break; 1880 break;
1849 case ISTATE_SEND_TEXTRSP:
1850 atomic_dec(&isert_conn->post_send_buf_count);
1851 cmd->i_state = ISTATE_SENT_STATUS;
1852 isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
1853 break;
1854 default: 1881 default:
1855 pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state); 1882 isert_err("Unknown i_state %d\n", cmd->i_state);
1856 dump_stack(); 1883 dump_stack();
1857 break; 1884 break;
1858 } 1885 }
@@ -1865,7 +1892,6 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
1865 struct ib_device *ib_dev) 1892 struct ib_device *ib_dev)
1866{ 1893{
1867 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; 1894 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1868 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1869 1895
1870 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || 1896 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1871 cmd->i_state == ISTATE_SEND_LOGOUTRSP || 1897 cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1878,267 +1904,151 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
1878 return; 1904 return;
1879 } 1905 }
1880 1906
1881 /**
1882 * If send_wr_num is 0 this means that we got
1883 * RDMA completion and we cleared it and we should
1884 * simply decrement the response post. else the
1885 * response is incorporated in send_wr_num, just
1886 * sub it.
1887 **/
1888 if (wr->send_wr_num)
1889 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1890 else
1891 atomic_dec(&isert_conn->post_send_buf_count);
1892
1893 cmd->i_state = ISTATE_SENT_STATUS; 1907 cmd->i_state = ISTATE_SENT_STATUS;
1894 isert_completion_put(tx_desc, isert_cmd, ib_dev, false); 1908 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
1895} 1909}
1896 1910
1897static void 1911static void
1898__isert_send_completion(struct iser_tx_desc *tx_desc, 1912isert_send_completion(struct iser_tx_desc *tx_desc,
1899 struct isert_conn *isert_conn) 1913 struct isert_conn *isert_conn)
1900{ 1914{
1901 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1915 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1902 struct isert_cmd *isert_cmd = tx_desc->isert_cmd; 1916 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1903 struct isert_rdma_wr *wr; 1917 struct isert_rdma_wr *wr;
1904 1918
1905 if (!isert_cmd) { 1919 if (!isert_cmd) {
1906 atomic_dec(&isert_conn->post_send_buf_count);
1907 isert_unmap_tx_desc(tx_desc, ib_dev); 1920 isert_unmap_tx_desc(tx_desc, ib_dev);
1908 return; 1921 return;
1909 } 1922 }
1910 wr = &isert_cmd->rdma_wr; 1923 wr = &isert_cmd->rdma_wr;
1911 1924
1925 isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
1926
1912 switch (wr->iser_ib_op) { 1927 switch (wr->iser_ib_op) {
1913 case ISER_IB_RECV: 1928 case ISER_IB_RECV:
1914 pr_err("isert_send_completion: Got ISER_IB_RECV\n"); 1929 isert_err("Got ISER_IB_RECV\n");
1915 dump_stack(); 1930 dump_stack();
1916 break; 1931 break;
1917 case ISER_IB_SEND: 1932 case ISER_IB_SEND:
1918 pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
1919 isert_response_completion(tx_desc, isert_cmd, 1933 isert_response_completion(tx_desc, isert_cmd,
1920 isert_conn, ib_dev); 1934 isert_conn, ib_dev);
1921 break; 1935 break;
1922 case ISER_IB_RDMA_WRITE: 1936 case ISER_IB_RDMA_WRITE:
1923 pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
1924 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1925 isert_completion_rdma_write(tx_desc, isert_cmd); 1937 isert_completion_rdma_write(tx_desc, isert_cmd);
1926 break; 1938 break;
1927 case ISER_IB_RDMA_READ: 1939 case ISER_IB_RDMA_READ:
1928 pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
1929
1930 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
1931 isert_completion_rdma_read(tx_desc, isert_cmd); 1940 isert_completion_rdma_read(tx_desc, isert_cmd);
1932 break; 1941 break;
1933 default: 1942 default:
1934 pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op); 1943 isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
1935 dump_stack(); 1944 dump_stack();
1936 break; 1945 break;
1937 } 1946 }
1938} 1947}
1939 1948
1940static void 1949/**
1941isert_send_completion(struct iser_tx_desc *tx_desc, 1950 * is_isert_tx_desc() - Indicate if the completion wr_id
1942 struct isert_conn *isert_conn) 1951 * is a TX descriptor or not.
1943{ 1952 * @isert_conn: iser connection
1944 struct llist_node *llnode = tx_desc->comp_llnode_batch; 1953 * @wr_id: completion WR identifier
1945 struct iser_tx_desc *t; 1954 *
1946 /* 1955 * Since we cannot rely on wc opcode in FLUSH errors
1947 * Drain coalesced completion llist starting from comp_llnode_batch 1956 * we must work around it by checking if the wr_id address
1948 * setup in isert_init_send_wr(), and then complete trailing tx_desc. 1957 * falls in the iser connection rx_descs buffer. If so
1949 */ 1958 * it is an RX descriptor, otherwize it is a TX.
1950 while (llnode) { 1959 */
1951 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); 1960static inline bool
1952 llnode = llist_next(llnode); 1961is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
1953 __isert_send_completion(t, isert_conn);
1954 }
1955 __isert_send_completion(tx_desc, isert_conn);
1956}
1957
1958static void
1959isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
1960{ 1962{
1961 struct llist_node *llnode; 1963 void *start = isert_conn->conn_rx_descs;
1962 struct isert_rdma_wr *wr; 1964 int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
1963 struct iser_tx_desc *t;
1964 1965
1965 mutex_lock(&isert_conn->conn_mutex); 1966 if (wr_id >= start && wr_id < start + len)
1966 llnode = llist_del_all(&isert_conn->conn_comp_llist); 1967 return false;
1967 isert_conn->conn_comp_batch = 0;
1968 mutex_unlock(&isert_conn->conn_mutex);
1969
1970 while (llnode) {
1971 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
1972 llnode = llist_next(llnode);
1973 wr = &t->isert_cmd->rdma_wr;
1974
1975 /**
1976 * If send_wr_num is 0 this means that we got
1977 * RDMA completion and we cleared it and we should
1978 * simply decrement the response post. else the
1979 * response is incorporated in send_wr_num, just
1980 * sub it.
1981 **/
1982 if (wr->send_wr_num)
1983 atomic_sub(wr->send_wr_num,
1984 &isert_conn->post_send_buf_count);
1985 else
1986 atomic_dec(&isert_conn->post_send_buf_count);
1987 1968
1988 isert_completion_put(t, t->isert_cmd, ib_dev, true); 1969 return true;
1989 }
1990} 1970}
1991 1971
1992static void 1972static void
1993isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) 1973isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
1994{ 1974{
1995 struct ib_device *ib_dev = isert_conn->conn_cm_id->device; 1975 if (wc->wr_id == ISER_BEACON_WRID) {
1996 struct isert_cmd *isert_cmd = tx_desc->isert_cmd; 1976 isert_info("conn %p completing conn_wait_comp_err\n",
1997 struct llist_node *llnode = tx_desc->comp_llnode_batch; 1977 isert_conn);
1998 struct isert_rdma_wr *wr; 1978 complete(&isert_conn->conn_wait_comp_err);
1999 struct iser_tx_desc *t; 1979 } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
2000 1980 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2001 while (llnode) { 1981 struct isert_cmd *isert_cmd;
2002 t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); 1982 struct iser_tx_desc *desc;
2003 llnode = llist_next(llnode);
2004 wr = &t->isert_cmd->rdma_wr;
2005 1983
2006 /** 1984 desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
2007 * If send_wr_num is 0 this means that we got 1985 isert_cmd = desc->isert_cmd;
2008 * RDMA completion and we cleared it and we should 1986 if (!isert_cmd)
2009 * simply decrement the response post. else the 1987 isert_unmap_tx_desc(desc, ib_dev);
2010 * response is incorporated in send_wr_num, just
2011 * sub it.
2012 **/
2013 if (wr->send_wr_num)
2014 atomic_sub(wr->send_wr_num,
2015 &isert_conn->post_send_buf_count);
2016 else 1988 else
2017 atomic_dec(&isert_conn->post_send_buf_count); 1989 isert_completion_put(desc, isert_cmd, ib_dev, true);
2018 1990 } else {
2019 isert_completion_put(t, t->isert_cmd, ib_dev, true); 1991 isert_conn->post_recv_buf_count--;
2020 } 1992 if (!isert_conn->post_recv_buf_count)
2021 tx_desc->comp_llnode_batch = NULL; 1993 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
2022
2023 if (!isert_cmd)
2024 isert_unmap_tx_desc(tx_desc, ib_dev);
2025 else
2026 isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
2027}
2028
2029static void
2030isert_cq_rx_comp_err(struct isert_conn *isert_conn)
2031{
2032 struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
2033 struct iscsi_conn *conn = isert_conn->conn;
2034
2035 if (isert_conn->post_recv_buf_count)
2036 return;
2037
2038 isert_cq_drain_comp_llist(isert_conn, ib_dev);
2039
2040 if (conn->sess) {
2041 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
2042 target_wait_for_sess_cmds(conn->sess->se_sess);
2043 } 1994 }
2044
2045 while (atomic_read(&isert_conn->post_send_buf_count))
2046 msleep(3000);
2047
2048 mutex_lock(&isert_conn->conn_mutex);
2049 isert_conn->state = ISER_CONN_DOWN;
2050 mutex_unlock(&isert_conn->conn_mutex);
2051
2052 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
2053
2054 complete(&isert_conn->conn_wait_comp_err);
2055} 1995}
2056 1996
2057static void 1997static void
2058isert_cq_tx_work(struct work_struct *work) 1998isert_handle_wc(struct ib_wc *wc)
2059{ 1999{
2060 struct isert_cq_desc *cq_desc = container_of(work,
2061 struct isert_cq_desc, cq_tx_work);
2062 struct isert_device *device = cq_desc->device;
2063 int cq_index = cq_desc->cq_index;
2064 struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
2065 struct isert_conn *isert_conn; 2000 struct isert_conn *isert_conn;
2066 struct iser_tx_desc *tx_desc; 2001 struct iser_tx_desc *tx_desc;
2067 struct ib_wc wc; 2002 struct iser_rx_desc *rx_desc;
2068
2069 while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
2070 tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
2071 isert_conn = wc.qp->qp_context;
2072 2003
2073 if (wc.status == IB_WC_SUCCESS) { 2004 isert_conn = wc->qp->qp_context;
2074 isert_send_completion(tx_desc, isert_conn); 2005 if (likely(wc->status == IB_WC_SUCCESS)) {
2006 if (wc->opcode == IB_WC_RECV) {
2007 rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
2008 isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
2075 } else { 2009 } else {
2076 pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); 2010 tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
2077 pr_debug("TX wc.status: 0x%08x\n", wc.status); 2011 isert_send_completion(tx_desc, isert_conn);
2078 pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
2079
2080 if (wc.wr_id != ISER_FASTREG_LI_WRID) {
2081 if (tx_desc->llnode_active)
2082 continue;
2083
2084 atomic_dec(&isert_conn->post_send_buf_count);
2085 isert_cq_tx_comp_err(tx_desc, isert_conn);
2086 }
2087 } 2012 }
2088 } 2013 } else {
2089 2014 if (wc->status != IB_WC_WR_FLUSH_ERR)
2090 ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP); 2015 isert_err("wr id %llx status %d vend_err %x\n",
2091} 2016 wc->wr_id, wc->status, wc->vendor_err);
2092 2017 else
2093static void 2018 isert_dbg("flush error: wr id %llx\n", wc->wr_id);
2094isert_cq_tx_callback(struct ib_cq *cq, void *context)
2095{
2096 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
2097 2019
2098 queue_work(isert_comp_wq, &cq_desc->cq_tx_work); 2020 if (wc->wr_id != ISER_FASTREG_LI_WRID)
2021 isert_cq_comp_err(isert_conn, wc);
2022 }
2099} 2023}
2100 2024
2101static void 2025static void
2102isert_cq_rx_work(struct work_struct *work) 2026isert_cq_work(struct work_struct *work)
2103{ 2027{
2104 struct isert_cq_desc *cq_desc = container_of(work, 2028 enum { isert_poll_budget = 65536 };
2105 struct isert_cq_desc, cq_rx_work); 2029 struct isert_comp *comp = container_of(work, struct isert_comp,
2106 struct isert_device *device = cq_desc->device; 2030 work);
2107 int cq_index = cq_desc->cq_index; 2031 struct ib_wc *const wcs = comp->wcs;
2108 struct ib_cq *rx_cq = device->dev_rx_cq[cq_index]; 2032 int i, n, completed = 0;
2109 struct isert_conn *isert_conn;
2110 struct iser_rx_desc *rx_desc;
2111 struct ib_wc wc;
2112 unsigned long xfer_len;
2113 2033
2114 while (ib_poll_cq(rx_cq, 1, &wc) == 1) { 2034 while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
2115 rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id; 2035 for (i = 0; i < n; i++)
2116 isert_conn = wc.qp->qp_context; 2036 isert_handle_wc(&wcs[i]);
2117 2037
2118 if (wc.status == IB_WC_SUCCESS) { 2038 completed += n;
2119 xfer_len = (unsigned long)wc.byte_len; 2039 if (completed >= isert_poll_budget)
2120 isert_rx_completion(rx_desc, isert_conn, xfer_len); 2040 break;
2121 } else {
2122 pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
2123 if (wc.status != IB_WC_WR_FLUSH_ERR) {
2124 pr_debug("RX wc.status: 0x%08x\n", wc.status);
2125 pr_debug("RX wc.vendor_err: 0x%08x\n",
2126 wc.vendor_err);
2127 }
2128 isert_conn->post_recv_buf_count--;
2129 isert_cq_rx_comp_err(isert_conn);
2130 }
2131 } 2041 }
2132 2042
2133 ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP); 2043 ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
2134} 2044}
2135 2045
2136static void 2046static void
2137isert_cq_rx_callback(struct ib_cq *cq, void *context) 2047isert_cq_callback(struct ib_cq *cq, void *context)
2138{ 2048{
2139 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context; 2049 struct isert_comp *comp = context;
2140 2050
2141 queue_work(isert_rx_wq, &cq_desc->cq_rx_work); 2051 queue_work(isert_comp_wq, &comp->work);
2142} 2052}
2143 2053
2144static int 2054static int
@@ -2147,13 +2057,10 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2147 struct ib_send_wr *wr_failed; 2057 struct ib_send_wr *wr_failed;
2148 int ret; 2058 int ret;
2149 2059
2150 atomic_inc(&isert_conn->post_send_buf_count);
2151
2152 ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr, 2060 ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
2153 &wr_failed); 2061 &wr_failed);
2154 if (ret) { 2062 if (ret) {
2155 pr_err("ib_post_send failed with %d\n", ret); 2063 isert_err("ib_post_send failed with %d\n", ret);
2156 atomic_dec(&isert_conn->post_send_buf_count);
2157 return ret; 2064 return ret;
2158 } 2065 }
2159 return ret; 2066 return ret;
@@ -2200,9 +2107,9 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2200 isert_cmd->tx_desc.num_sge = 2; 2107 isert_cmd->tx_desc.num_sge = 2;
2201 } 2108 }
2202 2109
2203 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); 2110 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2204 2111
2205 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2112 isert_dbg("Posting SCSI Response\n");
2206 2113
2207 return isert_post_response(isert_conn, isert_cmd); 2114 return isert_post_response(isert_conn, isert_cmd);
2208} 2115}
@@ -2231,8 +2138,16 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn)
2231 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 2138 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2232 struct isert_device *device = isert_conn->conn_device; 2139 struct isert_device *device = isert_conn->conn_device;
2233 2140
2234 if (device->pi_capable) 2141 if (conn->tpg->tpg_attrib.t10_pi) {
2235 return TARGET_PROT_ALL; 2142 if (device->pi_capable) {
2143 isert_info("conn %p PI offload enabled\n", isert_conn);
2144 isert_conn->pi_support = true;
2145 return TARGET_PROT_ALL;
2146 }
2147 }
2148
2149 isert_info("conn %p PI offload disabled\n", isert_conn);
2150 isert_conn->pi_support = false;
2236 2151
2237 return TARGET_PROT_NORMAL; 2152 return TARGET_PROT_NORMAL;
2238} 2153}
@@ -2250,9 +2165,9 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2250 &isert_cmd->tx_desc.iscsi_header, 2165 &isert_cmd->tx_desc.iscsi_header,
2251 nopout_response); 2166 nopout_response);
2252 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2167 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2253 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); 2168 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2254 2169
2255 pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2170 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
2256 2171
2257 return isert_post_response(isert_conn, isert_cmd); 2172 return isert_post_response(isert_conn, isert_cmd);
2258} 2173}
@@ -2268,9 +2183,9 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2268 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *) 2183 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
2269 &isert_cmd->tx_desc.iscsi_header); 2184 &isert_cmd->tx_desc.iscsi_header);
2270 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2185 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2271 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); 2186 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2272 2187
2273 pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2188 isert_dbg("conn %p Posting Logout Response\n", isert_conn);
2274 2189
2275 return isert_post_response(isert_conn, isert_cmd); 2190 return isert_post_response(isert_conn, isert_cmd);
2276} 2191}
@@ -2286,9 +2201,9 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2286 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *) 2201 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
2287 &isert_cmd->tx_desc.iscsi_header); 2202 &isert_cmd->tx_desc.iscsi_header);
2288 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2203 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2289 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); 2204 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2290 2205
2291 pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2206 isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
2292 2207
2293 return isert_post_response(isert_conn, isert_cmd); 2208 return isert_post_response(isert_conn, isert_cmd);
2294} 2209}
@@ -2318,9 +2233,9 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2318 tx_dsg->lkey = isert_conn->conn_mr->lkey; 2233 tx_dsg->lkey = isert_conn->conn_mr->lkey;
2319 isert_cmd->tx_desc.num_sge = 2; 2234 isert_cmd->tx_desc.num_sge = 2;
2320 2235
2321 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); 2236 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2322 2237
2323 pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2238 isert_dbg("conn %p Posting Reject\n", isert_conn);
2324 2239
2325 return isert_post_response(isert_conn, isert_cmd); 2240 return isert_post_response(isert_conn, isert_cmd);
2326} 2241}
@@ -2358,9 +2273,9 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2358 tx_dsg->lkey = isert_conn->conn_mr->lkey; 2273 tx_dsg->lkey = isert_conn->conn_mr->lkey;
2359 isert_cmd->tx_desc.num_sge = 2; 2274 isert_cmd->tx_desc.num_sge = 2;
2360 } 2275 }
2361 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false); 2276 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2362 2277
2363 pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2278 isert_dbg("conn %p Text Reject\n", isert_conn);
2364 2279
2365 return isert_post_response(isert_conn, isert_cmd); 2280 return isert_post_response(isert_conn, isert_cmd);
2366} 2281}
@@ -2383,30 +2298,31 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2383 2298
2384 send_wr->sg_list = ib_sge; 2299 send_wr->sg_list = ib_sge;
2385 send_wr->num_sge = sg_nents; 2300 send_wr->num_sge = sg_nents;
2386 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; 2301 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
2387 /* 2302 /*
2388 * Perform mapping of TCM scatterlist memory ib_sge dma_addr. 2303 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
2389 */ 2304 */
2390 for_each_sg(sg_start, tmp_sg, sg_nents, i) { 2305 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2391 pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n", 2306 isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
2392 (unsigned long long)tmp_sg->dma_address, 2307 "page_off: %u\n",
2393 tmp_sg->length, page_off); 2308 (unsigned long long)tmp_sg->dma_address,
2309 tmp_sg->length, page_off);
2394 2310
2395 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off; 2311 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2396 ib_sge->length = min_t(u32, data_left, 2312 ib_sge->length = min_t(u32, data_left,
2397 ib_sg_dma_len(ib_dev, tmp_sg) - page_off); 2313 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
2398 ib_sge->lkey = isert_conn->conn_mr->lkey; 2314 ib_sge->lkey = isert_conn->conn_mr->lkey;
2399 2315
2400 pr_debug("RDMA ib_sge: addr: 0x%16llx length: %u lkey: %08x\n", 2316 isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
2401 ib_sge->addr, ib_sge->length, ib_sge->lkey); 2317 ib_sge->addr, ib_sge->length, ib_sge->lkey);
2402 page_off = 0; 2318 page_off = 0;
2403 data_left -= ib_sge->length; 2319 data_left -= ib_sge->length;
2404 ib_sge++; 2320 ib_sge++;
2405 pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge); 2321 isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
2406 } 2322 }
2407 2323
2408 pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n", 2324 isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
2409 send_wr->sg_list, send_wr->num_sge); 2325 send_wr->sg_list, send_wr->num_sge);
2410 2326
2411 return sg_nents; 2327 return sg_nents;
2412} 2328}
@@ -2438,7 +2354,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2438 2354
2439 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL); 2355 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
2440 if (!ib_sge) { 2356 if (!ib_sge) {
2441 pr_warn("Unable to allocate ib_sge\n"); 2357 isert_warn("Unable to allocate ib_sge\n");
2442 ret = -ENOMEM; 2358 ret = -ENOMEM;
2443 goto unmap_cmd; 2359 goto unmap_cmd;
2444 } 2360 }
@@ -2448,7 +2364,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2448 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num, 2364 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2449 GFP_KERNEL); 2365 GFP_KERNEL);
2450 if (!wr->send_wr) { 2366 if (!wr->send_wr) {
2451 pr_debug("Unable to allocate wr->send_wr\n"); 2367 isert_dbg("Unable to allocate wr->send_wr\n");
2452 ret = -ENOMEM; 2368 ret = -ENOMEM;
2453 goto unmap_cmd; 2369 goto unmap_cmd;
2454 } 2370 }
@@ -2512,9 +2428,9 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
2512 chunk_start = start_addr; 2428 chunk_start = start_addr;
2513 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg); 2429 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2514 2430
2515 pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n", 2431 isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
2516 i, (unsigned long long)tmp_sg->dma_address, 2432 i, (unsigned long long)tmp_sg->dma_address,
2517 tmp_sg->length); 2433 tmp_sg->length);
2518 2434
2519 if ((end_addr & ~PAGE_MASK) && i < last_ent) { 2435 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2520 new_chunk = 0; 2436 new_chunk = 0;
@@ -2525,8 +2441,8 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
2525 page = chunk_start & PAGE_MASK; 2441 page = chunk_start & PAGE_MASK;
2526 do { 2442 do {
2527 fr_pl[n_pages++] = page; 2443 fr_pl[n_pages++] = page;
2528 pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n", 2444 isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
2529 n_pages - 1, page); 2445 n_pages - 1, page);
2530 page += PAGE_SIZE; 2446 page += PAGE_SIZE;
2531 } while (page < end_addr); 2447 } while (page < end_addr);
2532 } 2448 }
@@ -2534,6 +2450,21 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
2534 return n_pages; 2450 return n_pages;
2535} 2451}
2536 2452
2453static inline void
2454isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
2455{
2456 u32 rkey;
2457
2458 memset(inv_wr, 0, sizeof(*inv_wr));
2459 inv_wr->wr_id = ISER_FASTREG_LI_WRID;
2460 inv_wr->opcode = IB_WR_LOCAL_INV;
2461 inv_wr->ex.invalidate_rkey = mr->rkey;
2462
2463 /* Bump the key */
2464 rkey = ib_inc_rkey(mr->rkey);
2465 ib_update_fast_reg_key(mr, rkey);
2466}
2467
2537static int 2468static int
2538isert_fast_reg_mr(struct isert_conn *isert_conn, 2469isert_fast_reg_mr(struct isert_conn *isert_conn,
2539 struct fast_reg_descriptor *fr_desc, 2470 struct fast_reg_descriptor *fr_desc,
@@ -2548,15 +2479,13 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
2548 struct ib_send_wr *bad_wr, *wr = NULL; 2479 struct ib_send_wr *bad_wr, *wr = NULL;
2549 int ret, pagelist_len; 2480 int ret, pagelist_len;
2550 u32 page_off; 2481 u32 page_off;
2551 u8 key;
2552 2482
2553 if (mem->dma_nents == 1) { 2483 if (mem->dma_nents == 1) {
2554 sge->lkey = isert_conn->conn_mr->lkey; 2484 sge->lkey = isert_conn->conn_mr->lkey;
2555 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]); 2485 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
2556 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]); 2486 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
2557 pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", 2487 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
2558 __func__, __LINE__, sge->addr, sge->length, 2488 sge->addr, sge->length, sge->lkey);
2559 sge->lkey);
2560 return 0; 2489 return 0;
2561 } 2490 }
2562 2491
@@ -2572,21 +2501,15 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
2572 2501
2573 page_off = mem->offset % PAGE_SIZE; 2502 page_off = mem->offset % PAGE_SIZE;
2574 2503
2575 pr_debug("Use fr_desc %p sg_nents %d offset %u\n", 2504 isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
2576 fr_desc, mem->nents, mem->offset); 2505 fr_desc, mem->nents, mem->offset);
2577 2506
2578 pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents, 2507 pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
2579 &frpl->page_list[0]); 2508 &frpl->page_list[0]);
2580 2509
2581 if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) { 2510 if (!(fr_desc->ind & ind)) {
2582 memset(&inv_wr, 0, sizeof(inv_wr)); 2511 isert_inv_rkey(&inv_wr, mr);
2583 inv_wr.wr_id = ISER_FASTREG_LI_WRID;
2584 inv_wr.opcode = IB_WR_LOCAL_INV;
2585 inv_wr.ex.invalidate_rkey = mr->rkey;
2586 wr = &inv_wr; 2512 wr = &inv_wr;
2587 /* Bump the key */
2588 key = (u8)(mr->rkey & 0x000000FF);
2589 ib_update_fast_reg_key(mr, ++key);
2590 } 2513 }
2591 2514
2592 /* Prepare FASTREG WR */ 2515 /* Prepare FASTREG WR */
@@ -2608,7 +2531,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
2608 2531
2609 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); 2532 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2610 if (ret) { 2533 if (ret) {
2611 pr_err("fast registration failed, ret:%d\n", ret); 2534 isert_err("fast registration failed, ret:%d\n", ret);
2612 return ret; 2535 return ret;
2613 } 2536 }
2614 fr_desc->ind &= ~ind; 2537 fr_desc->ind &= ~ind;
@@ -2617,9 +2540,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
2617 sge->addr = frpl->page_list[0] + page_off; 2540 sge->addr = frpl->page_list[0] + page_off;
2618 sge->length = mem->len; 2541 sge->length = mem->len;
2619 2542
2620 pr_debug("%s:%d sge: addr: 0x%llx length: %u lkey: %x\n", 2543 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
2621 __func__, __LINE__, sge->addr, sge->length, 2544 sge->addr, sge->length, sge->lkey);
2622 sge->lkey);
2623 2545
2624 return ret; 2546 return ret;
2625} 2547}
@@ -2665,7 +2587,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2665 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem); 2587 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2666 break; 2588 break;
2667 default: 2589 default:
2668 pr_err("Unsupported PI operation %d\n", se_cmd->prot_op); 2590 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
2669 return -EINVAL; 2591 return -EINVAL;
2670 } 2592 }
2671 2593
@@ -2681,17 +2603,16 @@ isert_set_prot_checks(u8 prot_checks)
2681} 2603}
2682 2604
2683static int 2605static int
2684isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd, 2606isert_reg_sig_mr(struct isert_conn *isert_conn,
2685 struct fast_reg_descriptor *fr_desc, 2607 struct se_cmd *se_cmd,
2686 struct ib_sge *data_sge, struct ib_sge *prot_sge, 2608 struct isert_rdma_wr *rdma_wr,
2687 struct ib_sge *sig_sge) 2609 struct fast_reg_descriptor *fr_desc)
2688{ 2610{
2689 struct ib_send_wr sig_wr, inv_wr; 2611 struct ib_send_wr sig_wr, inv_wr;
2690 struct ib_send_wr *bad_wr, *wr = NULL; 2612 struct ib_send_wr *bad_wr, *wr = NULL;
2691 struct pi_context *pi_ctx = fr_desc->pi_ctx; 2613 struct pi_context *pi_ctx = fr_desc->pi_ctx;
2692 struct ib_sig_attrs sig_attrs; 2614 struct ib_sig_attrs sig_attrs;
2693 int ret; 2615 int ret;
2694 u32 key;
2695 2616
2696 memset(&sig_attrs, 0, sizeof(sig_attrs)); 2617 memset(&sig_attrs, 0, sizeof(sig_attrs));
2697 ret = isert_set_sig_attrs(se_cmd, &sig_attrs); 2618 ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
@@ -2701,26 +2622,20 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
2701 sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks); 2622 sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
2702 2623
2703 if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) { 2624 if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
2704 memset(&inv_wr, 0, sizeof(inv_wr)); 2625 isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
2705 inv_wr.opcode = IB_WR_LOCAL_INV;
2706 inv_wr.wr_id = ISER_FASTREG_LI_WRID;
2707 inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
2708 wr = &inv_wr; 2626 wr = &inv_wr;
2709 /* Bump the key */
2710 key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
2711 ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
2712 } 2627 }
2713 2628
2714 memset(&sig_wr, 0, sizeof(sig_wr)); 2629 memset(&sig_wr, 0, sizeof(sig_wr));
2715 sig_wr.opcode = IB_WR_REG_SIG_MR; 2630 sig_wr.opcode = IB_WR_REG_SIG_MR;
2716 sig_wr.wr_id = ISER_FASTREG_LI_WRID; 2631 sig_wr.wr_id = ISER_FASTREG_LI_WRID;
2717 sig_wr.sg_list = data_sge; 2632 sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
2718 sig_wr.num_sge = 1; 2633 sig_wr.num_sge = 1;
2719 sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE; 2634 sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
2720 sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; 2635 sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
2721 sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; 2636 sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
2722 if (se_cmd->t_prot_sg) 2637 if (se_cmd->t_prot_sg)
2723 sig_wr.wr.sig_handover.prot = prot_sge; 2638 sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
2724 2639
2725 if (!wr) 2640 if (!wr)
2726 wr = &sig_wr; 2641 wr = &sig_wr;
@@ -2729,39 +2644,98 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
2729 2644
2730 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr); 2645 ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
2731 if (ret) { 2646 if (ret) {
2732 pr_err("fast registration failed, ret:%d\n", ret); 2647 isert_err("fast registration failed, ret:%d\n", ret);
2733 goto err; 2648 goto err;
2734 } 2649 }
2735 fr_desc->ind &= ~ISERT_SIG_KEY_VALID; 2650 fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
2736 2651
2737 sig_sge->lkey = pi_ctx->sig_mr->lkey; 2652 rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
2738 sig_sge->addr = 0; 2653 rdma_wr->ib_sg[SIG].addr = 0;
2739 sig_sge->length = se_cmd->data_length; 2654 rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
2740 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP && 2655 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
2741 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT) 2656 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
2742 /* 2657 /*
2743 * We have protection guards on the wire 2658 * We have protection guards on the wire
2744 * so we need to set a larget transfer 2659 * so we need to set a larget transfer
2745 */ 2660 */
2746 sig_sge->length += se_cmd->prot_length; 2661 rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
2747 2662
2748 pr_debug("sig_sge: addr: 0x%llx length: %u lkey: %x\n", 2663 isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
2749 sig_sge->addr, sig_sge->length, 2664 rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
2750 sig_sge->lkey); 2665 rdma_wr->ib_sg[SIG].lkey);
2751err: 2666err:
2752 return ret; 2667 return ret;
2753} 2668}
2754 2669
2755static int 2670static int
2671isert_handle_prot_cmd(struct isert_conn *isert_conn,
2672 struct isert_cmd *isert_cmd,
2673 struct isert_rdma_wr *wr)
2674{
2675 struct isert_device *device = isert_conn->conn_device;
2676 struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
2677 int ret;
2678
2679 if (!wr->fr_desc->pi_ctx) {
2680 ret = isert_create_pi_ctx(wr->fr_desc,
2681 device->ib_device,
2682 isert_conn->conn_pd);
2683 if (ret) {
2684 isert_err("conn %p failed to allocate pi_ctx\n",
2685 isert_conn);
2686 return ret;
2687 }
2688 }
2689
2690 if (se_cmd->t_prot_sg) {
2691 ret = isert_map_data_buf(isert_conn, isert_cmd,
2692 se_cmd->t_prot_sg,
2693 se_cmd->t_prot_nents,
2694 se_cmd->prot_length,
2695 0, wr->iser_ib_op, &wr->prot);
2696 if (ret) {
2697 isert_err("conn %p failed to map protection buffer\n",
2698 isert_conn);
2699 return ret;
2700 }
2701
2702 memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
2703 ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
2704 ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
2705 if (ret) {
2706 isert_err("conn %p failed to fast reg mr\n",
2707 isert_conn);
2708 goto unmap_prot_cmd;
2709 }
2710 }
2711
2712 ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
2713 if (ret) {
2714 isert_err("conn %p failed to fast reg mr\n",
2715 isert_conn);
2716 goto unmap_prot_cmd;
2717 }
2718 wr->fr_desc->ind |= ISERT_PROTECTED;
2719
2720 return 0;
2721
2722unmap_prot_cmd:
2723 if (se_cmd->t_prot_sg)
2724 isert_unmap_data_buf(isert_conn, &wr->prot);
2725
2726 return ret;
2727}
2728
2729static int
2756isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd, 2730isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2757 struct isert_rdma_wr *wr) 2731 struct isert_rdma_wr *wr)
2758{ 2732{
2759 struct se_cmd *se_cmd = &cmd->se_cmd; 2733 struct se_cmd *se_cmd = &cmd->se_cmd;
2760 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd); 2734 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2761 struct isert_conn *isert_conn = conn->context; 2735 struct isert_conn *isert_conn = conn->context;
2762 struct ib_sge data_sge;
2763 struct ib_send_wr *send_wr;
2764 struct fast_reg_descriptor *fr_desc = NULL; 2736 struct fast_reg_descriptor *fr_desc = NULL;
2737 struct ib_send_wr *send_wr;
2738 struct ib_sge *ib_sg;
2765 u32 offset; 2739 u32 offset;
2766 int ret = 0; 2740 int ret = 0;
2767 unsigned long flags; 2741 unsigned long flags;
@@ -2775,8 +2749,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2775 if (ret) 2749 if (ret)
2776 return ret; 2750 return ret;
2777 2751
2778 if (wr->data.dma_nents != 1 || 2752 if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
2779 se_cmd->prot_op != TARGET_PROT_NORMAL) {
2780 spin_lock_irqsave(&isert_conn->conn_lock, flags); 2753 spin_lock_irqsave(&isert_conn->conn_lock, flags);
2781 fr_desc = list_first_entry(&isert_conn->conn_fr_pool, 2754 fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
2782 struct fast_reg_descriptor, list); 2755 struct fast_reg_descriptor, list);
@@ -2786,38 +2759,21 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2786 } 2759 }
2787 2760
2788 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data, 2761 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
2789 ISERT_DATA_KEY_VALID, &data_sge); 2762 ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
2790 if (ret) 2763 if (ret)
2791 goto unmap_cmd; 2764 goto unmap_cmd;
2792 2765
2793 if (se_cmd->prot_op != TARGET_PROT_NORMAL) { 2766 if (isert_prot_cmd(isert_conn, se_cmd)) {
2794 struct ib_sge prot_sge, sig_sge; 2767 ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
2795
2796 if (se_cmd->t_prot_sg) {
2797 ret = isert_map_data_buf(isert_conn, isert_cmd,
2798 se_cmd->t_prot_sg,
2799 se_cmd->t_prot_nents,
2800 se_cmd->prot_length,
2801 0, wr->iser_ib_op, &wr->prot);
2802 if (ret)
2803 goto unmap_cmd;
2804
2805 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
2806 ISERT_PROT_KEY_VALID, &prot_sge);
2807 if (ret)
2808 goto unmap_prot_cmd;
2809 }
2810
2811 ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
2812 &data_sge, &prot_sge, &sig_sge);
2813 if (ret) 2768 if (ret)
2814 goto unmap_prot_cmd; 2769 goto unmap_cmd;
2815 2770
2816 fr_desc->ind |= ISERT_PROTECTED; 2771 ib_sg = &wr->ib_sg[SIG];
2817 memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge)); 2772 } else {
2818 } else 2773 ib_sg = &wr->ib_sg[DATA];
2819 memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge)); 2774 }
2820 2775
2776 memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
2821 wr->ib_sge = &wr->s_ib_sge; 2777 wr->ib_sge = &wr->s_ib_sge;
2822 wr->send_wr_num = 1; 2778 wr->send_wr_num = 1;
2823 memset(&wr->s_send_wr, 0, sizeof(*send_wr)); 2779 memset(&wr->s_send_wr, 0, sizeof(*send_wr));
@@ -2827,12 +2783,12 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2827 send_wr = &isert_cmd->rdma_wr.s_send_wr; 2783 send_wr = &isert_cmd->rdma_wr.s_send_wr;
2828 send_wr->sg_list = &wr->s_ib_sge; 2784 send_wr->sg_list = &wr->s_ib_sge;
2829 send_wr->num_sge = 1; 2785 send_wr->num_sge = 1;
2830 send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc; 2786 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
2831 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) { 2787 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2832 send_wr->opcode = IB_WR_RDMA_WRITE; 2788 send_wr->opcode = IB_WR_RDMA_WRITE;
2833 send_wr->wr.rdma.remote_addr = isert_cmd->read_va; 2789 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2834 send_wr->wr.rdma.rkey = isert_cmd->read_stag; 2790 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2835 send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ? 2791 send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
2836 0 : IB_SEND_SIGNALED; 2792 0 : IB_SEND_SIGNALED;
2837 } else { 2793 } else {
2838 send_wr->opcode = IB_WR_RDMA_READ; 2794 send_wr->opcode = IB_WR_RDMA_READ;
@@ -2842,9 +2798,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2842 } 2798 }
2843 2799
2844 return 0; 2800 return 0;
2845unmap_prot_cmd: 2801
2846 if (se_cmd->t_prot_sg)
2847 isert_unmap_data_buf(isert_conn, &wr->prot);
2848unmap_cmd: 2802unmap_cmd:
2849 if (fr_desc) { 2803 if (fr_desc) {
2850 spin_lock_irqsave(&isert_conn->conn_lock, flags); 2804 spin_lock_irqsave(&isert_conn->conn_lock, flags);
@@ -2867,16 +2821,17 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2867 struct ib_send_wr *wr_failed; 2821 struct ib_send_wr *wr_failed;
2868 int rc; 2822 int rc;
2869 2823
2870 pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n", 2824 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
2871 isert_cmd, se_cmd->data_length); 2825 isert_cmd, se_cmd->data_length);
2826
2872 wr->iser_ib_op = ISER_IB_RDMA_WRITE; 2827 wr->iser_ib_op = ISER_IB_RDMA_WRITE;
2873 rc = device->reg_rdma_mem(conn, cmd, wr); 2828 rc = device->reg_rdma_mem(conn, cmd, wr);
2874 if (rc) { 2829 if (rc) {
2875 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); 2830 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2876 return rc; 2831 return rc;
2877 } 2832 }
2878 2833
2879 if (se_cmd->prot_op == TARGET_PROT_NORMAL) { 2834 if (!isert_prot_cmd(isert_conn, se_cmd)) {
2880 /* 2835 /*
2881 * Build isert_conn->tx_desc for iSCSI response PDU and attach 2836 * Build isert_conn->tx_desc for iSCSI response PDU and attach
2882 */ 2837 */
@@ -2886,24 +2841,20 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2886 &isert_cmd->tx_desc.iscsi_header); 2841 &isert_cmd->tx_desc.iscsi_header);
2887 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2842 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2888 isert_init_send_wr(isert_conn, isert_cmd, 2843 isert_init_send_wr(isert_conn, isert_cmd,
2889 &isert_cmd->tx_desc.send_wr, false); 2844 &isert_cmd->tx_desc.send_wr);
2890 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; 2845 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
2891 wr->send_wr_num += 1; 2846 wr->send_wr_num += 1;
2892 } 2847 }
2893 2848
2894 atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
2895
2896 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); 2849 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2897 if (rc) { 2850 if (rc)
2898 pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); 2851 isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2899 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
2900 }
2901 2852
2902 if (se_cmd->prot_op == TARGET_PROT_NORMAL) 2853 if (!isert_prot_cmd(isert_conn, se_cmd))
2903 pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data " 2854 isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
2904 "READ\n", isert_cmd); 2855 "READ\n", isert_cmd);
2905 else 2856 else
2906 pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", 2857 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
2907 isert_cmd); 2858 isert_cmd);
2908 2859
2909 return 1; 2860 return 1;
@@ -2920,23 +2871,20 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2920 struct ib_send_wr *wr_failed; 2871 struct ib_send_wr *wr_failed;
2921 int rc; 2872 int rc;
2922 2873
2923 pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n", 2874 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2924 isert_cmd, se_cmd->data_length, cmd->write_data_done); 2875 isert_cmd, se_cmd->data_length, cmd->write_data_done);
2925 wr->iser_ib_op = ISER_IB_RDMA_READ; 2876 wr->iser_ib_op = ISER_IB_RDMA_READ;
2926 rc = device->reg_rdma_mem(conn, cmd, wr); 2877 rc = device->reg_rdma_mem(conn, cmd, wr);
2927 if (rc) { 2878 if (rc) {
2928 pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd); 2879 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2929 return rc; 2880 return rc;
2930 } 2881 }
2931 2882
2932 atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
2933
2934 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); 2883 rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
2935 if (rc) { 2884 if (rc)
2936 pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); 2885 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
2937 atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); 2886
2938 } 2887 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
2939 pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
2940 isert_cmd); 2888 isert_cmd);
2941 2889
2942 return 0; 2890 return 0;
@@ -2952,7 +2900,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2952 ret = isert_put_nopin(cmd, conn, false); 2900 ret = isert_put_nopin(cmd, conn, false);
2953 break; 2901 break;
2954 default: 2902 default:
2955 pr_err("Unknown immediate state: 0x%02x\n", state); 2903 isert_err("Unknown immediate state: 0x%02x\n", state);
2956 ret = -EINVAL; 2904 ret = -EINVAL;
2957 break; 2905 break;
2958 } 2906 }
@@ -2963,15 +2911,14 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2963static int 2911static int
2964isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state) 2912isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2965{ 2913{
2914 struct isert_conn *isert_conn = conn->context;
2966 int ret; 2915 int ret;
2967 2916
2968 switch (state) { 2917 switch (state) {
2969 case ISTATE_SEND_LOGOUTRSP: 2918 case ISTATE_SEND_LOGOUTRSP:
2970 ret = isert_put_logout_rsp(cmd, conn); 2919 ret = isert_put_logout_rsp(cmd, conn);
2971 if (!ret) { 2920 if (!ret)
2972 pr_debug("Returning iSER Logout -EAGAIN\n"); 2921 isert_conn->logout_posted = true;
2973 ret = -EAGAIN;
2974 }
2975 break; 2922 break;
2976 case ISTATE_SEND_NOPIN: 2923 case ISTATE_SEND_NOPIN:
2977 ret = isert_put_nopin(cmd, conn, true); 2924 ret = isert_put_nopin(cmd, conn, true);
@@ -2993,7 +2940,7 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2993 ret = isert_put_response(conn, cmd); 2940 ret = isert_put_response(conn, cmd);
2994 break; 2941 break;
2995 default: 2942 default:
2996 pr_err("Unknown response state: 0x%02x\n", state); 2943 isert_err("Unknown response state: 0x%02x\n", state);
2997 ret = -EINVAL; 2944 ret = -EINVAL;
2998 break; 2945 break;
2999 } 2946 }
@@ -3001,27 +2948,64 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3001 return ret; 2948 return ret;
3002} 2949}
3003 2950
2951struct rdma_cm_id *
2952isert_setup_id(struct isert_np *isert_np)
2953{
2954 struct iscsi_np *np = isert_np->np;
2955 struct rdma_cm_id *id;
2956 struct sockaddr *sa;
2957 int ret;
2958
2959 sa = (struct sockaddr *)&np->np_sockaddr;
2960 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
2961
2962 id = rdma_create_id(isert_cma_handler, isert_np,
2963 RDMA_PS_TCP, IB_QPT_RC);
2964 if (IS_ERR(id)) {
2965 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
2966 ret = PTR_ERR(id);
2967 goto out;
2968 }
2969 isert_dbg("id %p context %p\n", id, id->context);
2970
2971 ret = rdma_bind_addr(id, sa);
2972 if (ret) {
2973 isert_err("rdma_bind_addr() failed: %d\n", ret);
2974 goto out_id;
2975 }
2976
2977 ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
2978 if (ret) {
2979 isert_err("rdma_listen() failed: %d\n", ret);
2980 goto out_id;
2981 }
2982
2983 return id;
2984out_id:
2985 rdma_destroy_id(id);
2986out:
2987 return ERR_PTR(ret);
2988}
2989
3004static int 2990static int
3005isert_setup_np(struct iscsi_np *np, 2991isert_setup_np(struct iscsi_np *np,
3006 struct __kernel_sockaddr_storage *ksockaddr) 2992 struct __kernel_sockaddr_storage *ksockaddr)
3007{ 2993{
3008 struct isert_np *isert_np; 2994 struct isert_np *isert_np;
3009 struct rdma_cm_id *isert_lid; 2995 struct rdma_cm_id *isert_lid;
3010 struct sockaddr *sa;
3011 int ret; 2996 int ret;
3012 2997
3013 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL); 2998 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
3014 if (!isert_np) { 2999 if (!isert_np) {
3015 pr_err("Unable to allocate struct isert_np\n"); 3000 isert_err("Unable to allocate struct isert_np\n");
3016 return -ENOMEM; 3001 return -ENOMEM;
3017 } 3002 }
3018 sema_init(&isert_np->np_sem, 0); 3003 sema_init(&isert_np->np_sem, 0);
3019 mutex_init(&isert_np->np_accept_mutex); 3004 mutex_init(&isert_np->np_accept_mutex);
3020 INIT_LIST_HEAD(&isert_np->np_accept_list); 3005 INIT_LIST_HEAD(&isert_np->np_accept_list);
3021 init_completion(&isert_np->np_login_comp); 3006 init_completion(&isert_np->np_login_comp);
3007 isert_np->np = np;
3022 3008
3023 sa = (struct sockaddr *)ksockaddr;
3024 pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
3025 /* 3009 /*
3026 * Setup the np->np_sockaddr from the passed sockaddr setup 3010 * Setup the np->np_sockaddr from the passed sockaddr setup
3027 * in iscsi_target_configfs.c code.. 3011 * in iscsi_target_configfs.c code..
@@ -3029,37 +3013,20 @@ isert_setup_np(struct iscsi_np *np,
3029 memcpy(&np->np_sockaddr, ksockaddr, 3013 memcpy(&np->np_sockaddr, ksockaddr,
3030 sizeof(struct __kernel_sockaddr_storage)); 3014 sizeof(struct __kernel_sockaddr_storage));
3031 3015
3032 isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP, 3016 isert_lid = isert_setup_id(isert_np);
3033 IB_QPT_RC);
3034 if (IS_ERR(isert_lid)) { 3017 if (IS_ERR(isert_lid)) {
3035 pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
3036 PTR_ERR(isert_lid));
3037 ret = PTR_ERR(isert_lid); 3018 ret = PTR_ERR(isert_lid);
3038 goto out; 3019 goto out;
3039 } 3020 }
3040 3021
3041 ret = rdma_bind_addr(isert_lid, sa);
3042 if (ret) {
3043 pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
3044 goto out_lid;
3045 }
3046
3047 ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
3048 if (ret) {
3049 pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
3050 goto out_lid;
3051 }
3052
3053 isert_np->np_cm_id = isert_lid; 3022 isert_np->np_cm_id = isert_lid;
3054 np->np_context = isert_np; 3023 np->np_context = isert_np;
3055 pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
3056 3024
3057 return 0; 3025 return 0;
3058 3026
3059out_lid:
3060 rdma_destroy_id(isert_lid);
3061out: 3027out:
3062 kfree(isert_np); 3028 kfree(isert_np);
3029
3063 return ret; 3030 return ret;
3064} 3031}
3065 3032
@@ -3075,16 +3042,12 @@ isert_rdma_accept(struct isert_conn *isert_conn)
3075 cp.retry_count = 7; 3042 cp.retry_count = 7;
3076 cp.rnr_retry_count = 7; 3043 cp.rnr_retry_count = 7;
3077 3044
3078 pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
3079
3080 ret = rdma_accept(cm_id, &cp); 3045 ret = rdma_accept(cm_id, &cp);
3081 if (ret) { 3046 if (ret) {
3082 pr_err("rdma_accept() failed with: %d\n", ret); 3047 isert_err("rdma_accept() failed with: %d\n", ret);
3083 return ret; 3048 return ret;
3084 } 3049 }
3085 3050
3086 pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
3087
3088 return 0; 3051 return 0;
3089} 3052}
3090 3053
@@ -3094,7 +3057,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
3094 struct isert_conn *isert_conn = (struct isert_conn *)conn->context; 3057 struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
3095 int ret; 3058 int ret;
3096 3059
3097 pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn); 3060 isert_info("before login_req comp conn: %p\n", isert_conn);
3061 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
3062 if (ret) {
3063 isert_err("isert_conn %p interrupted before got login req\n",
3064 isert_conn);
3065 return ret;
3066 }
3067 reinit_completion(&isert_conn->login_req_comp);
3068
3098 /* 3069 /*
3099 * For login requests after the first PDU, isert_rx_login_req() will 3070 * For login requests after the first PDU, isert_rx_login_req() will
3100 * kick schedule_delayed_work(&conn->login_work) as the packet is 3071 * kick schedule_delayed_work(&conn->login_work) as the packet is
@@ -3104,11 +3075,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
3104 if (!login->first_request) 3075 if (!login->first_request)
3105 return 0; 3076 return 0;
3106 3077
3078 isert_rx_login_req(isert_conn);
3079
3080 isert_info("before conn_login_comp conn: %p\n", conn);
3107 ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp); 3081 ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
3108 if (ret) 3082 if (ret)
3109 return ret; 3083 return ret;
3110 3084
3111 pr_debug("isert_get_login_rx processing login->req: %p\n", login->req); 3085 isert_info("processing login->req: %p\n", login->req);
3086
3112 return 0; 3087 return 0;
3113} 3088}
3114 3089
@@ -3161,7 +3136,7 @@ accept_wait:
3161 spin_lock_bh(&np->np_thread_lock); 3136 spin_lock_bh(&np->np_thread_lock);
3162 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { 3137 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
3163 spin_unlock_bh(&np->np_thread_lock); 3138 spin_unlock_bh(&np->np_thread_lock);
3164 pr_debug("np_thread_state %d for isert_accept_np\n", 3139 isert_dbg("np_thread_state %d for isert_accept_np\n",
3165 np->np_thread_state); 3140 np->np_thread_state);
3166 /** 3141 /**
3167 * No point in stalling here when np_thread 3142 * No point in stalling here when np_thread
@@ -3186,17 +3161,10 @@ accept_wait:
3186 isert_conn->conn = conn; 3161 isert_conn->conn = conn;
3187 max_accept = 0; 3162 max_accept = 0;
3188 3163
3189 ret = isert_rdma_post_recvl(isert_conn);
3190 if (ret)
3191 return ret;
3192
3193 ret = isert_rdma_accept(isert_conn);
3194 if (ret)
3195 return ret;
3196
3197 isert_set_conn_info(np, conn, isert_conn); 3164 isert_set_conn_info(np, conn, isert_conn);
3198 3165
3199 pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn); 3166 isert_dbg("Processing isert_conn: %p\n", isert_conn);
3167
3200 return 0; 3168 return 0;
3201} 3169}
3202 3170
@@ -3204,25 +3172,103 @@ static void
3204isert_free_np(struct iscsi_np *np) 3172isert_free_np(struct iscsi_np *np)
3205{ 3173{
3206 struct isert_np *isert_np = (struct isert_np *)np->np_context; 3174 struct isert_np *isert_np = (struct isert_np *)np->np_context;
3175 struct isert_conn *isert_conn, *n;
3207 3176
3208 if (isert_np->np_cm_id) 3177 if (isert_np->np_cm_id)
3209 rdma_destroy_id(isert_np->np_cm_id); 3178 rdma_destroy_id(isert_np->np_cm_id);
3210 3179
3180 /*
3181 * FIXME: At this point we don't have a good way to insure
3182 * that at this point we don't have hanging connections that
3183 * completed RDMA establishment but didn't start iscsi login
3184 * process. So work-around this by cleaning up what ever piled
3185 * up in np_accept_list.
3186 */
3187 mutex_lock(&isert_np->np_accept_mutex);
3188 if (!list_empty(&isert_np->np_accept_list)) {
3189 isert_info("Still have isert connections, cleaning up...\n");
3190 list_for_each_entry_safe(isert_conn, n,
3191 &isert_np->np_accept_list,
3192 conn_accept_node) {
3193 isert_info("cleaning isert_conn %p state (%d)\n",
3194 isert_conn, isert_conn->state);
3195 isert_connect_release(isert_conn);
3196 }
3197 }
3198 mutex_unlock(&isert_np->np_accept_mutex);
3199
3211 np->np_context = NULL; 3200 np->np_context = NULL;
3212 kfree(isert_np); 3201 kfree(isert_np);
3213} 3202}
3214 3203
3204static void isert_release_work(struct work_struct *work)
3205{
3206 struct isert_conn *isert_conn = container_of(work,
3207 struct isert_conn,
3208 release_work);
3209
3210 isert_info("Starting release conn %p\n", isert_conn);
3211
3212 wait_for_completion(&isert_conn->conn_wait);
3213
3214 mutex_lock(&isert_conn->conn_mutex);
3215 isert_conn->state = ISER_CONN_DOWN;
3216 mutex_unlock(&isert_conn->conn_mutex);
3217
3218 isert_info("Destroying conn %p\n", isert_conn);
3219 isert_put_conn(isert_conn);
3220}
3221
3222static void
3223isert_wait4logout(struct isert_conn *isert_conn)
3224{
3225 struct iscsi_conn *conn = isert_conn->conn;
3226
3227 isert_info("conn %p\n", isert_conn);
3228
3229 if (isert_conn->logout_posted) {
3230 isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
3231 wait_for_completion_timeout(&conn->conn_logout_comp,
3232 SECONDS_FOR_LOGOUT_COMP * HZ);
3233 }
3234}
3235
3236static void
3237isert_wait4cmds(struct iscsi_conn *conn)
3238{
3239 isert_info("iscsi_conn %p\n", conn);
3240
3241 if (conn->sess) {
3242 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
3243 target_wait_for_sess_cmds(conn->sess->se_sess);
3244 }
3245}
3246
3247static void
3248isert_wait4flush(struct isert_conn *isert_conn)
3249{
3250 struct ib_recv_wr *bad_wr;
3251
3252 isert_info("conn %p\n", isert_conn);
3253
3254 init_completion(&isert_conn->conn_wait_comp_err);
3255 isert_conn->beacon.wr_id = ISER_BEACON_WRID;
3256 /* post an indication that all flush errors were consumed */
3257 if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
3258 isert_err("conn %p failed to post beacon", isert_conn);
3259 return;
3260 }
3261
3262 wait_for_completion(&isert_conn->conn_wait_comp_err);
3263}
3264
3215static void isert_wait_conn(struct iscsi_conn *conn) 3265static void isert_wait_conn(struct iscsi_conn *conn)
3216{ 3266{
3217 struct isert_conn *isert_conn = conn->context; 3267 struct isert_conn *isert_conn = conn->context;
3218 3268
3219 pr_debug("isert_wait_conn: Starting \n"); 3269 isert_info("Starting conn %p\n", isert_conn);
3220 3270
3221 mutex_lock(&isert_conn->conn_mutex); 3271 mutex_lock(&isert_conn->conn_mutex);
3222 if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
3223 pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
3224 rdma_disconnect(isert_conn->conn_cm_id);
3225 }
3226 /* 3272 /*
3227 * Only wait for conn_wait_comp_err if the isert_conn made it 3273 * Only wait for conn_wait_comp_err if the isert_conn made it
3228 * into full feature phase.. 3274 * into full feature phase..
@@ -3231,14 +3277,15 @@ static void isert_wait_conn(struct iscsi_conn *conn)
3231 mutex_unlock(&isert_conn->conn_mutex); 3277 mutex_unlock(&isert_conn->conn_mutex);
3232 return; 3278 return;
3233 } 3279 }
3234 if (isert_conn->state == ISER_CONN_UP) 3280 isert_conn_terminate(isert_conn);
3235 isert_conn->state = ISER_CONN_TERMINATING;
3236 mutex_unlock(&isert_conn->conn_mutex); 3281 mutex_unlock(&isert_conn->conn_mutex);
3237 3282
3238 wait_for_completion(&isert_conn->conn_wait_comp_err); 3283 isert_wait4cmds(conn);
3284 isert_wait4flush(isert_conn);
3285 isert_wait4logout(isert_conn);
3239 3286
3240 wait_for_completion(&isert_conn->conn_wait); 3287 INIT_WORK(&isert_conn->release_work, isert_release_work);
3241 isert_put_conn(isert_conn); 3288 queue_work(isert_release_wq, &isert_conn->release_work);
3242} 3289}
3243 3290
3244static void isert_free_conn(struct iscsi_conn *conn) 3291static void isert_free_conn(struct iscsi_conn *conn)
@@ -3273,35 +3320,39 @@ static int __init isert_init(void)
3273{ 3320{
3274 int ret; 3321 int ret;
3275 3322
3276 isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0); 3323 isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
3277 if (!isert_rx_wq) { 3324 if (!isert_comp_wq) {
3278 pr_err("Unable to allocate isert_rx_wq\n"); 3325 isert_err("Unable to allocate isert_comp_wq\n");
3326 ret = -ENOMEM;
3279 return -ENOMEM; 3327 return -ENOMEM;
3280 } 3328 }
3281 3329
3282 isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0); 3330 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
3283 if (!isert_comp_wq) { 3331 WQ_UNBOUND_MAX_ACTIVE);
3284 pr_err("Unable to allocate isert_comp_wq\n"); 3332 if (!isert_release_wq) {
3333 isert_err("Unable to allocate isert_release_wq\n");
3285 ret = -ENOMEM; 3334 ret = -ENOMEM;
3286 goto destroy_rx_wq; 3335 goto destroy_comp_wq;
3287 } 3336 }
3288 3337
3289 iscsit_register_transport(&iser_target_transport); 3338 iscsit_register_transport(&iser_target_transport);
3290 pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n"); 3339 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
3340
3291 return 0; 3341 return 0;
3292 3342
3293destroy_rx_wq: 3343destroy_comp_wq:
3294 destroy_workqueue(isert_rx_wq); 3344 destroy_workqueue(isert_comp_wq);
3345
3295 return ret; 3346 return ret;
3296} 3347}
3297 3348
3298static void __exit isert_exit(void) 3349static void __exit isert_exit(void)
3299{ 3350{
3300 flush_scheduled_work(); 3351 flush_scheduled_work();
3352 destroy_workqueue(isert_release_wq);
3301 destroy_workqueue(isert_comp_wq); 3353 destroy_workqueue(isert_comp_wq);
3302 destroy_workqueue(isert_rx_wq);
3303 iscsit_unregister_transport(&iser_target_transport); 3354 iscsit_unregister_transport(&iser_target_transport);
3304 pr_debug("iSER_TARGET[0] - Released iser_target_transport\n"); 3355 isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
3305} 3356}
3306 3357
3307MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure"); 3358MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 04f51f7bf614..8dc8415d152d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -4,9 +4,37 @@
4#include <rdma/ib_verbs.h> 4#include <rdma/ib_verbs.h>
5#include <rdma/rdma_cm.h> 5#include <rdma/rdma_cm.h>
6 6
7#define DRV_NAME "isert"
8#define PFX DRV_NAME ": "
9
10#define isert_dbg(fmt, arg...) \
11 do { \
12 if (unlikely(isert_debug_level > 2)) \
13 printk(KERN_DEBUG PFX "%s: " fmt,\
14 __func__ , ## arg); \
15 } while (0)
16
17#define isert_warn(fmt, arg...) \
18 do { \
19 if (unlikely(isert_debug_level > 0)) \
20 pr_warn(PFX "%s: " fmt, \
21 __func__ , ## arg); \
22 } while (0)
23
24#define isert_info(fmt, arg...) \
25 do { \
26 if (unlikely(isert_debug_level > 1)) \
27 pr_info(PFX "%s: " fmt, \
28 __func__ , ## arg); \
29 } while (0)
30
31#define isert_err(fmt, arg...) \
32 pr_err(PFX "%s: " fmt, __func__ , ## arg)
33
7#define ISERT_RDMA_LISTEN_BACKLOG 10 34#define ISERT_RDMA_LISTEN_BACKLOG 10
8#define ISCSI_ISER_SG_TABLESIZE 256 35#define ISCSI_ISER_SG_TABLESIZE 256
9#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL 36#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
37#define ISER_BEACON_WRID 0xfffffffffffffffeULL
10 38
11enum isert_desc_type { 39enum isert_desc_type {
12 ISCSI_TX_CONTROL, 40 ISCSI_TX_CONTROL,
@@ -23,6 +51,7 @@ enum iser_ib_op_code {
23enum iser_conn_state { 51enum iser_conn_state {
24 ISER_CONN_INIT, 52 ISER_CONN_INIT,
25 ISER_CONN_UP, 53 ISER_CONN_UP,
54 ISER_CONN_FULL_FEATURE,
26 ISER_CONN_TERMINATING, 55 ISER_CONN_TERMINATING,
27 ISER_CONN_DOWN, 56 ISER_CONN_DOWN,
28}; 57};
@@ -44,9 +73,6 @@ struct iser_tx_desc {
44 struct ib_sge tx_sg[2]; 73 struct ib_sge tx_sg[2];
45 int num_sge; 74 int num_sge;
46 struct isert_cmd *isert_cmd; 75 struct isert_cmd *isert_cmd;
47 struct llist_node *comp_llnode_batch;
48 struct llist_node comp_llnode;
49 bool llnode_active;
50 struct ib_send_wr send_wr; 76 struct ib_send_wr send_wr;
51} __packed; 77} __packed;
52 78
@@ -81,6 +107,12 @@ struct isert_data_buf {
81 enum dma_data_direction dma_dir; 107 enum dma_data_direction dma_dir;
82}; 108};
83 109
110enum {
111 DATA = 0,
112 PROT = 1,
113 SIG = 2,
114};
115
84struct isert_rdma_wr { 116struct isert_rdma_wr {
85 struct list_head wr_list; 117 struct list_head wr_list;
86 struct isert_cmd *isert_cmd; 118 struct isert_cmd *isert_cmd;
@@ -90,6 +122,7 @@ struct isert_rdma_wr {
90 int send_wr_num; 122 int send_wr_num;
91 struct ib_send_wr *send_wr; 123 struct ib_send_wr *send_wr;
92 struct ib_send_wr s_send_wr; 124 struct ib_send_wr s_send_wr;
125 struct ib_sge ib_sg[3];
93 struct isert_data_buf data; 126 struct isert_data_buf data;
94 struct isert_data_buf prot; 127 struct isert_data_buf prot;
95 struct fast_reg_descriptor *fr_desc; 128 struct fast_reg_descriptor *fr_desc;
@@ -117,14 +150,15 @@ struct isert_device;
117struct isert_conn { 150struct isert_conn {
118 enum iser_conn_state state; 151 enum iser_conn_state state;
119 int post_recv_buf_count; 152 int post_recv_buf_count;
120 atomic_t post_send_buf_count;
121 u32 responder_resources; 153 u32 responder_resources;
122 u32 initiator_depth; 154 u32 initiator_depth;
155 bool pi_support;
123 u32 max_sge; 156 u32 max_sge;
124 char *login_buf; 157 char *login_buf;
125 char *login_req_buf; 158 char *login_req_buf;
126 char *login_rsp_buf; 159 char *login_rsp_buf;
127 u64 login_req_dma; 160 u64 login_req_dma;
161 int login_req_len;
128 u64 login_rsp_dma; 162 u64 login_rsp_dma;
129 unsigned int conn_rx_desc_head; 163 unsigned int conn_rx_desc_head;
130 struct iser_rx_desc *conn_rx_descs; 164 struct iser_rx_desc *conn_rx_descs;
@@ -132,13 +166,13 @@ struct isert_conn {
132 struct iscsi_conn *conn; 166 struct iscsi_conn *conn;
133 struct list_head conn_accept_node; 167 struct list_head conn_accept_node;
134 struct completion conn_login_comp; 168 struct completion conn_login_comp;
169 struct completion login_req_comp;
135 struct iser_tx_desc conn_login_tx_desc; 170 struct iser_tx_desc conn_login_tx_desc;
136 struct rdma_cm_id *conn_cm_id; 171 struct rdma_cm_id *conn_cm_id;
137 struct ib_pd *conn_pd; 172 struct ib_pd *conn_pd;
138 struct ib_mr *conn_mr; 173 struct ib_mr *conn_mr;
139 struct ib_qp *conn_qp; 174 struct ib_qp *conn_qp;
140 struct isert_device *conn_device; 175 struct isert_device *conn_device;
141 struct work_struct conn_logout_work;
142 struct mutex conn_mutex; 176 struct mutex conn_mutex;
143 struct completion conn_wait; 177 struct completion conn_wait;
144 struct completion conn_wait_comp_err; 178 struct completion conn_wait_comp_err;
@@ -147,31 +181,38 @@ struct isert_conn {
147 int conn_fr_pool_size; 181 int conn_fr_pool_size;
148 /* lock to protect fastreg pool */ 182 /* lock to protect fastreg pool */
149 spinlock_t conn_lock; 183 spinlock_t conn_lock;
150#define ISERT_COMP_BATCH_COUNT 8 184 struct work_struct release_work;
151 int conn_comp_batch; 185 struct ib_recv_wr beacon;
152 struct llist_head conn_comp_llist; 186 bool logout_posted;
153 bool disconnect;
154}; 187};
155 188
156#define ISERT_MAX_CQ 64 189#define ISERT_MAX_CQ 64
157 190
158struct isert_cq_desc { 191/**
159 struct isert_device *device; 192 * struct isert_comp - iSER completion context
160 int cq_index; 193 *
161 struct work_struct cq_rx_work; 194 * @device: pointer to device handle
162 struct work_struct cq_tx_work; 195 * @cq: completion queue
196 * @wcs: work completion array
197 * @active_qps: Number of active QPs attached
198 * to completion context
199 * @work: completion work handle
200 */
201struct isert_comp {
202 struct isert_device *device;
203 struct ib_cq *cq;
204 struct ib_wc wcs[16];
205 int active_qps;
206 struct work_struct work;
163}; 207};
164 208
165struct isert_device { 209struct isert_device {
166 int use_fastreg; 210 int use_fastreg;
167 bool pi_capable; 211 bool pi_capable;
168 int cqs_used;
169 int refcount; 212 int refcount;
170 int cq_active_qps[ISERT_MAX_CQ];
171 struct ib_device *ib_device; 213 struct ib_device *ib_device;
172 struct ib_cq *dev_rx_cq[ISERT_MAX_CQ]; 214 struct isert_comp *comps;
173 struct ib_cq *dev_tx_cq[ISERT_MAX_CQ]; 215 int comps_used;
174 struct isert_cq_desc *cq_desc;
175 struct list_head dev_node; 216 struct list_head dev_node;
176 struct ib_device_attr dev_attr; 217 struct ib_device_attr dev_attr;
177 int (*reg_rdma_mem)(struct iscsi_conn *conn, 218 int (*reg_rdma_mem)(struct iscsi_conn *conn,
@@ -182,6 +223,7 @@ struct isert_device {
182}; 223};
183 224
184struct isert_np { 225struct isert_np {
226 struct iscsi_np *np;
185 struct semaphore np_sem; 227 struct semaphore np_sem;
186 struct rdma_cm_id *np_cm_id; 228 struct rdma_cm_id *np_cm_id;
187 struct mutex np_accept_mutex; 229 struct mutex np_accept_mutex;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index db3c8c851af1..0747c0595a9d 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2740,7 +2740,6 @@ static struct scsi_host_template srp_template = {
2740 .info = srp_target_info, 2740 .info = srp_target_info,
2741 .queuecommand = srp_queuecommand, 2741 .queuecommand = srp_queuecommand,
2742 .change_queue_depth = srp_change_queue_depth, 2742 .change_queue_depth = srp_change_queue_depth,
2743 .change_queue_type = scsi_change_queue_type,
2744 .eh_abort_handler = srp_abort, 2743 .eh_abort_handler = srp_abort,
2745 .eh_device_reset_handler = srp_reset_device, 2744 .eh_device_reset_handler = srp_reset_device,
2746 .eh_host_reset_handler = srp_reset_host, 2745 .eh_host_reset_handler = srp_reset_host,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index dc829682701a..eb694ddad79f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1708,17 +1708,17 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
1708 1708
1709 switch (srp_cmd->task_attr) { 1709 switch (srp_cmd->task_attr) {
1710 case SRP_CMD_SIMPLE_Q: 1710 case SRP_CMD_SIMPLE_Q:
1711 cmd->sam_task_attr = MSG_SIMPLE_TAG; 1711 cmd->sam_task_attr = TCM_SIMPLE_TAG;
1712 break; 1712 break;
1713 case SRP_CMD_ORDERED_Q: 1713 case SRP_CMD_ORDERED_Q:
1714 default: 1714 default:
1715 cmd->sam_task_attr = MSG_ORDERED_TAG; 1715 cmd->sam_task_attr = TCM_ORDERED_TAG;
1716 break; 1716 break;
1717 case SRP_CMD_HEAD_OF_Q: 1717 case SRP_CMD_HEAD_OF_Q:
1718 cmd->sam_task_attr = MSG_HEAD_TAG; 1718 cmd->sam_task_attr = TCM_HEAD_TAG;
1719 break; 1719 break;
1720 case SRP_CMD_ACA: 1720 case SRP_CMD_ACA:
1721 cmd->sam_task_attr = MSG_ACA_TAG; 1721 cmd->sam_task_attr = TCM_ACA_TAG;
1722 break; 1722 break;
1723 } 1723 }
1724 1724
@@ -1733,7 +1733,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
1733 sizeof(srp_cmd->lun)); 1733 sizeof(srp_cmd->lun));
1734 rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb, 1734 rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
1735 &send_ioctx->sense_data[0], unpacked_lun, data_len, 1735 &send_ioctx->sense_data[0], unpacked_lun, data_len,
1736 MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF); 1736 TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
1737 if (rc != 0) { 1737 if (rc != 0) {
1738 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1738 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1739 goto send_sense; 1739 goto send_sense;
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 3067d56b11a6..5844b80bd90e 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -204,16 +204,6 @@ config THERM_ADT746X
204 iBook G4, and the ATI based aluminium PowerBooks, allowing slightly 204 iBook G4, and the ATI based aluminium PowerBooks, allowing slightly
205 better fan behaviour by default, and some manual control. 205 better fan behaviour by default, and some manual control.
206 206
207config THERM_PM72
208 tristate "Support for thermal management on PowerMac G5 (AGP)"
209 depends on I2C && I2C_POWERMAC && PPC_PMAC64
210 default n
211 help
212 This driver provides thermostat and fan control for the desktop
213 G5 machines.
214
215 This is deprecated, use windfarm instead.
216
217config WINDFARM 207config WINDFARM
218 tristate "New PowerMac thermal control infrastructure" 208 tristate "New PowerMac thermal control infrastructure"
219 depends on PPC 209 depends on PPC
diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile
index d2f0120bc878..383ba920085b 100644
--- a/drivers/macintosh/Makefile
+++ b/drivers/macintosh/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_ADB_IOP) += adb-iop.o
25obj-$(CONFIG_ADB_PMU68K) += via-pmu68k.o 25obj-$(CONFIG_ADB_PMU68K) += via-pmu68k.o
26obj-$(CONFIG_ADB_MACIO) += macio-adb.o 26obj-$(CONFIG_ADB_MACIO) += macio-adb.o
27 27
28obj-$(CONFIG_THERM_PM72) += therm_pm72.o
29obj-$(CONFIG_THERM_WINDTUNNEL) += therm_windtunnel.o 28obj-$(CONFIG_THERM_WINDTUNNEL) += therm_windtunnel.o
30obj-$(CONFIG_THERM_ADT746X) += therm_adt746x.o 29obj-$(CONFIG_THERM_ADT746X) += therm_adt746x.o
31obj-$(CONFIG_WINDFARM) += windfarm_core.o 30obj-$(CONFIG_WINDFARM) += windfarm_core.o
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
deleted file mode 100644
index 7ed92582d2cf..000000000000
--- a/drivers/macintosh/therm_pm72.c
+++ /dev/null
@@ -1,2278 +0,0 @@
1/*
2 * Device driver for the thermostats & fan controller of the
3 * Apple G5 "PowerMac7,2" desktop machines.
4 *
5 * (c) Copyright IBM Corp. 2003-2004
6 *
7 * Maintained by: Benjamin Herrenschmidt
8 * <benh@kernel.crashing.org>
9 *
10 *
11 * The algorithm used is the PID control algorithm, used the same
12 * way the published Darwin code does, using the same values that
13 * are present in the Darwin 7.0 snapshot property lists.
14 *
15 * As far as the CPUs control loops are concerned, I use the
16 * calibration & PID constants provided by the EEPROM,
17 * I do _not_ embed any value from the property lists, as the ones
18 * provided by Darwin 7.0 seem to always have an older version that
19 * what I've seen on the actual computers.
20 * It would be interesting to verify that though. Darwin has a
21 * version code of 1.0.0d11 for all control loops it seems, while
22 * so far, the machines EEPROMs contain a dataset versioned 1.0.0f
23 *
24 * Darwin doesn't provide source to all parts, some missing
25 * bits like the AppleFCU driver or the actual scale of some
26 * of the values returned by sensors had to be "guessed" some
27 * way... or based on what Open Firmware does.
28 *
29 * I didn't yet figure out how to get the slots power consumption
30 * out of the FCU, so that part has not been implemented yet and
31 * the slots fan is set to a fixed 50% PWM, hoping this value is
32 * safe enough ...
33 *
34 * Note: I have observed strange oscillations of the CPU control
35 * loop on a dual G5 here. When idle, the CPU exhaust fan tend to
36 * oscillates slowly (over several minutes) between the minimum
37 * of 300RPMs and approx. 1000 RPMs. I don't know what is causing
38 * this, it could be some incorrect constant or an error in the
39 * way I ported the algorithm, or it could be just normal. I
40 * don't have full understanding on the way Apple tweaked the PID
41 * algorithm for the CPU control, it is definitely not a standard
42 * implementation...
43 *
44 * TODO: - Check MPU structure version/signature
45 * - Add things like /sbin/overtemp for non-critical
46 * overtemp conditions so userland can take some policy
47 * decisions, like slowing down CPUs
48 * - Deal with fan and i2c failures in a better way
49 * - Maybe do a generic PID based on params used for
50 * U3 and Drives ? Definitely need to factor code a bit
51 * better... also make sensor detection more robust using
52 * the device-tree to probe for them
53 * - Figure out how to get the slots consumption and set the
54 * slots fan accordingly
55 *
56 * History:
57 *
58 * Nov. 13, 2003 : 0.5
59 * - First release
60 *
61 * Nov. 14, 2003 : 0.6
62 * - Read fan speed from FCU, low level fan routines now deal
63 * with errors & check fan status, though higher level don't
64 * do much.
65 * - Move a bunch of definitions to .h file
66 *
67 * Nov. 18, 2003 : 0.7
68 * - Fix build on ppc64 kernel
69 * - Move back statics definitions to .c file
70 * - Avoid calling schedule_timeout with a negative number
71 *
72 * Dec. 18, 2003 : 0.8
73 * - Fix typo when reading back fan speed on 2 CPU machines
74 *
75 * Mar. 11, 2004 : 0.9
76 * - Rework code accessing the ADC chips, make it more robust and
77 * closer to the chip spec. Also make sure it is configured properly,
78 * I've seen yet unexplained cases where on startup, I would have stale
79 * values in the configuration register
80 * - Switch back to use of target fan speed for PID, thus lowering
81 * pressure on i2c
82 *
83 * Oct. 20, 2004 : 1.1
84 * - Add device-tree lookup for fan IDs, should detect liquid cooling
85 * pumps when present
86 * - Enable driver for PowerMac7,3 machines
87 * - Split the U3/Backside cooling on U3 & U3H versions as Darwin does
88 * - Add new CPU cooling algorithm for machines with liquid cooling
89 * - Workaround for some PowerMac7,3 with empty "fan" node in the devtree
90 * - Fix a signed/unsigned compare issue in some PID loops
91 *
92 * Mar. 10, 2005 : 1.2
93 * - Add basic support for Xserve G5
94 * - Retrieve pumps min/max from EEPROM image in device-tree (broken)
95 * - Use min/max macros here or there
96 * - Latest darwin updated U3H min fan speed to 20% PWM
97 *
98 * July. 06, 2006 : 1.3
99 * - Fix setting of RPM fans on Xserve G5 (they were going too fast)
100 * - Add missing slots fan control loop for Xserve G5
101 * - Lower fixed slots fan speed from 50% to 40% on desktop G5s. We
102 * still can't properly implement the control loop for these, so let's
103 * reduce the noise a little bit, it appears that 40% still gives us
104 * a pretty good air flow
105 * - Add code to "tickle" the FCU regulary so it doesn't think that
106 * we are gone while in fact, the machine just didn't need any fan
107 * speed change lately
108 *
109 */
110
111#include <linux/types.h>
112#include <linux/module.h>
113#include <linux/errno.h>
114#include <linux/kernel.h>
115#include <linux/delay.h>
116#include <linux/sched.h>
117#include <linux/init.h>
118#include <linux/spinlock.h>
119#include <linux/wait.h>
120#include <linux/reboot.h>
121#include <linux/kmod.h>
122#include <linux/i2c.h>
123#include <linux/kthread.h>
124#include <linux/mutex.h>
125#include <linux/of_device.h>
126#include <linux/of_platform.h>
127#include <asm/prom.h>
128#include <asm/machdep.h>
129#include <asm/io.h>
130#include <asm/sections.h>
131#include <asm/macio.h>
132
133#include "therm_pm72.h"
134
/* Driver version string reported nowhere visible here; kept for history. */
#define VERSION "1.3"

/* Force-disable verbose debugging; define DEBUG above to re-enable. */
#undef DEBUG

#ifdef DEBUG
#define DBG(args...)	printk(args)
#else
/* Swallow arguments entirely when debugging is off. */
#define DBG(args...)	do { } while(0)
#endif
144
145
146/*
147 * Driver statics
148 */
149
/* Platform device and the three i2c adapters (two U3 buses + K2) we
 * attach sensor/FCU chips to; NULL until probed.
 */
static struct platform_device *		of_dev;
static struct i2c_adapter *		u3_0;
static struct i2c_adapter *		u3_1;
static struct i2c_adapter *		k2;
/* The Fan Control Unit chip itself */
static struct i2c_client *		fcu;
/* Per-control-loop state blocks (one CPU state per processor).
 * NOTE(review): "basckside" typo matches the struct declaration in
 * therm_pm72.h -- do not "fix" one without the other.
 */
static struct cpu_pid_state		processor_state[2];
static struct basckside_pid_params	backside_params;
static struct backside_pid_state	backside_state;
static struct drives_pid_state		drives_state;
static struct dimm_pid_state		dimms_state;
static struct slots_pid_state		slots_state;
/* Driver state machine / discovered topology */
static int				state;
static int				cpu_count;
static int				cpu_pid_type;
/* Control kthread and its startup/shutdown handshake */
static struct task_struct		*ctrl_task;
static struct completion		ctrl_complete;
static int				critical_state;
static int				rackmac;
static s32				dimm_output_clamp;
/* RPM register scaling discovered in start_fcu() (2 or 3) */
static int				fcu_rpm_shift;
static int				fcu_tickle_ticks;
/* Serializes sensor state against sysfs show functions */
static DEFINE_MUTEX(driver_lock);
172
173/*
174 * We have 3 types of CPU PID control. One is "split" old style control
175 * for intake & exhaust fans, the other is "combined" control for both
176 * CPUs that also deals with the pumps when present. To be "compatible"
177 * with OS X at this point, we only use "COMBINED" on the machines that
178 * are identified as having the pumps (though that identification is at
179 * least dodgy). Ultimately, we could probably switch completely to this
180 * algorithm provided we hack it to deal with the UP case
181 */
182#define CPU_PID_TYPE_SPLIT 0
183#define CPU_PID_TYPE_COMBINED 1
184#define CPU_PID_TYPE_RACKMAC 2
185
186/*
187 * This table describes all fans in the FCU. The "id" and "type" values
188 * are defaults valid for all earlier machines. Newer machines will
189 * eventually override the table content based on the device-tree
190 */
/* One entry per fan output of the FCU; see fcu_fans[] below.  The id
 * is the FCU channel number, or FCU_FAN_ABSENT_ID when the fan is not
 * present on this machine (possibly patched in from the device-tree).
 */
struct fcu_fan_table
{
	char*	loc;	/* location code */
	int	type;	/* 0 = rpm, 1 = pwm, 2 = pump */
	int	id;	/* id or -1 */
};

#define FCU_FAN_RPM		0
#define FCU_FAN_PWM		1

#define FCU_FAN_ABSENT_ID	-1

#define FCU_FAN_COUNT		ARRAY_SIZE(fcu_fans)
204
/* Default fan table; index macros (BACKSIDE_FAN_PWM_INDEX etc.) come
 * from therm_pm72.h.  Entries with FCU_FAN_ABSENT_ID are filled in
 * from the device-tree when the hardware actually has them.
 */
struct fcu_fan_table	fcu_fans[] = {
	[BACKSIDE_FAN_PWM_INDEX] = {
		.loc	= "BACKSIDE,SYS CTRLR FAN",
		.type	= FCU_FAN_PWM,
		.id	= BACKSIDE_FAN_PWM_DEFAULT_ID,
	},
	[DRIVES_FAN_RPM_INDEX] = {
		.loc	= "DRIVE BAY",
		.type	= FCU_FAN_RPM,
		.id	= DRIVES_FAN_RPM_DEFAULT_ID,
	},
	[SLOTS_FAN_PWM_INDEX] = {
		.loc	= "SLOT,PCI FAN",
		.type	= FCU_FAN_PWM,
		.id	= SLOTS_FAN_PWM_DEFAULT_ID,
	},
	[CPUA_INTAKE_FAN_RPM_INDEX] = {
		.loc	= "CPU A INTAKE",
		.type	= FCU_FAN_RPM,
		.id	= CPUA_INTAKE_FAN_RPM_DEFAULT_ID,
	},
	[CPUA_EXHAUST_FAN_RPM_INDEX] = {
		.loc	= "CPU A EXHAUST",
		.type	= FCU_FAN_RPM,
		.id	= CPUA_EXHAUST_FAN_RPM_DEFAULT_ID,
	},
	[CPUB_INTAKE_FAN_RPM_INDEX] = {
		.loc	= "CPU B INTAKE",
		.type	= FCU_FAN_RPM,
		.id	= CPUB_INTAKE_FAN_RPM_DEFAULT_ID,
	},
	[CPUB_EXHAUST_FAN_RPM_INDEX] = {
		.loc	= "CPU B EXHAUST",
		.type	= FCU_FAN_RPM,
		.id	= CPUB_EXHAUST_FAN_RPM_DEFAULT_ID,
	},
	/* pumps aren't present by default, have to be looked up in the
	 * device-tree
	 */
	[CPUA_PUMP_RPM_INDEX] = {
		.loc	= "CPU A PUMP",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPUB_PUMP_RPM_INDEX] = {
		.loc	= "CPU B PUMP",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	/* Xserve fans */
	[CPU_A1_FAN_RPM_INDEX] = {
		.loc	= "CPU A 1",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_A2_FAN_RPM_INDEX] = {
		.loc	= "CPU A 2",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_A3_FAN_RPM_INDEX] = {
		.loc	= "CPU A 3",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_B1_FAN_RPM_INDEX] = {
		.loc	= "CPU B 1",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_B2_FAN_RPM_INDEX] = {
		.loc	= "CPU B 2",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_B3_FAN_RPM_INDEX] = {
		.loc	= "CPU B 3",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
};
286
/* File-scope i2c driver object; attach_i2c_chip() chains detected
 * clients onto its ->clients list so i2c-core frees them on removal.
 */
static struct i2c_driver therm_pm72_driver;
288
289/*
290 * Utility function to create an i2c_client structure and
291 * attach it to one of u3 adapters
292 */
/* Instantiate an i2c client for the chip at encoded address @id and
 * bind it to the right adapter: bit 0x200 selects the K2 bus, bit
 * 0x100 the second U3 bus, otherwise the first U3 bus; bits 7..1
 * carry the 7-bit i2c address.  Returns NULL if the adapter is absent
 * or the device cannot be created.  @name is currently unused.
 */
static struct i2c_client *attach_i2c_chip(int id, const char *name)
{
	struct i2c_client *clt;
	struct i2c_adapter *adap;
	struct i2c_board_info info;

	/* Route to the adapter encoded in the high bits of the id */
	if (id & 0x200)
		adap = k2;
	else if (id & 0x100)
		adap = u3_1;
	else
		adap = u3_0;
	if (adap == NULL)
		return NULL;

	memset(&info, 0, sizeof(struct i2c_board_info));
	info.addr = (id >> 1) & 0x7f;	/* drop R/W bit, keep 7-bit address */
	strlcpy(info.type, "therm_pm72", I2C_NAME_SIZE);
	clt = i2c_new_device(adap, &info);
	if (!clt) {
		printk(KERN_ERR "therm_pm72: Failed to attach to i2c ID 0x%x\n", id);
		return NULL;
	}

	/*
	 * Let i2c-core delete that device on driver removal.
	 * This is safe because i2c-core holds the core_lock mutex for us.
	 */
	list_add_tail(&clt->detected, &therm_pm72_driver.clients);
	return clt;
}
324
325/*
326 * Here are the i2c chip access wrappers
327 */
328
/* One-time setup of a CPU's ADC monitoring chip: clear Config2, then
 * read Config1, cache it in state->adc_config and write it back with
 * the shutdown bit (bit 0) cleared.  Logs an error if any i2c
 * transfer fails; the driver continues with whatever was cached.
 */
static void initialize_adc(struct cpu_pid_state *state)
{
	int rc;
	u8 buf[2];

	/* Read ADC the configuration register and cache it. We
	 * also make sure Config2 contains proper values, I've seen
	 * cases where we got stale grabage in there, thus preventing
	 * proper reading of conv. values
	 */

	/* Clear Config2 */
	buf[0] = 5;
	buf[1] = 0;
	i2c_master_send(state->monitor, buf, 2);

	/* Read & cache Config1 */
	buf[0] = 1;
	rc = i2c_master_send(state->monitor, buf, 1);
	if (rc > 0) {
		rc = i2c_master_recv(state->monitor, buf, 1);
		if (rc > 0) {
			state->adc_config = buf[0];
			DBG("ADC config reg: %02x\n", state->adc_config);
			/* Disable shutdown mode */
			state->adc_config &= 0xfe;
			buf[0] = 1;
			buf[1] = state->adc_config;
			rc = i2c_master_send(state->monitor, buf, 2);
		}
	}
	if (rc <= 0)
		printk(KERN_ERR "therm_pm72: Error reading ADC config"
		       " register !\n");
}
364
/* Read one conversion from ADC channel @chan of a CPU's monitoring
 * chip: select the channel in Config1 (bits 7..5), wait 1ms for the
 * conversion, then read the 16-bit data register and return the top
 * 10 bits.  Retries up to 10 times on i2c errors; returns -1 on
 * persistent failure.
 */
static int read_smon_adc(struct cpu_pid_state *state, int chan)
{
	int rc, data, tries = 0;
	u8 buf[2];

	for (;;) {
		/* Set channel */
		buf[0] = 1;
		buf[1] = (state->adc_config & 0x1f) | (chan << 5);
		rc = i2c_master_send(state->monitor, buf, 2);
		if (rc <= 0)
			goto error;
		/* Wait for conversion */
		msleep(1);
		/* Switch to data register */
		buf[0] = 4;
		rc = i2c_master_send(state->monitor, buf, 1);
		if (rc <= 0)
			goto error;
		/* Read result.  NOTE(review): this checks rc < 0 while the
		 * sends above check rc <= 0, so a 0-byte read is treated as
		 * success here -- looks intentional but verify.
		 */
		rc = i2c_master_recv(state->monitor, buf, 2);
		if (rc < 0)
			goto error;
		data = ((u16)buf[0]) << 8 | (u16)buf[1];
		return data >> 6;
	error:
		DBG("Error reading ADC, retrying...\n");
		if (++tries > 10) {
			printk(KERN_ERR "therm_pm72: Error reading ADC !\n");
			return -1;
		}
		msleep(10);
	}
}
399
400static int read_lm87_reg(struct i2c_client * chip, int reg)
401{
402 int rc, tries = 0;
403 u8 buf;
404
405 for (;;) {
406 /* Set address */
407 buf = (u8)reg;
408 rc = i2c_master_send(chip, &buf, 1);
409 if (rc <= 0)
410 goto error;
411 rc = i2c_master_recv(chip, &buf, 1);
412 if (rc <= 0)
413 goto error;
414 return (int)buf;
415 error:
416 DBG("Error reading LM87, retrying...\n");
417 if (++tries > 10) {
418 printk(KERN_ERR "therm_pm72: Error reading LM87 !\n");
419 return -1;
420 }
421 msleep(10);
422 }
423}
424
425static int fan_read_reg(int reg, unsigned char *buf, int nb)
426{
427 int tries, nr, nw;
428
429 buf[0] = reg;
430 tries = 0;
431 for (;;) {
432 nw = i2c_master_send(fcu, buf, 1);
433 if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
434 break;
435 msleep(10);
436 ++tries;
437 }
438 if (nw <= 0) {
439 printk(KERN_ERR "Failure writing address to FCU: %d", nw);
440 return -EIO;
441 }
442 tries = 0;
443 for (;;) {
444 nr = i2c_master_recv(fcu, buf, nb);
445 if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100)
446 break;
447 msleep(10);
448 ++tries;
449 }
450 if (nr <= 0)
451 printk(KERN_ERR "Failure reading data from FCU: %d", nw);
452 return nr;
453}
454
455static int fan_write_reg(int reg, const unsigned char *ptr, int nb)
456{
457 int tries, nw;
458 unsigned char buf[16];
459
460 buf[0] = reg;
461 memcpy(buf+1, ptr, nb);
462 ++nb;
463 tries = 0;
464 for (;;) {
465 nw = i2c_master_send(fcu, buf, nb);
466 if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
467 break;
468 msleep(10);
469 ++tries;
470 }
471 if (nw < 0)
472 printk(KERN_ERR "Failure writing to FCU: %d", nw);
473 return nw;
474}
475
476static int start_fcu(void)
477{
478 unsigned char buf = 0xff;
479 int rc;
480
481 rc = fan_write_reg(0xe, &buf, 1);
482 if (rc < 0)
483 return -EIO;
484 rc = fan_write_reg(0x2e, &buf, 1);
485 if (rc < 0)
486 return -EIO;
487 rc = fan_read_reg(0, &buf, 1);
488 if (rc < 0)
489 return -EIO;
490 fcu_rpm_shift = (buf == 1) ? 2 : 3;
491 printk(KERN_DEBUG "FCU Initialized, RPM fan shift is %d\n",
492 fcu_rpm_shift);
493
494 return 0;
495}
496
497static int set_rpm_fan(int fan_index, int rpm)
498{
499 unsigned char buf[2];
500 int rc, id, min, max;
501
502 if (fcu_fans[fan_index].type != FCU_FAN_RPM)
503 return -EINVAL;
504 id = fcu_fans[fan_index].id;
505 if (id == FCU_FAN_ABSENT_ID)
506 return -EINVAL;
507
508 min = 2400 >> fcu_rpm_shift;
509 max = 56000 >> fcu_rpm_shift;
510
511 if (rpm < min)
512 rpm = min;
513 else if (rpm > max)
514 rpm = max;
515 buf[0] = rpm >> (8 - fcu_rpm_shift);
516 buf[1] = rpm << fcu_rpm_shift;
517 rc = fan_write_reg(0x10 + (id * 2), buf, 2);
518 if (rc < 0)
519 return -EIO;
520 return 0;
521}
522
523static int get_rpm_fan(int fan_index, int programmed)
524{
525 unsigned char failure;
526 unsigned char active;
527 unsigned char buf[2];
528 int rc, id, reg_base;
529
530 if (fcu_fans[fan_index].type != FCU_FAN_RPM)
531 return -EINVAL;
532 id = fcu_fans[fan_index].id;
533 if (id == FCU_FAN_ABSENT_ID)
534 return -EINVAL;
535
536 rc = fan_read_reg(0xb, &failure, 1);
537 if (rc != 1)
538 return -EIO;
539 if ((failure & (1 << id)) != 0)
540 return -EFAULT;
541 rc = fan_read_reg(0xd, &active, 1);
542 if (rc != 1)
543 return -EIO;
544 if ((active & (1 << id)) == 0)
545 return -ENXIO;
546
547 /* Programmed value or real current speed */
548 reg_base = programmed ? 0x10 : 0x11;
549 rc = fan_read_reg(reg_base + (id * 2), buf, 2);
550 if (rc != 2)
551 return -EIO;
552
553 return (buf[0] << (8 - fcu_rpm_shift)) | buf[1] >> fcu_rpm_shift;
554}
555
556static int set_pwm_fan(int fan_index, int pwm)
557{
558 unsigned char buf[2];
559 int rc, id;
560
561 if (fcu_fans[fan_index].type != FCU_FAN_PWM)
562 return -EINVAL;
563 id = fcu_fans[fan_index].id;
564 if (id == FCU_FAN_ABSENT_ID)
565 return -EINVAL;
566
567 if (pwm < 10)
568 pwm = 10;
569 else if (pwm > 100)
570 pwm = 100;
571 pwm = (pwm * 2559) / 1000;
572 buf[0] = pwm;
573 rc = fan_write_reg(0x30 + (id * 2), buf, 1);
574 if (rc < 0)
575 return rc;
576 return 0;
577}
578
579static int get_pwm_fan(int fan_index)
580{
581 unsigned char failure;
582 unsigned char active;
583 unsigned char buf[2];
584 int rc, id;
585
586 if (fcu_fans[fan_index].type != FCU_FAN_PWM)
587 return -EINVAL;
588 id = fcu_fans[fan_index].id;
589 if (id == FCU_FAN_ABSENT_ID)
590 return -EINVAL;
591
592 rc = fan_read_reg(0x2b, &failure, 1);
593 if (rc != 1)
594 return -EIO;
595 if ((failure & (1 << id)) != 0)
596 return -EFAULT;
597 rc = fan_read_reg(0x2d, &active, 1);
598 if (rc != 1)
599 return -EIO;
600 if ((active & (1 << id)) == 0)
601 return -ENXIO;
602
603 /* Programmed value or real current speed */
604 rc = fan_read_reg(0x30 + (id * 2), buf, 1);
605 if (rc != 1)
606 return -EIO;
607
608 return (buf[0] * 1000) / 2559;
609}
610
611static void tickle_fcu(void)
612{
613 int pwm;
614
615 pwm = get_pwm_fan(SLOTS_FAN_PWM_INDEX);
616
617 DBG("FCU Tickle, slots fan is: %d\n", pwm);
618 if (pwm < 0)
619 pwm = 100;
620
621 if (!rackmac) {
622 pwm = SLOTS_FAN_DEFAULT_PWM;
623 } else if (pwm < SLOTS_PID_OUTPUT_MIN)
624 pwm = SLOTS_PID_OUTPUT_MIN;
625
626 /* That is hopefully enough to make the FCU happy */
627 set_pwm_fan(SLOTS_FAN_PWM_INDEX, pwm);
628}
629
630
631/*
632 * Utility routine to read the CPU calibration EEPROM data
633 * from the device-tree
634 */
635static int read_eeprom(int cpu, struct mpu_data *out)
636{
637 struct device_node *np;
638 char nodename[64];
639 const u8 *data;
640 int len;
641
642 /* prom.c routine for finding a node by path is a bit brain dead
643 * and requires exact @xxx unit numbers. This is a bit ugly but
644 * will work for these machines
645 */
646 sprintf(nodename, "/u3@0,f8000000/i2c@f8001000/cpuid@a%d", cpu ? 2 : 0);
647 np = of_find_node_by_path(nodename);
648 if (np == NULL) {
649 printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid node from device-tree\n");
650 return -ENODEV;
651 }
652 data = of_get_property(np, "cpuid", &len);
653 if (data == NULL) {
654 printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid property from device-tree\n");
655 of_node_put(np);
656 return -ENODEV;
657 }
658 memcpy(out, data, sizeof(struct mpu_data));
659 of_node_put(np);
660
661 return 0;
662}
663
/* Derive the liquid-cooling pump min/max speeds for both CPUs.
 * Reads four 16-bit words out of CPU0's EEPROM image (8 bytes copied
 * from the processor_part_num area -- assumes the pump limits alias
 * that field in struct mpu_data; TODO confirm against therm_pm72.h)
 * and intersects the two candidate ranges; falls back to the
 * compile-time CPU_PUMP_OUTPUT_MIN/MAX when the EEPROM data is
 * degenerate.  Results are written to both processor_state entries.
 */
static void fetch_cpu_pumps_minmax(void)
{
	struct cpu_pid_state *state0 = &processor_state[0];
	struct cpu_pid_state *state1 = &processor_state[1];
	u16 pump_min = 0, pump_max = 0xffff;
	u16 tmp[4];

	/* Try to fetch pumps min/max infos from eeprom */

	memcpy(&tmp, &state0->mpu.processor_part_num, 8);
	if (tmp[0] != 0xffff && tmp[1] != 0xffff) {
		pump_min = max(pump_min, tmp[0]);
		pump_max = min(pump_max, tmp[1]);
	}
	if (tmp[2] != 0xffff && tmp[3] != 0xffff) {
		pump_min = max(pump_min, tmp[2]);
		pump_max = min(pump_max, tmp[3]);
	}

	/* Double check the values, this _IS_ needed as the EEPROM on
	 * some dual 2.5Ghz G5s seem, at least, to have both min & max
	 * same to the same value ... (grrrr)
	 */
	if (pump_min == pump_max || pump_min == 0 || pump_max == 0xffff) {
		pump_min = CPU_PUMP_OUTPUT_MIN;
		pump_max = CPU_PUMP_OUTPUT_MAX;
	}

	state0->pump_min = state1->pump_min = pump_min;
	state0->pump_max = state1->pump_max = pump_max;
}
695
696/*
697 * Now, unfortunately, sysfs doesn't give us a nice void * we could
698 * pass around to the attribute functions, so we don't really have
699 * choice but implement a bunch of them...
700 *
701 * That sucks a bit, we take the lock because FIX32TOPRINT evaluates
702 * the input twice... I accept patches :)
703 */
/* Generate a sysfs show function printing a 16.16 fixed-point value
 * as "i.mmm".  The mutex is held because FIX32TOPRINT evaluates its
 * argument twice (see the comment above) and @data may be updated
 * concurrently by the control thread.
 */
#define BUILD_SHOW_FUNC_FIX(name, data)				\
static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
{								\
	ssize_t r;						\
	mutex_lock(&driver_lock);				\
	r = sprintf(buf, "%d.%03d", FIX32TOPRINT(data));	\
	mutex_unlock(&driver_lock);				\
	return r;						\
}
/* Generate a sysfs show function printing a plain integer;
 * single-read, so no locking.
 */
#define BUILD_SHOW_FUNC_INT(name, data)				\
static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
{								\
	return sprintf(buf, "%d", data);			\
}
718
/* Per-CPU readouts: temperature/voltage/current are 16.16 fixed-point,
 * fan speeds are plain RPM values */
BUILD_SHOW_FUNC_FIX(cpu0_temperature, processor_state[0].last_temp)
BUILD_SHOW_FUNC_FIX(cpu0_voltage, processor_state[0].voltage)
BUILD_SHOW_FUNC_FIX(cpu0_current, processor_state[0].current_a)
BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, processor_state[0].rpm)
BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, processor_state[0].intake_rpm)

BUILD_SHOW_FUNC_FIX(cpu1_temperature, processor_state[1].last_temp)
BUILD_SHOW_FUNC_FIX(cpu1_voltage, processor_state[1].voltage)
BUILD_SHOW_FUNC_FIX(cpu1_current, processor_state[1].current_a)
BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, processor_state[1].rpm)
BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, processor_state[1].intake_rpm)

/* Backside (U3 heatsink) loop exposes a PWM value, not an RPM */
BUILD_SHOW_FUNC_FIX(backside_temperature, backside_state.last_temp)
BUILD_SHOW_FUNC_INT(backside_fan_pwm, backside_state.pwm)

BUILD_SHOW_FUNC_FIX(drives_temperature, drives_state.last_temp)
BUILD_SHOW_FUNC_INT(drives_fan_rpm, drives_state.rpm)

BUILD_SHOW_FUNC_FIX(slots_temperature, slots_state.last_temp)
BUILD_SHOW_FUNC_INT(slots_fan_pwm, slots_state.pwm)

/* The DIMM loop has no fan of its own; only the temperature is shown */
BUILD_SHOW_FUNC_FIX(dimms_temperature, dimms_state.last_temp)
741
/* Read-only sysfs attributes for the show_* callbacks generated above;
 * created/removed by the matching init_*/dispose_* helpers below */
static DEVICE_ATTR(cpu0_temperature,S_IRUGO,show_cpu0_temperature,NULL);
static DEVICE_ATTR(cpu0_voltage,S_IRUGO,show_cpu0_voltage,NULL);
static DEVICE_ATTR(cpu0_current,S_IRUGO,show_cpu0_current,NULL);
static DEVICE_ATTR(cpu0_exhaust_fan_rpm,S_IRUGO,show_cpu0_exhaust_fan_rpm,NULL);
static DEVICE_ATTR(cpu0_intake_fan_rpm,S_IRUGO,show_cpu0_intake_fan_rpm,NULL);

static DEVICE_ATTR(cpu1_temperature,S_IRUGO,show_cpu1_temperature,NULL);
static DEVICE_ATTR(cpu1_voltage,S_IRUGO,show_cpu1_voltage,NULL);
static DEVICE_ATTR(cpu1_current,S_IRUGO,show_cpu1_current,NULL);
static DEVICE_ATTR(cpu1_exhaust_fan_rpm,S_IRUGO,show_cpu1_exhaust_fan_rpm,NULL);
static DEVICE_ATTR(cpu1_intake_fan_rpm,S_IRUGO,show_cpu1_intake_fan_rpm,NULL);

static DEVICE_ATTR(backside_temperature,S_IRUGO,show_backside_temperature,NULL);
static DEVICE_ATTR(backside_fan_pwm,S_IRUGO,show_backside_fan_pwm,NULL);

static DEVICE_ATTR(drives_temperature,S_IRUGO,show_drives_temperature,NULL);
static DEVICE_ATTR(drives_fan_rpm,S_IRUGO,show_drives_fan_rpm,NULL);

static DEVICE_ATTR(slots_temperature,S_IRUGO,show_slots_temperature,NULL);
static DEVICE_ATTR(slots_fan_pwm,S_IRUGO,show_slots_fan_pwm,NULL);

static DEVICE_ATTR(dimms_temperature,S_IRUGO,show_dimms_temperature,NULL);
764
765/*
766 * CPUs fans control loop
767 */
768
769static int do_read_one_cpu_values(struct cpu_pid_state *state, s32 *temp, s32 *power)
770{
771 s32 ltemp, volts, amps;
772 int index, rc = 0;
773
774 /* Default (in case of error) */
775 *temp = state->cur_temp;
776 *power = state->cur_power;
777
778 if (cpu_pid_type == CPU_PID_TYPE_RACKMAC)
779 index = (state->index == 0) ?
780 CPU_A1_FAN_RPM_INDEX : CPU_B1_FAN_RPM_INDEX;
781 else
782 index = (state->index == 0) ?
783 CPUA_EXHAUST_FAN_RPM_INDEX : CPUB_EXHAUST_FAN_RPM_INDEX;
784
785 /* Read current fan status */
786 rc = get_rpm_fan(index, !RPM_PID_USE_ACTUAL_SPEED);
787 if (rc < 0) {
788 /* XXX What do we do now ? Nothing for now, keep old value, but
789 * return error upstream
790 */
791 DBG(" cpu %d, fan reading error !\n", state->index);
792 } else {
793 state->rpm = rc;
794 DBG(" cpu %d, exhaust RPM: %d\n", state->index, state->rpm);
795 }
796
797 /* Get some sensor readings and scale it */
798 ltemp = read_smon_adc(state, 1);
799 if (ltemp == -1) {
800 /* XXX What do we do now ? */
801 state->overtemp++;
802 if (rc == 0)
803 rc = -EIO;
804 DBG(" cpu %d, temp reading error !\n", state->index);
805 } else {
806 /* Fixup temperature according to diode calibration
807 */
808 DBG(" cpu %d, temp raw: %04x, m_diode: %04x, b_diode: %04x\n",
809 state->index,
810 ltemp, state->mpu.mdiode, state->mpu.bdiode);
811 *temp = ((s32)ltemp * (s32)state->mpu.mdiode + ((s32)state->mpu.bdiode << 12)) >> 2;
812 state->last_temp = *temp;
813 DBG(" temp: %d.%03d\n", FIX32TOPRINT((*temp)));
814 }
815
816 /*
817 * Read voltage & current and calculate power
818 */
819 volts = read_smon_adc(state, 3);
820 amps = read_smon_adc(state, 4);
821
822 /* Scale voltage and current raw sensor values according to fixed scales
823 * obtained in Darwin and calculate power from I and V
824 */
825 volts *= ADC_CPU_VOLTAGE_SCALE;
826 amps *= ADC_CPU_CURRENT_SCALE;
827 *power = (((u64)volts) * ((u64)amps)) >> 16;
828 state->voltage = volts;
829 state->current_a = amps;
830 state->last_power = *power;
831
832 DBG(" cpu %d, current: %d.%03d, voltage: %d.%03d, power: %d.%03d W\n",
833 state->index, FIX32TOPRINT(state->current_a),
834 FIX32TOPRINT(state->voltage), FIX32TOPRINT(*power));
835
836 return 0;
837}
838
/*
 * Run one PID step for a CPU loop and apply the result as a delta to
 * state->rpm (range clamping is the caller's job).  The integral term
 * works on the power error against the EEPROM-derived power target,
 * and is *not* summed into the output directly: it lowers the
 * adjusted temperature target instead.  The proportional and
 * derivative terms work on temperature.
 */
static void do_cpu_pid(struct cpu_pid_state *state, s32 temp, s32 power)
{
	s32 power_target, integral, derivative, proportional, adj_in_target, sval;
	s64 integ_p, deriv_p, prop_p, sum;
	int i;

	/* Calculate power target value (could be done once for all)
	 * and convert to a 16.16 fp number
	 */
	power_target = ((u32)(state->mpu.pmaxh - state->mpu.padjmax)) << 16;
	DBG(" power target: %d.%03d, error: %d.%03d\n",
	    FIX32TOPRINT(power_target), FIX32TOPRINT(power_target - power));

	/* Store temperature and power in history array */
	state->cur_temp = (state->cur_temp + 1) % CPU_TEMP_HISTORY_SIZE;
	state->temp_history[state->cur_temp] = temp;
	state->cur_power = (state->cur_power + 1) % state->count_power;
	state->power_history[state->cur_power] = power;
	state->error_history[state->cur_power] = power_target - power;

	/* If first loop, fill the history table */
	if (state->first) {
		for (i = 0; i < (state->count_power - 1); i++) {
			state->cur_power = (state->cur_power + 1) % state->count_power;
			state->power_history[state->cur_power] = power;
			state->error_history[state->cur_power] = power_target - power;
		}
		for (i = 0; i < (CPU_TEMP_HISTORY_SIZE - 1); i++) {
			state->cur_temp = (state->cur_temp + 1) % CPU_TEMP_HISTORY_SIZE;
			state->temp_history[state->cur_temp] = temp;
		}
		state->first = 0;
	}

	/* Calculate the integral term normally based on the "power" values */
	sum = 0;
	integral = 0;
	for (i = 0; i < state->count_power; i++)
		integral += state->error_history[i];
	integral *= CPU_PID_INTERVAL;
	DBG(" integral: %08x\n", integral);

	/* Calculate the adjusted input (sense value).
	 * G_r is 12.20
	 * integ is 16.16
	 * so the result is 28.36
	 *
	 * input target is mpu.ttarget, input max is mpu.tmax
	 *
	 * Note: integ_p deliberately feeds only sval/adj_in_target
	 * below; it is never added to "sum" itself.
	 */
	integ_p = ((s64)state->mpu.pid_gr) * (s64)integral;
	DBG(" integ_p: %d\n", (int)(integ_p >> 36));
	sval = (state->mpu.tmax << 16) - ((integ_p >> 20) & 0xffffffff);
	adj_in_target = (state->mpu.ttarget << 16);
	if (adj_in_target > sval)
		adj_in_target = sval;
	DBG(" adj_in_target: %d.%03d, ttarget: %d\n", FIX32TOPRINT(adj_in_target),
	    state->mpu.ttarget);

	/* Calculate the derivative term (difference of the two most
	 * recent temperature samples over the sampling interval) */
	derivative = state->temp_history[state->cur_temp] -
		state->temp_history[(state->cur_temp + CPU_TEMP_HISTORY_SIZE - 1)
				    % CPU_TEMP_HISTORY_SIZE];
	derivative /= CPU_PID_INTERVAL;
	deriv_p = ((s64)state->mpu.pid_gd) * (s64)derivative;
	DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
	sum += deriv_p;

	/* Calculate the proportional term */
	proportional = temp - adj_in_target;
	prop_p = ((s64)state->mpu.pid_gp) * (s64)proportional;
	DBG(" prop_p: %d\n", (int)(prop_p >> 36));
	sum += prop_p;

	/* Scale sum back down from the 28.36 intermediate format */
	sum >>= 36;

	DBG(" sum: %d\n", (int)sum);
	state->rpm += (s32)sum;
}
918
/*
 * CPU control loop for machines where both CPUs share one cooling
 * zone (and optionally liquid-cooling pumps): both CPUs are sampled,
 * the worst-case temperature and power drive a single PID run on
 * state 0, and the resulting speeds are applied to both fan sets.
 * Combined overtemp accounting lives in state0->overtemp.
 */
static void do_monitor_cpu_combined(void)
{
	struct cpu_pid_state *state0 = &processor_state[0];
	struct cpu_pid_state *state1 = &processor_state[1];
	s32 temp0, power0, temp1, power1;
	s32 temp_combi, power_combi;
	int rc, intake, pump;

	rc = do_read_one_cpu_values(state0, &temp0, &power0);
	if (rc < 0) {
		/* XXX What do we do now ? */
	}
	/* state1's counter only serves to catch sensor-failure
	 * increments made by the read below; they are folded into
	 * state0 which carries the combined accounting */
	state1->overtemp = 0;
	rc = do_read_one_cpu_values(state1, &temp1, &power1);
	if (rc < 0) {
		/* XXX What do we do now ? */
	}
	if (state1->overtemp)
		state0->overtemp++;

	/* Drive the loop from the hottest / hungriest CPU */
	temp_combi = max(temp0, temp1);
	power_combi = max(power0, power1);

	/* Check tmax, increment overtemp if we are there. At tmax+8, we go
	 * full blown immediately and try to trigger a shutdown
	 */
	if (temp_combi >= ((state0->mpu.tmax + 8) << 16)) {
		printk(KERN_WARNING "Warning ! Temperature way above maximum (%d) !\n",
		       temp_combi >> 16);
		state0->overtemp += CPU_MAX_OVERTEMP / 4;
	} else if (temp_combi > (state0->mpu.tmax << 16)) {
		state0->overtemp++;
		printk(KERN_WARNING "Temperature %d above max %d. overtemp %d\n",
		       temp_combi >> 16, state0->mpu.tmax, state0->overtemp);
	} else {
		if (state0->overtemp)
			printk(KERN_WARNING "Temperature back down to %d\n",
			       temp_combi >> 16);
		state0->overtemp = 0;
	}
	if (state0->overtemp >= CPU_MAX_OVERTEMP)
		critical_state = 1;
	if (state0->overtemp > 0) {
		/* Overtemp: pin everything at maximum and skip the PID */
		state0->rpm = state0->mpu.rmaxn_exhaust_fan;
		state0->intake_rpm = intake = state0->mpu.rmaxn_intake_fan;
		pump = state0->pump_max;
		goto do_set_fans;
	}

	/* Do the PID */
	do_cpu_pid(state0, temp_combi, power_combi);

	/* Range check */
	state0->rpm = max(state0->rpm, (int)state0->mpu.rminn_exhaust_fan);
	state0->rpm = min(state0->rpm, (int)state0->mpu.rmaxn_exhaust_fan);

	/* Calculate intake fan speed (fixed 16.16 scale of exhaust) */
	intake = (state0->rpm * CPU_INTAKE_SCALE) >> 16;
	intake = max(intake, (int)state0->mpu.rminn_intake_fan);
	intake = min(intake, (int)state0->mpu.rmaxn_intake_fan);
	state0->intake_rpm = intake;

	/* Calculate pump speed proportionally to the exhaust speed */
	pump = (state0->rpm * state0->pump_max) /
		state0->mpu.rmaxn_exhaust_fan;
	pump = min(pump, state0->pump_max);
	pump = max(pump, state0->pump_min);

 do_set_fans:
	/* We copy values from state 0 to state 1 for /sysfs */
	state1->rpm = state0->rpm;
	state1->intake_rpm = state0->intake_rpm;

	DBG("** CPU %d RPM: %d Ex, %d, Pump: %d, In, overtemp: %d\n",
	    state1->index, (int)state1->rpm, intake, pump, state1->overtemp);

	/* We should check for errors, shouldn't we ? But then, what
	 * do we do once the error occurs ? For FCU notified fan
	 * failures (-EFAULT) we probably want to notify userland
	 * some way...
	 */
	set_rpm_fan(CPUA_INTAKE_FAN_RPM_INDEX, intake);
	set_rpm_fan(CPUA_EXHAUST_FAN_RPM_INDEX, state0->rpm);
	set_rpm_fan(CPUB_INTAKE_FAN_RPM_INDEX, intake);
	set_rpm_fan(CPUB_EXHAUST_FAN_RPM_INDEX, state0->rpm);

	/* Pumps only exist on liquid-cooled models */
	if (fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID)
		set_rpm_fan(CPUA_PUMP_RPM_INDEX, pump);
	if (fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID)
		set_rpm_fan(CPUB_PUMP_RPM_INDEX, pump);
}
1010
/*
 * CPU control loop for machines where each CPU has its own
 * independent fan set: one PID per CPU, with the intake fan speed
 * derived from the exhaust speed by a fixed scale factor.
 */
static void do_monitor_cpu_split(struct cpu_pid_state *state)
{
	s32 temp, power;
	int rc, intake;

	/* Read current fan status */
	rc = do_read_one_cpu_values(state, &temp, &power);
	if (rc < 0) {
		/* XXX What do we do now ? */
	}

	/* Check tmax, increment overtemp if we are there. At tmax+8, we go
	 * full blown immediately and try to trigger a shutdown
	 */
	if (temp >= ((state->mpu.tmax + 8) << 16)) {
		printk(KERN_WARNING "Warning ! CPU %d temperature way above maximum"
		       " (%d) !\n",
		       state->index, temp >> 16);
		state->overtemp += CPU_MAX_OVERTEMP / 4;
	} else if (temp > (state->mpu.tmax << 16)) {
		state->overtemp++;
		printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
		       state->index, temp >> 16, state->mpu.tmax, state->overtemp);
	} else {
		if (state->overtemp)
			printk(KERN_WARNING "CPU %d temperature back down to %d\n",
			       state->index, temp >> 16);
		state->overtemp = 0;
	}
	if (state->overtemp >= CPU_MAX_OVERTEMP)
		critical_state = 1;
	if (state->overtemp > 0) {
		/* Overtemp: force both fans to maximum and skip the PID */
		state->rpm = state->mpu.rmaxn_exhaust_fan;
		state->intake_rpm = intake = state->mpu.rmaxn_intake_fan;
		goto do_set_fans;
	}

	/* Do the PID */
	do_cpu_pid(state, temp, power);

	/* Range check */
	state->rpm = max(state->rpm, (int)state->mpu.rminn_exhaust_fan);
	state->rpm = min(state->rpm, (int)state->mpu.rmaxn_exhaust_fan);

	/* Calculate intake fan */
	intake = (state->rpm * CPU_INTAKE_SCALE) >> 16;
	intake = max(intake, (int)state->mpu.rminn_intake_fan);
	intake = min(intake, (int)state->mpu.rmaxn_intake_fan);
	state->intake_rpm = intake;

 do_set_fans:
	DBG("** CPU %d RPM: %d Ex, %d In, overtemp: %d\n",
	    state->index, (int)state->rpm, intake, state->overtemp);

	/* We should check for errors, shouldn't we ? But then, what
	 * do we do once the error occurs ? For FCU notified fan
	 * failures (-EFAULT) we probably want to notify userland
	 * some way...
	 */
	if (state->index == 0) {
		set_rpm_fan(CPUA_INTAKE_FAN_RPM_INDEX, intake);
		set_rpm_fan(CPUA_EXHAUST_FAN_RPM_INDEX, state->rpm);
	} else {
		set_rpm_fan(CPUB_INTAKE_FAN_RPM_INDEX, intake);
		set_rpm_fan(CPUB_EXHAUST_FAN_RPM_INDEX, state->rpm);
	}
}
1078
1079static void do_monitor_cpu_rack(struct cpu_pid_state *state)
1080{
1081 s32 temp, power, fan_min;
1082 int rc;
1083
1084 /* Read current fan status */
1085 rc = do_read_one_cpu_values(state, &temp, &power);
1086 if (rc < 0) {
1087 /* XXX What do we do now ? */
1088 }
1089
1090 /* Check tmax, increment overtemp if we are there. At tmax+8, we go
1091 * full blown immediately and try to trigger a shutdown
1092 */
1093 if (temp >= ((state->mpu.tmax + 8) << 16)) {
1094 printk(KERN_WARNING "Warning ! CPU %d temperature way above maximum"
1095 " (%d) !\n",
1096 state->index, temp >> 16);
1097 state->overtemp = CPU_MAX_OVERTEMP / 4;
1098 } else if (temp > (state->mpu.tmax << 16)) {
1099 state->overtemp++;
1100 printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
1101 state->index, temp >> 16, state->mpu.tmax, state->overtemp);
1102 } else {
1103 if (state->overtemp)
1104 printk(KERN_WARNING "CPU %d temperature back down to %d\n",
1105 state->index, temp >> 16);
1106 state->overtemp = 0;
1107 }
1108 if (state->overtemp >= CPU_MAX_OVERTEMP)
1109 critical_state = 1;
1110 if (state->overtemp > 0) {
1111 state->rpm = state->intake_rpm = state->mpu.rmaxn_intake_fan;
1112 goto do_set_fans;
1113 }
1114
1115 /* Do the PID */
1116 do_cpu_pid(state, temp, power);
1117
1118 /* Check clamp from dimms */
1119 fan_min = dimm_output_clamp;
1120 fan_min = max(fan_min, (int)state->mpu.rminn_intake_fan);
1121
1122 DBG(" CPU min mpu = %d, min dimm = %d\n",
1123 state->mpu.rminn_intake_fan, dimm_output_clamp);
1124
1125 state->rpm = max(state->rpm, (int)fan_min);
1126 state->rpm = min(state->rpm, (int)state->mpu.rmaxn_intake_fan);
1127 state->intake_rpm = state->rpm;
1128
1129 do_set_fans:
1130 DBG("** CPU %d RPM: %d overtemp: %d\n",
1131 state->index, (int)state->rpm, state->overtemp);
1132
1133 /* We should check for errors, shouldn't we ? But then, what
1134 * do we do once the error occurs ? For FCU notified fan
1135 * failures (-EFAULT) we probably want to notify userland
1136 * some way...
1137 */
1138 if (state->index == 0) {
1139 set_rpm_fan(CPU_A1_FAN_RPM_INDEX, state->rpm);
1140 set_rpm_fan(CPU_A2_FAN_RPM_INDEX, state->rpm);
1141 set_rpm_fan(CPU_A3_FAN_RPM_INDEX, state->rpm);
1142 } else {
1143 set_rpm_fan(CPU_B1_FAN_RPM_INDEX, state->rpm);
1144 set_rpm_fan(CPU_B2_FAN_RPM_INDEX, state->rpm);
1145 set_rpm_fan(CPU_B3_FAN_RPM_INDEX, state->rpm);
1146 }
1147}
1148
1149/*
1150 * Initialize the state structure for one CPU control loop
1151 */
1152static int init_processor_state(struct cpu_pid_state *state, int index)
1153{
1154 int err;
1155
1156 state->index = index;
1157 state->first = 1;
1158 state->rpm = (cpu_pid_type == CPU_PID_TYPE_RACKMAC) ? 4000 : 1000;
1159 state->overtemp = 0;
1160 state->adc_config = 0x00;
1161
1162
1163 if (index == 0)
1164 state->monitor = attach_i2c_chip(SUPPLY_MONITOR_ID, "CPU0_monitor");
1165 else if (index == 1)
1166 state->monitor = attach_i2c_chip(SUPPLY_MONITORB_ID, "CPU1_monitor");
1167 if (state->monitor == NULL)
1168 goto fail;
1169
1170 if (read_eeprom(index, &state->mpu))
1171 goto fail;
1172
1173 state->count_power = state->mpu.tguardband;
1174 if (state->count_power > CPU_POWER_HISTORY_SIZE) {
1175 printk(KERN_WARNING "Warning ! too many power history slots\n");
1176 state->count_power = CPU_POWER_HISTORY_SIZE;
1177 }
1178 DBG("CPU %d Using %d power history entries\n", index, state->count_power);
1179
1180 if (index == 0) {
1181 err = device_create_file(&of_dev->dev, &dev_attr_cpu0_temperature);
1182 err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_voltage);
1183 err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_current);
1184 err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm);
1185 err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm);
1186 } else {
1187 err = device_create_file(&of_dev->dev, &dev_attr_cpu1_temperature);
1188 err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_voltage);
1189 err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_current);
1190 err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm);
1191 err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm);
1192 }
1193 if (err)
1194 printk(KERN_WARNING "Failed to create some of the attribute"
1195 "files for CPU %d\n", index);
1196
1197 return 0;
1198 fail:
1199 state->monitor = NULL;
1200
1201 return -ENODEV;
1202}
1203
1204/*
1205 * Dispose of the state data for one CPU control loop
1206 */
1207static void dispose_processor_state(struct cpu_pid_state *state)
1208{
1209 if (state->monitor == NULL)
1210 return;
1211
1212 if (state->index == 0) {
1213 device_remove_file(&of_dev->dev, &dev_attr_cpu0_temperature);
1214 device_remove_file(&of_dev->dev, &dev_attr_cpu0_voltage);
1215 device_remove_file(&of_dev->dev, &dev_attr_cpu0_current);
1216 device_remove_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm);
1217 device_remove_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm);
1218 } else {
1219 device_remove_file(&of_dev->dev, &dev_attr_cpu1_temperature);
1220 device_remove_file(&of_dev->dev, &dev_attr_cpu1_voltage);
1221 device_remove_file(&of_dev->dev, &dev_attr_cpu1_current);
1222 device_remove_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm);
1223 device_remove_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm);
1224 }
1225
1226 state->monitor = NULL;
1227}
1228
/*
 * Motherboard backside & U3 heatsink fan control loop: a classic PID
 * on the MAX6690 external temperature, producing a PWM value.  The
 * loop only runs every backside_params.interval ticks and its output
 * is clamped from below by the DIMM loop (dimm_output_clamp).
 */
static void do_monitor_backside(struct backside_pid_state *state)
{
	s32 temp, integral, derivative, fan_min;
	s64 integ_p, deriv_p, prop_p, sum;
	int i, rc;

	/* Only run every "interval" ticks */
	if (--state->ticks != 0)
		return;
	state->ticks = backside_params.interval;

	DBG("backside:\n");

	/* Check fan status */
	rc = get_pwm_fan(BACKSIDE_FAN_PWM_INDEX);
	if (rc < 0) {
		printk(KERN_WARNING "Error %d reading backside fan !\n", rc);
		/* XXX What do we do now ? */
	} else
		state->pwm = rc;
	DBG(" current pwm: %d\n", state->pwm);

	/* Get some sensor readings, converted to 16.16 fixed-point.
	 * NOTE(review): the SMBus read is not checked for errors here;
	 * a negative errno would be shifted into "temp" — confirm this
	 * is tolerated by the loop. */
	temp = i2c_smbus_read_byte_data(state->monitor, MAX6690_EXT_TEMP) << 16;
	state->last_temp = temp;
	DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
	    FIX32TOPRINT(backside_params.input_target));

	/* Store temperature and error in history array */
	state->cur_sample = (state->cur_sample + 1) % BACKSIDE_PID_HISTORY_SIZE;
	state->sample_history[state->cur_sample] = temp;
	state->error_history[state->cur_sample] = temp - backside_params.input_target;

	/* If first loop, fill the history table */
	if (state->first) {
		for (i = 0; i < (BACKSIDE_PID_HISTORY_SIZE - 1); i++) {
			state->cur_sample = (state->cur_sample + 1) %
				BACKSIDE_PID_HISTORY_SIZE;
			state->sample_history[state->cur_sample] = temp;
			state->error_history[state->cur_sample] =
				temp - backside_params.input_target;
		}
		state->first = 0;
	}

	/* Calculate the integral term */
	sum = 0;
	integral = 0;
	for (i = 0; i < BACKSIDE_PID_HISTORY_SIZE; i++)
		integral += state->error_history[i];
	integral *= backside_params.interval;
	DBG(" integral: %08x\n", integral);
	integ_p = ((s64)backside_params.G_r) * (s64)integral;
	DBG(" integ_p: %d\n", (int)(integ_p >> 36));
	sum += integ_p;

	/* Calculate the derivative term */
	derivative = state->error_history[state->cur_sample] -
		state->error_history[(state->cur_sample + BACKSIDE_PID_HISTORY_SIZE - 1)
				    % BACKSIDE_PID_HISTORY_SIZE];
	derivative /= backside_params.interval;
	deriv_p = ((s64)backside_params.G_d) * (s64)derivative;
	DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
	sum += deriv_p;

	/* Calculate the proportional term */
	prop_p = ((s64)backside_params.G_p) * (s64)(state->error_history[state->cur_sample]);
	DBG(" prop_p: %d\n", (int)(prop_p >> 36));
	sum += prop_p;

	/* Scale sum */
	sum >>= 36;

	DBG(" sum: %d\n", (int)sum);
	/* Rackmac params use an absolute output, the others a delta */
	if (backside_params.additive)
		state->pwm += (s32)sum;
	else
		state->pwm = sum;

	/* Check for clamp imposed by the DIMM loop */
	fan_min = (dimm_output_clamp * 100) / 14000;
	fan_min = max(fan_min, backside_params.output_min);

	state->pwm = max(state->pwm, fan_min);
	state->pwm = min(state->pwm, backside_params.output_max);

	DBG("** BACKSIDE PWM: %d\n", (int)state->pwm);
	set_pwm_fan(BACKSIDE_FAN_PWM_INDEX, state->pwm);
}
1320
1321/*
1322 * Initialize the state structure for the backside fan control loop
1323 */
1324static int init_backside_state(struct backside_pid_state *state)
1325{
1326 struct device_node *u3;
1327 int u3h = 1; /* conservative by default */
1328 int err;
1329
1330 /*
1331 * There are different PID params for machines with U3 and machines
1332 * with U3H, pick the right ones now
1333 */
1334 u3 = of_find_node_by_path("/u3@0,f8000000");
1335 if (u3 != NULL) {
1336 const u32 *vers = of_get_property(u3, "device-rev", NULL);
1337 if (vers)
1338 if (((*vers) & 0x3f) < 0x34)
1339 u3h = 0;
1340 of_node_put(u3);
1341 }
1342
1343 if (rackmac) {
1344 backside_params.G_d = BACKSIDE_PID_RACK_G_d;
1345 backside_params.input_target = BACKSIDE_PID_RACK_INPUT_TARGET;
1346 backside_params.output_min = BACKSIDE_PID_U3H_OUTPUT_MIN;
1347 backside_params.interval = BACKSIDE_PID_RACK_INTERVAL;
1348 backside_params.G_p = BACKSIDE_PID_RACK_G_p;
1349 backside_params.G_r = BACKSIDE_PID_G_r;
1350 backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX;
1351 backside_params.additive = 0;
1352 } else if (u3h) {
1353 backside_params.G_d = BACKSIDE_PID_U3H_G_d;
1354 backside_params.input_target = BACKSIDE_PID_U3H_INPUT_TARGET;
1355 backside_params.output_min = BACKSIDE_PID_U3H_OUTPUT_MIN;
1356 backside_params.interval = BACKSIDE_PID_INTERVAL;
1357 backside_params.G_p = BACKSIDE_PID_G_p;
1358 backside_params.G_r = BACKSIDE_PID_G_r;
1359 backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX;
1360 backside_params.additive = 1;
1361 } else {
1362 backside_params.G_d = BACKSIDE_PID_U3_G_d;
1363 backside_params.input_target = BACKSIDE_PID_U3_INPUT_TARGET;
1364 backside_params.output_min = BACKSIDE_PID_U3_OUTPUT_MIN;
1365 backside_params.interval = BACKSIDE_PID_INTERVAL;
1366 backside_params.G_p = BACKSIDE_PID_G_p;
1367 backside_params.G_r = BACKSIDE_PID_G_r;
1368 backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX;
1369 backside_params.additive = 1;
1370 }
1371
1372 state->ticks = 1;
1373 state->first = 1;
1374 state->pwm = 50;
1375
1376 state->monitor = attach_i2c_chip(BACKSIDE_MAX_ID, "backside_temp");
1377 if (state->monitor == NULL)
1378 return -ENODEV;
1379
1380 err = device_create_file(&of_dev->dev, &dev_attr_backside_temperature);
1381 err |= device_create_file(&of_dev->dev, &dev_attr_backside_fan_pwm);
1382 if (err)
1383 printk(KERN_WARNING "Failed to create attribute file(s)"
1384 " for backside fan\n");
1385
1386 return 0;
1387}
1388
1389/*
1390 * Dispose of the state data for the backside control loop
1391 */
1392static void dispose_backside_state(struct backside_pid_state *state)
1393{
1394 if (state->monitor == NULL)
1395 return;
1396
1397 device_remove_file(&of_dev->dev, &dev_attr_backside_temperature);
1398 device_remove_file(&of_dev->dev, &dev_attr_backside_fan_pwm);
1399
1400 state->monitor = NULL;
1401}
1402
/*
 * Drives bay fan control loop: a PID on the DS1775 thermostat
 * temperature, producing an RPM value.  Runs every
 * DRIVES_PID_INTERVAL ticks.
 */
static void do_monitor_drives(struct drives_pid_state *state)
{
	s32 temp, integral, derivative;
	s64 integ_p, deriv_p, prop_p, sum;
	int i, rc;

	/* Only run every "interval" ticks */
	if (--state->ticks != 0)
		return;
	state->ticks = DRIVES_PID_INTERVAL;

	DBG("drives:\n");

	/* Check fan status */
	rc = get_rpm_fan(DRIVES_FAN_RPM_INDEX, !RPM_PID_USE_ACTUAL_SPEED);
	if (rc < 0) {
		printk(KERN_WARNING "Error %d reading drives fan !\n", rc);
		/* XXX What do we do now ? */
	} else
		state->rpm = rc;
	DBG(" current rpm: %d\n", state->rpm);

	/* Get some sensor readings.  The DS1775 word is byte-swapped
	 * relative to host order, hence le16_to_cpu; the << 8 converts
	 * the 8.8 register format to 16.16 fixed-point */
	temp = le16_to_cpu(i2c_smbus_read_word_data(state->monitor,
						    DS1775_TEMP)) << 8;
	state->last_temp = temp;
	DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
	    FIX32TOPRINT(DRIVES_PID_INPUT_TARGET));

	/* Store temperature and error in history array */
	state->cur_sample = (state->cur_sample + 1) % DRIVES_PID_HISTORY_SIZE;
	state->sample_history[state->cur_sample] = temp;
	state->error_history[state->cur_sample] = temp - DRIVES_PID_INPUT_TARGET;

	/* If first loop, fill the history table */
	if (state->first) {
		for (i = 0; i < (DRIVES_PID_HISTORY_SIZE - 1); i++) {
			state->cur_sample = (state->cur_sample + 1) %
				DRIVES_PID_HISTORY_SIZE;
			state->sample_history[state->cur_sample] = temp;
			state->error_history[state->cur_sample] =
				temp - DRIVES_PID_INPUT_TARGET;
		}
		state->first = 0;
	}

	/* Calculate the integral term */
	sum = 0;
	integral = 0;
	for (i = 0; i < DRIVES_PID_HISTORY_SIZE; i++)
		integral += state->error_history[i];
	integral *= DRIVES_PID_INTERVAL;
	DBG(" integral: %08x\n", integral);
	integ_p = ((s64)DRIVES_PID_G_r) * (s64)integral;
	DBG(" integ_p: %d\n", (int)(integ_p >> 36));
	sum += integ_p;

	/* Calculate the derivative term */
	derivative = state->error_history[state->cur_sample] -
		state->error_history[(state->cur_sample + DRIVES_PID_HISTORY_SIZE - 1)
				    % DRIVES_PID_HISTORY_SIZE];
	derivative /= DRIVES_PID_INTERVAL;
	deriv_p = ((s64)DRIVES_PID_G_d) * (s64)derivative;
	DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
	sum += deriv_p;

	/* Calculate the proportional term */
	prop_p = ((s64)DRIVES_PID_G_p) * (s64)(state->error_history[state->cur_sample]);
	DBG(" prop_p: %d\n", (int)(prop_p >> 36));
	sum += prop_p;

	/* Scale sum */
	sum >>= 36;

	DBG(" sum: %d\n", (int)sum);
	/* Additive output, then clamped to the allowed RPM range */
	state->rpm += (s32)sum;

	state->rpm = max(state->rpm, DRIVES_PID_OUTPUT_MIN);
	state->rpm = min(state->rpm, DRIVES_PID_OUTPUT_MAX);

	DBG("** DRIVES RPM: %d\n", (int)state->rpm);
	set_rpm_fan(DRIVES_FAN_RPM_INDEX, state->rpm);
}
1488
1489/*
1490 * Initialize the state structure for the drives bay fan control loop
1491 */
1492static int init_drives_state(struct drives_pid_state *state)
1493{
1494 int err;
1495
1496 state->ticks = 1;
1497 state->first = 1;
1498 state->rpm = 1000;
1499
1500 state->monitor = attach_i2c_chip(DRIVES_DALLAS_ID, "drives_temp");
1501 if (state->monitor == NULL)
1502 return -ENODEV;
1503
1504 err = device_create_file(&of_dev->dev, &dev_attr_drives_temperature);
1505 err |= device_create_file(&of_dev->dev, &dev_attr_drives_fan_rpm);
1506 if (err)
1507 printk(KERN_WARNING "Failed to create attribute file(s)"
1508 " for drives bay fan\n");
1509
1510 return 0;
1511}
1512
1513/*
1514 * Dispose of the state data for the drives control loop
1515 */
1516static void dispose_drives_state(struct drives_pid_state *state)
1517{
1518 if (state->monitor == NULL)
1519 return;
1520
1521 device_remove_file(&of_dev->dev, &dev_attr_drives_temperature);
1522 device_remove_file(&of_dev->dev, &dev_attr_drives_fan_rpm);
1523
1524 state->monitor = NULL;
1525}
1526
/*
 * DIMMs temp control loop (Xserve): a PID on the LM87 internal
 * temperature.  It drives no fan directly — its output is an RPM
 * clamp (dimm_output_clamp) consumed as a floor by the CPU rack and
 * backside loops.  Runs every DIMM_PID_INTERVAL ticks.
 */
static void do_monitor_dimms(struct dimm_pid_state *state)
{
	s32 temp, integral, derivative, fan_min;
	s64 integ_p, deriv_p, prop_p, sum;
	int i;

	/* Only run every "interval" ticks */
	if (--state->ticks != 0)
		return;
	state->ticks = DIMM_PID_INTERVAL;

	DBG("DIMM:\n");

	DBG(" current value: %d\n", state->output);

	/* Silently skip this tick if the sensor can't be read */
	temp = read_lm87_reg(state->monitor, LM87_INT_TEMP);
	if (temp < 0)
		return;
	/* Convert degrees to 16.16 fixed-point */
	temp <<= 16;
	state->last_temp = temp;
	DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
	    FIX32TOPRINT(DIMM_PID_INPUT_TARGET));

	/* Store temperature and error in history array */
	state->cur_sample = (state->cur_sample + 1) % DIMM_PID_HISTORY_SIZE;
	state->sample_history[state->cur_sample] = temp;
	state->error_history[state->cur_sample] = temp - DIMM_PID_INPUT_TARGET;

	/* If first loop, fill the history table */
	if (state->first) {
		for (i = 0; i < (DIMM_PID_HISTORY_SIZE - 1); i++) {
			state->cur_sample = (state->cur_sample + 1) %
				DIMM_PID_HISTORY_SIZE;
			state->sample_history[state->cur_sample] = temp;
			state->error_history[state->cur_sample] =
				temp - DIMM_PID_INPUT_TARGET;
		}
		state->first = 0;
	}

	/* Calculate the integral term */
	sum = 0;
	integral = 0;
	for (i = 0; i < DIMM_PID_HISTORY_SIZE; i++)
		integral += state->error_history[i];
	integral *= DIMM_PID_INTERVAL;
	DBG(" integral: %08x\n", integral);
	integ_p = ((s64)DIMM_PID_G_r) * (s64)integral;
	DBG(" integ_p: %d\n", (int)(integ_p >> 36));
	sum += integ_p;

	/* Calculate the derivative term */
	derivative = state->error_history[state->cur_sample] -
		state->error_history[(state->cur_sample + DIMM_PID_HISTORY_SIZE - 1)
				    % DIMM_PID_HISTORY_SIZE];
	derivative /= DIMM_PID_INTERVAL;
	deriv_p = ((s64)DIMM_PID_G_d) * (s64)derivative;
	DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
	sum += deriv_p;

	/* Calculate the proportional term */
	prop_p = ((s64)DIMM_PID_G_p) * (s64)(state->error_history[state->cur_sample]);
	DBG(" prop_p: %d\n", (int)(prop_p >> 36));
	sum += prop_p;

	/* Scale sum */
	sum >>= 36;

	DBG(" sum: %d\n", (int)sum);
	/* Absolute (non-additive) output, clamped to its range */
	state->output = (s32)sum;
	state->output = max(state->output, DIMM_PID_OUTPUT_MIN);
	state->output = min(state->output, DIMM_PID_OUTPUT_MAX);
	dimm_output_clamp = state->output;

	DBG("** DIMM clamp value: %d\n", (int)state->output);

	/* Backside PID is only every 5 seconds, force backside fan clamping now */
	fan_min = (dimm_output_clamp * 100) / 14000;
	fan_min = max(fan_min, backside_params.output_min);
	if (backside_state.pwm < fan_min) {
		backside_state.pwm = fan_min;
		DBG(" -> applying clamp to backside fan now: %d !\n", fan_min);
		set_pwm_fan(BACKSIDE_FAN_PWM_INDEX, fan_min);
	}
}
1614
1615/*
1616 * Initialize the state structure for the DIMM temp control loop
1617 */
1618static int init_dimms_state(struct dimm_pid_state *state)
1619{
1620 state->ticks = 1;
1621 state->first = 1;
1622 state->output = 4000;
1623
1624 state->monitor = attach_i2c_chip(XSERVE_DIMMS_LM87, "dimms_temp");
1625 if (state->monitor == NULL)
1626 return -ENODEV;
1627
1628 if (device_create_file(&of_dev->dev, &dev_attr_dimms_temperature))
1629 printk(KERN_WARNING "Failed to create attribute file"
1630 " for DIMM temperature\n");
1631
1632 return 0;
1633}
1634
1635/*
1636 * Dispose of the state data for the DIMM control loop
1637 */
1638static void dispose_dimms_state(struct dimm_pid_state *state)
1639{
1640 if (state->monitor == NULL)
1641 return;
1642
1643 device_remove_file(&of_dev->dev, &dev_attr_dimms_temperature);
1644
1645 state->monitor = NULL;
1646}
1647
1648/*
1649 * Slots fan control loop
1650 */
1651static void do_monitor_slots(struct slots_pid_state *state)
1652{
1653 s32 temp, integral, derivative;
1654 s64 integ_p, deriv_p, prop_p, sum;
1655 int i, rc;
1656
1657 if (--state->ticks != 0)
1658 return;
1659 state->ticks = SLOTS_PID_INTERVAL;
1660
1661 DBG("slots:\n");
1662
1663 /* Check fan status */
1664 rc = get_pwm_fan(SLOTS_FAN_PWM_INDEX);
1665 if (rc < 0) {
1666 printk(KERN_WARNING "Error %d reading slots fan !\n", rc);
1667 /* XXX What do we do now ? */
1668 } else
1669 state->pwm = rc;
1670 DBG(" current pwm: %d\n", state->pwm);
1671
1672 /* Get some sensor readings */
1673 temp = le16_to_cpu(i2c_smbus_read_word_data(state->monitor,
1674 DS1775_TEMP)) << 8;
1675 state->last_temp = temp;
1676 DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
1677 FIX32TOPRINT(SLOTS_PID_INPUT_TARGET));
1678
1679 /* Store temperature and error in history array */
1680 state->cur_sample = (state->cur_sample + 1) % SLOTS_PID_HISTORY_SIZE;
1681 state->sample_history[state->cur_sample] = temp;
1682 state->error_history[state->cur_sample] = temp - SLOTS_PID_INPUT_TARGET;
1683
1684 /* If first loop, fill the history table */
1685 if (state->first) {
1686 for (i = 0; i < (SLOTS_PID_HISTORY_SIZE - 1); i++) {
1687 state->cur_sample = (state->cur_sample + 1) %
1688 SLOTS_PID_HISTORY_SIZE;
1689 state->sample_history[state->cur_sample] = temp;
1690 state->error_history[state->cur_sample] =
1691 temp - SLOTS_PID_INPUT_TARGET;
1692 }
1693 state->first = 0;
1694 }
1695
1696 /* Calculate the integral term */
1697 sum = 0;
1698 integral = 0;
1699 for (i = 0; i < SLOTS_PID_HISTORY_SIZE; i++)
1700 integral += state->error_history[i];
1701 integral *= SLOTS_PID_INTERVAL;
1702 DBG(" integral: %08x\n", integral);
1703 integ_p = ((s64)SLOTS_PID_G_r) * (s64)integral;
1704 DBG(" integ_p: %d\n", (int)(integ_p >> 36));
1705 sum += integ_p;
1706
1707 /* Calculate the derivative term */
1708 derivative = state->error_history[state->cur_sample] -
1709 state->error_history[(state->cur_sample + SLOTS_PID_HISTORY_SIZE - 1)
1710 % SLOTS_PID_HISTORY_SIZE];
1711 derivative /= SLOTS_PID_INTERVAL;
1712 deriv_p = ((s64)SLOTS_PID_G_d) * (s64)derivative;
1713 DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
1714 sum += deriv_p;
1715
1716 /* Calculate the proportional term */
1717 prop_p = ((s64)SLOTS_PID_G_p) * (s64)(state->error_history[state->cur_sample]);
1718 DBG(" prop_p: %d\n", (int)(prop_p >> 36));
1719 sum += prop_p;
1720
1721 /* Scale sum */
1722 sum >>= 36;
1723
1724 DBG(" sum: %d\n", (int)sum);
1725 state->pwm = (s32)sum;
1726
1727 state->pwm = max(state->pwm, SLOTS_PID_OUTPUT_MIN);
1728 state->pwm = min(state->pwm, SLOTS_PID_OUTPUT_MAX);
1729
1730 DBG("** DRIVES PWM: %d\n", (int)state->pwm);
1731 set_pwm_fan(SLOTS_FAN_PWM_INDEX, state->pwm);
1732}
1733
1734/*
1735 * Initialize the state structure for the slots bay fan control loop
1736 */
1737static int init_slots_state(struct slots_pid_state *state)
1738{
1739 int err;
1740
1741 state->ticks = 1;
1742 state->first = 1;
1743 state->pwm = 50;
1744
1745 state->monitor = attach_i2c_chip(XSERVE_SLOTS_LM75, "slots_temp");
1746 if (state->monitor == NULL)
1747 return -ENODEV;
1748
1749 err = device_create_file(&of_dev->dev, &dev_attr_slots_temperature);
1750 err |= device_create_file(&of_dev->dev, &dev_attr_slots_fan_pwm);
1751 if (err)
1752 printk(KERN_WARNING "Failed to create attribute file(s)"
1753 " for slots bay fan\n");
1754
1755 return 0;
1756}
1757
1758/*
1759 * Dispose of the state data for the slots control loop
1760 */
1761static void dispose_slots_state(struct slots_pid_state *state)
1762{
1763 if (state->monitor == NULL)
1764 return;
1765
1766 device_remove_file(&of_dev->dev, &dev_attr_slots_temperature);
1767 device_remove_file(&of_dev->dev, &dev_attr_slots_fan_pwm);
1768
1769 state->monitor = NULL;
1770}
1771
1772
1773static int call_critical_overtemp(void)
1774{
1775 char *argv[] = { critical_overtemp_path, NULL };
1776 static char *envp[] = { "HOME=/",
1777 "TERM=linux",
1778 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
1779 NULL };
1780
1781 return call_usermodehelper(critical_overtemp_path,
1782 argv, envp, UMH_WAIT_EXEC);
1783}
1784
1785
1786/*
1787 * Here's the kernel thread that calls the various control loops
1788 */
static int main_control_loop(void *x)
{
	DBG("main_control_loop started\n");

	mutex_lock(&driver_lock);

	/* Bring the fan control unit up first; without it nothing works */
	if (start_fcu() < 0) {
		printk(KERN_ERR "kfand: failed to start FCU\n");
		mutex_unlock(&driver_lock);
		goto out;
	}

	/* Set the PCI fan once for now on non-RackMac */
	if (!rackmac)
		set_pwm_fan(SLOTS_FAN_PWM_INDEX, SLOTS_FAN_DEFAULT_PWM);

	/* Initialize ADCs */
	initialize_adc(&processor_state[0]);
	if (processor_state[1].monitor != NULL)
		initialize_adc(&processor_state[1]);

	fcu_tickle_ticks = FCU_TICKLE_TICKS;

	mutex_unlock(&driver_lock);

	/* state is moved away from state_attached by therm_pm72_remove() */
	while (state == state_attached) {
		unsigned long elapsed, start;

		start = jiffies;

		mutex_lock(&driver_lock);

		/* Tickle the FCU just in case */
		if (--fcu_tickle_ticks < 0) {
			fcu_tickle_ticks = FCU_TICKLE_TICKS;
			tickle_fcu();
		}

		/* First, we always calculate the new DIMMs state on an Xserve */
		if (rackmac)
			do_monitor_dimms(&dimms_state);

		/* Then, the CPUs */
		if (cpu_pid_type == CPU_PID_TYPE_COMBINED)
			do_monitor_cpu_combined();
		else if (cpu_pid_type == CPU_PID_TYPE_RACKMAC) {
			do_monitor_cpu_rack(&processor_state[0]);
			if (processor_state[1].monitor != NULL)
				do_monitor_cpu_rack(&processor_state[1]);
			// better deal with UP
		} else {
			do_monitor_cpu_split(&processor_state[0]);
			if (processor_state[1].monitor != NULL)
				do_monitor_cpu_split(&processor_state[1]);
			// better deal with UP
		}
		/* Then, the rest */
		do_monitor_backside(&backside_state);
		if (rackmac)
			do_monitor_slots(&slots_state);
		else
			do_monitor_drives(&drives_state);
		mutex_unlock(&driver_lock);

		/* critical_state == 1 is the first pass after an overtemp
		 * trip: try a clean userspace shutdown, power off hard if
		 * the helper cannot even be spawned */
		if (critical_state == 1) {
			printk(KERN_WARNING "Temperature control detected a critical condition\n");
			printk(KERN_WARNING "Attempting to shut down...\n");
			if (call_critical_overtemp()) {
				printk(KERN_WARNING "Can't call %s, power off now!\n",
				       critical_overtemp_path);
				machine_power_off();
			}
		}
		/* Count iterations spent in critical state; force power off
		 * if the shutdown did not complete within MAX_CRITICAL_STATE */
		if (critical_state > 0)
			critical_state++;
		if (critical_state > MAX_CRITICAL_STATE) {
			printk(KERN_WARNING "Shutdown timed out, power off now !\n");
			machine_power_off();
		}

		// FIXME: Deal with signals
		/* Sleep out the remainder of the one second period */
		elapsed = jiffies - start;
		if (elapsed < HZ)
			schedule_timeout_interruptible(HZ - elapsed);
	}

 out:
	DBG("main_control_loop ended\n");

	/* Signal stop_control_loops() that the thread is gone */
	ctrl_task = 0;
	complete_and_exit(&ctrl_complete, 0);
}
1881
1882/*
1883 * Dispose the control loops when tearing down
1884 */
static void dispose_control_loops(void)
{
	/* Also used as the failure path of create_control_loops(), so some
	 * loops may never have been initialized. The slots/dimms dispose
	 * helpers guard on a NULL monitor; presumably the other dispose
	 * helpers do the same — TODO confirm for the processor/backside/
	 * drives variants. */
	dispose_processor_state(&processor_state[0]);
	dispose_processor_state(&processor_state[1]);
	dispose_backside_state(&backside_state);
	dispose_drives_state(&drives_state);
	dispose_slots_state(&slots_state);
	dispose_dimms_state(&dimms_state);
}
1894
1895/*
1896 * Create the control loops. U3-0 i2c bus is up, so we can now
1897 * get to the various sensors
1898 */
static int create_control_loops(void)
{
	struct device_node *np;

	/* Count CPUs from the device-tree, we don't care how many are
	 * actually used by Linux
	 */
	cpu_count = 0;
	for (np = NULL; NULL != (np = of_find_node_by_type(np, "cpu"));)
		cpu_count++;

	DBG("counted %d CPUs in the device-tree\n", cpu_count);

	/* Decide the type of PID algorithm to use based on the presence of
	 * the pumps, though that may not be the best way, that is good enough
	 * for now
	 */
	if (rackmac)
		cpu_pid_type = CPU_PID_TYPE_RACKMAC;
	else if (of_machine_is_compatible("PowerMac7,3")
	    && (cpu_count > 1)
	    && fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID
	    && fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID) {
		printk(KERN_INFO "Liquid cooling pumps detected, using new algorithm !\n");
		cpu_pid_type = CPU_PID_TYPE_COMBINED;
	} else
		cpu_pid_type = CPU_PID_TYPE_SPLIT;

	/* Create control loops for everything. If any fail, everything
	 * fails
	 */
	if (init_processor_state(&processor_state[0], 0))
		goto fail;
	/* The combined (pump) algorithm needs the pump min/max values */
	if (cpu_pid_type == CPU_PID_TYPE_COMBINED)
		fetch_cpu_pumps_minmax();

	if (cpu_count > 1 && init_processor_state(&processor_state[1], 1))
		goto fail;
	if (init_backside_state(&backside_state))
		goto fail;
	/* DIMM and slots loops exist only on the Xserve (rackmac), the
	 * drive bay loop only on the desktops */
	if (rackmac && init_dimms_state(&dimms_state))
		goto fail;
	if (rackmac && init_slots_state(&slots_state))
		goto fail;
	if (!rackmac && init_drives_state(&drives_state))
		goto fail;

	DBG("all control loops up !\n");

	return 0;

 fail:
	DBG("failure creating control loops, disposing\n");

	dispose_control_loops();

	return -ENODEV;
}
1957
1958/*
1959 * Start the control loops after everything is up, that is create
1960 * the thread that will make them run
1961 */
static void start_control_loops(void)
{
	/* ctrl_complete is completed by main_control_loop() when it exits
	 * and waited on by stop_control_loops() */
	init_completion(&ctrl_complete);

	ctrl_task = kthread_run(main_control_loop, NULL, "kfand");
}
1968
1969/*
1970 * Stop the control loops when tearing down
1971 */
1972static void stop_control_loops(void)
1973{
1974 if (ctrl_task)
1975 wait_for_completion(&ctrl_complete);
1976}
1977
1978/*
1979 * Attach to the i2c FCU after detecting U3-1 bus
1980 */
1981static int attach_fcu(void)
1982{
1983 fcu = attach_i2c_chip(FAN_CTRLER_ID, "fcu");
1984 if (fcu == NULL)
1985 return -ENODEV;
1986
1987 DBG("FCU attached\n");
1988
1989 return 0;
1990}
1991
1992/*
1993 * Detach from the i2c FCU when tearing down
1994 */
static void detach_fcu(void)
{
	/* NOTE(review): only drops our pointer; assumes the underlying i2c
	 * client is released elsewhere — confirm against attach_i2c_chip() */
	fcu = NULL;
}
1999
2000/*
2001 * Attach to the i2c controller. We probe the various chips based
2002 * on the device-tree nodes and build everything for the driver to
2003 * run, we then kick the driver monitoring thread
2004 */
static int therm_pm72_attach(struct i2c_adapter *adapter)
{
	mutex_lock(&driver_lock);

	/* Check state */
	if (state == state_detached)
		state = state_attaching;
	if (state != state_attaching) {
		mutex_unlock(&driver_lock);
		return 0;
	}

	/* Check if we are looking for one of these */
	if (u3_0 == NULL && !strcmp(adapter->name, "u3 0")) {
		/* U3-0 carries the sensors: create the control loops.
		 * On an Xserve (rackmac) this also requires K2 */
		u3_0 = adapter;
		DBG("found U3-0\n");
		if (k2 || !rackmac)
			if (create_control_loops())
				u3_0 = NULL;
	} else if (u3_1 == NULL && !strcmp(adapter->name, "u3 1")) {
		/* U3-1 carries the fan control unit */
		u3_1 = adapter;
		DBG("found U3-1, attaching FCU\n");
		if (attach_fcu())
			u3_1 = NULL;
	} else if (k2 == NULL && !strcmp(adapter->name, "mac-io 0")) {
		/* K2 (mac-io) bus, used in the Xserve configuration */
		k2 = adapter;
		DBG("Found K2\n");
		if (u3_0 && rackmac)
			if (create_control_loops())
				k2 = NULL;
	}
	/* We got all we need, start control loops */
	if (u3_0 != NULL && u3_1 != NULL && (k2 || !rackmac)) {
		DBG("everything up, starting control loops\n");
		state = state_attached;
		start_control_loops();
	}
	mutex_unlock(&driver_lock);

	return 0;
}
2046
/* i2c probe callback: nothing to do per-client, everything is bus-driven */
static int therm_pm72_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	/* Always succeed, the real work was done in therm_pm72_attach() */
	return 0;
}
2053
2054/*
2055 * Called when any of the devices which participates into thermal management
2056 * is going away.
2057 */
static int therm_pm72_remove(struct i2c_client *client)
{
	struct i2c_adapter *adapter = client->adapter;

	mutex_lock(&driver_lock);

	if (state != state_detached)
		state = state_detaching;

	/* Stop control loops if any */
	DBG("stopping control loops\n");
	/* Drop the lock while waiting: main_control_loop() takes it on
	 * every iteration and must run to observe state_detaching */
	mutex_unlock(&driver_lock);
	stop_control_loops();
	mutex_lock(&driver_lock);

	/* Losing U3-0 means losing the sensors: tear down the loops */
	if (u3_0 != NULL && !strcmp(adapter->name, "u3 0")) {
		DBG("lost U3-0, disposing control loops\n");
		dispose_control_loops();
		u3_0 = NULL;
	}

	/* Losing U3-1 means losing the fan controller */
	if (u3_1 != NULL && !strcmp(adapter->name, "u3 1")) {
		DBG("lost U3-1, detaching FCU\n");
		detach_fcu();
		u3_1 = NULL;
	}
	if (u3_0 == NULL && u3_1 == NULL)
		state = state_detached;

	mutex_unlock(&driver_lock);

	return 0;
}
2091
2092/*
2093 * i2c_driver structure to attach to the host i2c controller
2094 */
2095
static const struct i2c_device_id therm_pm72_id[] = {
	/*
	 * Fake device name, thermal management is done by several
	 * chips but we don't need to differentiate between them at
	 * this point.
	 */
	{ "therm_pm72", 0 },
	{ }
};

static struct i2c_driver therm_pm72_driver = {
	.driver = {
		.name = "therm_pm72",
	},
	/* NOTE(review): .attach_adapter is the legacy bus-notification hook;
	 * it is how this driver discovers the u3/mac-io adapters — confirm
	 * it is still supported by the i2c core in use */
	.attach_adapter	= therm_pm72_attach,
	.probe		= therm_pm72_probe,
	.remove		= therm_pm72_remove,
	.id_table	= therm_pm72_id,
};
2115
2116static int fan_check_loc_match(const char *loc, int fan)
2117{
2118 char tmp[64];
2119 char *c, *e;
2120
2121 strlcpy(tmp, fcu_fans[fan].loc, 64);
2122
2123 c = tmp;
2124 for (;;) {
2125 e = strchr(c, ',');
2126 if (e)
2127 *e = 0;
2128 if (strcmp(loc, c) == 0)
2129 return 1;
2130 if (e == NULL)
2131 break;
2132 c = e + 1;
2133 }
2134 return 0;
2135}
2136
static void fcu_lookup_fans(struct device_node *fcu_node)
{
	struct device_node *np = NULL;
	int i;

	/* The table is filled by default with values that are suitable
	 * for the old machines without device-tree information. We scan
	 * the device-tree and override those values with whatever is
	 * there
	 */

	DBG("Looking up FCU controls in device-tree...\n");

	while ((np = of_get_next_child(fcu_node, np)) != NULL) {
		int type = -1;
		const char *loc;
		const u32 *reg;

		DBG(" control: %s, type: %s\n", np->name, np->type);

		/* Detect control type */
		if (!strcmp(np->type, "fan-rpm-control") ||
		    !strcmp(np->type, "fan-rpm"))
			type = FCU_FAN_RPM;
		if (!strcmp(np->type, "fan-pwm-control") ||
		    !strcmp(np->type, "fan-pwm"))
			type = FCU_FAN_PWM;
		/* Only care about fans for now */
		if (type == -1)
			continue;

		/* Lookup for a matching location */
		loc = of_get_property(np, "location", NULL);
		reg = of_get_property(np, "reg", NULL);
		if (loc == NULL || reg == NULL)
			continue;
		DBG(" matching location: %s, reg: 0x%08x\n", loc, *reg);

		for (i = 0; i < FCU_FAN_COUNT; i++) {
			int fan_id;

			if (!fan_check_loc_match(loc, i))
				continue;
			DBG(" location match, index: %d\n", i);
			/* Mark absent first; only install a real ID below if
			 * the type matches and the register decodes cleanly */
			fcu_fans[i].id = FCU_FAN_ABSENT_ID;
			if (type != fcu_fans[i].type) {
				printk(KERN_WARNING "therm_pm72: Fan type mismatch "
				       "in device-tree for %s\n", np->full_name);
				break;
			}
			/* assumes RPM fan registers start at 0x10 and PWM
			 * registers at 0x30, two bytes per fan — TODO confirm
			 * against the FCU register layout */
			if (type == FCU_FAN_RPM)
				fan_id = ((*reg) - 0x10) / 2;
			else
				fan_id = ((*reg) - 0x30) / 2;
			if (fan_id > 7) {
				printk(KERN_WARNING "therm_pm72: Can't parse "
				       "fan ID in device-tree for %s\n", np->full_name);
				break;
			}
			DBG(" fan id -> %d, type -> %d\n", fan_id, type);
			fcu_fans[i].id = fan_id;
		}
	}

	/* Now dump the array */
	printk(KERN_INFO "Detected fan controls:\n");
	for (i = 0; i < FCU_FAN_COUNT; i++) {
		if (fcu_fans[i].id == FCU_FAN_ABSENT_ID)
			continue;
		printk(KERN_INFO " %d: %s fan, id %d, location: %s\n", i,
		       fcu_fans[i].type == FCU_FAN_RPM ? "RPM" : "PWM",
		       fcu_fans[i].id, fcu_fans[i].loc);
	}
}
2211
2212static int fcu_of_probe(struct platform_device* dev)
2213{
2214 state = state_detached;
2215 of_dev = dev;
2216
2217 dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION);
2218
2219 /* Lookup the fans in the device tree */
2220 fcu_lookup_fans(dev->dev.of_node);
2221
2222 /* Add the driver */
2223 return i2c_add_driver(&therm_pm72_driver);
2224}
2225
static int fcu_of_remove(struct platform_device* dev)
{
	/* The i2c core detaches any bound clients here, which invokes
	 * therm_pm72_remove() and tears down the control loops */
	i2c_del_driver(&therm_pm72_driver);

	return 0;
}
2232
/* Match the device-tree node of type "fcu" (the fan control unit) */
static const struct of_device_id fcu_match[] =
{
	{
	.type		= "fcu",
	},
	{},
};
MODULE_DEVICE_TABLE(of, fcu_match);

static struct platform_driver fcu_of_platform_driver =
{
	.driver = {
		.name = "temperature",
		.of_match_table = fcu_match,
	},
	.probe		= fcu_of_probe,
	.remove		= fcu_of_remove
};
2251
2252/*
2253 * Check machine type, attach to i2c controller
2254 */
2255static int __init therm_pm72_init(void)
2256{
2257 rackmac = of_machine_is_compatible("RackMac3,1");
2258
2259 if (!of_machine_is_compatible("PowerMac7,2") &&
2260 !of_machine_is_compatible("PowerMac7,3") &&
2261 !rackmac)
2262 return -ENODEV;
2263
2264 return platform_driver_register(&fcu_of_platform_driver);
2265}
2266
static void __exit therm_pm72_exit(void)
{
	/* Unregistering runs fcu_of_remove(), which deletes the i2c driver */
	platform_driver_unregister(&fcu_of_platform_driver);
}
2271
2272module_init(therm_pm72_init);
2273module_exit(therm_pm72_exit);
2274
2275MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
2276MODULE_DESCRIPTION("Driver for Apple's PowerMac G5 thermal control");
2277MODULE_LICENSE("GPL");
2278
diff --git a/drivers/macintosh/therm_pm72.h b/drivers/macintosh/therm_pm72.h
deleted file mode 100644
index df3680e2a22f..000000000000
--- a/drivers/macintosh/therm_pm72.h
+++ /dev/null
@@ -1,326 +0,0 @@
1#ifndef __THERM_PMAC_7_2_H__
2#define __THERM_PMAC_7_2_H__
3
4typedef unsigned short fu16;
5typedef int fs32;
6typedef short fs16;
7
8struct mpu_data
9{
10 u8 signature; /* 0x00 - EEPROM sig. */
11 u8 bytes_used; /* 0x01 - Bytes used in eeprom (160 ?) */
12 u8 size; /* 0x02 - EEPROM size (256 ?) */
13 u8 version; /* 0x03 - EEPROM version */
14 u32 data_revision; /* 0x04 - Dataset revision */
15 u8 processor_bin_code[3]; /* 0x08 - Processor BIN code */
16 u8 bin_code_expansion; /* 0x0b - ??? (padding ?) */
17 u8 processor_num; /* 0x0c - Number of CPUs on this MPU */
18 u8 input_mul_bus_div; /* 0x0d - Clock input multiplier/bus divider */
19 u8 reserved1[2]; /* 0x0e - */
20 u32 input_clk_freq_high; /* 0x10 - Input clock frequency high */
21 u8 cpu_nb_target_cycles; /* 0x14 - ??? */
22 u8 cpu_statlat; /* 0x15 - ??? */
23 u8 cpu_snooplat; /* 0x16 - ??? */
24 u8 cpu_snoopacc; /* 0x17 - ??? */
25 u8 nb_paamwin; /* 0x18 - ??? */
26 u8 nb_statlat; /* 0x19 - ??? */
27 u8 nb_snooplat; /* 0x1a - ??? */
28 u8 nb_snoopwin; /* 0x1b - ??? */
29 u8 api_bus_mode; /* 0x1c - ??? */
30 u8 reserved2[3]; /* 0x1d - */
31 u32 input_clk_freq_low; /* 0x20 - Input clock frequency low */
32 u8 processor_card_slot; /* 0x24 - Processor card slot number */
33 u8 reserved3[2]; /* 0x25 - */
34 u8 padjmax; /* 0x27 - Max power adjustment (Not in OF!) */
35 u8 ttarget; /* 0x28 - Target temperature */
36 u8 tmax; /* 0x29 - Max temperature */
37 u8 pmaxh; /* 0x2a - Max power */
38 u8 tguardband; /* 0x2b - Guardband temp ??? Hist. len in OSX */
39 fs32 pid_gp; /* 0x2c - PID proportional gain */
40 fs32 pid_gr; /* 0x30 - PID reset gain */
41 fs32 pid_gd; /* 0x34 - PID derivative gain */
42 fu16 voph; /* 0x38 - Vop High */
43 fu16 vopl; /* 0x3a - Vop Low */
44 fs16 nactual_die; /* 0x3c - nActual Die */
45 fs16 nactual_heatsink; /* 0x3e - nActual Heatsink */
46 fs16 nactual_system; /* 0x40 - nActual System */
47 u16 calibration_flags; /* 0x42 - Calibration flags */
48 fu16 mdiode; /* 0x44 - Diode M value (scaling factor) */
49 fs16 bdiode; /* 0x46 - Diode B value (offset) */
50 fs32 theta_heat_sink; /* 0x48 - Theta heat sink */
51 u16 rminn_intake_fan; /* 0x4c - Intake fan min RPM */
52 u16 rmaxn_intake_fan; /* 0x4e - Intake fan max RPM */
53 u16 rminn_exhaust_fan; /* 0x50 - Exhaust fan min RPM */
54 u16 rmaxn_exhaust_fan; /* 0x52 - Exhaust fan max RPM */
55 u8 processor_part_num[8]; /* 0x54 - Processor part number XX pumps min/max */
56 u32 processor_lot_num; /* 0x5c - Processor lot number */
57 u8 orig_card_sernum[0x10]; /* 0x60 - Card original serial number */
58 u8 curr_card_sernum[0x10]; /* 0x70 - Card current serial number */
59 u8 mlb_sernum[0x18]; /* 0x80 - MLB serial number */
60 u32 checksum1; /* 0x98 - */
61 u32 checksum2; /* 0x9c - */
62}; /* Total size = 0xa0 */
63
64/* Display a 16.16 fixed point value */
65#define FIX32TOPRINT(f) ((f) >> 16),((((f) & 0xffff) * 1000) >> 16)
66
67/*
68 * Maximum number of seconds to be in critical state (after a
69 * normal shutdown attempt). If the machine isn't down after
70 * this counter elapses, we force an immediate machine power
71 * off.
72 */
73#define MAX_CRITICAL_STATE 30
74static char * critical_overtemp_path = "/sbin/critical_overtemp";
75
76/*
77 * This option is "weird" :) Basically, if you define this to 1
78 * the control loop for the RPMs fans (not PWMs) will apply the
79 * correction factor obtained from the PID to the _actual_ RPM
80 * speed read from the FCU.
81 * If you define the below constant to 0, then it will be
82 * applied to the setpoint RPM speed, that is basically the
 83 * speed we previously "asked" for.
84 *
85 * I'm not sure which of these Apple's algorithm is supposed
86 * to use
87 */
88#define RPM_PID_USE_ACTUAL_SPEED 0
89
90/*
91 * i2c IDs. Currently, we hard code those and assume that
92 * the FCU is on U3 bus 1 while all sensors are on U3 bus
93 * 0. This appear to be safe enough for this first version
94 * of the driver, though I would accept any clean patch
95 * doing a better use of the device-tree without turning the
 96 * whole i2c registration mechanism into a racy mess
97 *
98 * Note: Xserve changed this. We have some bits on the K2 bus,
99 * which I arbitrarily set to 0x200. Ultimately, we really want
 100 * to look up these in the device-tree though
101 */
102#define FAN_CTRLER_ID 0x15e
103#define SUPPLY_MONITOR_ID 0x58
104#define SUPPLY_MONITORB_ID 0x5a
105#define DRIVES_DALLAS_ID 0x94
106#define BACKSIDE_MAX_ID 0x98
107#define XSERVE_DIMMS_LM87 0x25a
108#define XSERVE_SLOTS_LM75 0x290
109
110/*
111 * Some MAX6690, DS1775, LM87 register definitions
112 */
113#define MAX6690_INT_TEMP 0
114#define MAX6690_EXT_TEMP 1
115#define DS1775_TEMP 0
116#define LM87_INT_TEMP 0x27
117
118/*
119 * Scaling factors for the AD7417 ADC converters (except
120 * for the CPU diode which is obtained from the EEPROM).
121 * Those values are obtained from the property list of
122 * the darwin driver
123 */
124#define ADC_12V_CURRENT_SCALE 0x0320 /* _AD2 */
125#define ADC_CPU_VOLTAGE_SCALE 0x00a0 /* _AD3 */
126#define ADC_CPU_CURRENT_SCALE 0x1f40 /* _AD4 */
127
128/*
129 * PID factors for the U3/Backside fan control loop. We have 2 sets
130 * of values here, one set for U3 and one set for U3H
131 */
132#define BACKSIDE_FAN_PWM_DEFAULT_ID 1
133#define BACKSIDE_FAN_PWM_INDEX 0
134#define BACKSIDE_PID_U3_G_d 0x02800000
135#define BACKSIDE_PID_U3H_G_d 0x01400000
136#define BACKSIDE_PID_RACK_G_d 0x00500000
137#define BACKSIDE_PID_G_p 0x00500000
138#define BACKSIDE_PID_RACK_G_p 0x0004cccc
139#define BACKSIDE_PID_G_r 0x00000000
140#define BACKSIDE_PID_U3_INPUT_TARGET 0x00410000
141#define BACKSIDE_PID_U3H_INPUT_TARGET 0x004b0000
142#define BACKSIDE_PID_RACK_INPUT_TARGET 0x00460000
143#define BACKSIDE_PID_INTERVAL 5
144#define BACKSIDE_PID_RACK_INTERVAL 1
145#define BACKSIDE_PID_OUTPUT_MAX 100
146#define BACKSIDE_PID_U3_OUTPUT_MIN 20
147#define BACKSIDE_PID_U3H_OUTPUT_MIN 20
148#define BACKSIDE_PID_HISTORY_SIZE 2
149
150struct basckside_pid_params
151{
152 s32 G_d;
153 s32 G_p;
154 s32 G_r;
155 s32 input_target;
156 s32 output_min;
157 s32 output_max;
158 s32 interval;
159 int additive;
160};
161
162struct backside_pid_state
163{
164 int ticks;
165 struct i2c_client * monitor;
166 s32 sample_history[BACKSIDE_PID_HISTORY_SIZE];
167 s32 error_history[BACKSIDE_PID_HISTORY_SIZE];
168 int cur_sample;
169 s32 last_temp;
170 int pwm;
171 int first;
172};
173
174/*
175 * PID factors for the Drive Bay fan control loop
176 */
177#define DRIVES_FAN_RPM_DEFAULT_ID 2
178#define DRIVES_FAN_RPM_INDEX 1
179#define DRIVES_PID_G_d 0x01e00000
180#define DRIVES_PID_G_p 0x00500000
181#define DRIVES_PID_G_r 0x00000000
182#define DRIVES_PID_INPUT_TARGET 0x00280000
183#define DRIVES_PID_INTERVAL 5
184#define DRIVES_PID_OUTPUT_MAX 4000
185#define DRIVES_PID_OUTPUT_MIN 300
186#define DRIVES_PID_HISTORY_SIZE 2
187
188struct drives_pid_state
189{
190 int ticks;
191 struct i2c_client * monitor;
192 s32 sample_history[BACKSIDE_PID_HISTORY_SIZE];
193 s32 error_history[BACKSIDE_PID_HISTORY_SIZE];
194 int cur_sample;
195 s32 last_temp;
196 int rpm;
197 int first;
198};
199
200#define SLOTS_FAN_PWM_DEFAULT_ID 2
201#define SLOTS_FAN_PWM_INDEX 2
202#define SLOTS_FAN_DEFAULT_PWM 40 /* Do better here ! */
203
204
205/*
206 * PID factors for the Xserve DIMM control loop
207 */
208#define DIMM_PID_G_d 0
209#define DIMM_PID_G_p 0
210#define DIMM_PID_G_r 0x06553600
211#define DIMM_PID_INPUT_TARGET 3276800
212#define DIMM_PID_INTERVAL 1
213#define DIMM_PID_OUTPUT_MAX 14000
214#define DIMM_PID_OUTPUT_MIN 4000
215#define DIMM_PID_HISTORY_SIZE 20
216
217struct dimm_pid_state
218{
219 int ticks;
220 struct i2c_client * monitor;
221 s32 sample_history[DIMM_PID_HISTORY_SIZE];
222 s32 error_history[DIMM_PID_HISTORY_SIZE];
223 int cur_sample;
224 s32 last_temp;
225 int first;
226 int output;
227};
228
229
230/*
231 * PID factors for the Xserve Slots control loop
232 */
233#define SLOTS_PID_G_d 0
234#define SLOTS_PID_G_p 0
235#define SLOTS_PID_G_r 0x00100000
236#define SLOTS_PID_INPUT_TARGET 3200000
237#define SLOTS_PID_INTERVAL 1
238#define SLOTS_PID_OUTPUT_MAX 100
239#define SLOTS_PID_OUTPUT_MIN 20
240#define SLOTS_PID_HISTORY_SIZE 20
241
242struct slots_pid_state
243{
244 int ticks;
245 struct i2c_client * monitor;
246 s32 sample_history[SLOTS_PID_HISTORY_SIZE];
247 s32 error_history[SLOTS_PID_HISTORY_SIZE];
248 int cur_sample;
249 s32 last_temp;
250 int first;
251 int pwm;
252};
253
254
255
256/* Desktops */
257
258#define CPUA_INTAKE_FAN_RPM_DEFAULT_ID 3
259#define CPUA_EXHAUST_FAN_RPM_DEFAULT_ID 4
260#define CPUB_INTAKE_FAN_RPM_DEFAULT_ID 5
261#define CPUB_EXHAUST_FAN_RPM_DEFAULT_ID 6
262
263#define CPUA_INTAKE_FAN_RPM_INDEX 3
264#define CPUA_EXHAUST_FAN_RPM_INDEX 4
265#define CPUB_INTAKE_FAN_RPM_INDEX 5
266#define CPUB_EXHAUST_FAN_RPM_INDEX 6
267
268#define CPU_INTAKE_SCALE 0x0000f852
269#define CPU_TEMP_HISTORY_SIZE 2
270#define CPU_POWER_HISTORY_SIZE 10
271#define CPU_PID_INTERVAL 1
272#define CPU_MAX_OVERTEMP 90
273
274#define CPUA_PUMP_RPM_INDEX 7
275#define CPUB_PUMP_RPM_INDEX 8
276#define CPU_PUMP_OUTPUT_MAX 3200
277#define CPU_PUMP_OUTPUT_MIN 1250
278
279/* Xserve */
280#define CPU_A1_FAN_RPM_INDEX 9
281#define CPU_A2_FAN_RPM_INDEX 10
282#define CPU_A3_FAN_RPM_INDEX 11
283#define CPU_B1_FAN_RPM_INDEX 12
284#define CPU_B2_FAN_RPM_INDEX 13
285#define CPU_B3_FAN_RPM_INDEX 14
286
287
288struct cpu_pid_state
289{
290 int index;
291 struct i2c_client * monitor;
292 struct mpu_data mpu;
293 int overtemp;
294 s32 temp_history[CPU_TEMP_HISTORY_SIZE];
295 int cur_temp;
296 s32 power_history[CPU_POWER_HISTORY_SIZE];
297 s32 error_history[CPU_POWER_HISTORY_SIZE];
298 int cur_power;
299 int count_power;
300 int rpm;
301 int intake_rpm;
302 s32 voltage;
303 s32 current_a;
304 s32 last_temp;
305 s32 last_power;
306 int first;
307 u8 adc_config;
308 s32 pump_min;
309 s32 pump_max;
310};
311
312/* Tickle FCU every 10 seconds */
313#define FCU_TICKLE_TICKS 10
314
315/*
316 * Driver state
317 */
318enum {
319 state_detached,
320 state_attaching,
321 state_attached,
322 state_detaching,
323};
324
325
326#endif /* __THERM_PMAC_7_2_H__ */
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index aa915da2a5e5..82abfce1cb42 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -176,7 +176,6 @@ STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
176STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt); 176STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
177STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt); 177STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
178static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth); 178static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
179static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
180 179
181STATIC struct device_attribute *NCR_700_dev_attrs[]; 180STATIC struct device_attribute *NCR_700_dev_attrs[];
182 181
@@ -326,7 +325,6 @@ NCR_700_detect(struct scsi_host_template *tpnt,
326 tpnt->slave_destroy = NCR_700_slave_destroy; 325 tpnt->slave_destroy = NCR_700_slave_destroy;
327 tpnt->slave_alloc = NCR_700_slave_alloc; 326 tpnt->slave_alloc = NCR_700_slave_alloc;
328 tpnt->change_queue_depth = NCR_700_change_queue_depth; 327 tpnt->change_queue_depth = NCR_700_change_queue_depth;
329 tpnt->change_queue_type = NCR_700_change_queue_type;
330 tpnt->use_blk_tags = 1; 328 tpnt->use_blk_tags = 1;
331 329
332 if(tpnt->name == NULL) 330 if(tpnt->name == NULL)
@@ -904,8 +902,8 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
904 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); 902 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
905 903
906 SCp->device->tagged_supported = 0; 904 SCp->device->tagged_supported = 0;
905 SCp->device->simple_tags = 0;
907 scsi_change_queue_depth(SCp->device, host->cmd_per_lun); 906 scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
908 scsi_set_tag_type(SCp->device, 0);
909 } else { 907 } else {
910 shost_printk(KERN_WARNING, host, 908 shost_printk(KERN_WARNING, host,
911 "(%d:%d) Unexpected REJECT Message %s\n", 909 "(%d:%d) Unexpected REJECT Message %s\n",
@@ -1818,8 +1816,8 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
1818 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp)); 1816 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1819 } 1817 }
1820 1818
1821 if((hostdata->tag_negotiated &(1<<scmd_id(SCp))) 1819 if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
1822 && scsi_get_tag_type(SCp->device)) { 1820 SCp->device->simple_tags) {
1823 slot->tag = SCp->request->tag; 1821 slot->tag = SCp->request->tag;
1824 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n", 1822 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1825 slot->tag, slot); 1823 slot->tag, slot);
@@ -2082,39 +2080,6 @@ NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2082 return scsi_change_queue_depth(SDp, depth); 2080 return scsi_change_queue_depth(SDp, depth);
2083} 2081}
2084 2082
2085static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2086{
2087 int change_tag = ((tag_type ==0 && scsi_get_tag_type(SDp) != 0)
2088 || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2089 struct NCR_700_Host_Parameters *hostdata =
2090 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2091
2092 /* We have a global (per target) flag to track whether TCQ is
2093 * enabled, so we'll be turning it off for the entire target here.
2094 * our tag algorithm will fail if we mix tagged and untagged commands,
2095 * so quiesce the device before doing this */
2096 if (change_tag)
2097 scsi_target_quiesce(SDp->sdev_target);
2098
2099 scsi_set_tag_type(SDp, tag_type);
2100 if (!tag_type) {
2101 /* shift back to the default unqueued number of commands
2102 * (the user can still raise this) */
2103 scsi_change_queue_depth(SDp, SDp->host->cmd_per_lun);
2104 hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2105 } else {
2106 /* Here, we cleared the negotiation flag above, so this
2107 * will force the driver to renegotiate */
2108 scsi_change_queue_depth(SDp, SDp->queue_depth);
2109 if (change_tag)
2110 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2111 }
2112 if (change_tag)
2113 scsi_target_resume(SDp->sdev_target);
2114
2115 return tag_type;
2116}
2117
2118static ssize_t 2083static ssize_t
2119NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf) 2084NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2120{ 2085{
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 86cf3d671eb9..9c92f415229f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1462,18 +1462,17 @@ config SCSI_WD719X
1462 SCSI controllers (based on WD33C296A chip). 1462 SCSI controllers (based on WD33C296A chip).
1463 1463
1464config SCSI_DEBUG 1464config SCSI_DEBUG
1465 tristate "SCSI debugging host simulator" 1465 tristate "SCSI debugging host and device simulator"
1466 depends on SCSI 1466 depends on SCSI
1467 select CRC_T10DIF 1467 select CRC_T10DIF
1468 help 1468 help
1469 This is a host adapter simulator that can simulate multiple hosts 1469 This pseudo driver simulates one or more hosts (SCSI initiators),
1470 each with multiple dummy SCSI devices (disks). It defaults to one 1470 each with one or more targets, each with one or more logical units.
1471 host adapter with one dummy SCSI disk. Each dummy disk uses kernel 1471 Defaults to one of each, creating a small RAM disk device. Many
1472 RAM as storage (i.e. it is a ramdisk). To save space when multiple 1472 parameters found in the /sys/bus/pseudo/drivers/scsi_debug
1473 dummy disks are simulated, they share the same kernel RAM for 1473 directory can be tweaked at run time.
1474 their storage. See <http://sg.danny.cz/sg/sdebug26.html> for more 1474 See <http://sg.danny.cz/sg/sdebug26.html> for more information.
1475 information. This driver is primarily of use to those testing the 1475 Mainly used for testing and best as a module. If unsure, say N.
1476 SCSI and block subsystems. If unsure, say N.
1477 1476
1478config SCSI_MESH 1477config SCSI_MESH
1479 tristate "MESH (Power Mac internal SCSI) support" 1478 tristate "MESH (Power Mac internal SCSI) support"
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 6719a3390ebd..2c5ce48c8f95 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -7921,9 +7921,9 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
7921 */ 7921 */
7922 if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) && 7922 if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) &&
7923 (boardp->reqcnt[scp->device->id] % 255) == 0) { 7923 (boardp->reqcnt[scp->device->id] % 255) == 0) {
7924 asc_scsi_q->q2.tag_code = MSG_ORDERED_TAG; 7924 asc_scsi_q->q2.tag_code = ORDERED_QUEUE_TAG;
7925 } else { 7925 } else {
7926 asc_scsi_q->q2.tag_code = MSG_SIMPLE_TAG; 7926 asc_scsi_q->q2.tag_code = SIMPLE_QUEUE_TAG;
7927 } 7927 }
7928 7928
7929 /* Build ASC_SCSI_Q */ 7929 /* Build ASC_SCSI_Q */
@@ -8351,7 +8351,7 @@ static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
8351 } 8351 }
8352 q_addr = ASC_QNO_TO_QADDR(q_no); 8352 q_addr = ASC_QNO_TO_QADDR(q_no);
8353 if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) { 8353 if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) {
8354 scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG; 8354 scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG;
8355 } 8355 }
8356 scsiq->q1.status = QS_FREE; 8356 scsiq->q1.status = QS_FREE;
8357 AscMemWordCopyPtrToLram(iop_base, 8357 AscMemWordCopyPtrToLram(iop_base,
@@ -8669,7 +8669,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
8669 } 8669 }
8670 } 8670 }
8671 if (disable_syn_offset_one_fix) { 8671 if (disable_syn_offset_one_fix) {
8672 scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG; 8672 scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG;
8673 scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX | 8673 scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX |
8674 ASC_TAG_FLAG_DISABLE_DISCONNECT); 8674 ASC_TAG_FLAG_DISABLE_DISCONNECT);
8675 } else { 8675 } else {
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 14fc018436c2..02a2512b76a8 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -63,7 +63,6 @@ static struct scsi_host_template aic94xx_sht = {
63 .scan_finished = asd_scan_finished, 63 .scan_finished = asd_scan_finished,
64 .scan_start = asd_scan_start, 64 .scan_start = asd_scan_start,
65 .change_queue_depth = sas_change_queue_depth, 65 .change_queue_depth = sas_change_queue_depth,
66 .change_queue_type = sas_change_queue_type,
67 .bios_param = sas_bios_param, 66 .bios_param = sas_bios_param,
68 .can_queue = 1, 67 .can_queue = 1,
69 .cmd_per_lun = 1, 68 .cmd_per_lun = 1,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e861f286b42e..98d06d151958 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2792,7 +2792,6 @@ static struct scsi_host_template bnx2fc_shost_template = {
2792 .eh_host_reset_handler = fc_eh_host_reset, 2792 .eh_host_reset_handler = fc_eh_host_reset,
2793 .slave_alloc = fc_slave_alloc, 2793 .slave_alloc = fc_slave_alloc,
2794 .change_queue_depth = scsi_change_queue_depth, 2794 .change_queue_depth = scsi_change_queue_depth,
2795 .change_queue_type = scsi_change_queue_type,
2796 .this_id = -1, 2795 .this_id = -1,
2797 .cmd_per_lun = 3, 2796 .cmd_per_lun = 3,
2798 .use_clustering = ENABLE_CLUSTERING, 2797 .use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4b56858c1df2..9ecca8504f60 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1737,11 +1737,7 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
1737 fcp_cmnd->fc_pri_ta = 0; 1737 fcp_cmnd->fc_pri_ta = 0;
1738 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; 1738 fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
1739 fcp_cmnd->fc_flags = io_req->io_req_flags; 1739 fcp_cmnd->fc_flags = io_req->io_req_flags;
1740 1740 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
1741 if (sc_cmd->flags & SCMD_TAGGED)
1742 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
1743 else
1744 fcp_cmnd->fc_pri_ta = 0;
1745} 1741}
1746 1742
1747static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, 1743static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 51ea5dc5f084..3987284e0d2a 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -172,10 +172,7 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
172 fcp_cmnd->fc_cmdref = 0; 172 fcp_cmnd->fc_cmdref = 0;
173 173
174 memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16); 174 memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
175 if (scmnd->flags & SCMD_TAGGED) 175 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
176 fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
177 else
178 fcp_cmnd->fc_pri_ta = 0;
179 fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd)); 176 fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));
180 177
181 if (req->nsge) 178 if (req->nsge)
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
index b7dc59fca7a6..7bd376d95ed5 100644
--- a/drivers/scsi/esas2r/esas2r_flash.c
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -684,9 +684,9 @@ static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
684 * 1) verify the fi_version is correct 684 * 1) verify the fi_version is correct
685 * 2) verify the checksum of the entire image. 685 * 2) verify the checksum of the entire image.
686 * 3) validate the adap_typ, action and length fields. 686 * 3) validate the adap_typ, action and length fields.
687 * 4) valdiate each component header. check the img_type and 687 * 4) validate each component header. check the img_type and
688 * length fields 688 * length fields
689 * 5) valdiate each component image. validate signatures and 689 * 5) validate each component image. validate signatures and
690 * local checksums 690 * local checksums
691 */ 691 */
692static bool verify_fi(struct esas2r_adapter *a, 692static bool verify_fi(struct esas2r_adapter *a,
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 593ff8a63c70..7e1c21e6736b 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -255,7 +255,6 @@ static struct scsi_host_template driver_template = {
255 .emulated = 0, 255 .emulated = 0,
256 .proc_name = ESAS2R_DRVR_NAME, 256 .proc_name = ESAS2R_DRVR_NAME,
257 .change_queue_depth = scsi_change_queue_depth, 257 .change_queue_depth = scsi_change_queue_depth,
258 .change_queue_type = scsi_change_queue_type,
259 .max_sectors = 0xFFFF, 258 .max_sectors = 0xFFFF,
260 .use_blk_tags = 1, 259 .use_blk_tags = 1,
261}; 260};
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cd00a6cdf55b..ec193a8357d7 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -281,7 +281,6 @@ static struct scsi_host_template fcoe_shost_template = {
281 .eh_host_reset_handler = fc_eh_host_reset, 281 .eh_host_reset_handler = fc_eh_host_reset,
282 .slave_alloc = fc_slave_alloc, 282 .slave_alloc = fc_slave_alloc,
283 .change_queue_depth = scsi_change_queue_depth, 283 .change_queue_depth = scsi_change_queue_depth,
284 .change_queue_type = scsi_change_queue_type,
285 .this_id = -1, 284 .this_id = -1,
286 .cmd_per_lun = 3, 285 .cmd_per_lun = 3,
287 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS, 286 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 0c1f8177b5b7..8a0d4d7b3254 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -111,7 +111,6 @@ static struct scsi_host_template fnic_host_template = {
111 .eh_host_reset_handler = fnic_host_reset, 111 .eh_host_reset_handler = fnic_host_reset,
112 .slave_alloc = fnic_slave_alloc, 112 .slave_alloc = fnic_slave_alloc,
113 .change_queue_depth = scsi_change_queue_depth, 113 .change_queue_depth = scsi_change_queue_depth,
114 .change_queue_type = scsi_change_queue_type,
115 .this_id = -1, 114 .this_id = -1,
116 .cmd_per_lun = 3, 115 .cmd_per_lun = 3,
117 .can_queue = FNIC_DFLT_IO_REQ, 116 .can_queue = FNIC_DFLT_IO_REQ,
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index f58c6d8e0264..057d27721d5b 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1615,7 +1615,6 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
1615 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 1615 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1616 struct ibmvfc_cmd *vfc_cmd; 1616 struct ibmvfc_cmd *vfc_cmd;
1617 struct ibmvfc_event *evt; 1617 struct ibmvfc_event *evt;
1618 u8 tag[2];
1619 int rc; 1618 int rc;
1620 1619
1621 if (unlikely((rc = fc_remote_port_chkready(rport))) || 1620 if (unlikely((rc = fc_remote_port_chkready(rport))) ||
@@ -3089,7 +3088,6 @@ static struct scsi_host_template driver_template = {
3089 .target_alloc = ibmvfc_target_alloc, 3088 .target_alloc = ibmvfc_target_alloc,
3090 .scan_finished = ibmvfc_scan_finished, 3089 .scan_finished = ibmvfc_scan_finished,
3091 .change_queue_depth = ibmvfc_change_queue_depth, 3090 .change_queue_depth = ibmvfc_change_queue_depth,
3092 .change_queue_type = scsi_change_queue_type,
3093 .cmd_per_lun = 16, 3091 .cmd_per_lun = 16,
3094 .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT, 3092 .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
3095 .this_id = -1, 3093 .this_id = -1,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 540294389355..df4e27cd996a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1426,16 +1426,14 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1426 if (res->sdev) { 1426 if (res->sdev) {
1427 res->del_from_ml = 1; 1427 res->del_from_ml = 1;
1428 res->res_handle = IPR_INVALID_RES_HANDLE; 1428 res->res_handle = IPR_INVALID_RES_HANDLE;
1429 if (ioa_cfg->allow_ml_add_del) 1429 schedule_work(&ioa_cfg->work_q);
1430 schedule_work(&ioa_cfg->work_q);
1431 } else { 1430 } else {
1432 ipr_clear_res_target(res); 1431 ipr_clear_res_target(res);
1433 list_move_tail(&res->queue, &ioa_cfg->free_res_q); 1432 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1434 } 1433 }
1435 } else if (!res->sdev || res->del_from_ml) { 1434 } else if (!res->sdev || res->del_from_ml) {
1436 res->add_to_ml = 1; 1435 res->add_to_ml = 1;
1437 if (ioa_cfg->allow_ml_add_del) 1436 schedule_work(&ioa_cfg->work_q);
1438 schedule_work(&ioa_cfg->work_q);
1439 } 1437 }
1440 1438
1441 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); 1439 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
@@ -3273,8 +3271,7 @@ static void ipr_worker_thread(struct work_struct *work)
3273restart: 3271restart:
3274 do { 3272 do {
3275 did_work = 0; 3273 did_work = 0;
3276 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds || 3274 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3277 !ioa_cfg->allow_ml_add_del) {
3278 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3275 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3279 return; 3276 return;
3280 } 3277 }
@@ -3311,6 +3308,7 @@ restart:
3311 } 3308 }
3312 } 3309 }
3313 3310
3311 ioa_cfg->scan_done = 1;
3314 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3312 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3315 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); 3313 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3316 LEAVE; 3314 LEAVE;
@@ -4346,30 +4344,6 @@ static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4346} 4344}
4347 4345
4348/** 4346/**
4349 * ipr_change_queue_type - Change the device's queue type
4350 * @dsev: scsi device struct
4351 * @tag_type: type of tags to use
4352 *
4353 * Return value:
4354 * actual queue type set
4355 **/
4356static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4357{
4358 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4359 struct ipr_resource_entry *res;
4360 unsigned long lock_flags = 0;
4361
4362 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4363 res = (struct ipr_resource_entry *)sdev->hostdata;
4364 if (res && ipr_is_gscsi(res))
4365 tag_type = scsi_change_queue_type(sdev, tag_type);
4366 else
4367 tag_type = 0;
4368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4369 return tag_type;
4370}
4371
4372/**
4373 * ipr_show_adapter_handle - Show the adapter's resource handle for this device 4347 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4374 * @dev: device struct 4348 * @dev: device struct
4375 * @attr: device attribute structure 4349 * @attr: device attribute structure
@@ -4739,6 +4713,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
4739 sdev->no_uld_attach = 1; 4713 sdev->no_uld_attach = 1;
4740 } 4714 }
4741 if (ipr_is_vset_device(res)) { 4715 if (ipr_is_vset_device(res)) {
4716 sdev->scsi_level = SCSI_SPC_3;
4742 blk_queue_rq_timeout(sdev->request_queue, 4717 blk_queue_rq_timeout(sdev->request_queue,
4743 IPR_VSET_RW_TIMEOUT); 4718 IPR_VSET_RW_TIMEOUT);
4744 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); 4719 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
@@ -5231,6 +5206,28 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5231 * @scsi_cmd: scsi command struct 5206 * @scsi_cmd: scsi command struct
5232 * 5207 *
5233 * Return value: 5208 * Return value:
5209 * 0 if scan in progress / 1 if scan is complete
5210 **/
5211static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5212{
5213 unsigned long lock_flags;
5214 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5215 int rc = 0;
5216
5217 spin_lock_irqsave(shost->host_lock, lock_flags);
5218 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5219 rc = 1;
5220 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5221 rc = 1;
5222 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5223 return rc;
5224}
5225
5226/**
5227 * ipr_eh_host_reset - Reset the host adapter
5228 * @scsi_cmd: scsi command struct
5229 *
5230 * Return value:
5234 * SUCCESS / FAILED 5231 * SUCCESS / FAILED
5235 **/ 5232 **/
5236static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd) 5233static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
@@ -5779,7 +5776,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5779 5776
5780 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); 5777 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5781 5778
5782 if (!scsi_get_tag_type(scsi_cmd->device)) { 5779 if (!scsi_cmd->device->simple_tags) {
5783 ipr_erp_request_sense(ipr_cmd); 5780 ipr_erp_request_sense(ipr_cmd);
5784 return; 5781 return;
5785 } 5782 }
@@ -6299,10 +6296,10 @@ static struct scsi_host_template driver_template = {
6299 .slave_alloc = ipr_slave_alloc, 6296 .slave_alloc = ipr_slave_alloc,
6300 .slave_configure = ipr_slave_configure, 6297 .slave_configure = ipr_slave_configure,
6301 .slave_destroy = ipr_slave_destroy, 6298 .slave_destroy = ipr_slave_destroy,
6299 .scan_finished = ipr_scan_finished,
6302 .target_alloc = ipr_target_alloc, 6300 .target_alloc = ipr_target_alloc,
6303 .target_destroy = ipr_target_destroy, 6301 .target_destroy = ipr_target_destroy,
6304 .change_queue_depth = ipr_change_queue_depth, 6302 .change_queue_depth = ipr_change_queue_depth,
6305 .change_queue_type = ipr_change_queue_type,
6306 .bios_param = ipr_biosparam, 6303 .bios_param = ipr_biosparam,
6307 .can_queue = IPR_MAX_COMMANDS, 6304 .can_queue = IPR_MAX_COMMANDS,
6308 .this_id = -1, 6305 .this_id = -1,
@@ -6841,7 +6838,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6841 ioa_cfg->doorbell |= IPR_RUNTIME_RESET; 6838 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6842 6839
6843 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { 6840 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6844 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) { 6841 if (res->add_to_ml || res->del_from_ml) {
6845 ipr_trace; 6842 ipr_trace;
6846 break; 6843 break;
6847 } 6844 }
@@ -6870,6 +6867,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6870 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) 6867 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6871 scsi_block_requests(ioa_cfg->host); 6868 scsi_block_requests(ioa_cfg->host);
6872 6869
6870 schedule_work(&ioa_cfg->work_q);
6873 LEAVE; 6871 LEAVE;
6874 return IPR_RC_JOB_RETURN; 6872 return IPR_RC_JOB_RETURN;
6875} 6873}
@@ -7610,6 +7608,19 @@ static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7610 type[4] = '\0'; 7608 type[4] = '\0';
7611 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); 7609 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7612 7610
7611 if (ipr_invalid_adapter(ioa_cfg)) {
7612 dev_err(&ioa_cfg->pdev->dev,
7613 "Adapter not supported in this hardware configuration.\n");
7614
7615 if (!ipr_testmode) {
7616 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7617 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7618 list_add_tail(&ipr_cmd->queue,
7619 &ioa_cfg->hrrq->hrrq_free_q);
7620 return IPR_RC_JOB_RETURN;
7621 }
7622 }
7623
7613 ipr_cmd->job_step = ipr_ioafp_page3_inquiry; 7624 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7614 7625
7615 ipr_ioafp_inquiry(ipr_cmd, 1, 0, 7626 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
@@ -8797,20 +8808,6 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8797 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, 8808 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8798 IPR_SHUTDOWN_NONE); 8809 IPR_SHUTDOWN_NONE);
8799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); 8810 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8800 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8801 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8802
8803 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8804 rc = -EIO;
8805 } else if (ipr_invalid_adapter(ioa_cfg)) {
8806 if (!ipr_testmode)
8807 rc = -EIO;
8808
8809 dev_err(&ioa_cfg->pdev->dev,
8810 "Adapter not supported in this hardware configuration.\n");
8811 }
8812
8813 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8814 8811
8815 LEAVE; 8812 LEAVE;
8816 return rc; 8813 return rc;
@@ -9264,7 +9261,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9264 * ioa_cfg->max_devs_supported))); 9261 * ioa_cfg->max_devs_supported)));
9265 } 9262 }
9266 9263
9267 host->max_channel = IPR_MAX_BUS_TO_SCAN; 9264 host->max_channel = IPR_VSET_BUS;
9268 host->unique_id = host->host_no; 9265 host->unique_id = host->host_no;
9269 host->max_cmd_len = IPR_MAX_CDB_LEN; 9266 host->max_cmd_len = IPR_MAX_CDB_LEN;
9270 host->can_queue = ioa_cfg->max_cmds; 9267 host->can_queue = ioa_cfg->max_cmds;
@@ -9764,25 +9761,6 @@ out_scsi_host_put:
9764} 9761}
9765 9762
9766/** 9763/**
9767 * ipr_scan_vsets - Scans for VSET devices
9768 * @ioa_cfg: ioa config struct
9769 *
9770 * Description: Since the VSET resources do not follow SAM in that we can have
9771 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
9772 *
9773 * Return value:
9774 * none
9775 **/
9776static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
9777{
9778 int target, lun;
9779
9780 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
9781 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
9782 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
9783}
9784
9785/**
9786 * ipr_initiate_ioa_bringdown - Bring down an adapter 9764 * ipr_initiate_ioa_bringdown - Bring down an adapter
9787 * @ioa_cfg: ioa config struct 9765 * @ioa_cfg: ioa config struct
9788 * @shutdown_type: shutdown type 9766 * @shutdown_type: shutdown type
@@ -9937,10 +9915,6 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9937 } 9915 }
9938 9916
9939 scsi_scan_host(ioa_cfg->host); 9917 scsi_scan_host(ioa_cfg->host);
9940 ipr_scan_vsets(ioa_cfg);
9941 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
9942 ioa_cfg->allow_ml_add_del = 1;
9943 ioa_cfg->host->max_channel = IPR_VSET_BUS;
9944 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; 9918 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9945 9919
9946 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { 9920 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 9ebdebd944e7..b4f3eec51bc9 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -157,13 +157,11 @@
157 157
158#define IPR_MAX_NUM_TARGETS_PER_BUS 256 158#define IPR_MAX_NUM_TARGETS_PER_BUS 256
159#define IPR_MAX_NUM_LUNS_PER_TARGET 256 159#define IPR_MAX_NUM_LUNS_PER_TARGET 256
160#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8
161#define IPR_VSET_BUS 0xff 160#define IPR_VSET_BUS 0xff
162#define IPR_IOA_BUS 0xff 161#define IPR_IOA_BUS 0xff
163#define IPR_IOA_TARGET 0xff 162#define IPR_IOA_TARGET 0xff
164#define IPR_IOA_LUN 0xff 163#define IPR_IOA_LUN 0xff
165#define IPR_MAX_NUM_BUSES 16 164#define IPR_MAX_NUM_BUSES 16
166#define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES
167 165
168#define IPR_NUM_RESET_RELOAD_RETRIES 3 166#define IPR_NUM_RESET_RELOAD_RETRIES 3
169 167
@@ -1453,7 +1451,7 @@ struct ipr_ioa_cfg {
1453 u8 in_ioa_bringdown:1; 1451 u8 in_ioa_bringdown:1;
1454 u8 ioa_unit_checked:1; 1452 u8 ioa_unit_checked:1;
1455 u8 dump_taken:1; 1453 u8 dump_taken:1;
1456 u8 allow_ml_add_del:1; 1454 u8 scan_done:1;
1457 u8 needs_hard_reset:1; 1455 u8 needs_hard_reset:1;
1458 u8 dual_raid:1; 1456 u8 dual_raid:1;
1459 u8 needs_warm_reset:1; 1457 u8 needs_warm_reset:1;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 724c6265b667..cd41b63a2f10 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -158,7 +158,6 @@ static struct scsi_host_template isci_sht = {
158 .scan_finished = isci_host_scan_finished, 158 .scan_finished = isci_host_scan_finished,
159 .scan_start = isci_host_start, 159 .scan_start = isci_host_start,
160 .change_queue_depth = sas_change_queue_depth, 160 .change_queue_depth = sas_change_queue_depth,
161 .change_queue_type = sas_change_queue_type,
162 .bios_param = sas_bios_param, 161 .bios_param = sas_bios_param,
163 .can_queue = ISCI_CAN_QUEUE_VAL, 162 .can_queue = ISCI_CAN_QUEUE_VAL,
164 .cmd_per_lun = 1, 163 .cmd_per_lun = 1,
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 72918d227ead..519dac4e341e 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -906,13 +906,6 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
906 return scsi_change_queue_depth(sdev, depth); 906 return scsi_change_queue_depth(sdev, depth);
907} 907}
908 908
909int sas_change_queue_type(struct scsi_device *scsi_dev, int type)
910{
911 if (dev_is_sata(sdev_to_domain_dev(scsi_dev)))
912 return -EINVAL;
913 return scsi_change_queue_type(scsi_dev, type);
914}
915
916int sas_bios_param(struct scsi_device *scsi_dev, 909int sas_bios_param(struct scsi_device *scsi_dev,
917 struct block_device *bdev, 910 struct block_device *bdev,
918 sector_t capacity, int *hsc) 911 sector_t capacity, int *hsc)
@@ -1011,7 +1004,6 @@ EXPORT_SYMBOL_GPL(sas_queuecommand);
1011EXPORT_SYMBOL_GPL(sas_target_alloc); 1004EXPORT_SYMBOL_GPL(sas_target_alloc);
1012EXPORT_SYMBOL_GPL(sas_slave_configure); 1005EXPORT_SYMBOL_GPL(sas_slave_configure);
1013EXPORT_SYMBOL_GPL(sas_change_queue_depth); 1006EXPORT_SYMBOL_GPL(sas_change_queue_depth);
1014EXPORT_SYMBOL_GPL(sas_change_queue_type);
1015EXPORT_SYMBOL_GPL(sas_bios_param); 1007EXPORT_SYMBOL_GPL(sas_bios_param);
1016EXPORT_SYMBOL_GPL(sas_task_abort); 1008EXPORT_SYMBOL_GPL(sas_task_abort);
1017EXPORT_SYMBOL_GPL(sas_phy_reset); 1009EXPORT_SYMBOL_GPL(sas_phy_reset);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index fd85952b621d..4f9222eb2266 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5879,7 +5879,6 @@ struct scsi_host_template lpfc_template = {
5879 .max_sectors = 0xFFFF, 5879 .max_sectors = 0xFFFF,
5880 .vendor_id = LPFC_NL_VENDOR_ID, 5880 .vendor_id = LPFC_NL_VENDOR_ID,
5881 .change_queue_depth = scsi_change_queue_depth, 5881 .change_queue_depth = scsi_change_queue_depth,
5882 .change_queue_type = scsi_change_queue_type,
5883 .use_blk_tags = 1, 5882 .use_blk_tags = 1,
5884 .track_queue_depth = 1, 5883 .track_queue_depth = 1,
5885}; 5884};
@@ -5904,7 +5903,6 @@ struct scsi_host_template lpfc_vport_template = {
5904 .shost_attrs = lpfc_vport_attrs, 5903 .shost_attrs = lpfc_vport_attrs,
5905 .max_sectors = 0xFFFF, 5904 .max_sectors = 0xFFFF,
5906 .change_queue_depth = scsi_change_queue_depth, 5905 .change_queue_depth = scsi_change_queue_depth,
5907 .change_queue_type = scsi_change_queue_type,
5908 .use_blk_tags = 1, 5906 .use_blk_tags = 1,
5909 .track_queue_depth = 1, 5907 .track_queue_depth = 1,
5910}; 5908};
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 8431eb10bbb1..6a1c036a6f3f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -7592,7 +7592,6 @@ static struct scsi_host_template scsih_driver_template = {
7592 .scan_finished = _scsih_scan_finished, 7592 .scan_finished = _scsih_scan_finished,
7593 .scan_start = _scsih_scan_start, 7593 .scan_start = _scsih_scan_start,
7594 .change_queue_depth = _scsih_change_queue_depth, 7594 .change_queue_depth = _scsih_change_queue_depth,
7595 .change_queue_type = scsi_change_queue_type,
7596 .eh_abort_handler = _scsih_abort, 7595 .eh_abort_handler = _scsih_abort,
7597 .eh_device_reset_handler = _scsih_dev_reset, 7596 .eh_device_reset_handler = _scsih_dev_reset,
7598 .eh_target_reset_handler = _scsih_target_reset, 7597 .eh_target_reset_handler = _scsih_target_reset,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 0d1d06488a28..e689bf20a3ea 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1006,12 +1006,9 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
1006 &mpt2sas_phy->remote_identify); 1006 &mpt2sas_phy->remote_identify);
1007 _transport_add_phy_to_an_existing_port(ioc, sas_node, 1007 _transport_add_phy_to_an_existing_port(ioc, sas_node,
1008 mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address); 1008 mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
1009 } else { 1009 } else
1010 memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct 1010 memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
1011 sas_identify)); 1011 sas_identify));
1012 _transport_del_phy_from_an_existing_port(ioc, sas_node,
1013 mpt2sas_phy);
1014 }
1015 1012
1016 if (mpt2sas_phy->phy) 1013 if (mpt2sas_phy->phy)
1017 mpt2sas_phy->phy->negotiated_linkrate = 1014 mpt2sas_phy->phy->negotiated_linkrate =
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a2b60991efd4..94261ee9e72d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -7229,7 +7229,6 @@ static struct scsi_host_template scsih_driver_template = {
7229 .scan_finished = _scsih_scan_finished, 7229 .scan_finished = _scsih_scan_finished,
7230 .scan_start = _scsih_scan_start, 7230 .scan_start = _scsih_scan_start,
7231 .change_queue_depth = _scsih_change_queue_depth, 7231 .change_queue_depth = _scsih_change_queue_depth,
7232 .change_queue_type = scsi_change_queue_type,
7233 .eh_abort_handler = _scsih_abort, 7232 .eh_abort_handler = _scsih_abort,
7234 .eh_device_reset_handler = _scsih_dev_reset, 7233 .eh_device_reset_handler = _scsih_dev_reset,
7235 .eh_target_reset_handler = _scsih_target_reset, 7234 .eh_target_reset_handler = _scsih_target_reset,
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index d4bafaaebea9..3637ae6c0171 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1003,12 +1003,9 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
1003 &mpt3sas_phy->remote_identify); 1003 &mpt3sas_phy->remote_identify);
1004 _transport_add_phy_to_an_existing_port(ioc, sas_node, 1004 _transport_add_phy_to_an_existing_port(ioc, sas_node,
1005 mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address); 1005 mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
1006 } else { 1006 } else
1007 memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct 1007 memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
1008 sas_identify)); 1008 sas_identify));
1009 _transport_del_phy_from_an_existing_port(ioc, sas_node,
1010 mpt3sas_phy);
1011 }
1012 1009
1013 if (mpt3sas_phy->phy) 1010 if (mpt3sas_phy->phy)
1014 mpt3sas_phy->phy->negotiated_linkrate = 1011 mpt3sas_phy->phy->negotiated_linkrate =
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index f15df3de6790..53030b0e8015 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -54,7 +54,6 @@ static struct scsi_host_template mvs_sht = {
54 .scan_finished = mvs_scan_finished, 54 .scan_finished = mvs_scan_finished,
55 .scan_start = mvs_scan_start, 55 .scan_start = mvs_scan_start,
56 .change_queue_depth = sas_change_queue_depth, 56 .change_queue_depth = sas_change_queue_depth,
57 .change_queue_type = sas_change_queue_type,
58 .bios_param = sas_bios_param, 57 .bios_param = sas_bios_param,
59 .can_queue = 1, 58 .can_queue = 1,
60 .cmd_per_lun = 1, 59 .cmd_per_lun = 1,
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 329aba0083ab..65555916d3b8 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -76,7 +76,6 @@ static struct scsi_host_template pm8001_sht = {
76 .scan_finished = pm8001_scan_finished, 76 .scan_finished = pm8001_scan_finished,
77 .scan_start = pm8001_scan_start, 77 .scan_start = pm8001_scan_start,
78 .change_queue_depth = sas_change_queue_depth, 78 .change_queue_depth = sas_change_queue_depth,
79 .change_queue_type = sas_change_queue_type,
80 .bios_param = sas_bios_param, 79 .bios_param = sas_bios_param,
81 .can_queue = 1, 80 .can_queue = 1,
82 .cmd_per_lun = 1, 81 .cmd_per_lun = 1,
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index b1b1f66b1ab7..8c27b6a77ec4 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4251,7 +4251,6 @@ static struct scsi_host_template pmcraid_host_template = {
4251 .slave_configure = pmcraid_slave_configure, 4251 .slave_configure = pmcraid_slave_configure,
4252 .slave_destroy = pmcraid_slave_destroy, 4252 .slave_destroy = pmcraid_slave_destroy,
4253 .change_queue_depth = pmcraid_change_queue_depth, 4253 .change_queue_depth = pmcraid_change_queue_depth,
4254 .change_queue_type = scsi_change_queue_type,
4255 .can_queue = PMCRAID_MAX_IO_CMD, 4254 .can_queue = PMCRAID_MAX_IO_CMD,
4256 .this_id = -1, 4255 .this_id = -1,
4257 .sg_tablesize = PMCRAID_MAX_IOADLS, 4256 .sg_tablesize = PMCRAID_MAX_IOADLS,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a4dde7e80dbd..e59f25bff7ab 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3237,8 +3237,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
3237 struct fc_rport *rport; 3237 struct fc_rport *rport;
3238 unsigned long flags; 3238 unsigned long flags;
3239 3239
3240 qla2x00_rport_del(fcport);
3241
3242 rport_ids.node_name = wwn_to_u64(fcport->node_name); 3240 rport_ids.node_name = wwn_to_u64(fcport->node_name);
3243 rport_ids.port_name = wwn_to_u64(fcport->port_name); 3241 rport_ids.port_name = wwn_to_u64(fcport->port_name);
3244 rport_ids.port_id = fcport->d_id.b.domain << 16 | 3242 rport_ids.port_id = fcport->d_id.b.domain << 16 |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 6b4d9235368a..12ca291c1380 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -258,7 +258,6 @@ struct scsi_host_template qla2xxx_driver_template = {
258 .scan_finished = qla2xxx_scan_finished, 258 .scan_finished = qla2xxx_scan_finished,
259 .scan_start = qla2xxx_scan_start, 259 .scan_start = qla2xxx_scan_start,
260 .change_queue_depth = scsi_change_queue_depth, 260 .change_queue_depth = scsi_change_queue_depth,
261 .change_queue_type = scsi_change_queue_type,
262 .this_id = -1, 261 .this_id = -1,
263 .cmd_per_lun = 3, 262 .cmd_per_lun = 3,
264 .use_clustering = ENABLE_CLUSTERING, 263 .use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index a902fa1db7af..57418258c101 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3218,25 +3218,25 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
3218 3218
3219 switch (task_codes) { 3219 switch (task_codes) {
3220 case ATIO_SIMPLE_QUEUE: 3220 case ATIO_SIMPLE_QUEUE:
3221 fcp_task_attr = MSG_SIMPLE_TAG; 3221 fcp_task_attr = TCM_SIMPLE_TAG;
3222 break; 3222 break;
3223 case ATIO_HEAD_OF_QUEUE: 3223 case ATIO_HEAD_OF_QUEUE:
3224 fcp_task_attr = MSG_HEAD_TAG; 3224 fcp_task_attr = TCM_HEAD_TAG;
3225 break; 3225 break;
3226 case ATIO_ORDERED_QUEUE: 3226 case ATIO_ORDERED_QUEUE:
3227 fcp_task_attr = MSG_ORDERED_TAG; 3227 fcp_task_attr = TCM_ORDERED_TAG;
3228 break; 3228 break;
3229 case ATIO_ACA_QUEUE: 3229 case ATIO_ACA_QUEUE:
3230 fcp_task_attr = MSG_ACA_TAG; 3230 fcp_task_attr = TCM_ACA_TAG;
3231 break; 3231 break;
3232 case ATIO_UNTAGGED: 3232 case ATIO_UNTAGGED:
3233 fcp_task_attr = MSG_SIMPLE_TAG; 3233 fcp_task_attr = TCM_SIMPLE_TAG;
3234 break; 3234 break;
3235 default: 3235 default:
3236 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, 3236 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
3237 "qla_target: unknown task code %x, use ORDERED instead\n", 3237 "qla_target: unknown task code %x, use ORDERED instead\n",
3238 task_codes); 3238 task_codes);
3239 fcp_task_attr = MSG_ORDERED_TAG; 3239 fcp_task_attr = TCM_ORDERED_TAG;
3240 break; 3240 break;
3241 } 3241 }
3242 3242
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1ad0c36375b8..e02885451425 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -739,34 +739,12 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth)
739 739
740 if (sdev->last_queue_full_count <= 10) 740 if (sdev->last_queue_full_count <= 10)
741 return 0; 741 return 0;
742 if (sdev->last_queue_full_depth < 8) {
743 /* Drop back to untagged */
744 scsi_set_tag_type(sdev, 0);
745 scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
746 return -1;
747 }
748 742
749 return scsi_change_queue_depth(sdev, depth); 743 return scsi_change_queue_depth(sdev, depth);
750} 744}
751EXPORT_SYMBOL(scsi_track_queue_full); 745EXPORT_SYMBOL(scsi_track_queue_full);
752 746
753/** 747/**
754 * scsi_change_queue_type() - Change a device's queue type
755 * @sdev: The SCSI device whose queue depth is to change
756 * @tag_type: Identifier for queue type
757 */
758int scsi_change_queue_type(struct scsi_device *sdev, int tag_type)
759{
760 if (!sdev->tagged_supported)
761 return 0;
762
763 scsi_set_tag_type(sdev, tag_type);
764 return tag_type;
765
766}
767EXPORT_SYMBOL(scsi_change_queue_type);
768
769/**
770 * scsi_vpd_inquiry - Request a device provide us with a VPD page 748 * scsi_vpd_inquiry - Request a device provide us with a VPD page
771 * @sdev: The device to ask 749 * @sdev: The device to ask
772 * @buffer: Where to put the result 750 * @buffer: Where to put the result
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index aa4b6b80aade..7b8b51bc29b4 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -128,7 +128,6 @@ static const char *scsi_debug_version_date = "20141022";
128#define DEF_REMOVABLE false 128#define DEF_REMOVABLE false
129#define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */ 129#define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
130#define DEF_SECTOR_SIZE 512 130#define DEF_SECTOR_SIZE 512
131#define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
132#define DEF_UNMAP_ALIGNMENT 0 131#define DEF_UNMAP_ALIGNMENT 0
133#define DEF_UNMAP_GRANULARITY 1 132#define DEF_UNMAP_GRANULARITY 1
134#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF 133#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
@@ -817,6 +816,7 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
817 UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ); 816 UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
818 if (debug) 817 if (debug)
819 cp = "capacity data changed"; 818 cp = "capacity data changed";
819 break;
820 default: 820 default:
821 pr_warn("%s: unexpected unit attention code=%d\n", 821 pr_warn("%s: unexpected unit attention code=%d\n",
822 __func__, k); 822 __func__, k);
@@ -3045,18 +3045,12 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3045 u8 num; 3045 u8 num;
3046 unsigned long iflags; 3046 unsigned long iflags;
3047 int ret; 3047 int ret;
3048 int retval = 0;
3048 3049
3049 lba = get_unaligned_be32(cmd + 2); 3050 lba = get_unaligned_be64(cmd + 2);
3050 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */ 3051 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3051 if (0 == num) 3052 if (0 == num)
3052 return 0; /* degenerate case, not an error */ 3053 return 0; /* degenerate case, not an error */
3053 dnum = 2 * num;
3054 arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3055 if (NULL == arr) {
3056 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3057 INSUFF_RES_ASCQ);
3058 return check_condition_result;
3059 }
3060 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION && 3054 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3061 (cmd[1] & 0xe0)) { 3055 (cmd[1] & 0xe0)) {
3062 mk_sense_invalid_opcode(scp); 3056 mk_sense_invalid_opcode(scp);
@@ -3079,6 +3073,13 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3079 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); 3073 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3080 return check_condition_result; 3074 return check_condition_result;
3081 } 3075 }
3076 dnum = 2 * num;
3077 arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3078 if (NULL == arr) {
3079 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3080 INSUFF_RES_ASCQ);
3081 return check_condition_result;
3082 }
3082 3083
3083 write_lock_irqsave(&atomic_rw, iflags); 3084 write_lock_irqsave(&atomic_rw, iflags);
3084 3085
@@ -3089,24 +3090,24 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3089 ret = do_device_access(scp, 0, dnum, true); 3090 ret = do_device_access(scp, 0, dnum, true);
3090 fake_storep = fake_storep_hold; 3091 fake_storep = fake_storep_hold;
3091 if (ret == -1) { 3092 if (ret == -1) {
3092 write_unlock_irqrestore(&atomic_rw, iflags); 3093 retval = DID_ERROR << 16;
3093 kfree(arr); 3094 goto cleanup;
3094 return DID_ERROR << 16;
3095 } else if ((ret < (dnum * lb_size)) && 3095 } else if ((ret < (dnum * lb_size)) &&
3096 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 3096 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3097 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb " 3097 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3098 "indicated=%u, IO sent=%d bytes\n", my_name, 3098 "indicated=%u, IO sent=%d bytes\n", my_name,
3099 dnum * lb_size, ret); 3099 dnum * lb_size, ret);
3100 if (!comp_write_worker(lba, num, arr)) { 3100 if (!comp_write_worker(lba, num, arr)) {
3101 write_unlock_irqrestore(&atomic_rw, iflags);
3102 kfree(arr);
3103 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0); 3101 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3104 return check_condition_result; 3102 retval = check_condition_result;
3103 goto cleanup;
3105 } 3104 }
3106 if (scsi_debug_lbp()) 3105 if (scsi_debug_lbp())
3107 map_region(lba, num); 3106 map_region(lba, num);
3107cleanup:
3108 write_unlock_irqrestore(&atomic_rw, iflags); 3108 write_unlock_irqrestore(&atomic_rw, iflags);
3109 return 0; 3109 kfree(arr);
3110 return retval;
3110} 3111}
3111 3112
3112struct unmap_block_desc { 3113struct unmap_block_desc {
@@ -4438,6 +4439,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4438 struct sdebug_host_info *sdhp; 4439 struct sdebug_host_info *sdhp;
4439 struct sdebug_dev_info *dp; 4440 struct sdebug_dev_info *dp;
4440 4441
4442 spin_lock(&sdebug_host_list_lock);
4441 list_for_each_entry(sdhp, &sdebug_host_list, 4443 list_for_each_entry(sdhp, &sdebug_host_list,
4442 host_list) { 4444 host_list) {
4443 list_for_each_entry(dp, &sdhp->dev_info_list, 4445 list_for_each_entry(dp, &sdhp->dev_info_list,
@@ -4446,6 +4448,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4446 dp->uas_bm); 4448 dp->uas_bm);
4447 } 4449 }
4448 } 4450 }
4451 spin_unlock(&sdebug_host_list_lock);
4449 } 4452 }
4450 return count; 4453 return count;
4451 } 4454 }
@@ -4988,32 +4991,6 @@ sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
4988} 4991}
4989 4992
4990static int 4993static int
4991sdebug_change_qtype(struct scsi_device *sdev, int qtype)
4992{
4993 qtype = scsi_change_queue_type(sdev, qtype);
4994 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
4995 const char *cp;
4996
4997 switch (qtype) {
4998 case 0:
4999 cp = "untagged";
5000 break;
5001 case MSG_SIMPLE_TAG:
5002 cp = "simple tags";
5003 break;
5004 case MSG_ORDERED_TAG:
5005 cp = "ordered tags";
5006 break;
5007 default:
5008 cp = "unknown";
5009 break;
5010 }
5011 sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
5012 }
5013 return qtype;
5014}
5015
5016static int
5017check_inject(struct scsi_cmnd *scp) 4994check_inject(struct scsi_cmnd *scp)
5018{ 4995{
5019 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp); 4996 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
@@ -5212,7 +5189,6 @@ static struct scsi_host_template sdebug_driver_template = {
5212 .ioctl = scsi_debug_ioctl, 5189 .ioctl = scsi_debug_ioctl,
5213 .queuecommand = sdebug_queuecommand_lock_or_not, 5190 .queuecommand = sdebug_queuecommand_lock_or_not,
5214 .change_queue_depth = sdebug_change_qdepth, 5191 .change_queue_depth = sdebug_change_qdepth,
5215 .change_queue_type = sdebug_change_qtype,
5216 .eh_abort_handler = scsi_debug_abort, 5192 .eh_abort_handler = scsi_debug_abort,
5217 .eh_device_reset_handler = scsi_debug_device_reset, 5193 .eh_device_reset_handler = scsi_debug_device_reset,
5218 .eh_target_reset_handler = scsi_debug_target_reset, 5194 .eh_target_reset_handler = scsi_debug_target_reset,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c1d04d4d3c6c..262ab837a704 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -211,6 +211,7 @@ static struct {
211 {"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN}, 211 {"Medion", "Flash XL MMC/SD", "2.6D", BLIST_FORCELUN},
212 {"MegaRAID", "LD", NULL, BLIST_FORCELUN}, 212 {"MegaRAID", "LD", NULL, BLIST_FORCELUN},
213 {"MICROP", "4110", NULL, BLIST_NOTQ}, 213 {"MICROP", "4110", NULL, BLIST_NOTQ},
214 {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC},
214 {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2}, 215 {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
215 {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN}, 216 {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
216 {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 217 {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 43318d556cbc..9ea95dd3e260 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1918,7 +1918,9 @@ static int scsi_mq_prep_fn(struct request *req)
1918 1918
1919 if (scsi_host_get_prot(shost)) { 1919 if (scsi_host_get_prot(shost)) {
1920 cmd->prot_sdb = (void *)sg + 1920 cmd->prot_sdb = (void *)sg +
1921 shost->sg_tablesize * sizeof(struct scatterlist); 1921 min_t(unsigned int,
1922 shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
1923 sizeof(struct scatterlist);
1922 memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); 1924 memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
1923 1925
1924 cmd->prot_sdb->table.sgl = 1926 cmd->prot_sdb->table.sgl =
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 1cb64a8e18c9..1ac38e73df7e 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -738,30 +738,12 @@ store_queue_type_field(struct device *dev, struct device_attribute *attr,
738 const char *buf, size_t count) 738 const char *buf, size_t count)
739{ 739{
740 struct scsi_device *sdev = to_scsi_device(dev); 740 struct scsi_device *sdev = to_scsi_device(dev);
741 struct scsi_host_template *sht = sdev->host->hostt;
742 int tag_type = 0, retval;
743 int prev_tag_type = scsi_get_tag_type(sdev);
744
745 if (!sdev->tagged_supported || !sht->change_queue_type)
746 return -EINVAL;
747 741
748 /* 742 if (!sdev->tagged_supported)
749 * We're never issueing order tags these days, but allow the value
750 * for backwards compatibility.
751 */
752 if (strncmp(buf, "ordered", 7) == 0 ||
753 strncmp(buf, "simple", 6) == 0)
754 tag_type = MSG_SIMPLE_TAG;
755 else if (strncmp(buf, "none", 4) != 0)
756 return -EINVAL; 743 return -EINVAL;
757 744
758 if (tag_type == prev_tag_type) 745 sdev_printk(KERN_INFO, sdev,
759 return count; 746 "ignoring write to deprecated queue_type attribute");
760
761 retval = sht->change_queue_type(sdev, tag_type);
762 if (retval < 0)
763 return retval;
764
765 return count; 747 return count;
766} 748}
767 749
@@ -938,10 +920,6 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
938 !sdev->host->hostt->change_queue_depth) 920 !sdev->host->hostt->change_queue_depth)
939 return 0; 921 return 0;
940 922
941 if (attr == &dev_attr_queue_type.attr &&
942 !sdev->host->hostt->change_queue_type)
943 return S_IRUGO;
944
945 return attr->mode; 923 return attr->mode;
946} 924}
947 925
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index fa2aece76cc2..31bbb0da3397 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1221,7 +1221,7 @@ EXPORT_SYMBOL_GPL(spi_populate_ppr_msg);
1221int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd) 1221int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd)
1222{ 1222{
1223 if (cmd->flags & SCMD_TAGGED) { 1223 if (cmd->flags & SCMD_TAGGED) {
1224 *msg++ = MSG_SIMPLE_TAG; 1224 *msg++ = SIMPLE_QUEUE_TAG;
1225 *msg++ = cmd->request->tag; 1225 *msg++ = cmd->request->tag;
1226 return 2; 1226 return 2;
1227 } 1227 }
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index e3ba251fb6e7..4cff0ddc2c25 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1688,13 +1688,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
1688 if (ret == -EAGAIN) { 1688 if (ret == -EAGAIN) {
1689 /* no more space */ 1689 /* no more space */
1690 1690
1691 if (cmd_request->bounce_sgl_count) { 1691 if (cmd_request->bounce_sgl_count)
1692 destroy_bounce_buffer(cmd_request->bounce_sgl, 1692 destroy_bounce_buffer(cmd_request->bounce_sgl,
1693 cmd_request->bounce_sgl_count); 1693 cmd_request->bounce_sgl_count);
1694 1694
1695 ret = SCSI_MLQUEUE_DEVICE_BUSY; 1695 ret = SCSI_MLQUEUE_DEVICE_BUSY;
1696 goto queue_error; 1696 goto queue_error;
1697 }
1698 } 1697 }
1699 1698
1700 return 0; 1699 return 0;
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 43781c9fe521..b410499cddca 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -663,7 +663,7 @@ static int img_spfi_remove(struct platform_device *pdev)
663 return 0; 663 return 0;
664} 664}
665 665
666#ifdef CONFIG_PM_RUNTIME 666#ifdef CONFIG_PM
667static int img_spfi_runtime_suspend(struct device *dev) 667static int img_spfi_runtime_suspend(struct device *dev)
668{ 668{
669 struct spi_master *master = dev_get_drvdata(dev); 669 struct spi_master *master = dev_get_drvdata(dev);
@@ -692,7 +692,7 @@ static int img_spfi_runtime_resume(struct device *dev)
692 692
693 return 0; 693 return 0;
694} 694}
695#endif /* CONFIG_PM_RUNTIME */ 695#endif /* CONFIG_PM */
696 696
697#ifdef CONFIG_PM_SLEEP 697#ifdef CONFIG_PM_SLEEP
698static int img_spfi_suspend(struct device *dev) 698static int img_spfi_suspend(struct device *dev)
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 0e48f8c2037d..1bbac0378bf7 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -413,7 +413,7 @@ static int meson_spifc_resume(struct device *dev)
413} 413}
414#endif /* CONFIG_PM_SLEEP */ 414#endif /* CONFIG_PM_SLEEP */
415 415
416#ifdef CONFIG_PM_RUNTIME 416#ifdef CONFIG_PM
417static int meson_spifc_runtime_suspend(struct device *dev) 417static int meson_spifc_runtime_suspend(struct device *dev)
418{ 418{
419 struct spi_master *master = dev_get_drvdata(dev); 419 struct spi_master *master = dev_get_drvdata(dev);
@@ -431,7 +431,7 @@ static int meson_spifc_runtime_resume(struct device *dev)
431 431
432 return clk_prepare_enable(spifc->clk); 432 return clk_prepare_enable(spifc->clk);
433} 433}
434#endif /* CONFIG_PM_RUNTIME */ 434#endif /* CONFIG_PM */
435 435
436static const struct dev_pm_ops meson_spifc_pm_ops = { 436static const struct dev_pm_ops meson_spifc_pm_ops = {
437 SET_SYSTEM_SLEEP_PM_OPS(meson_spifc_suspend, meson_spifc_resume) 437 SET_SYSTEM_SLEEP_PM_OPS(meson_spifc_suspend, meson_spifc_resume)
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index 8156b4c0f568..3925db160650 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -42,28 +42,6 @@
42 42
43#include "lustre_patchless_compat.h" 43#include "lustre_patchless_compat.h"
44 44
45# define LOCK_FS_STRUCT(fs) spin_lock(&(fs)->lock)
46# define UNLOCK_FS_STRUCT(fs) spin_unlock(&(fs)->lock)
47
48static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
49 struct dentry *dentry)
50{
51 struct path path;
52 struct path old_pwd;
53
54 path.mnt = mnt;
55 path.dentry = dentry;
56 LOCK_FS_STRUCT(fs);
57 old_pwd = fs->pwd;
58 path_get(&path);
59 fs->pwd = path;
60 UNLOCK_FS_STRUCT(fs);
61
62 if (old_pwd.dentry)
63 path_put(&old_pwd);
64}
65
66
67/* 45/*
68 * set ATTR_BLOCKS to a high value to avoid any risk of collision with other 46 * set ATTR_BLOCKS to a high value to avoid any risk of collision with other
69 * ATTR_* attributes (see bug 13828) 47 * ATTR_* attributes (see bug 13828)
@@ -110,8 +88,6 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
110#define cfs_bio_io_error(a, b) bio_io_error((a)) 88#define cfs_bio_io_error(a, b) bio_io_error((a))
111#define cfs_bio_endio(a, b, c) bio_endio((a), (c)) 89#define cfs_bio_endio(a, b, c) bio_endio((a), (c))
112 90
113#define cfs_fs_pwd(fs) ((fs)->pwd.dentry)
114#define cfs_fs_mnt(fs) ((fs)->pwd.mnt)
115#define cfs_path_put(nd) path_put(&(nd)->path) 91#define cfs_path_put(nd) path_put(&(nd)->path)
116 92
117 93
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 407718a0026f..1ac7a702ce26 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -661,7 +661,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
661 int mode; 661 int mode;
662 int err; 662 int err;
663 663
664 mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR; 664 mode = (0755 & ~current_umask()) | S_IFDIR;
665 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename, 665 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
666 strlen(filename), mode, LUSTRE_OPC_MKDIR, 666 strlen(filename), mode, LUSTRE_OPC_MKDIR,
667 lump); 667 lump);
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 6e423aa6a6e4..a3367bfb1456 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -2372,21 +2372,6 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
2372 return buf; 2372 return buf;
2373} 2373}
2374 2374
2375static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
2376{
2377 char *path = NULL;
2378
2379 struct path p;
2380
2381 p.dentry = dentry;
2382 p.mnt = current->fs->root.mnt;
2383 path_get(&p);
2384 path = d_path(&p, buf, bufsize);
2385 path_put(&p);
2386
2387 return path;
2388}
2389
2390void ll_dirty_page_discard_warn(struct page *page, int ioret) 2375void ll_dirty_page_discard_warn(struct page *page, int ioret)
2391{ 2376{
2392 char *buf, *path = NULL; 2377 char *buf, *path = NULL;
@@ -2398,7 +2383,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
2398 if (buf != NULL) { 2383 if (buf != NULL) {
2399 dentry = d_find_alias(page->mapping->host); 2384 dentry = d_find_alias(page->mapping->host);
2400 if (dentry != NULL) 2385 if (dentry != NULL)
2401 path = ll_d_path(dentry, buf, PAGE_SIZE); 2386 path = dentry_path_raw(dentry, buf, PAGE_SIZE);
2402 } 2387 }
2403 2388
2404 CDEBUG(D_WARNING, 2389 CDEBUG(D_WARNING,
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 73e58d22e325..55f6774f706f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -609,6 +609,7 @@ static int __init iscsi_target_init_module(void)
609 609
610 return ret; 610 return ret;
611r2t_out: 611r2t_out:
612 iscsit_unregister_transport(&iscsi_target_transport);
612 kmem_cache_destroy(lio_r2t_cache); 613 kmem_cache_destroy(lio_r2t_cache);
613ooo_out: 614ooo_out:
614 kmem_cache_destroy(lio_ooo_cache); 615 kmem_cache_destroy(lio_ooo_cache);
@@ -943,17 +944,17 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
943 */ 944 */
944 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) || 945 if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
945 (iscsi_task_attr == ISCSI_ATTR_SIMPLE)) 946 (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
946 sam_task_attr = MSG_SIMPLE_TAG; 947 sam_task_attr = TCM_SIMPLE_TAG;
947 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED) 948 else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
948 sam_task_attr = MSG_ORDERED_TAG; 949 sam_task_attr = TCM_ORDERED_TAG;
949 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE) 950 else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
950 sam_task_attr = MSG_HEAD_TAG; 951 sam_task_attr = TCM_HEAD_TAG;
951 else if (iscsi_task_attr == ISCSI_ATTR_ACA) 952 else if (iscsi_task_attr == ISCSI_ATTR_ACA)
952 sam_task_attr = MSG_ACA_TAG; 953 sam_task_attr = TCM_ACA_TAG;
953 else { 954 else {
954 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using" 955 pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
955 " MSG_SIMPLE_TAG\n", iscsi_task_attr); 956 " TCM_SIMPLE_TAG\n", iscsi_task_attr);
956 sam_task_attr = MSG_SIMPLE_TAG; 957 sam_task_attr = TCM_SIMPLE_TAG;
957 } 958 }
958 959
959 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD; 960 cmd->iscsi_opcode = ISCSI_OP_SCSI_CMD;
@@ -1811,7 +1812,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1811 transport_init_se_cmd(&cmd->se_cmd, 1812 transport_init_se_cmd(&cmd->se_cmd,
1812 &lio_target_fabric_configfs->tf_ops, 1813 &lio_target_fabric_configfs->tf_ops,
1813 conn->sess->se_sess, 0, DMA_NONE, 1814 conn->sess->se_sess, 0, DMA_NONE,
1814 MSG_SIMPLE_TAG, cmd->sense_buffer + 2); 1815 TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
1815 1816
1816 target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true); 1817 target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
1817 sess_ref = true; 1818 sess_ref = true;
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 302eb3b78715..09a522bae222 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -790,7 +790,6 @@ struct iscsi_np {
790 void *np_context; 790 void *np_context;
791 struct iscsit_transport *np_transport; 791 struct iscsit_transport *np_transport;
792 struct list_head np_list; 792 struct list_head np_list;
793 struct iscsi_tpg_np *tpg_np;
794} ____cacheline_aligned; 793} ____cacheline_aligned;
795 794
796struct iscsi_tpg_np { 795struct iscsi_tpg_np {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 480f2e0ecc11..713c0c1877ab 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -281,7 +281,6 @@ static int iscsi_login_zero_tsih_s1(
281{ 281{
282 struct iscsi_session *sess = NULL; 282 struct iscsi_session *sess = NULL;
283 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; 283 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
284 enum target_prot_op sup_pro_ops;
285 int ret; 284 int ret;
286 285
287 sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL); 286 sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -343,9 +342,8 @@ static int iscsi_login_zero_tsih_s1(
343 kfree(sess); 342 kfree(sess);
344 return -ENOMEM; 343 return -ENOMEM;
345 } 344 }
346 sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
347 345
348 sess->se_sess = transport_init_session(sup_pro_ops); 346 sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
349 if (IS_ERR(sess->se_sess)) { 347 if (IS_ERR(sess->se_sess)) {
350 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 348 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
351 ISCSI_LOGIN_STATUS_NO_RESOURCES); 349 ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -1161,6 +1159,7 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
1161 } 1159 }
1162 kfree(conn->sess->sess_ops); 1160 kfree(conn->sess->sess_ops);
1163 kfree(conn->sess); 1161 kfree(conn->sess);
1162 conn->sess = NULL;
1164 1163
1165old_sess_out: 1164old_sess_out:
1166 iscsi_stop_login_thread_timer(np); 1165 iscsi_stop_login_thread_timer(np);
@@ -1204,6 +1203,9 @@ old_sess_out:
1204 conn->sock = NULL; 1203 conn->sock = NULL;
1205 } 1204 }
1206 1205
1206 if (conn->conn_transport->iscsit_wait_conn)
1207 conn->conn_transport->iscsit_wait_conn(conn);
1208
1207 if (conn->conn_transport->iscsit_free_conn) 1209 if (conn->conn_transport->iscsit_free_conn)
1208 conn->conn_transport->iscsit_free_conn(conn); 1210 conn->conn_transport->iscsit_free_conn(conn);
1209 1211
@@ -1364,6 +1366,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1364 } 1366 }
1365 login->zero_tsih = zero_tsih; 1367 login->zero_tsih = zero_tsih;
1366 1368
1369 conn->sess->se_sess->sup_prot_ops =
1370 conn->conn_transport->iscsit_get_sup_prot_ops(conn);
1371
1367 tpg = conn->tpg; 1372 tpg = conn->tpg;
1368 if (!tpg) { 1373 if (!tpg) {
1369 pr_err("Unable to locate struct iscsi_conn->tpg\n"); 1374 pr_err("Unable to locate struct iscsi_conn->tpg\n");
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index c3cb5c15efda..9053a3c0c6e5 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -501,7 +501,6 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
501 init_completion(&tpg_np->tpg_np_comp); 501 init_completion(&tpg_np->tpg_np_comp);
502 kref_init(&tpg_np->tpg_np_kref); 502 kref_init(&tpg_np->tpg_np_kref);
503 tpg_np->tpg_np = np; 503 tpg_np->tpg_np = np;
504 np->tpg_np = tpg_np;
505 tpg_np->tpg = tpg; 504 tpg_np->tpg = tpg;
506 505
507 spin_lock(&tpg->tpg_np_lock); 506 spin_lock(&tpg->tpg_np_lock);
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
index 882728fac30c..08217d62fb0d 100644
--- a/drivers/target/iscsi/iscsi_target_transport.c
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -26,8 +26,7 @@ struct iscsit_transport *iscsit_get_transport(int type)
26 26
27void iscsit_put_transport(struct iscsit_transport *t) 27void iscsit_put_transport(struct iscsit_transport *t)
28{ 28{
29 if (t->owner) 29 module_put(t->owner);
30 module_put(t->owner);
31} 30}
32 31
33int iscsit_register_transport(struct iscsit_transport *t) 32int iscsit_register_transport(struct iscsit_transport *t)
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 7c6a95bcb35e..bcd88ec99793 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1356,15 +1356,15 @@ static int iscsit_do_tx_data(
1356 struct iscsi_conn *conn, 1356 struct iscsi_conn *conn,
1357 struct iscsi_data_count *count) 1357 struct iscsi_data_count *count)
1358{ 1358{
1359 int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; 1359 int ret, iov_len;
1360 struct kvec *iov_p; 1360 struct kvec *iov_p;
1361 struct msghdr msg; 1361 struct msghdr msg;
1362 1362
1363 if (!conn || !conn->sock || !conn->conn_ops) 1363 if (!conn || !conn->sock || !conn->conn_ops)
1364 return -1; 1364 return -1;
1365 1365
1366 if (data <= 0) { 1366 if (count->data_length <= 0) {
1367 pr_err("Data length is: %d\n", data); 1367 pr_err("Data length is: %d\n", count->data_length);
1368 return -1; 1368 return -1;
1369 } 1369 }
1370 1370
@@ -1373,20 +1373,16 @@ static int iscsit_do_tx_data(
1373 iov_p = count->iov; 1373 iov_p = count->iov;
1374 iov_len = count->iov_count; 1374 iov_len = count->iov_count;
1375 1375
1376 while (total_tx < data) { 1376 ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
1377 tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, 1377 count->data_length);
1378 (data - total_tx)); 1378 if (ret != count->data_length) {
1379 if (tx_loop <= 0) { 1379 pr_err("Unexpected ret: %d send data %d\n",
1380 pr_debug("tx_loop: %d total_tx %d\n", 1380 ret, count->data_length);
1381 tx_loop, total_tx); 1381 return -EPIPE;
1382 return tx_loop;
1383 }
1384 total_tx += tx_loop;
1385 pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
1386 tx_loop, total_tx, data);
1387 } 1382 }
1383 pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
1388 1384
1389 return total_tx; 1385 return ret;
1390} 1386}
1391 1387
1392int rx_data( 1388int rx_data(
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 4d1b7224a7f2..6b3c32954689 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -138,7 +138,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
138 set_host_byte(sc, DID_TRANSPORT_DISRUPTED); 138 set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
139 goto out_done; 139 goto out_done;
140 } 140 }
141 tl_nexus = tl_hba->tl_nexus; 141 tl_nexus = tl_tpg->tl_nexus;
142 if (!tl_nexus) { 142 if (!tl_nexus) {
143 scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus" 143 scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
144 " does not exist\n"); 144 " does not exist\n");
@@ -168,7 +168,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
168 168
169 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, 169 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
170 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, 170 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
171 transfer_length, MSG_SIMPLE_TAG, 171 transfer_length, TCM_SIMPLE_TAG,
172 sc->sc_data_direction, 0, 172 sc->sc_data_direction, 0,
173 scsi_sglist(sc), scsi_sg_count(sc), 173 scsi_sglist(sc), scsi_sg_count(sc),
174 sgl_bidi, sgl_bidi_count, 174 sgl_bidi, sgl_bidi_count,
@@ -218,16 +218,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
218 * to struct scsi_device 218 * to struct scsi_device
219 */ 219 */
220static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, 220static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
221 struct tcm_loop_nexus *tl_nexus,
222 int lun, int task, enum tcm_tmreq_table tmr) 221 int lun, int task, enum tcm_tmreq_table tmr)
223{ 222{
224 struct se_cmd *se_cmd = NULL; 223 struct se_cmd *se_cmd = NULL;
225 struct se_session *se_sess; 224 struct se_session *se_sess;
226 struct se_portal_group *se_tpg; 225 struct se_portal_group *se_tpg;
226 struct tcm_loop_nexus *tl_nexus;
227 struct tcm_loop_cmd *tl_cmd = NULL; 227 struct tcm_loop_cmd *tl_cmd = NULL;
228 struct tcm_loop_tmr *tl_tmr = NULL; 228 struct tcm_loop_tmr *tl_tmr = NULL;
229 int ret = TMR_FUNCTION_FAILED, rc; 229 int ret = TMR_FUNCTION_FAILED, rc;
230 230
231 /*
232 * Locate the tl_nexus and se_sess pointers
233 */
234 tl_nexus = tl_tpg->tl_nexus;
235 if (!tl_nexus) {
236 pr_err("Unable to perform device reset without"
237 " active I_T Nexus\n");
238 return ret;
239 }
240
231 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); 241 tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
232 if (!tl_cmd) { 242 if (!tl_cmd) {
233 pr_err("Unable to allocate memory for tl_cmd\n"); 243 pr_err("Unable to allocate memory for tl_cmd\n");
@@ -243,12 +253,12 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
243 253
244 se_cmd = &tl_cmd->tl_se_cmd; 254 se_cmd = &tl_cmd->tl_se_cmd;
245 se_tpg = &tl_tpg->tl_se_tpg; 255 se_tpg = &tl_tpg->tl_se_tpg;
246 se_sess = tl_nexus->se_sess; 256 se_sess = tl_tpg->tl_nexus->se_sess;
247 /* 257 /*
248 * Initialize struct se_cmd descriptor from target_core_mod infrastructure 258 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
249 */ 259 */
250 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, 260 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
251 DMA_NONE, MSG_SIMPLE_TAG, 261 DMA_NONE, TCM_SIMPLE_TAG,
252 &tl_cmd->tl_sense_buf[0]); 262 &tl_cmd->tl_sense_buf[0]);
253 263
254 rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL); 264 rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
@@ -288,7 +298,6 @@ release:
288static int tcm_loop_abort_task(struct scsi_cmnd *sc) 298static int tcm_loop_abort_task(struct scsi_cmnd *sc)
289{ 299{
290 struct tcm_loop_hba *tl_hba; 300 struct tcm_loop_hba *tl_hba;
291 struct tcm_loop_nexus *tl_nexus;
292 struct tcm_loop_tpg *tl_tpg; 301 struct tcm_loop_tpg *tl_tpg;
293 int ret = FAILED; 302 int ret = FAILED;
294 303
@@ -296,21 +305,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
296 * Locate the tcm_loop_hba_t pointer 305 * Locate the tcm_loop_hba_t pointer
297 */ 306 */
298 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); 307 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
299 /*
300 * Locate the tl_nexus and se_sess pointers
301 */
302 tl_nexus = tl_hba->tl_nexus;
303 if (!tl_nexus) {
304 pr_err("Unable to perform device reset without"
305 " active I_T Nexus\n");
306 return FAILED;
307 }
308
309 /*
310 * Locate the tl_tpg pointer from TargetID in sc->device->id
311 */
312 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; 308 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
313 ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, 309 ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
314 sc->request->tag, TMR_ABORT_TASK); 310 sc->request->tag, TMR_ABORT_TASK);
315 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; 311 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
316} 312}
@@ -322,7 +318,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
322static int tcm_loop_device_reset(struct scsi_cmnd *sc) 318static int tcm_loop_device_reset(struct scsi_cmnd *sc)
323{ 319{
324 struct tcm_loop_hba *tl_hba; 320 struct tcm_loop_hba *tl_hba;
325 struct tcm_loop_nexus *tl_nexus;
326 struct tcm_loop_tpg *tl_tpg; 321 struct tcm_loop_tpg *tl_tpg;
327 int ret = FAILED; 322 int ret = FAILED;
328 323
@@ -330,20 +325,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
330 * Locate the tcm_loop_hba_t pointer 325 * Locate the tcm_loop_hba_t pointer
331 */ 326 */
332 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); 327 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
333 /*
334 * Locate the tl_nexus and se_sess pointers
335 */
336 tl_nexus = tl_hba->tl_nexus;
337 if (!tl_nexus) {
338 pr_err("Unable to perform device reset without"
339 " active I_T Nexus\n");
340 return FAILED;
341 }
342 /*
343 * Locate the tl_tpg pointer from TargetID in sc->device->id
344 */
345 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; 328 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
346 ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, 329
330 ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
347 0, TMR_LUN_RESET); 331 0, TMR_LUN_RESET);
348 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; 332 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
349} 333}
@@ -385,7 +369,6 @@ static struct scsi_host_template tcm_loop_driver_template = {
385 .name = "TCM_Loopback", 369 .name = "TCM_Loopback",
386 .queuecommand = tcm_loop_queuecommand, 370 .queuecommand = tcm_loop_queuecommand,
387 .change_queue_depth = scsi_change_queue_depth, 371 .change_queue_depth = scsi_change_queue_depth,
388 .change_queue_type = scsi_change_queue_type,
389 .eh_abort_handler = tcm_loop_abort_task, 372 .eh_abort_handler = tcm_loop_abort_task,
390 .eh_device_reset_handler = tcm_loop_device_reset, 373 .eh_device_reset_handler = tcm_loop_device_reset,
391 .eh_target_reset_handler = tcm_loop_target_reset, 374 .eh_target_reset_handler = tcm_loop_target_reset,
@@ -940,8 +923,8 @@ static int tcm_loop_make_nexus(
940 struct tcm_loop_nexus *tl_nexus; 923 struct tcm_loop_nexus *tl_nexus;
941 int ret = -ENOMEM; 924 int ret = -ENOMEM;
942 925
943 if (tl_tpg->tl_hba->tl_nexus) { 926 if (tl_tpg->tl_nexus) {
944 pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n"); 927 pr_debug("tl_tpg->tl_nexus already exists\n");
945 return -EEXIST; 928 return -EEXIST;
946 } 929 }
947 se_tpg = &tl_tpg->tl_se_tpg; 930 se_tpg = &tl_tpg->tl_se_tpg;
@@ -976,7 +959,7 @@ static int tcm_loop_make_nexus(
976 */ 959 */
977 __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, 960 __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
978 tl_nexus->se_sess, tl_nexus); 961 tl_nexus->se_sess, tl_nexus);
979 tl_tpg->tl_hba->tl_nexus = tl_nexus; 962 tl_tpg->tl_nexus = tl_nexus;
980 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" 963 pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
981 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), 964 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
982 name); 965 name);
@@ -992,12 +975,8 @@ static int tcm_loop_drop_nexus(
992{ 975{
993 struct se_session *se_sess; 976 struct se_session *se_sess;
994 struct tcm_loop_nexus *tl_nexus; 977 struct tcm_loop_nexus *tl_nexus;
995 struct tcm_loop_hba *tl_hba = tpg->tl_hba;
996 978
997 if (!tl_hba) 979 tl_nexus = tpg->tl_nexus;
998 return -ENODEV;
999
1000 tl_nexus = tl_hba->tl_nexus;
1001 if (!tl_nexus) 980 if (!tl_nexus)
1002 return -ENODEV; 981 return -ENODEV;
1003 982
@@ -1013,13 +992,13 @@ static int tcm_loop_drop_nexus(
1013 } 992 }
1014 993
1015 pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" 994 pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
1016 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), 995 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
1017 tl_nexus->se_sess->se_node_acl->initiatorname); 996 tl_nexus->se_sess->se_node_acl->initiatorname);
1018 /* 997 /*
1019 * Release the SCSI I_T Nexus to the emulated SAS Target Port 998 * Release the SCSI I_T Nexus to the emulated SAS Target Port
1020 */ 999 */
1021 transport_deregister_session(tl_nexus->se_sess); 1000 transport_deregister_session(tl_nexus->se_sess);
1022 tpg->tl_hba->tl_nexus = NULL; 1001 tpg->tl_nexus = NULL;
1023 kfree(tl_nexus); 1002 kfree(tl_nexus);
1024 return 0; 1003 return 0;
1025} 1004}
@@ -1035,7 +1014,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
1035 struct tcm_loop_nexus *tl_nexus; 1014 struct tcm_loop_nexus *tl_nexus;
1036 ssize_t ret; 1015 ssize_t ret;
1037 1016
1038 tl_nexus = tl_tpg->tl_hba->tl_nexus; 1017 tl_nexus = tl_tpg->tl_nexus;
1039 if (!tl_nexus) 1018 if (!tl_nexus)
1040 return -ENODEV; 1019 return -ENODEV;
1041 1020
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 54c59d0b6608..6ae49f272ba6 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -27,11 +27,6 @@ struct tcm_loop_tmr {
27}; 27};
28 28
29struct tcm_loop_nexus { 29struct tcm_loop_nexus {
30 int it_nexus_active;
31 /*
32 * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
33 */
34 struct scsi_host *sh;
35 /* 30 /*
36 * Pointer to TCM session for I_T Nexus 31 * Pointer to TCM session for I_T Nexus
37 */ 32 */
@@ -51,6 +46,7 @@ struct tcm_loop_tpg {
51 atomic_t tl_tpg_port_count; 46 atomic_t tl_tpg_port_count;
52 struct se_portal_group tl_se_tpg; 47 struct se_portal_group tl_se_tpg;
53 struct tcm_loop_hba *tl_hba; 48 struct tcm_loop_hba *tl_hba;
49 struct tcm_loop_nexus *tl_nexus;
54}; 50};
55 51
56struct tcm_loop_hba { 52struct tcm_loop_hba {
@@ -59,7 +55,6 @@ struct tcm_loop_hba {
59 struct se_hba_s *se_hba; 55 struct se_hba_s *se_hba;
60 struct se_lun *tl_hba_lun; 56 struct se_lun *tl_hba_lun;
61 struct se_port *tl_hba_lun_sep; 57 struct se_port *tl_hba_lun_sep;
62 struct tcm_loop_nexus *tl_nexus;
63 struct device dev; 58 struct device dev;
64 struct Scsi_Host *sh; 59 struct Scsi_Host *sh;
65 struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA]; 60 struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index e7e93727553c..9512af6a8114 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1237,7 +1237,7 @@ static void sbp_handle_command(struct sbp_target_request *req)
1237 1237
1238 if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, 1238 if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
1239 req->sense_buf, unpacked_lun, data_length, 1239 req->sense_buf, unpacked_lun, data_length,
1240 MSG_SIMPLE_TAG, data_dir, 0)) 1240 TCM_SIMPLE_TAG, data_dir, 0))
1241 goto err; 1241 goto err;
1242 1242
1243 return; 1243 return;
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 79f9296a08ae..75d89adfccc0 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -50,6 +50,19 @@
50#include "target_core_rd.h" 50#include "target_core_rd.h"
51#include "target_core_xcopy.h" 51#include "target_core_xcopy.h"
52 52
53#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
54static void target_core_setup_##_name##_cit(struct se_subsystem_api *sa) \
55{ \
56 struct target_backend_cits *tbc = &sa->tb_cits; \
57 struct config_item_type *cit = &tbc->tb_##_name##_cit; \
58 \
59 cit->ct_item_ops = _item_ops; \
60 cit->ct_group_ops = _group_ops; \
61 cit->ct_attrs = _attrs; \
62 cit->ct_owner = sa->owner; \
63 pr_debug("Setup generic %s\n", __stringify(_name)); \
64}
65
53extern struct t10_alua_lu_gp *default_lu_gp; 66extern struct t10_alua_lu_gp *default_lu_gp;
54 67
55static LIST_HEAD(g_tf_list); 68static LIST_HEAD(g_tf_list);
@@ -126,48 +139,57 @@ static struct config_group *target_core_register_fabric(
126 139
127 pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" 140 pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
128 " %s\n", group, name); 141 " %s\n", group, name);
129 /* 142
130 * Below are some hardcoded request_module() calls to automatically 143 tf = target_core_get_fabric(name);
131 * local fabric modules when the following is called: 144 if (!tf) {
132 * 145 pr_err("target_core_register_fabric() trying autoload for %s\n",
133 * mkdir -p /sys/kernel/config/target/$MODULE_NAME 146 name);
134 * 147
135 * Note that this does not limit which TCM fabric module can be
136 * registered, but simply provids auto loading logic for modules with
137 * mkdir(2) system calls with known TCM fabric modules.
138 */
139 if (!strncmp(name, "iscsi", 5)) {
140 /* 148 /*
141 * Automatically load the LIO Target fabric module when the 149 * Below are some hardcoded request_module() calls to automatically
142 * following is called: 150 * local fabric modules when the following is called:
143 * 151 *
144 * mkdir -p $CONFIGFS/target/iscsi 152 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
145 */
146 ret = request_module("iscsi_target_mod");
147 if (ret < 0) {
148 pr_err("request_module() failed for"
149 " iscsi_target_mod.ko: %d\n", ret);
150 return ERR_PTR(-EINVAL);
151 }
152 } else if (!strncmp(name, "loopback", 8)) {
153 /*
154 * Automatically load the tcm_loop fabric module when the
155 * following is called:
156 * 153 *
157 * mkdir -p $CONFIGFS/target/loopback 154 * Note that this does not limit which TCM fabric module can be
155 * registered, but simply provids auto loading logic for modules with
156 * mkdir(2) system calls with known TCM fabric modules.
158 */ 157 */
159 ret = request_module("tcm_loop"); 158
160 if (ret < 0) { 159 if (!strncmp(name, "iscsi", 5)) {
161 pr_err("request_module() failed for" 160 /*
162 " tcm_loop.ko: %d\n", ret); 161 * Automatically load the LIO Target fabric module when the
163 return ERR_PTR(-EINVAL); 162 * following is called:
163 *
164 * mkdir -p $CONFIGFS/target/iscsi
165 */
166 ret = request_module("iscsi_target_mod");
167 if (ret < 0) {
168 pr_err("request_module() failed for"
169 " iscsi_target_mod.ko: %d\n", ret);
170 return ERR_PTR(-EINVAL);
171 }
172 } else if (!strncmp(name, "loopback", 8)) {
173 /*
174 * Automatically load the tcm_loop fabric module when the
175 * following is called:
176 *
177 * mkdir -p $CONFIGFS/target/loopback
178 */
179 ret = request_module("tcm_loop");
180 if (ret < 0) {
181 pr_err("request_module() failed for"
182 " tcm_loop.ko: %d\n", ret);
183 return ERR_PTR(-EINVAL);
184 }
164 } 185 }
186
187 tf = target_core_get_fabric(name);
165 } 188 }
166 189
167 tf = target_core_get_fabric(name);
168 if (!tf) { 190 if (!tf) {
169 pr_err("target_core_get_fabric() failed for %s\n", 191 pr_err("target_core_get_fabric() failed for %s\n",
170 name); 192 name);
171 return ERR_PTR(-EINVAL); 193 return ERR_PTR(-EINVAL);
172 } 194 }
173 pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" 195 pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
@@ -562,198 +584,21 @@ EXPORT_SYMBOL(target_fabric_configfs_deregister);
562// Stop functions called by external Target Fabrics Modules 584// Stop functions called by external Target Fabrics Modules
563//############################################################################*/ 585//############################################################################*/
564 586
565/* Start functions for struct config_item_type target_core_dev_attrib_cit */ 587/* Start functions for struct config_item_type tb_dev_attrib_cit */
566
567#define DEF_DEV_ATTRIB_SHOW(_name) \
568static ssize_t target_core_dev_show_attr_##_name( \
569 struct se_dev_attrib *da, \
570 char *page) \
571{ \
572 return snprintf(page, PAGE_SIZE, "%u\n", \
573 (u32)da->da_dev->dev_attrib._name); \
574}
575
576#define DEF_DEV_ATTRIB_STORE(_name) \
577static ssize_t target_core_dev_store_attr_##_name( \
578 struct se_dev_attrib *da, \
579 const char *page, \
580 size_t count) \
581{ \
582 unsigned long val; \
583 int ret; \
584 \
585 ret = kstrtoul(page, 0, &val); \
586 if (ret < 0) { \
587 pr_err("kstrtoul() failed with" \
588 " ret: %d\n", ret); \
589 return -EINVAL; \
590 } \
591 ret = se_dev_set_##_name(da->da_dev, (u32)val); \
592 \
593 return (!ret) ? count : -EINVAL; \
594}
595
596#define DEF_DEV_ATTRIB(_name) \
597DEF_DEV_ATTRIB_SHOW(_name); \
598DEF_DEV_ATTRIB_STORE(_name);
599
600#define DEF_DEV_ATTRIB_RO(_name) \
601DEF_DEV_ATTRIB_SHOW(_name);
602 588
603CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib); 589CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
604#define SE_DEV_ATTR(_name, _mode) \
605static struct target_core_dev_attrib_attribute \
606 target_core_dev_attrib_##_name = \
607 __CONFIGFS_EATTR(_name, _mode, \
608 target_core_dev_show_attr_##_name, \
609 target_core_dev_store_attr_##_name);
610
611#define SE_DEV_ATTR_RO(_name); \
612static struct target_core_dev_attrib_attribute \
613 target_core_dev_attrib_##_name = \
614 __CONFIGFS_EATTR_RO(_name, \
615 target_core_dev_show_attr_##_name);
616
617DEF_DEV_ATTRIB(emulate_model_alias);
618SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
619
620DEF_DEV_ATTRIB(emulate_dpo);
621SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
622
623DEF_DEV_ATTRIB(emulate_fua_write);
624SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
625
626DEF_DEV_ATTRIB(emulate_fua_read);
627SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
628
629DEF_DEV_ATTRIB(emulate_write_cache);
630SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
631
632DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
633SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
634
635DEF_DEV_ATTRIB(emulate_tas);
636SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
637
638DEF_DEV_ATTRIB(emulate_tpu);
639SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
640
641DEF_DEV_ATTRIB(emulate_tpws);
642SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
643
644DEF_DEV_ATTRIB(emulate_caw);
645SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
646
647DEF_DEV_ATTRIB(emulate_3pc);
648SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
649
650DEF_DEV_ATTRIB(pi_prot_type);
651SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR);
652
653DEF_DEV_ATTRIB_RO(hw_pi_prot_type);
654SE_DEV_ATTR_RO(hw_pi_prot_type);
655
656DEF_DEV_ATTRIB(pi_prot_format);
657SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR);
658
659DEF_DEV_ATTRIB(enforce_pr_isids);
660SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
661
662DEF_DEV_ATTRIB(is_nonrot);
663SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
664
665DEF_DEV_ATTRIB(emulate_rest_reord);
666SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
667
668DEF_DEV_ATTRIB(force_pr_aptpl);
669SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR);
670
671DEF_DEV_ATTRIB_RO(hw_block_size);
672SE_DEV_ATTR_RO(hw_block_size);
673
674DEF_DEV_ATTRIB(block_size);
675SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
676
677DEF_DEV_ATTRIB_RO(hw_max_sectors);
678SE_DEV_ATTR_RO(hw_max_sectors);
679
680DEF_DEV_ATTRIB(fabric_max_sectors);
681SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
682
683DEF_DEV_ATTRIB(optimal_sectors);
684SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
685
686DEF_DEV_ATTRIB_RO(hw_queue_depth);
687SE_DEV_ATTR_RO(hw_queue_depth);
688
689DEF_DEV_ATTRIB(queue_depth);
690SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
691
692DEF_DEV_ATTRIB(max_unmap_lba_count);
693SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
694
695DEF_DEV_ATTRIB(max_unmap_block_desc_count);
696SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
697
698DEF_DEV_ATTRIB(unmap_granularity);
699SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
700
701DEF_DEV_ATTRIB(unmap_granularity_alignment);
702SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
703
704DEF_DEV_ATTRIB(max_write_same_len);
705SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
706
707CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group); 590CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
708 591
709static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
710 &target_core_dev_attrib_emulate_model_alias.attr,
711 &target_core_dev_attrib_emulate_dpo.attr,
712 &target_core_dev_attrib_emulate_fua_write.attr,
713 &target_core_dev_attrib_emulate_fua_read.attr,
714 &target_core_dev_attrib_emulate_write_cache.attr,
715 &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
716 &target_core_dev_attrib_emulate_tas.attr,
717 &target_core_dev_attrib_emulate_tpu.attr,
718 &target_core_dev_attrib_emulate_tpws.attr,
719 &target_core_dev_attrib_emulate_caw.attr,
720 &target_core_dev_attrib_emulate_3pc.attr,
721 &target_core_dev_attrib_pi_prot_type.attr,
722 &target_core_dev_attrib_hw_pi_prot_type.attr,
723 &target_core_dev_attrib_pi_prot_format.attr,
724 &target_core_dev_attrib_enforce_pr_isids.attr,
725 &target_core_dev_attrib_force_pr_aptpl.attr,
726 &target_core_dev_attrib_is_nonrot.attr,
727 &target_core_dev_attrib_emulate_rest_reord.attr,
728 &target_core_dev_attrib_hw_block_size.attr,
729 &target_core_dev_attrib_block_size.attr,
730 &target_core_dev_attrib_hw_max_sectors.attr,
731 &target_core_dev_attrib_fabric_max_sectors.attr,
732 &target_core_dev_attrib_optimal_sectors.attr,
733 &target_core_dev_attrib_hw_queue_depth.attr,
734 &target_core_dev_attrib_queue_depth.attr,
735 &target_core_dev_attrib_max_unmap_lba_count.attr,
736 &target_core_dev_attrib_max_unmap_block_desc_count.attr,
737 &target_core_dev_attrib_unmap_granularity.attr,
738 &target_core_dev_attrib_unmap_granularity_alignment.attr,
739 &target_core_dev_attrib_max_write_same_len.attr,
740 NULL,
741};
742
743static struct configfs_item_operations target_core_dev_attrib_ops = { 592static struct configfs_item_operations target_core_dev_attrib_ops = {
744 .show_attribute = target_core_dev_attrib_attr_show, 593 .show_attribute = target_core_dev_attrib_attr_show,
745 .store_attribute = target_core_dev_attrib_attr_store, 594 .store_attribute = target_core_dev_attrib_attr_store,
746}; 595};
747 596
748static struct config_item_type target_core_dev_attrib_cit = { 597TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, NULL);
749 .ct_item_ops = &target_core_dev_attrib_ops,
750 .ct_attrs = target_core_dev_attrib_attrs,
751 .ct_owner = THIS_MODULE,
752};
753 598
754/* End functions for struct config_item_type target_core_dev_attrib_cit */ 599/* End functions for struct config_item_type tb_dev_attrib_cit */
755 600
756/* Start functions for struct config_item_type target_core_dev_wwn_cit */ 601/* Start functions for struct config_item_type tb_dev_wwn_cit */
757 602
758CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn); 603CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
759#define SE_DEV_WWN_ATTR(_name, _mode) \ 604#define SE_DEV_WWN_ATTR(_name, _mode) \
@@ -984,15 +829,11 @@ static struct configfs_item_operations target_core_dev_wwn_ops = {
984 .store_attribute = target_core_dev_wwn_attr_store, 829 .store_attribute = target_core_dev_wwn_attr_store,
985}; 830};
986 831
987static struct config_item_type target_core_dev_wwn_cit = { 832TB_CIT_SETUP(dev_wwn, &target_core_dev_wwn_ops, NULL, target_core_dev_wwn_attrs);
988 .ct_item_ops = &target_core_dev_wwn_ops,
989 .ct_attrs = target_core_dev_wwn_attrs,
990 .ct_owner = THIS_MODULE,
991};
992 833
993/* End functions for struct config_item_type target_core_dev_wwn_cit */ 834/* End functions for struct config_item_type tb_dev_wwn_cit */
994 835
995/* Start functions for struct config_item_type target_core_dev_pr_cit */ 836/* Start functions for struct config_item_type tb_dev_pr_cit */
996 837
997CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device); 838CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
998#define SE_DEV_PR_ATTR(_name, _mode) \ 839#define SE_DEV_PR_ATTR(_name, _mode) \
@@ -1453,15 +1294,11 @@ static struct configfs_item_operations target_core_dev_pr_ops = {
1453 .store_attribute = target_core_dev_pr_attr_store, 1294 .store_attribute = target_core_dev_pr_attr_store,
1454}; 1295};
1455 1296
1456static struct config_item_type target_core_dev_pr_cit = { 1297TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs);
1457 .ct_item_ops = &target_core_dev_pr_ops,
1458 .ct_attrs = target_core_dev_pr_attrs,
1459 .ct_owner = THIS_MODULE,
1460};
1461 1298
1462/* End functions for struct config_item_type target_core_dev_pr_cit */ 1299/* End functions for struct config_item_type tb_dev_pr_cit */
1463 1300
1464/* Start functions for struct config_item_type target_core_dev_cit */ 1301/* Start functions for struct config_item_type tb_dev_cit */
1465 1302
1466static ssize_t target_core_show_dev_info(void *p, char *page) 1303static ssize_t target_core_show_dev_info(void *p, char *page)
1467{ 1304{
@@ -1925,7 +1762,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
1925 .store = target_core_store_dev_lba_map, 1762 .store = target_core_store_dev_lba_map,
1926}; 1763};
1927 1764
1928static struct configfs_attribute *lio_core_dev_attrs[] = { 1765static struct configfs_attribute *target_core_dev_attrs[] = {
1929 &target_core_attr_dev_info.attr, 1766 &target_core_attr_dev_info.attr,
1930 &target_core_attr_dev_control.attr, 1767 &target_core_attr_dev_control.attr,
1931 &target_core_attr_dev_alias.attr, 1768 &target_core_attr_dev_alias.attr,
@@ -1984,13 +1821,9 @@ static struct configfs_item_operations target_core_dev_item_ops = {
1984 .store_attribute = target_core_dev_store, 1821 .store_attribute = target_core_dev_store,
1985}; 1822};
1986 1823
1987static struct config_item_type target_core_dev_cit = { 1824TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
1988 .ct_item_ops = &target_core_dev_item_ops,
1989 .ct_attrs = lio_core_dev_attrs,
1990 .ct_owner = THIS_MODULE,
1991};
1992 1825
1993/* End functions for struct config_item_type target_core_dev_cit */ 1826/* End functions for struct config_item_type tb_dev_cit */
1994 1827
1995/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */ 1828/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
1996 1829
@@ -2670,7 +2503,7 @@ static struct config_item_type target_core_alua_tg_pt_gp_cit = {
2670 2503
2671/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */ 2504/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2672 2505
2673/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ 2506/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
2674 2507
2675static struct config_group *target_core_alua_create_tg_pt_gp( 2508static struct config_group *target_core_alua_create_tg_pt_gp(
2676 struct config_group *group, 2509 struct config_group *group,
@@ -2721,12 +2554,9 @@ static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
2721 .drop_item = &target_core_alua_drop_tg_pt_gp, 2554 .drop_item = &target_core_alua_drop_tg_pt_gp,
2722}; 2555};
2723 2556
2724static struct config_item_type target_core_alua_tg_pt_gps_cit = { 2557TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
2725 .ct_group_ops = &target_core_alua_tg_pt_gps_group_ops,
2726 .ct_owner = THIS_MODULE,
2727};
2728 2558
2729/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */ 2559/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
2730 2560
2731/* Start functions for struct config_item_type target_core_alua_cit */ 2561/* Start functions for struct config_item_type target_core_alua_cit */
2732 2562
@@ -2744,7 +2574,7 @@ static struct config_item_type target_core_alua_cit = {
2744 2574
2745/* End functions for struct config_item_type target_core_alua_cit */ 2575/* End functions for struct config_item_type target_core_alua_cit */
2746 2576
2747/* Start functions for struct config_item_type target_core_stat_cit */ 2577/* Start functions for struct config_item_type tb_dev_stat_cit */
2748 2578
2749static struct config_group *target_core_stat_mkdir( 2579static struct config_group *target_core_stat_mkdir(
2750 struct config_group *group, 2580 struct config_group *group,
@@ -2765,12 +2595,9 @@ static struct configfs_group_operations target_core_stat_group_ops = {
2765 .drop_item = &target_core_stat_rmdir, 2595 .drop_item = &target_core_stat_rmdir,
2766}; 2596};
2767 2597
2768static struct config_item_type target_core_stat_cit = { 2598TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
2769 .ct_group_ops = &target_core_stat_group_ops,
2770 .ct_owner = THIS_MODULE,
2771};
2772 2599
2773/* End functions for struct config_item_type target_core_stat_cit */ 2600/* End functions for struct config_item_type tb_dev_stat_cit */
2774 2601
2775/* Start functions for struct config_item_type target_core_hba_cit */ 2602/* Start functions for struct config_item_type target_core_hba_cit */
2776 2603
@@ -2806,17 +2633,17 @@ static struct config_group *target_core_make_subdev(
2806 if (!dev_cg->default_groups) 2633 if (!dev_cg->default_groups)
2807 goto out_free_device; 2634 goto out_free_device;
2808 2635
2809 config_group_init_type_name(dev_cg, name, &target_core_dev_cit); 2636 config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit);
2810 config_group_init_type_name(&dev->dev_attrib.da_group, "attrib", 2637 config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
2811 &target_core_dev_attrib_cit); 2638 &t->tb_cits.tb_dev_attrib_cit);
2812 config_group_init_type_name(&dev->dev_pr_group, "pr", 2639 config_group_init_type_name(&dev->dev_pr_group, "pr",
2813 &target_core_dev_pr_cit); 2640 &t->tb_cits.tb_dev_pr_cit);
2814 config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn", 2641 config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
2815 &target_core_dev_wwn_cit); 2642 &t->tb_cits.tb_dev_wwn_cit);
2816 config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group, 2643 config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
2817 "alua", &target_core_alua_tg_pt_gps_cit); 2644 "alua", &t->tb_cits.tb_dev_alua_tg_pt_gps_cit);
2818 config_group_init_type_name(&dev->dev_stat_grps.stat_group, 2645 config_group_init_type_name(&dev->dev_stat_grps.stat_group,
2819 "statistics", &target_core_stat_cit); 2646 "statistics", &t->tb_cits.tb_dev_stat_cit);
2820 2647
2821 dev_cg->default_groups[0] = &dev->dev_attrib.da_group; 2648 dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
2822 dev_cg->default_groups[1] = &dev->dev_pr_group; 2649 dev_cg->default_groups[1] = &dev->dev_pr_group;
@@ -3110,6 +2937,17 @@ static struct config_item_type target_core_cit = {
3110 2937
3111/* Stop functions for struct config_item_type target_core_hba_cit */ 2938/* Stop functions for struct config_item_type target_core_hba_cit */
3112 2939
2940void target_core_setup_sub_cits(struct se_subsystem_api *sa)
2941{
2942 target_core_setup_dev_cit(sa);
2943 target_core_setup_dev_attrib_cit(sa);
2944 target_core_setup_dev_pr_cit(sa);
2945 target_core_setup_dev_wwn_cit(sa);
2946 target_core_setup_dev_alua_tg_pt_gps_cit(sa);
2947 target_core_setup_dev_stat_cit(sa);
2948}
2949EXPORT_SYMBOL(target_core_setup_sub_cits);
2950
3113static int __init target_core_init_configfs(void) 2951static int __init target_core_init_configfs(void)
3114{ 2952{
3115 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; 2953 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index c45f9e907e44..7653cfb027a2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -659,6 +659,7 @@ int se_dev_set_max_unmap_lba_count(
659 dev, dev->dev_attrib.max_unmap_lba_count); 659 dev, dev->dev_attrib.max_unmap_lba_count);
660 return 0; 660 return 0;
661} 661}
662EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);
662 663
663int se_dev_set_max_unmap_block_desc_count( 664int se_dev_set_max_unmap_block_desc_count(
664 struct se_device *dev, 665 struct se_device *dev,
@@ -670,6 +671,7 @@ int se_dev_set_max_unmap_block_desc_count(
670 dev, dev->dev_attrib.max_unmap_block_desc_count); 671 dev, dev->dev_attrib.max_unmap_block_desc_count);
671 return 0; 672 return 0;
672} 673}
674EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);
673 675
674int se_dev_set_unmap_granularity( 676int se_dev_set_unmap_granularity(
675 struct se_device *dev, 677 struct se_device *dev,
@@ -680,6 +682,7 @@ int se_dev_set_unmap_granularity(
680 dev, dev->dev_attrib.unmap_granularity); 682 dev, dev->dev_attrib.unmap_granularity);
681 return 0; 683 return 0;
682} 684}
685EXPORT_SYMBOL(se_dev_set_unmap_granularity);
683 686
684int se_dev_set_unmap_granularity_alignment( 687int se_dev_set_unmap_granularity_alignment(
685 struct se_device *dev, 688 struct se_device *dev,
@@ -690,6 +693,7 @@ int se_dev_set_unmap_granularity_alignment(
690 dev, dev->dev_attrib.unmap_granularity_alignment); 693 dev, dev->dev_attrib.unmap_granularity_alignment);
691 return 0; 694 return 0;
692} 695}
696EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);
693 697
694int se_dev_set_max_write_same_len( 698int se_dev_set_max_write_same_len(
695 struct se_device *dev, 699 struct se_device *dev,
@@ -700,6 +704,7 @@ int se_dev_set_max_write_same_len(
700 dev, dev->dev_attrib.max_write_same_len); 704 dev, dev->dev_attrib.max_write_same_len);
701 return 0; 705 return 0;
702} 706}
707EXPORT_SYMBOL(se_dev_set_max_write_same_len);
703 708
704static void dev_set_t10_wwn_model_alias(struct se_device *dev) 709static void dev_set_t10_wwn_model_alias(struct se_device *dev)
705{ 710{
@@ -738,6 +743,7 @@ int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
738 743
739 return 0; 744 return 0;
740} 745}
746EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
741 747
742int se_dev_set_emulate_dpo(struct se_device *dev, int flag) 748int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
743{ 749{
@@ -753,6 +759,7 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
753 759
754 return 0; 760 return 0;
755} 761}
762EXPORT_SYMBOL(se_dev_set_emulate_dpo);
756 763
757int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) 764int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
758{ 765{
@@ -760,17 +767,12 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
760 pr_err("Illegal value %d\n", flag); 767 pr_err("Illegal value %d\n", flag);
761 return -EINVAL; 768 return -EINVAL;
762 } 769 }
763
764 if (flag &&
765 dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
766 pr_err("emulate_fua_write not supported for pSCSI\n");
767 return -EINVAL;
768 }
769 dev->dev_attrib.emulate_fua_write = flag; 770 dev->dev_attrib.emulate_fua_write = flag;
770 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", 771 pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
771 dev, dev->dev_attrib.emulate_fua_write); 772 dev, dev->dev_attrib.emulate_fua_write);
772 return 0; 773 return 0;
773} 774}
775EXPORT_SYMBOL(se_dev_set_emulate_fua_write);
774 776
775int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) 777int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
776{ 778{
@@ -786,6 +788,7 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
786 788
787 return 0; 789 return 0;
788} 790}
791EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
789 792
790int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) 793int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
791{ 794{
@@ -794,11 +797,6 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
794 return -EINVAL; 797 return -EINVAL;
795 } 798 }
796 if (flag && 799 if (flag &&
797 dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
798 pr_err("emulate_write_cache not supported for pSCSI\n");
799 return -EINVAL;
800 }
801 if (flag &&
802 dev->transport->get_write_cache) { 800 dev->transport->get_write_cache) {
803 pr_err("emulate_write_cache not supported for this device\n"); 801 pr_err("emulate_write_cache not supported for this device\n");
804 return -EINVAL; 802 return -EINVAL;
@@ -809,6 +807,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
809 dev, dev->dev_attrib.emulate_write_cache); 807 dev, dev->dev_attrib.emulate_write_cache);
810 return 0; 808 return 0;
811} 809}
810EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
812 811
813int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) 812int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
814{ 813{
@@ -829,6 +828,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
829 828
830 return 0; 829 return 0;
831} 830}
831EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);
832 832
833int se_dev_set_emulate_tas(struct se_device *dev, int flag) 833int se_dev_set_emulate_tas(struct se_device *dev, int flag)
834{ 834{
@@ -849,6 +849,7 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
849 849
850 return 0; 850 return 0;
851} 851}
852EXPORT_SYMBOL(se_dev_set_emulate_tas);
852 853
853int se_dev_set_emulate_tpu(struct se_device *dev, int flag) 854int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
854{ 855{
@@ -870,6 +871,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
870 dev, flag); 871 dev, flag);
871 return 0; 872 return 0;
872} 873}
874EXPORT_SYMBOL(se_dev_set_emulate_tpu);
873 875
874int se_dev_set_emulate_tpws(struct se_device *dev, int flag) 876int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
875{ 877{
@@ -891,6 +893,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
891 dev, flag); 893 dev, flag);
892 return 0; 894 return 0;
893} 895}
896EXPORT_SYMBOL(se_dev_set_emulate_tpws);
894 897
895int se_dev_set_emulate_caw(struct se_device *dev, int flag) 898int se_dev_set_emulate_caw(struct se_device *dev, int flag)
896{ 899{
@@ -904,6 +907,7 @@ int se_dev_set_emulate_caw(struct se_device *dev, int flag)
904 907
905 return 0; 908 return 0;
906} 909}
910EXPORT_SYMBOL(se_dev_set_emulate_caw);
907 911
908int se_dev_set_emulate_3pc(struct se_device *dev, int flag) 912int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
909{ 913{
@@ -917,6 +921,7 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
917 921
918 return 0; 922 return 0;
919} 923}
924EXPORT_SYMBOL(se_dev_set_emulate_3pc);
920 925
921int se_dev_set_pi_prot_type(struct se_device *dev, int flag) 926int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
922{ 927{
@@ -970,6 +975,7 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
970 975
971 return 0; 976 return 0;
972} 977}
978EXPORT_SYMBOL(se_dev_set_pi_prot_type);
973 979
974int se_dev_set_pi_prot_format(struct se_device *dev, int flag) 980int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
975{ 981{
@@ -1005,6 +1011,7 @@ int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
1005 1011
1006 return 0; 1012 return 0;
1007} 1013}
1014EXPORT_SYMBOL(se_dev_set_pi_prot_format);
1008 1015
1009int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) 1016int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1010{ 1017{
@@ -1017,6 +1024,7 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1017 (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); 1024 (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
1018 return 0; 1025 return 0;
1019} 1026}
1027EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);
1020 1028
1021int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag) 1029int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
1022{ 1030{
@@ -1034,6 +1042,7 @@ int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
1034 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag); 1042 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
1035 return 0; 1043 return 0;
1036} 1044}
1045EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);
1037 1046
1038int se_dev_set_is_nonrot(struct se_device *dev, int flag) 1047int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1039{ 1048{
@@ -1046,6 +1055,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1046 dev, flag); 1055 dev, flag);
1047 return 0; 1056 return 0;
1048} 1057}
1058EXPORT_SYMBOL(se_dev_set_is_nonrot);
1049 1059
1050int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) 1060int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1051{ 1061{
@@ -1058,6 +1068,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
1058 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); 1068 pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
1059 return 0; 1069 return 0;
1060} 1070}
1071EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
1061 1072
1062/* 1073/*
1063 * Note, this can only be called on unexported SE Device Object. 1074 * Note, this can only be called on unexported SE Device Object.
@@ -1076,31 +1087,21 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
1076 return -EINVAL; 1087 return -EINVAL;
1077 } 1088 }
1078 1089
1079 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1090 if (queue_depth > dev->dev_attrib.queue_depth) {
1080 if (queue_depth > dev->dev_attrib.hw_queue_depth) { 1091 if (queue_depth > dev->dev_attrib.hw_queue_depth) {
1081 pr_err("dev[%p]: Passed queue_depth: %u" 1092 pr_err("dev[%p]: Passed queue_depth:"
1082 " exceeds TCM/SE_Device TCQ: %u\n", 1093 " %u exceeds TCM/SE_Device MAX"
1083 dev, queue_depth, 1094 " TCQ: %u\n", dev, queue_depth,
1084 dev->dev_attrib.hw_queue_depth); 1095 dev->dev_attrib.hw_queue_depth);
1085 return -EINVAL; 1096 return -EINVAL;
1086 } 1097 }
1087 } else {
1088 if (queue_depth > dev->dev_attrib.queue_depth) {
1089 if (queue_depth > dev->dev_attrib.hw_queue_depth) {
1090 pr_err("dev[%p]: Passed queue_depth:"
1091 " %u exceeds TCM/SE_Device MAX"
1092 " TCQ: %u\n", dev, queue_depth,
1093 dev->dev_attrib.hw_queue_depth);
1094 return -EINVAL;
1095 }
1096 }
1097 } 1098 }
1098
1099 dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth; 1099 dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
1100 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", 1100 pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
1101 dev, queue_depth); 1101 dev, queue_depth);
1102 return 0; 1102 return 0;
1103} 1103}
1104EXPORT_SYMBOL(se_dev_set_queue_depth);
1104 1105
1105int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) 1106int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1106{ 1107{
@@ -1123,22 +1124,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1123 DA_STATUS_MAX_SECTORS_MIN); 1124 DA_STATUS_MAX_SECTORS_MIN);
1124 return -EINVAL; 1125 return -EINVAL;
1125 } 1126 }
1126 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { 1127 if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1127 if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) { 1128 pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1128 pr_err("dev[%p]: Passed fabric_max_sectors: %u" 1129 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1129 " greater than TCM/SE_Device max_sectors:" 1130 " %u\n", dev, fabric_max_sectors,
1130 " %u\n", dev, fabric_max_sectors, 1131 DA_STATUS_MAX_SECTORS_MAX);
1131 dev->dev_attrib.hw_max_sectors); 1132 return -EINVAL;
1132 return -EINVAL;
1133 }
1134 } else {
1135 if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
1136 pr_err("dev[%p]: Passed fabric_max_sectors: %u"
1137 " greater than DA_STATUS_MAX_SECTORS_MAX:"
1138 " %u\n", dev, fabric_max_sectors,
1139 DA_STATUS_MAX_SECTORS_MAX);
1140 return -EINVAL;
1141 }
1142 } 1133 }
1143 /* 1134 /*
1144 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() 1135 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
@@ -1155,6 +1146,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
1155 dev, fabric_max_sectors); 1146 dev, fabric_max_sectors);
1156 return 0; 1147 return 0;
1157} 1148}
1149EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
1158 1150
1159int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) 1151int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1160{ 1152{
@@ -1164,11 +1156,6 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1164 dev, dev->export_count); 1156 dev, dev->export_count);
1165 return -EINVAL; 1157 return -EINVAL;
1166 } 1158 }
1167 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1168 pr_err("dev[%p]: Passed optimal_sectors cannot be"
1169 " changed for TCM/pSCSI\n", dev);
1170 return -EINVAL;
1171 }
1172 if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { 1159 if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
1173 pr_err("dev[%p]: Passed optimal_sectors %u cannot be" 1160 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1174 " greater than fabric_max_sectors: %u\n", dev, 1161 " greater than fabric_max_sectors: %u\n", dev,
@@ -1181,6 +1168,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
1181 dev, optimal_sectors); 1168 dev, optimal_sectors);
1182 return 0; 1169 return 0;
1183} 1170}
1171EXPORT_SYMBOL(se_dev_set_optimal_sectors);
1184 1172
1185int se_dev_set_block_size(struct se_device *dev, u32 block_size) 1173int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1186{ 1174{
@@ -1201,13 +1189,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1201 return -EINVAL; 1189 return -EINVAL;
1202 } 1190 }
1203 1191
1204 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
1205 pr_err("dev[%p]: Not allowed to change block_size for"
1206 " Physical Device, use for Linux/SCSI to change"
1207 " block_size for underlying hardware\n", dev);
1208 return -EINVAL;
1209 }
1210
1211 dev->dev_attrib.block_size = block_size; 1192 dev->dev_attrib.block_size = block_size;
1212 pr_debug("dev[%p]: SE Device block_size changed to %u\n", 1193 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1213 dev, block_size); 1194 dev, block_size);
@@ -1218,6 +1199,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1218 1199
1219 return 0; 1200 return 0;
1220} 1201}
1202EXPORT_SYMBOL(se_dev_set_block_size);
1221 1203
1222struct se_lun *core_dev_add_lun( 1204struct se_lun *core_dev_add_lun(
1223 struct se_portal_group *tpg, 1205 struct se_portal_group *tpg,
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 72c83d98662b..c2aea099ea4a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -37,6 +37,7 @@
37 37
38#include <target/target_core_base.h> 38#include <target/target_core_base.h>
39#include <target/target_core_backend.h> 39#include <target/target_core_backend.h>
40#include <target/target_core_backend_configfs.h>
40 41
41#include "target_core_file.h" 42#include "target_core_file.h"
42 43
@@ -934,6 +935,42 @@ fd_parse_cdb(struct se_cmd *cmd)
934 return sbc_parse_cdb(cmd, &fd_sbc_ops); 935 return sbc_parse_cdb(cmd, &fd_sbc_ops);
935} 936}
936 937
938DEF_TB_DEFAULT_ATTRIBS(fileio);
939
940static struct configfs_attribute *fileio_backend_dev_attrs[] = {
941 &fileio_dev_attrib_emulate_model_alias.attr,
942 &fileio_dev_attrib_emulate_dpo.attr,
943 &fileio_dev_attrib_emulate_fua_write.attr,
944 &fileio_dev_attrib_emulate_fua_read.attr,
945 &fileio_dev_attrib_emulate_write_cache.attr,
946 &fileio_dev_attrib_emulate_ua_intlck_ctrl.attr,
947 &fileio_dev_attrib_emulate_tas.attr,
948 &fileio_dev_attrib_emulate_tpu.attr,
949 &fileio_dev_attrib_emulate_tpws.attr,
950 &fileio_dev_attrib_emulate_caw.attr,
951 &fileio_dev_attrib_emulate_3pc.attr,
952 &fileio_dev_attrib_pi_prot_type.attr,
953 &fileio_dev_attrib_hw_pi_prot_type.attr,
954 &fileio_dev_attrib_pi_prot_format.attr,
955 &fileio_dev_attrib_enforce_pr_isids.attr,
956 &fileio_dev_attrib_is_nonrot.attr,
957 &fileio_dev_attrib_emulate_rest_reord.attr,
958 &fileio_dev_attrib_force_pr_aptpl.attr,
959 &fileio_dev_attrib_hw_block_size.attr,
960 &fileio_dev_attrib_block_size.attr,
961 &fileio_dev_attrib_hw_max_sectors.attr,
962 &fileio_dev_attrib_fabric_max_sectors.attr,
963 &fileio_dev_attrib_optimal_sectors.attr,
964 &fileio_dev_attrib_hw_queue_depth.attr,
965 &fileio_dev_attrib_queue_depth.attr,
966 &fileio_dev_attrib_max_unmap_lba_count.attr,
967 &fileio_dev_attrib_max_unmap_block_desc_count.attr,
968 &fileio_dev_attrib_unmap_granularity.attr,
969 &fileio_dev_attrib_unmap_granularity_alignment.attr,
970 &fileio_dev_attrib_max_write_same_len.attr,
971 NULL,
972};
973
937static struct se_subsystem_api fileio_template = { 974static struct se_subsystem_api fileio_template = {
938 .name = "fileio", 975 .name = "fileio",
939 .inquiry_prod = "FILEIO", 976 .inquiry_prod = "FILEIO",
@@ -957,6 +994,11 @@ static struct se_subsystem_api fileio_template = {
957 994
958static int __init fileio_module_init(void) 995static int __init fileio_module_init(void)
959{ 996{
997 struct target_backend_cits *tbc = &fileio_template.tb_cits;
998
999 target_core_setup_sub_cits(&fileio_template);
1000 tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs;
1001
960 return transport_subsystem_register(&fileio_template); 1002 return transport_subsystem_register(&fileio_template);
961} 1003}
962 1004
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index a25051a37dd7..ff95f95dcd13 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -36,6 +36,7 @@
36#include <target/target_core_base.h> 36#include <target/target_core_base.h>
37#include <target/target_core_backend.h> 37#include <target/target_core_backend.h>
38#include <target/target_core_fabric.h> 38#include <target/target_core_fabric.h>
39#include <target/target_core_configfs.h>
39 40
40#include "target_core_internal.h" 41#include "target_core_internal.h"
41 42
@@ -137,8 +138,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
137 return hba; 138 return hba;
138 139
139out_module_put: 140out_module_put:
140 if (hba->transport->owner) 141 module_put(hba->transport->owner);
141 module_put(hba->transport->owner);
142 hba->transport = NULL; 142 hba->transport = NULL;
143out_free_hba: 143out_free_hba:
144 kfree(hba); 144 kfree(hba);
@@ -159,8 +159,7 @@ core_delete_hba(struct se_hba *hba)
159 pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" 159 pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
160 " Core\n", hba->hba_id); 160 " Core\n", hba->hba_id);
161 161
162 if (hba->transport->owner) 162 module_put(hba->transport->owner);
163 module_put(hba->transport->owner);
164 163
165 hba->transport = NULL; 164 hba->transport = NULL;
166 kfree(hba); 165 kfree(hba);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7e6b857c6b3f..3efff94fbd97 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -41,6 +41,7 @@
41 41
42#include <target/target_core_base.h> 42#include <target/target_core_base.h>
43#include <target/target_core_backend.h> 43#include <target/target_core_backend.h>
44#include <target/target_core_backend_configfs.h>
44 45
45#include "target_core_iblock.h" 46#include "target_core_iblock.h"
46 47
@@ -858,6 +859,42 @@ static bool iblock_get_write_cache(struct se_device *dev)
858 return q->flush_flags & REQ_FLUSH; 859 return q->flush_flags & REQ_FLUSH;
859} 860}
860 861
862DEF_TB_DEFAULT_ATTRIBS(iblock);
863
864static struct configfs_attribute *iblock_backend_dev_attrs[] = {
865 &iblock_dev_attrib_emulate_model_alias.attr,
866 &iblock_dev_attrib_emulate_dpo.attr,
867 &iblock_dev_attrib_emulate_fua_write.attr,
868 &iblock_dev_attrib_emulate_fua_read.attr,
869 &iblock_dev_attrib_emulate_write_cache.attr,
870 &iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
871 &iblock_dev_attrib_emulate_tas.attr,
872 &iblock_dev_attrib_emulate_tpu.attr,
873 &iblock_dev_attrib_emulate_tpws.attr,
874 &iblock_dev_attrib_emulate_caw.attr,
875 &iblock_dev_attrib_emulate_3pc.attr,
876 &iblock_dev_attrib_pi_prot_type.attr,
877 &iblock_dev_attrib_hw_pi_prot_type.attr,
878 &iblock_dev_attrib_pi_prot_format.attr,
879 &iblock_dev_attrib_enforce_pr_isids.attr,
880 &iblock_dev_attrib_is_nonrot.attr,
881 &iblock_dev_attrib_emulate_rest_reord.attr,
882 &iblock_dev_attrib_force_pr_aptpl.attr,
883 &iblock_dev_attrib_hw_block_size.attr,
884 &iblock_dev_attrib_block_size.attr,
885 &iblock_dev_attrib_hw_max_sectors.attr,
886 &iblock_dev_attrib_fabric_max_sectors.attr,
887 &iblock_dev_attrib_optimal_sectors.attr,
888 &iblock_dev_attrib_hw_queue_depth.attr,
889 &iblock_dev_attrib_queue_depth.attr,
890 &iblock_dev_attrib_max_unmap_lba_count.attr,
891 &iblock_dev_attrib_max_unmap_block_desc_count.attr,
892 &iblock_dev_attrib_unmap_granularity.attr,
893 &iblock_dev_attrib_unmap_granularity_alignment.attr,
894 &iblock_dev_attrib_max_write_same_len.attr,
895 NULL,
896};
897
861static struct se_subsystem_api iblock_template = { 898static struct se_subsystem_api iblock_template = {
862 .name = "iblock", 899 .name = "iblock",
863 .inquiry_prod = "IBLOCK", 900 .inquiry_prod = "IBLOCK",
@@ -883,6 +920,11 @@ static struct se_subsystem_api iblock_template = {
883 920
884static int __init iblock_module_init(void) 921static int __init iblock_module_init(void)
885{ 922{
923 struct target_backend_cits *tbc = &iblock_template.tb_cits;
924
925 target_core_setup_sub_cits(&iblock_template);
926 tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs;
927
886 return transport_subsystem_register(&iblock_template); 928 return transport_subsystem_register(&iblock_template);
887} 929}
888 930
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index e31f42f369ff..60381db90026 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -18,34 +18,6 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
18 struct se_lun *); 18 struct se_lun *);
19void core_dev_unexport(struct se_device *, struct se_portal_group *, 19void core_dev_unexport(struct se_device *, struct se_portal_group *,
20 struct se_lun *); 20 struct se_lun *);
21int se_dev_set_task_timeout(struct se_device *, u32);
22int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
23int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
24int se_dev_set_unmap_granularity(struct se_device *, u32);
25int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
26int se_dev_set_max_write_same_len(struct se_device *, u32);
27int se_dev_set_emulate_model_alias(struct se_device *, int);
28int se_dev_set_emulate_dpo(struct se_device *, int);
29int se_dev_set_emulate_fua_write(struct se_device *, int);
30int se_dev_set_emulate_fua_read(struct se_device *, int);
31int se_dev_set_emulate_write_cache(struct se_device *, int);
32int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
33int se_dev_set_emulate_tas(struct se_device *, int);
34int se_dev_set_emulate_tpu(struct se_device *, int);
35int se_dev_set_emulate_tpws(struct se_device *, int);
36int se_dev_set_emulate_caw(struct se_device *, int);
37int se_dev_set_emulate_3pc(struct se_device *, int);
38int se_dev_set_pi_prot_type(struct se_device *, int);
39int se_dev_set_pi_prot_format(struct se_device *, int);
40int se_dev_set_enforce_pr_isids(struct se_device *, int);
41int se_dev_set_force_pr_aptpl(struct se_device *, int);
42int se_dev_set_is_nonrot(struct se_device *, int);
43int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
44int se_dev_set_queue_depth(struct se_device *, u32);
45int se_dev_set_max_sectors(struct se_device *, u32);
46int se_dev_set_fabric_max_sectors(struct se_device *, u32);
47int se_dev_set_optimal_sectors(struct se_device *, u32);
48int se_dev_set_block_size(struct se_device *, u32);
49struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32); 21struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
50void core_dev_del_lun(struct se_portal_group *, struct se_lun *); 22void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
51struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); 23struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 4c261c33cf55..d56f2aaba9af 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -76,7 +76,7 @@ enum preempt_type {
76}; 76};
77 77
78static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *, 78static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
79 struct t10_pr_registration *, int); 79 struct t10_pr_registration *, int, int);
80 80
81static sense_reason_t 81static sense_reason_t
82target_scsi2_reservation_check(struct se_cmd *cmd) 82target_scsi2_reservation_check(struct se_cmd *cmd)
@@ -1177,7 +1177,7 @@ static int core_scsi3_check_implicit_release(
1177 * service action with the SERVICE ACTION RESERVATION KEY 1177 * service action with the SERVICE ACTION RESERVATION KEY
1178 * field set to zero (see 5.7.11.3). 1178 * field set to zero (see 5.7.11.3).
1179 */ 1179 */
1180 __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0); 1180 __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1);
1181 ret = 1; 1181 ret = 1;
1182 /* 1182 /*
1183 * For 'All Registrants' reservation types, all existing 1183 * For 'All Registrants' reservation types, all existing
@@ -1219,7 +1219,8 @@ static void __core_scsi3_free_registration(
1219 1219
1220 pr_reg->pr_reg_deve->def_pr_registered = 0; 1220 pr_reg->pr_reg_deve->def_pr_registered = 0;
1221 pr_reg->pr_reg_deve->pr_res_key = 0; 1221 pr_reg->pr_reg_deve->pr_res_key = 0;
1222 list_del(&pr_reg->pr_reg_list); 1222 if (!list_empty(&pr_reg->pr_reg_list))
1223 list_del(&pr_reg->pr_reg_list);
1223 /* 1224 /*
1224 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(), 1225 * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
1225 * so call core_scsi3_put_pr_reg() to decrement our reference. 1226 * so call core_scsi3_put_pr_reg() to decrement our reference.
@@ -1271,6 +1272,7 @@ void core_scsi3_free_pr_reg_from_nacl(
1271{ 1272{
1272 struct t10_reservation *pr_tmpl = &dev->t10_pr; 1273 struct t10_reservation *pr_tmpl = &dev->t10_pr;
1273 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; 1274 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
1275 bool free_reg = false;
1274 /* 1276 /*
1275 * If the passed se_node_acl matches the reservation holder, 1277 * If the passed se_node_acl matches the reservation holder,
1276 * release the reservation. 1278 * release the reservation.
@@ -1278,13 +1280,18 @@ void core_scsi3_free_pr_reg_from_nacl(
1278 spin_lock(&dev->dev_reservation_lock); 1280 spin_lock(&dev->dev_reservation_lock);
1279 pr_res_holder = dev->dev_pr_res_holder; 1281 pr_res_holder = dev->dev_pr_res_holder;
1280 if ((pr_res_holder != NULL) && 1282 if ((pr_res_holder != NULL) &&
1281 (pr_res_holder->pr_reg_nacl == nacl)) 1283 (pr_res_holder->pr_reg_nacl == nacl)) {
1282 __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0); 1284 __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1);
1285 free_reg = true;
1286 }
1283 spin_unlock(&dev->dev_reservation_lock); 1287 spin_unlock(&dev->dev_reservation_lock);
1284 /* 1288 /*
1285 * Release any registration associated with the struct se_node_acl. 1289 * Release any registration associated with the struct se_node_acl.
1286 */ 1290 */
1287 spin_lock(&pr_tmpl->registration_lock); 1291 spin_lock(&pr_tmpl->registration_lock);
1292 if (pr_res_holder && free_reg)
1293 __core_scsi3_free_registration(dev, pr_res_holder, NULL, 0);
1294
1288 list_for_each_entry_safe(pr_reg, pr_reg_tmp, 1295 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
1289 &pr_tmpl->registration_list, pr_reg_list) { 1296 &pr_tmpl->registration_list, pr_reg_list) {
1290 1297
@@ -1307,7 +1314,7 @@ void core_scsi3_free_all_registrations(
1307 if (pr_res_holder != NULL) { 1314 if (pr_res_holder != NULL) {
1308 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 1315 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
1309 __core_scsi3_complete_pro_release(dev, pr_res_nacl, 1316 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
1310 pr_res_holder, 0); 1317 pr_res_holder, 0, 0);
1311 } 1318 }
1312 spin_unlock(&dev->dev_reservation_lock); 1319 spin_unlock(&dev->dev_reservation_lock);
1313 1320
@@ -1429,14 +1436,12 @@ core_scsi3_decode_spec_i_port(
1429 struct target_core_fabric_ops *tmp_tf_ops; 1436 struct target_core_fabric_ops *tmp_tf_ops;
1430 unsigned char *buf; 1437 unsigned char *buf;
1431 unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; 1438 unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
1432 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; 1439 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
1433 sense_reason_t ret; 1440 sense_reason_t ret;
1434 u32 tpdl, tid_len = 0; 1441 u32 tpdl, tid_len = 0;
1435 int dest_local_nexus; 1442 int dest_local_nexus;
1436 u32 dest_rtpi = 0; 1443 u32 dest_rtpi = 0;
1437 1444
1438 memset(dest_iport, 0, 64);
1439
1440 local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; 1445 local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
1441 /* 1446 /*
1442 * Allocate a struct pr_transport_id_holder and setup the 1447 * Allocate a struct pr_transport_id_holder and setup the
@@ -2105,13 +2110,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
2105 /* 2110 /*
2106 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus. 2111 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
2107 */ 2112 */
2108 pr_holder = core_scsi3_check_implicit_release( 2113 type = pr_reg->pr_res_type;
2109 cmd->se_dev, pr_reg); 2114 pr_holder = core_scsi3_check_implicit_release(cmd->se_dev,
2115 pr_reg);
2110 if (pr_holder < 0) { 2116 if (pr_holder < 0) {
2111 ret = TCM_RESERVATION_CONFLICT; 2117 ret = TCM_RESERVATION_CONFLICT;
2112 goto out; 2118 goto out;
2113 } 2119 }
2114 type = pr_reg->pr_res_type;
2115 2120
2116 spin_lock(&pr_tmpl->registration_lock); 2121 spin_lock(&pr_tmpl->registration_lock);
2117 /* 2122 /*
@@ -2269,6 +2274,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
2269 spin_lock(&dev->dev_reservation_lock); 2274 spin_lock(&dev->dev_reservation_lock);
2270 pr_res_holder = dev->dev_pr_res_holder; 2275 pr_res_holder = dev->dev_pr_res_holder;
2271 if (pr_res_holder) { 2276 if (pr_res_holder) {
2277 int pr_res_type = pr_res_holder->pr_res_type;
2272 /* 2278 /*
2273 * From spc4r17 Section 5.7.9: Reserving: 2279 * From spc4r17 Section 5.7.9: Reserving:
2274 * 2280 *
@@ -2279,7 +2285,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
2279 * the logical unit, then the command shall be completed with 2285 * the logical unit, then the command shall be completed with
2280 * RESERVATION CONFLICT status. 2286 * RESERVATION CONFLICT status.
2281 */ 2287 */
2282 if (pr_res_holder != pr_reg) { 2288 if ((pr_res_holder != pr_reg) &&
2289 (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
2290 (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
2283 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2291 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2284 pr_err("SPC-3 PR: Attempted RESERVE from" 2292 pr_err("SPC-3 PR: Attempted RESERVE from"
2285 " [%s]: %s while reservation already held by" 2293 " [%s]: %s while reservation already held by"
@@ -2385,23 +2393,59 @@ static void __core_scsi3_complete_pro_release(
2385 struct se_device *dev, 2393 struct se_device *dev,
2386 struct se_node_acl *se_nacl, 2394 struct se_node_acl *se_nacl,
2387 struct t10_pr_registration *pr_reg, 2395 struct t10_pr_registration *pr_reg,
2388 int explicit) 2396 int explicit,
2397 int unreg)
2389{ 2398{
2390 struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo; 2399 struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
2391 char i_buf[PR_REG_ISID_ID_LEN]; 2400 char i_buf[PR_REG_ISID_ID_LEN];
2401 int pr_res_type = 0, pr_res_scope = 0;
2392 2402
2393 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 2403 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
2394 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN); 2404 core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
2395 /* 2405 /*
2396 * Go ahead and release the current PR reservation holder. 2406 * Go ahead and release the current PR reservation holder.
2407 * If an All Registrants reservation is currently active and
2408 * a unregister operation is requested, replace the current
2409 * dev_pr_res_holder with another active registration.
2397 */ 2410 */
2398 dev->dev_pr_res_holder = NULL; 2411 if (dev->dev_pr_res_holder) {
2412 pr_res_type = dev->dev_pr_res_holder->pr_res_type;
2413 pr_res_scope = dev->dev_pr_res_holder->pr_res_scope;
2414 dev->dev_pr_res_holder->pr_res_type = 0;
2415 dev->dev_pr_res_holder->pr_res_scope = 0;
2416 dev->dev_pr_res_holder->pr_res_holder = 0;
2417 dev->dev_pr_res_holder = NULL;
2418 }
2419 if (!unreg)
2420 goto out;
2399 2421
2400 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" 2422 spin_lock(&dev->t10_pr.registration_lock);
2401 " reservation holder TYPE: %s ALL_TG_PT: %d\n", 2423 list_del_init(&pr_reg->pr_reg_list);
2402 tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit", 2424 /*
2403 core_scsi3_pr_dump_type(pr_reg->pr_res_type), 2425 * If the I_T nexus is a reservation holder, the persistent reservation
2404 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); 2426 * is of an all registrants type, and the I_T nexus is the last remaining
2427 * registered I_T nexus, then the device server shall also release the
2428 * persistent reservation.
2429 */
2430 if (!list_empty(&dev->t10_pr.registration_list) &&
2431 ((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
2432 (pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) {
2433 dev->dev_pr_res_holder =
2434 list_entry(dev->t10_pr.registration_list.next,
2435 struct t10_pr_registration, pr_reg_list);
2436 dev->dev_pr_res_holder->pr_res_type = pr_res_type;
2437 dev->dev_pr_res_holder->pr_res_scope = pr_res_scope;
2438 dev->dev_pr_res_holder->pr_res_holder = 1;
2439 }
2440 spin_unlock(&dev->t10_pr.registration_lock);
2441out:
2442 if (!dev->dev_pr_res_holder) {
2443 pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
2444 " reservation holder TYPE: %s ALL_TG_PT: %d\n",
2445 tfo->get_fabric_name(), (explicit) ? "explicit" :
2446 "implicit", core_scsi3_pr_dump_type(pr_res_type),
2447 (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
2448 }
2405 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", 2449 pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
2406 tfo->get_fabric_name(), se_nacl->initiatorname, 2450 tfo->get_fabric_name(), se_nacl->initiatorname,
2407 i_buf); 2451 i_buf);
@@ -2532,7 +2576,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
2532 * server shall not establish a unit attention condition. 2576 * server shall not establish a unit attention condition.
2533 */ 2577 */
2534 __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl, 2578 __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
2535 pr_reg, 1); 2579 pr_reg, 1, 0);
2536 2580
2537 spin_unlock(&dev->dev_reservation_lock); 2581 spin_unlock(&dev->dev_reservation_lock);
2538 2582
@@ -2620,7 +2664,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
2620 if (pr_res_holder) { 2664 if (pr_res_holder) {
2621 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; 2665 struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
2622 __core_scsi3_complete_pro_release(dev, pr_res_nacl, 2666 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
2623 pr_res_holder, 0); 2667 pr_res_holder, 0, 0);
2624 } 2668 }
2625 spin_unlock(&dev->dev_reservation_lock); 2669 spin_unlock(&dev->dev_reservation_lock);
2626 /* 2670 /*
@@ -2679,7 +2723,7 @@ static void __core_scsi3_complete_pro_preempt(
2679 */ 2723 */
2680 if (dev->dev_pr_res_holder) 2724 if (dev->dev_pr_res_holder)
2681 __core_scsi3_complete_pro_release(dev, nacl, 2725 __core_scsi3_complete_pro_release(dev, nacl,
2682 dev->dev_pr_res_holder, 0); 2726 dev->dev_pr_res_holder, 0, 0);
2683 2727
2684 dev->dev_pr_res_holder = pr_reg; 2728 dev->dev_pr_res_holder = pr_reg;
2685 pr_reg->pr_res_holder = 1; 2729 pr_reg->pr_res_holder = 1;
@@ -2924,8 +2968,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
2924 */ 2968 */
2925 if (pr_reg_n != pr_res_holder) 2969 if (pr_reg_n != pr_res_holder)
2926 __core_scsi3_complete_pro_release(dev, 2970 __core_scsi3_complete_pro_release(dev,
2927 pr_res_holder->pr_reg_nacl, 2971 pr_res_holder->pr_reg_nacl,
2928 dev->dev_pr_res_holder, 0); 2972 dev->dev_pr_res_holder, 0, 0);
2929 /* 2973 /*
2930 * b) Remove the registrations for all I_T nexuses identified 2974 * b) Remove the registrations for all I_T nexuses identified
2931 * by the SERVICE ACTION RESERVATION KEY field, except the 2975 * by the SERVICE ACTION RESERVATION KEY field, except the
@@ -3059,7 +3103,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
3059 struct t10_reservation *pr_tmpl = &dev->t10_pr; 3103 struct t10_reservation *pr_tmpl = &dev->t10_pr;
3060 unsigned char *buf; 3104 unsigned char *buf;
3061 unsigned char *initiator_str; 3105 unsigned char *initiator_str;
3062 char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; 3106 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
3063 u32 tid_len, tmp_tid_len; 3107 u32 tid_len, tmp_tid_len;
3064 int new_reg = 0, type, scope, matching_iname; 3108 int new_reg = 0, type, scope, matching_iname;
3065 sense_reason_t ret; 3109 sense_reason_t ret;
@@ -3071,7 +3115,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
3071 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3115 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3072 } 3116 }
3073 3117
3074 memset(dest_iport, 0, 64);
3075 memset(i_buf, 0, PR_REG_ISID_ID_LEN); 3118 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
3076 se_tpg = se_sess->se_tpg; 3119 se_tpg = se_sess->se_tpg;
3077 tf_ops = se_tpg->se_tpg_tfo; 3120 tf_ops = se_tpg->se_tpg_tfo;
@@ -3389,7 +3432,7 @@ after_iport_check:
3389 * holder (i.e., the I_T nexus on which the 3432 * holder (i.e., the I_T nexus on which the
3390 */ 3433 */
3391 __core_scsi3_complete_pro_release(dev, pr_res_nacl, 3434 __core_scsi3_complete_pro_release(dev, pr_res_nacl,
3392 dev->dev_pr_res_holder, 0); 3435 dev->dev_pr_res_holder, 0, 0);
3393 /* 3436 /*
3394 * g) Move the persistent reservation to the specified I_T nexus using 3437 * g) Move the persistent reservation to the specified I_T nexus using
3395 * the same scope and type as the persistent reservation released in 3438 * the same scope and type as the persistent reservation released in
@@ -3837,7 +3880,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3837 unsigned char *buf; 3880 unsigned char *buf;
3838 u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; 3881 u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
3839 u32 off = 8; /* off into first Full Status descriptor */ 3882 u32 off = 8; /* off into first Full Status descriptor */
3840 int format_code = 0; 3883 int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
3884 bool all_reg = false;
3841 3885
3842 if (cmd->data_length < 8) { 3886 if (cmd->data_length < 8) {
3843 pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" 3887 pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
@@ -3854,6 +3898,19 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3854 buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); 3898 buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
3855 buf[3] = (dev->t10_pr.pr_generation & 0xff); 3899 buf[3] = (dev->t10_pr.pr_generation & 0xff);
3856 3900
3901 spin_lock(&dev->dev_reservation_lock);
3902 if (dev->dev_pr_res_holder) {
3903 struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;
3904
3905 if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
3906 pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
3907 all_reg = true;
3908 pr_res_type = pr_holder->pr_res_type;
3909 pr_res_scope = pr_holder->pr_res_scope;
3910 }
3911 }
3912 spin_unlock(&dev->dev_reservation_lock);
3913
3857 spin_lock(&pr_tmpl->registration_lock); 3914 spin_lock(&pr_tmpl->registration_lock);
3858 list_for_each_entry_safe(pr_reg, pr_reg_tmp, 3915 list_for_each_entry_safe(pr_reg, pr_reg_tmp,
3859 &pr_tmpl->registration_list, pr_reg_list) { 3916 &pr_tmpl->registration_list, pr_reg_list) {
@@ -3901,14 +3958,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3901 * reservation holder for PR_HOLDER bit. 3958 * reservation holder for PR_HOLDER bit.
3902 * 3959 *
3903 * Also, if this registration is the reservation 3960 * Also, if this registration is the reservation
3904 * holder, fill in SCOPE and TYPE in the next byte. 3961 * holder or there is an All Registrants reservation
3962 * active, fill in SCOPE and TYPE in the next byte.
3905 */ 3963 */
3906 if (pr_reg->pr_res_holder) { 3964 if (pr_reg->pr_res_holder) {
3907 buf[off++] |= 0x01; 3965 buf[off++] |= 0x01;
3908 buf[off++] = (pr_reg->pr_res_scope & 0xf0) | 3966 buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
3909 (pr_reg->pr_res_type & 0x0f); 3967 (pr_reg->pr_res_type & 0x0f);
3910 } else 3968 } else if (all_reg) {
3969 buf[off++] |= 0x01;
3970 buf[off++] = (pr_res_scope & 0xf0) |
3971 (pr_res_type & 0x0f);
3972 } else {
3911 off += 2; 3973 off += 2;
3974 }
3912 3975
3913 off += 4; /* Skip over reserved area */ 3976 off += 4; /* Skip over reserved area */
3914 /* 3977 /*
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7c8291f0bbbc..1045dcd7bf65 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -44,6 +44,7 @@
44 44
45#include <target/target_core_base.h> 45#include <target/target_core_base.h>
46#include <target/target_core_backend.h> 46#include <target/target_core_backend.h>
47#include <target/target_core_backend_configfs.h>
47 48
48#include "target_core_alua.h" 49#include "target_core_alua.h"
49#include "target_core_pscsi.h" 50#include "target_core_pscsi.h"
@@ -1094,7 +1095,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
1094 req->retries = PS_RETRY; 1095 req->retries = PS_RETRY;
1095 1096
1096 blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, 1097 blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
1097 (cmd->sam_task_attr == MSG_HEAD_TAG), 1098 (cmd->sam_task_attr == TCM_HEAD_TAG),
1098 pscsi_req_done); 1099 pscsi_req_done);
1099 1100
1100 return 0; 1101 return 0;
@@ -1165,6 +1166,26 @@ static void pscsi_req_done(struct request *req, int uptodate)
1165 kfree(pt); 1166 kfree(pt);
1166} 1167}
1167 1168
1169DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type);
1170TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type);
1171
1172DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size);
1173TB_DEV_ATTR_RO(pscsi, hw_block_size);
1174
1175DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors);
1176TB_DEV_ATTR_RO(pscsi, hw_max_sectors);
1177
1178DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth);
1179TB_DEV_ATTR_RO(pscsi, hw_queue_depth);
1180
1181static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
1182 &pscsi_dev_attrib_hw_pi_prot_type.attr,
1183 &pscsi_dev_attrib_hw_block_size.attr,
1184 &pscsi_dev_attrib_hw_max_sectors.attr,
1185 &pscsi_dev_attrib_hw_queue_depth.attr,
1186 NULL,
1187};
1188
1168static struct se_subsystem_api pscsi_template = { 1189static struct se_subsystem_api pscsi_template = {
1169 .name = "pscsi", 1190 .name = "pscsi",
1170 .owner = THIS_MODULE, 1191 .owner = THIS_MODULE,
@@ -1185,6 +1206,11 @@ static struct se_subsystem_api pscsi_template = {
1185 1206
1186static int __init pscsi_module_init(void) 1207static int __init pscsi_module_init(void)
1187{ 1208{
1209 struct target_backend_cits *tbc = &pscsi_template.tb_cits;
1210
1211 target_core_setup_sub_cits(&pscsi_template);
1212 tbc->tb_dev_attrib_cit.ct_attrs = pscsi_backend_dev_attrs;
1213
1188 return transport_subsystem_register(&pscsi_template); 1214 return transport_subsystem_register(&pscsi_template);
1189} 1215}
1190 1216
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index b920db3388cd..60ebd170a561 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -34,6 +34,7 @@
34 34
35#include <target/target_core_base.h> 35#include <target/target_core_base.h>
36#include <target/target_core_backend.h> 36#include <target/target_core_backend.h>
37#include <target/target_core_backend_configfs.h>
37 38
38#include "target_core_rd.h" 39#include "target_core_rd.h"
39 40
@@ -632,6 +633,42 @@ rd_parse_cdb(struct se_cmd *cmd)
632 return sbc_parse_cdb(cmd, &rd_sbc_ops); 633 return sbc_parse_cdb(cmd, &rd_sbc_ops);
633} 634}
634 635
636DEF_TB_DEFAULT_ATTRIBS(rd_mcp);
637
638static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
639 &rd_mcp_dev_attrib_emulate_model_alias.attr,
640 &rd_mcp_dev_attrib_emulate_dpo.attr,
641 &rd_mcp_dev_attrib_emulate_fua_write.attr,
642 &rd_mcp_dev_attrib_emulate_fua_read.attr,
643 &rd_mcp_dev_attrib_emulate_write_cache.attr,
644 &rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
645 &rd_mcp_dev_attrib_emulate_tas.attr,
646 &rd_mcp_dev_attrib_emulate_tpu.attr,
647 &rd_mcp_dev_attrib_emulate_tpws.attr,
648 &rd_mcp_dev_attrib_emulate_caw.attr,
649 &rd_mcp_dev_attrib_emulate_3pc.attr,
650 &rd_mcp_dev_attrib_pi_prot_type.attr,
651 &rd_mcp_dev_attrib_hw_pi_prot_type.attr,
652 &rd_mcp_dev_attrib_pi_prot_format.attr,
653 &rd_mcp_dev_attrib_enforce_pr_isids.attr,
654 &rd_mcp_dev_attrib_is_nonrot.attr,
655 &rd_mcp_dev_attrib_emulate_rest_reord.attr,
656 &rd_mcp_dev_attrib_force_pr_aptpl.attr,
657 &rd_mcp_dev_attrib_hw_block_size.attr,
658 &rd_mcp_dev_attrib_block_size.attr,
659 &rd_mcp_dev_attrib_hw_max_sectors.attr,
660 &rd_mcp_dev_attrib_fabric_max_sectors.attr,
661 &rd_mcp_dev_attrib_optimal_sectors.attr,
662 &rd_mcp_dev_attrib_hw_queue_depth.attr,
663 &rd_mcp_dev_attrib_queue_depth.attr,
664 &rd_mcp_dev_attrib_max_unmap_lba_count.attr,
665 &rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
666 &rd_mcp_dev_attrib_unmap_granularity.attr,
667 &rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
668 &rd_mcp_dev_attrib_max_write_same_len.attr,
669 NULL,
670};
671
635static struct se_subsystem_api rd_mcp_template = { 672static struct se_subsystem_api rd_mcp_template = {
636 .name = "rd_mcp", 673 .name = "rd_mcp",
637 .inquiry_prod = "RAMDISK-MCP", 674 .inquiry_prod = "RAMDISK-MCP",
@@ -653,8 +690,12 @@ static struct se_subsystem_api rd_mcp_template = {
653 690
654int __init rd_module_init(void) 691int __init rd_module_init(void)
655{ 692{
693 struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
656 int ret; 694 int ret;
657 695
696 target_core_setup_sub_cits(&rd_mcp_template);
697 tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;
698
658 ret = transport_subsystem_register(&rd_mcp_template); 699 ret = transport_subsystem_register(&rd_mcp_template);
659 if (ret < 0) { 700 if (ret < 0) {
660 return ret; 701 return ret;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 8d171ff77e75..11bea1952435 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -485,7 +485,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
485 cmd->t_data_nents_orig = cmd->t_data_nents; 485 cmd->t_data_nents_orig = cmd->t_data_nents;
486 cmd->t_data_nents = 1; 486 cmd->t_data_nents = 1;
487 487
488 cmd->sam_task_attr = MSG_HEAD_TAG; 488 cmd->sam_task_attr = TCM_HEAD_TAG;
489 cmd->transport_complete_callback = compare_and_write_post; 489 cmd->transport_complete_callback = compare_and_write_post;
490 /* 490 /*
491 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler 491 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index bc286a67af7c..1307600fe726 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1357,7 +1357,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1357 * Do implicit HEAD_OF_QUEUE processing for INQUIRY. 1357 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
1358 * See spc4r17 section 5.3 1358 * See spc4r17 section 5.3
1359 */ 1359 */
1360 cmd->sam_task_attr = MSG_HEAD_TAG; 1360 cmd->sam_task_attr = TCM_HEAD_TAG;
1361 cmd->execute_cmd = spc_emulate_inquiry; 1361 cmd->execute_cmd = spc_emulate_inquiry;
1362 break; 1362 break;
1363 case SECURITY_PROTOCOL_IN: 1363 case SECURITY_PROTOCOL_IN:
@@ -1391,7 +1391,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
1391 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS 1391 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
1392 * See spc4r17 section 5.3 1392 * See spc4r17 section 5.3
1393 */ 1393 */
1394 cmd->sam_task_attr = MSG_HEAD_TAG; 1394 cmd->sam_task_attr = TCM_HEAD_TAG;
1395 break; 1395 break;
1396 case TEST_UNIT_READY: 1396 case TEST_UNIT_READY:
1397 cmd->execute_cmd = spc_emulate_testunitready; 1397 cmd->execute_cmd = spc_emulate_testunitready;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index be877bf6f730..0adc0f650213 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1159,7 +1159,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
1159 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1159 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
1160 return 0; 1160 return 0;
1161 1161
1162 if (cmd->sam_task_attr == MSG_ACA_TAG) { 1162 if (cmd->sam_task_attr == TCM_ACA_TAG) {
1163 pr_debug("SAM Task Attribute ACA" 1163 pr_debug("SAM Task Attribute ACA"
1164 " emulation is not supported\n"); 1164 " emulation is not supported\n");
1165 return TCM_INVALID_CDB_FIELD; 1165 return TCM_INVALID_CDB_FIELD;
@@ -1531,7 +1531,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1531 BUG_ON(!se_tpg); 1531 BUG_ON(!se_tpg);
1532 1532
1533 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1533 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1534 0, DMA_NONE, MSG_SIMPLE_TAG, sense); 1534 0, DMA_NONE, TCM_SIMPLE_TAG, sense);
1535 /* 1535 /*
1536 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req 1536 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1537 * allocation failure. 1537 * allocation failure.
@@ -1718,12 +1718,12 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1718 * to allow the passed struct se_cmd list of tasks to the front of the list. 1718 * to allow the passed struct se_cmd list of tasks to the front of the list.
1719 */ 1719 */
1720 switch (cmd->sam_task_attr) { 1720 switch (cmd->sam_task_attr) {
1721 case MSG_HEAD_TAG: 1721 case TCM_HEAD_TAG:
1722 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, " 1722 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
1723 "se_ordered_id: %u\n", 1723 "se_ordered_id: %u\n",
1724 cmd->t_task_cdb[0], cmd->se_ordered_id); 1724 cmd->t_task_cdb[0], cmd->se_ordered_id);
1725 return false; 1725 return false;
1726 case MSG_ORDERED_TAG: 1726 case TCM_ORDERED_TAG:
1727 atomic_inc_mb(&dev->dev_ordered_sync); 1727 atomic_inc_mb(&dev->dev_ordered_sync);
1728 1728
1729 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " 1729 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
@@ -1828,7 +1828,7 @@ static void target_restart_delayed_cmds(struct se_device *dev)
1828 1828
1829 __target_execute_cmd(cmd); 1829 __target_execute_cmd(cmd);
1830 1830
1831 if (cmd->sam_task_attr == MSG_ORDERED_TAG) 1831 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
1832 break; 1832 break;
1833 } 1833 }
1834} 1834}
@@ -1844,18 +1844,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
1844 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) 1844 if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
1845 return; 1845 return;
1846 1846
1847 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { 1847 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
1848 atomic_dec_mb(&dev->simple_cmds); 1848 atomic_dec_mb(&dev->simple_cmds);
1849 dev->dev_cur_ordered_id++; 1849 dev->dev_cur_ordered_id++;
1850 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 1850 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
1851 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 1851 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
1852 cmd->se_ordered_id); 1852 cmd->se_ordered_id);
1853 } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { 1853 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
1854 dev->dev_cur_ordered_id++; 1854 dev->dev_cur_ordered_id++;
1855 pr_debug("Incremented dev_cur_ordered_id: %u for" 1855 pr_debug("Incremented dev_cur_ordered_id: %u for"
1856 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 1856 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
1857 cmd->se_ordered_id); 1857 cmd->se_ordered_id);
1858 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 1858 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
1859 atomic_dec_mb(&dev->dev_ordered_sync); 1859 atomic_dec_mb(&dev->dev_ordered_sync);
1860 1860
1861 dev->dev_cur_ordered_id++; 1861 dev->dev_cur_ordered_id++;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9a1b314f6482..8bfa61c9693d 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -28,6 +28,8 @@
28#include <target/target_core_base.h> 28#include <target/target_core_base.h>
29#include <target/target_core_fabric.h> 29#include <target/target_core_fabric.h>
30#include <target/target_core_backend.h> 30#include <target/target_core_backend.h>
31#include <target/target_core_backend_configfs.h>
32
31#include <linux/target_core_user.h> 33#include <linux/target_core_user.h>
32 34
33/* 35/*
@@ -1092,6 +1094,42 @@ tcmu_parse_cdb(struct se_cmd *cmd)
1092 return ret; 1094 return ret;
1093} 1095}
1094 1096
1097DEF_TB_DEFAULT_ATTRIBS(tcmu);
1098
1099static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
1100 &tcmu_dev_attrib_emulate_model_alias.attr,
1101 &tcmu_dev_attrib_emulate_dpo.attr,
1102 &tcmu_dev_attrib_emulate_fua_write.attr,
1103 &tcmu_dev_attrib_emulate_fua_read.attr,
1104 &tcmu_dev_attrib_emulate_write_cache.attr,
1105 &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
1106 &tcmu_dev_attrib_emulate_tas.attr,
1107 &tcmu_dev_attrib_emulate_tpu.attr,
1108 &tcmu_dev_attrib_emulate_tpws.attr,
1109 &tcmu_dev_attrib_emulate_caw.attr,
1110 &tcmu_dev_attrib_emulate_3pc.attr,
1111 &tcmu_dev_attrib_pi_prot_type.attr,
1112 &tcmu_dev_attrib_hw_pi_prot_type.attr,
1113 &tcmu_dev_attrib_pi_prot_format.attr,
1114 &tcmu_dev_attrib_enforce_pr_isids.attr,
1115 &tcmu_dev_attrib_is_nonrot.attr,
1116 &tcmu_dev_attrib_emulate_rest_reord.attr,
1117 &tcmu_dev_attrib_force_pr_aptpl.attr,
1118 &tcmu_dev_attrib_hw_block_size.attr,
1119 &tcmu_dev_attrib_block_size.attr,
1120 &tcmu_dev_attrib_hw_max_sectors.attr,
1121 &tcmu_dev_attrib_fabric_max_sectors.attr,
1122 &tcmu_dev_attrib_optimal_sectors.attr,
1123 &tcmu_dev_attrib_hw_queue_depth.attr,
1124 &tcmu_dev_attrib_queue_depth.attr,
1125 &tcmu_dev_attrib_max_unmap_lba_count.attr,
1126 &tcmu_dev_attrib_max_unmap_block_desc_count.attr,
1127 &tcmu_dev_attrib_unmap_granularity.attr,
1128 &tcmu_dev_attrib_unmap_granularity_alignment.attr,
1129 &tcmu_dev_attrib_max_write_same_len.attr,
1130 NULL,
1131};
1132
1095static struct se_subsystem_api tcmu_template = { 1133static struct se_subsystem_api tcmu_template = {
1096 .name = "user", 1134 .name = "user",
1097 .inquiry_prod = "USER", 1135 .inquiry_prod = "USER",
@@ -1112,6 +1150,7 @@ static struct se_subsystem_api tcmu_template = {
1112 1150
1113static int __init tcmu_module_init(void) 1151static int __init tcmu_module_init(void)
1114{ 1152{
1153 struct target_backend_cits *tbc = &tcmu_template.tb_cits;
1115 int ret; 1154 int ret;
1116 1155
1117 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 1156 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1134,6 +1173,9 @@ static int __init tcmu_module_init(void)
1134 goto out_unreg_device; 1173 goto out_unreg_device;
1135 } 1174 }
1136 1175
1176 target_core_setup_sub_cits(&tcmu_template);
1177 tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;
1178
1137 ret = transport_subsystem_register(&tcmu_template); 1179 ret = transport_subsystem_register(&tcmu_template);
1138 if (ret) 1180 if (ret)
1139 goto out_unreg_genl; 1181 goto out_unreg_genl;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index be0c0d08c56a..edcafa4490c0 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -554,17 +554,17 @@ static void ft_send_work(struct work_struct *work)
554 */ 554 */
555 switch (fcp->fc_pri_ta & FCP_PTA_MASK) { 555 switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
556 case FCP_PTA_HEADQ: 556 case FCP_PTA_HEADQ:
557 task_attr = MSG_HEAD_TAG; 557 task_attr = TCM_HEAD_TAG;
558 break; 558 break;
559 case FCP_PTA_ORDERED: 559 case FCP_PTA_ORDERED:
560 task_attr = MSG_ORDERED_TAG; 560 task_attr = TCM_ORDERED_TAG;
561 break; 561 break;
562 case FCP_PTA_ACA: 562 case FCP_PTA_ACA:
563 task_attr = MSG_ACA_TAG; 563 task_attr = TCM_ACA_TAG;
564 break; 564 break;
565 case FCP_PTA_SIMPLE: /* Fallthrough */ 565 case FCP_PTA_SIMPLE: /* Fallthrough */
566 default: 566 default:
567 task_attr = MSG_SIMPLE_TAG; 567 task_attr = TCM_SIMPLE_TAG;
568 } 568 }
569 569
570 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); 570 fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 336602eb453e..96b69bfd773f 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -561,7 +561,7 @@ static int omap_8250_startup(struct uart_port *port)
561 if (ret) 561 if (ret)
562 goto err; 562 goto err;
563 563
564#ifdef CONFIG_PM_RUNTIME 564#ifdef CONFIG_PM
565 up->capabilities |= UART_CAP_RPM; 565 up->capabilities |= UART_CAP_RPM;
566#endif 566#endif
567 567
@@ -997,12 +997,12 @@ static int omap8250_probe(struct platform_device *pdev)
997 up.port.fifosize = 64; 997 up.port.fifosize = 64;
998 up.tx_loadsz = 64; 998 up.tx_loadsz = 64;
999 up.capabilities = UART_CAP_FIFO; 999 up.capabilities = UART_CAP_FIFO;
1000#ifdef CONFIG_PM_RUNTIME 1000#ifdef CONFIG_PM
1001 /* 1001 /*
1002 * PM_RUNTIME is mostly transparent. However to do it right we need to a 1002 * Runtime PM is mostly transparent. However to do it right we need to a
1003 * TX empty interrupt before we can put the device to auto idle. So if 1003 * TX empty interrupt before we can put the device to auto idle. So if
1004 * PM_RUNTIME is not enabled we don't add that flag and can spare that 1004 * PM is not enabled we don't add that flag and can spare that one extra
1005 * one extra interrupt in the TX path. 1005 * interrupt in the TX path.
1006 */ 1006 */
1007 up.capabilities |= UART_CAP_RPM; 1007 up.capabilities |= UART_CAP_RPM;
1008#endif 1008#endif
@@ -1105,7 +1105,7 @@ static int omap8250_remove(struct platform_device *pdev)
1105 return 0; 1105 return 0;
1106} 1106}
1107 1107
1108#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME) 1108#ifdef CONFIG_PM
1109 1109
1110static inline void omap8250_enable_wakeirq(struct omap8250_priv *priv, 1110static inline void omap8250_enable_wakeirq(struct omap8250_priv *priv,
1111 bool enable) 1111 bool enable)
@@ -1179,7 +1179,7 @@ static int omap8250_resume(struct device *dev)
1179#define omap8250_complete NULL 1179#define omap8250_complete NULL
1180#endif 1180#endif
1181 1181
1182#ifdef CONFIG_PM_RUNTIME 1182#ifdef CONFIG_PM
1183static int omap8250_lost_context(struct uart_8250_port *up) 1183static int omap8250_lost_context(struct uart_8250_port *up)
1184{ 1184{
1185 u32 val; 1185 u32 val;
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 024f58475a94..3a494168661e 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -1131,19 +1131,19 @@ static int usbg_submit_command(struct f_uas *fu,
1131 1131
1132 switch (cmd_iu->prio_attr & 0x7) { 1132 switch (cmd_iu->prio_attr & 0x7) {
1133 case UAS_HEAD_TAG: 1133 case UAS_HEAD_TAG:
1134 cmd->prio_attr = MSG_HEAD_TAG; 1134 cmd->prio_attr = TCM_HEAD_TAG;
1135 break; 1135 break;
1136 case UAS_ORDERED_TAG: 1136 case UAS_ORDERED_TAG:
1137 cmd->prio_attr = MSG_ORDERED_TAG; 1137 cmd->prio_attr = TCM_ORDERED_TAG;
1138 break; 1138 break;
1139 case UAS_ACA: 1139 case UAS_ACA:
1140 cmd->prio_attr = MSG_ACA_TAG; 1140 cmd->prio_attr = TCM_ACA_TAG;
1141 break; 1141 break;
1142 default: 1142 default:
1143 pr_debug_once("Unsupported prio_attr: %02x.\n", 1143 pr_debug_once("Unsupported prio_attr: %02x.\n",
1144 cmd_iu->prio_attr); 1144 cmd_iu->prio_attr);
1145 case UAS_SIMPLE_TAG: 1145 case UAS_SIMPLE_TAG:
1146 cmd->prio_attr = MSG_SIMPLE_TAG; 1146 cmd->prio_attr = TCM_SIMPLE_TAG;
1147 break; 1147 break;
1148 } 1148 }
1149 1149
@@ -1240,7 +1240,7 @@ static int bot_submit_command(struct f_uas *fu,
1240 goto err; 1240 goto err;
1241 } 1241 }
1242 1242
1243 cmd->prio_attr = MSG_SIMPLE_TAG; 1243 cmd->prio_attr = TCM_SIMPLE_TAG;
1244 se_cmd = &cmd->se_cmd; 1244 se_cmd = &cmd->se_cmd;
1245 cmd->unpacked_lun = cbw->Lun; 1245 cmd->unpacked_lun = cbw->Lun;
1246 cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0; 1246 cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index e752c3098f38..395649f357aa 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1739,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
1739 int retval = 1; 1739 int retval = 1;
1740 unsigned long flags; 1740 unsigned long flags;
1741 1741
1742 /* if !PM_RUNTIME, root hub timers won't get shut down ... */ 1742 /* if !PM, root hub timers won't get shut down ... */
1743 if (!HC_IS_RUNNING(hcd->state)) 1743 if (!HC_IS_RUNNING(hcd->state))
1744 return 0; 1744 return 0;
1745 1745
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 75811dd5a9d7..036924e640f5 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3087,7 +3087,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
3087 int ports, i, retval = 1; 3087 int ports, i, retval = 1;
3088 unsigned long flags; 3088 unsigned long flags;
3089 3089
3090 /* if !PM_RUNTIME, root hub timers won't get shut down ... */ 3090 /* if !PM, root hub timers won't get shut down ... */
3091 if (!HC_IS_RUNNING(hcd->state)) 3091 if (!HC_IS_RUNNING(hcd->state))
3092 return 0; 3092 return 0;
3093 3093
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 50610a6acf3d..e999496eda3e 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -606,7 +606,7 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
606 init_waitqueue_head(&tmr->tmr_wait); 606 init_waitqueue_head(&tmr->tmr_wait);
607 607
608 transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo, 608 transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo,
609 tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, MSG_SIMPLE_TAG, 609 tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG,
610 &pending_req->sense_buffer[0]); 610 &pending_req->sense_buffer[0]);
611 611
612 rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL); 612 rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index c04ef1d4f18a..97aff2879cda 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -254,6 +254,7 @@ static char *scanarg(char *s, char del)
254 return NULL; 254 return NULL;
255 } 255 }
256 } 256 }
257 s[-1] ='\0';
257 return s; 258 return s;
258} 259}
259 260
@@ -378,8 +379,7 @@ static Node *create_entry(const char __user *buffer, size_t count)
378 p = scanarg(p, del); 379 p = scanarg(p, del);
379 if (!p) 380 if (!p)
380 goto einval; 381 goto einval;
381 p[-1] = '\0'; 382 if (!e->magic[0])
382 if (p == e->magic)
383 goto einval; 383 goto einval;
384 if (USE_DEBUG) 384 if (USE_DEBUG)
385 print_hex_dump_bytes( 385 print_hex_dump_bytes(
@@ -391,8 +391,7 @@ static Node *create_entry(const char __user *buffer, size_t count)
391 p = scanarg(p, del); 391 p = scanarg(p, del);
392 if (!p) 392 if (!p)
393 goto einval; 393 goto einval;
394 p[-1] = '\0'; 394 if (!e->mask[0]) {
395 if (p == e->mask) {
396 e->mask = NULL; 395 e->mask = NULL;
397 pr_debug("register: mask[raw]: none\n"); 396 pr_debug("register: mask[raw]: none\n");
398 } else if (USE_DEBUG) 397 } else if (USE_DEBUG)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index e6fbbd74b716..7e607416755a 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3481,8 +3481,8 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
3481u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); 3481u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
3482int btrfs_error_unpin_extent_range(struct btrfs_root *root, 3482int btrfs_error_unpin_extent_range(struct btrfs_root *root,
3483 u64 start, u64 end); 3483 u64 start, u64 end);
3484int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, 3484int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
3485 u64 num_bytes, u64 *actual_bytes); 3485 u64 num_bytes, u64 *actual_bytes);
3486int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, 3486int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
3487 struct btrfs_root *root, u64 type); 3487 struct btrfs_root *root, u64 type);
3488int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range); 3488int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 30965120772b..8c63419a7f70 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4121,12 +4121,6 @@ again:
4121 if (ret) 4121 if (ret)
4122 break; 4122 break;
4123 4123
4124 /* opt_discard */
4125 if (btrfs_test_opt(root, DISCARD))
4126 ret = btrfs_error_discard_extent(root, start,
4127 end + 1 - start,
4128 NULL);
4129
4130 clear_extent_dirty(unpin, start, end, GFP_NOFS); 4124 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4131 btrfs_error_unpin_extent_range(root, start, end); 4125 btrfs_error_unpin_extent_range(root, start, end);
4132 cond_resched(); 4126 cond_resched();
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 222d6aea4a8a..a80b97100d90 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1889,8 +1889,8 @@ static int btrfs_issue_discard(struct block_device *bdev,
1889 return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0); 1889 return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1890} 1890}
1891 1891
1892static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, 1892int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1893 u64 num_bytes, u64 *actual_bytes) 1893 u64 num_bytes, u64 *actual_bytes)
1894{ 1894{
1895 int ret; 1895 int ret;
1896 u64 discarded_bytes = 0; 1896 u64 discarded_bytes = 0;
@@ -5727,7 +5727,8 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5727 update_global_block_rsv(fs_info); 5727 update_global_block_rsv(fs_info);
5728} 5728}
5729 5729
5730static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) 5730static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
5731 const bool return_free_space)
5731{ 5732{
5732 struct btrfs_fs_info *fs_info = root->fs_info; 5733 struct btrfs_fs_info *fs_info = root->fs_info;
5733 struct btrfs_block_group_cache *cache = NULL; 5734 struct btrfs_block_group_cache *cache = NULL;
@@ -5751,7 +5752,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
5751 5752
5752 if (start < cache->last_byte_to_unpin) { 5753 if (start < cache->last_byte_to_unpin) {
5753 len = min(len, cache->last_byte_to_unpin - start); 5754 len = min(len, cache->last_byte_to_unpin - start);
5754 btrfs_add_free_space(cache, start, len); 5755 if (return_free_space)
5756 btrfs_add_free_space(cache, start, len);
5755 } 5757 }
5756 5758
5757 start += len; 5759 start += len;
@@ -5815,7 +5817,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5815 end + 1 - start, NULL); 5817 end + 1 - start, NULL);
5816 5818
5817 clear_extent_dirty(unpin, start, end, GFP_NOFS); 5819 clear_extent_dirty(unpin, start, end, GFP_NOFS);
5818 unpin_extent_range(root, start, end); 5820 unpin_extent_range(root, start, end, true);
5819 cond_resched(); 5821 cond_resched();
5820 } 5822 }
5821 5823
@@ -8872,6 +8874,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
8872 cache_node); 8874 cache_node);
8873 rb_erase(&block_group->cache_node, 8875 rb_erase(&block_group->cache_node,
8874 &info->block_group_cache_tree); 8876 &info->block_group_cache_tree);
8877 RB_CLEAR_NODE(&block_group->cache_node);
8875 spin_unlock(&info->block_group_cache_lock); 8878 spin_unlock(&info->block_group_cache_lock);
8876 8879
8877 down_write(&block_group->space_info->groups_sem); 8880 down_write(&block_group->space_info->groups_sem);
@@ -9130,6 +9133,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
9130 spin_lock(&info->block_group_cache_lock); 9133 spin_lock(&info->block_group_cache_lock);
9131 rb_erase(&cache->cache_node, 9134 rb_erase(&cache->cache_node,
9132 &info->block_group_cache_tree); 9135 &info->block_group_cache_tree);
9136 RB_CLEAR_NODE(&cache->cache_node);
9133 spin_unlock(&info->block_group_cache_lock); 9137 spin_unlock(&info->block_group_cache_lock);
9134 btrfs_put_block_group(cache); 9138 btrfs_put_block_group(cache);
9135 goto error; 9139 goto error;
@@ -9271,6 +9275,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9271 spin_lock(&root->fs_info->block_group_cache_lock); 9275 spin_lock(&root->fs_info->block_group_cache_lock);
9272 rb_erase(&cache->cache_node, 9276 rb_erase(&cache->cache_node,
9273 &root->fs_info->block_group_cache_tree); 9277 &root->fs_info->block_group_cache_tree);
9278 RB_CLEAR_NODE(&cache->cache_node);
9274 spin_unlock(&root->fs_info->block_group_cache_lock); 9279 spin_unlock(&root->fs_info->block_group_cache_lock);
9275 btrfs_put_block_group(cache); 9280 btrfs_put_block_group(cache);
9276 return ret; 9281 return ret;
@@ -9690,13 +9695,7 @@ out:
9690 9695
9691int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) 9696int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
9692{ 9697{
9693 return unpin_extent_range(root, start, end); 9698 return unpin_extent_range(root, start, end, false);
9694}
9695
9696int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
9697 u64 num_bytes, u64 *actual_bytes)
9698{
9699 return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
9700} 9699}
9701 9700
9702int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range) 9701int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 030847bf7cec..d6c03f7f136b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2966,8 +2966,8 @@ static int do_trimming(struct btrfs_block_group_cache *block_group,
2966 spin_unlock(&block_group->lock); 2966 spin_unlock(&block_group->lock);
2967 spin_unlock(&space_info->lock); 2967 spin_unlock(&space_info->lock);
2968 2968
2969 ret = btrfs_error_discard_extent(fs_info->extent_root, 2969 ret = btrfs_discard_extent(fs_info->extent_root,
2970 start, bytes, &trimmed); 2970 start, bytes, &trimmed);
2971 if (!ret) 2971 if (!ret)
2972 *total_trimmed += trimmed; 2972 *total_trimmed += trimmed;
2973 2973
@@ -3185,16 +3185,18 @@ out:
3185 3185
3186 spin_unlock(&block_group->lock); 3186 spin_unlock(&block_group->lock);
3187 3187
3188 lock_chunks(block_group->fs_info->chunk_root);
3188 em_tree = &block_group->fs_info->mapping_tree.map_tree; 3189 em_tree = &block_group->fs_info->mapping_tree.map_tree;
3189 write_lock(&em_tree->lock); 3190 write_lock(&em_tree->lock);
3190 em = lookup_extent_mapping(em_tree, block_group->key.objectid, 3191 em = lookup_extent_mapping(em_tree, block_group->key.objectid,
3191 1); 3192 1);
3192 BUG_ON(!em); /* logic error, can't happen */ 3193 BUG_ON(!em); /* logic error, can't happen */
3194 /*
3195 * remove_extent_mapping() will delete us from the pinned_chunks
3196 * list, which is protected by the chunk mutex.
3197 */
3193 remove_extent_mapping(em_tree, em); 3198 remove_extent_mapping(em_tree, em);
3194 write_unlock(&em_tree->lock); 3199 write_unlock(&em_tree->lock);
3195
3196 lock_chunks(block_group->fs_info->chunk_root);
3197 list_del_init(&em->list);
3198 unlock_chunks(block_group->fs_info->chunk_root); 3200 unlock_chunks(block_group->fs_info->chunk_root);
3199 3201
3200 /* once for us and once for the tree */ 3202 /* once for us and once for the tree */
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0144790e296e..50c5a8762aed 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1485,7 +1485,7 @@ static void update_dev_time(char *path_name)
1485 struct file *filp; 1485 struct file *filp;
1486 1486
1487 filp = filp_open(path_name, O_RDWR, 0); 1487 filp = filp_open(path_name, O_RDWR, 0);
1488 if (!filp) 1488 if (IS_ERR(filp))
1489 return; 1489 return;
1490 file_update_time(filp); 1490 file_update_time(filp);
1491 filp_close(filp, NULL); 1491 filp_close(filp, NULL);
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index c2d6604667b0..719e1ce1c609 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1917,7 +1917,6 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size,
1917 break; 1917 break;
1918 case 2: 1918 case 2:
1919 dst[dst_byte_offset++] |= (src_byte); 1919 dst[dst_byte_offset++] |= (src_byte);
1920 dst[dst_byte_offset] = 0;
1921 current_bit_offset = 0; 1920 current_bit_offset = 0;
1922 break; 1921 break;
1923 } 1922 }
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 80154ec4f8c2..6f4e659f508f 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -190,23 +190,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
190{ 190{
191 int rc = 0; 191 int rc = 0;
192 struct ecryptfs_crypt_stat *crypt_stat = NULL; 192 struct ecryptfs_crypt_stat *crypt_stat = NULL;
193 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
194 struct dentry *ecryptfs_dentry = file->f_path.dentry; 193 struct dentry *ecryptfs_dentry = file->f_path.dentry;
195 /* Private value of ecryptfs_dentry allocated in 194 /* Private value of ecryptfs_dentry allocated in
196 * ecryptfs_lookup() */ 195 * ecryptfs_lookup() */
197 struct ecryptfs_file_info *file_info; 196 struct ecryptfs_file_info *file_info;
198 197
199 mount_crypt_stat = &ecryptfs_superblock_to_private(
200 ecryptfs_dentry->d_sb)->mount_crypt_stat;
201 if ((mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
202 && ((file->f_flags & O_WRONLY) || (file->f_flags & O_RDWR)
203 || (file->f_flags & O_CREAT) || (file->f_flags & O_TRUNC)
204 || (file->f_flags & O_APPEND))) {
205 printk(KERN_WARNING "Mount has encrypted view enabled; "
206 "files may only be read\n");
207 rc = -EPERM;
208 goto out;
209 }
210 /* Released in ecryptfs_release or end of function if failure */ 198 /* Released in ecryptfs_release or end of function if failure */
211 file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL); 199 file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
212 ecryptfs_set_file_private(file, file_info); 200 ecryptfs_set_file_private(file, file_info);
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 635e8e16a5b7..917bd5c9776a 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -100,12 +100,12 @@ int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
100 (*size) = 0; 100 (*size) = 0;
101 if (data[0] < 192) { 101 if (data[0] < 192) {
102 /* One-byte length */ 102 /* One-byte length */
103 (*size) = (unsigned char)data[0]; 103 (*size) = data[0];
104 (*length_size) = 1; 104 (*length_size) = 1;
105 } else if (data[0] < 224) { 105 } else if (data[0] < 224) {
106 /* Two-byte length */ 106 /* Two-byte length */
107 (*size) = (((unsigned char)(data[0]) - 192) * 256); 107 (*size) = (data[0] - 192) * 256;
108 (*size) += ((unsigned char)(data[1]) + 192); 108 (*size) += data[1] + 192;
109 (*length_size) = 2; 109 (*length_size) = 2;
110 } else if (data[0] == 255) { 110 } else if (data[0] == 255) {
111 /* If support is added, adjust ECRYPTFS_MAX_PKT_LEN_SIZE */ 111 /* If support is added, adjust ECRYPTFS_MAX_PKT_LEN_SIZE */
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index c4cd1fd86cc2..d9eb84bda559 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -493,6 +493,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
493{ 493{
494 struct super_block *s; 494 struct super_block *s;
495 struct ecryptfs_sb_info *sbi; 495 struct ecryptfs_sb_info *sbi;
496 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
496 struct ecryptfs_dentry_info *root_info; 497 struct ecryptfs_dentry_info *root_info;
497 const char *err = "Getting sb failed"; 498 const char *err = "Getting sb failed";
498 struct inode *inode; 499 struct inode *inode;
@@ -511,6 +512,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
511 err = "Error parsing options"; 512 err = "Error parsing options";
512 goto out; 513 goto out;
513 } 514 }
515 mount_crypt_stat = &sbi->mount_crypt_stat;
514 516
515 s = sget(fs_type, NULL, set_anon_super, flags, NULL); 517 s = sget(fs_type, NULL, set_anon_super, flags, NULL);
516 if (IS_ERR(s)) { 518 if (IS_ERR(s)) {
@@ -557,11 +559,19 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
557 559
558 /** 560 /**
559 * Set the POSIX ACL flag based on whether they're enabled in the lower 561 * Set the POSIX ACL flag based on whether they're enabled in the lower
560 * mount. Force a read-only eCryptfs mount if the lower mount is ro. 562 * mount.
561 * Allow a ro eCryptfs mount even when the lower mount is rw.
562 */ 563 */
563 s->s_flags = flags & ~MS_POSIXACL; 564 s->s_flags = flags & ~MS_POSIXACL;
564 s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL); 565 s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
566
567 /**
568 * Force a read-only eCryptfs mount when:
569 * 1) The lower mount is ro
570 * 2) The ecryptfs_encrypted_view mount option is specified
571 */
572 if (path.dentry->d_sb->s_flags & MS_RDONLY ||
573 mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
574 s->s_flags |= MS_RDONLY;
565 575
566 s->s_maxbytes = path.dentry->d_sb->s_maxbytes; 576 s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
567 s->s_blocksize = path.dentry->d_sb->s_blocksize; 577 s->s_blocksize = path.dentry->d_sb->s_blocksize;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 503ea15dc5db..370420bfae8d 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -267,7 +267,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
267 handle_t *handle; 267 handle_t *handle;
268 ext4_lblk_t orig_blk_offset, donor_blk_offset; 268 ext4_lblk_t orig_blk_offset, donor_blk_offset;
269 unsigned long blocksize = orig_inode->i_sb->s_blocksize; 269 unsigned long blocksize = orig_inode->i_sb->s_blocksize;
270 unsigned int w_flags = 0;
271 unsigned int tmp_data_size, data_size, replaced_size; 270 unsigned int tmp_data_size, data_size, replaced_size;
272 int err2, jblocks, retries = 0; 271 int err2, jblocks, retries = 0;
273 int replaced_count = 0; 272 int replaced_count = 0;
@@ -288,9 +287,6 @@ again:
288 return 0; 287 return 0;
289 } 288 }
290 289
291 if (segment_eq(get_fs(), KERNEL_DS))
292 w_flags |= AOP_FLAG_UNINTERRUPTIBLE;
293
294 orig_blk_offset = orig_page_offset * blocks_per_page + 290 orig_blk_offset = orig_page_offset * blocks_per_page +
295 data_offset_in_page; 291 data_offset_in_page;
296 292
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 697390ea47b8..ddc9f9612f16 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -448,27 +448,6 @@ static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
448 return pol; 448 return pol;
449} 449}
450 450
451static int kernfs_vma_migrate(struct vm_area_struct *vma,
452 const nodemask_t *from, const nodemask_t *to,
453 unsigned long flags)
454{
455 struct file *file = vma->vm_file;
456 struct kernfs_open_file *of = kernfs_of(file);
457 int ret;
458
459 if (!of->vm_ops)
460 return 0;
461
462 if (!kernfs_get_active(of->kn))
463 return 0;
464
465 ret = 0;
466 if (of->vm_ops->migrate)
467 ret = of->vm_ops->migrate(vma, from, to, flags);
468
469 kernfs_put_active(of->kn);
470 return ret;
471}
472#endif 451#endif
473 452
474static const struct vm_operations_struct kernfs_vm_ops = { 453static const struct vm_operations_struct kernfs_vm_ops = {
@@ -479,7 +458,6 @@ static const struct vm_operations_struct kernfs_vm_ops = {
479#ifdef CONFIG_NUMA 458#ifdef CONFIG_NUMA
480 .set_policy = kernfs_vma_set_policy, 459 .set_policy = kernfs_vma_set_policy,
481 .get_policy = kernfs_vma_get_policy, 460 .get_policy = kernfs_vma_get_policy,
482 .migrate = kernfs_vma_migrate,
483#endif 461#endif
484}; 462};
485 463
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 73ca1740d839..0f96f71ab32b 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -91,6 +91,7 @@ static void show_type(struct seq_file *m, struct super_block *sb)
91 91
92static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt) 92static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
93{ 93{
94 struct proc_mounts *p = proc_mounts(m);
94 struct mount *r = real_mount(mnt); 95 struct mount *r = real_mount(mnt);
95 int err = 0; 96 int err = 0;
96 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; 97 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
@@ -104,7 +105,10 @@ static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
104 mangle(m, r->mnt_devname ? r->mnt_devname : "none"); 105 mangle(m, r->mnt_devname ? r->mnt_devname : "none");
105 } 106 }
106 seq_putc(m, ' '); 107 seq_putc(m, ' ');
107 seq_path(m, &mnt_path, " \t\n\\"); 108 /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
109 err = seq_path_root(m, &mnt_path, &p->root, " \t\n\\");
110 if (err)
111 goto out;
108 seq_putc(m, ' '); 112 seq_putc(m, ' ');
109 show_type(m, sb); 113 show_type(m, sb);
110 seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw"); 114 seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
@@ -125,7 +129,6 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
125 struct mount *r = real_mount(mnt); 129 struct mount *r = real_mount(mnt);
126 struct super_block *sb = mnt->mnt_sb; 130 struct super_block *sb = mnt->mnt_sb;
127 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; 131 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
128 struct path root = p->root;
129 int err = 0; 132 int err = 0;
130 133
131 seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id, 134 seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id,
@@ -139,7 +142,7 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
139 seq_putc(m, ' '); 142 seq_putc(m, ' ');
140 143
141 /* mountpoints outside of chroot jail will give SEQ_SKIP on this */ 144 /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
142 err = seq_path_root(m, &mnt_path, &root, " \t\n\\"); 145 err = seq_path_root(m, &mnt_path, &p->root, " \t\n\\");
143 if (err) 146 if (err)
144 goto out; 147 goto out;
145 148
@@ -182,6 +185,7 @@ out:
182 185
183static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt) 186static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
184{ 187{
188 struct proc_mounts *p = proc_mounts(m);
185 struct mount *r = real_mount(mnt); 189 struct mount *r = real_mount(mnt);
186 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; 190 struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
187 struct super_block *sb = mnt_path.dentry->d_sb; 191 struct super_block *sb = mnt_path.dentry->d_sb;
@@ -201,7 +205,10 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
201 205
202 /* mount point */ 206 /* mount point */
203 seq_puts(m, " mounted on "); 207 seq_puts(m, " mounted on ");
204 seq_path(m, &mnt_path, " \t\n\\"); 208 /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
209 err = seq_path_root(m, &mnt_path, &p->root, " \t\n\\");
210 if (err)
211 goto out;
205 seq_putc(m, ' '); 212 seq_putc(m, ' ');
206 213
207 /* file system type */ 214 /* file system type */
@@ -216,6 +223,7 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
216 } 223 }
217 224
218 seq_putc(m, '\n'); 225 seq_putc(m, '\n');
226out:
219 return err; 227 return err;
220} 228}
221 229
diff --git a/include/dt-bindings/clock/exynos4415.h b/include/dt-bindings/clock/exynos4415.h
new file mode 100644
index 000000000000..7eed55100721
--- /dev/null
+++ b/include/dt-bindings/clock/exynos4415.h
@@ -0,0 +1,360 @@
1/*
2 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
3 * Author: Chanwoo Choi <cw00.choi@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Device Tree binding constants for Samsung Exynos4415 clock controllers.
10 */
11
12#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H
13#define _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H
14
15/*
16 * Let each exported clock get a unique index, which is used on DT-enabled
17 * platforms to lookup the clock from a clock specifier. These indices are
18 * therefore considered an ABI and so must not be changed. This implies
19 * that new clocks should be added either in free spaces between clock groups
20 * or at the end.
21 */
22
23/*
24 * Main CMU
25 */
26
27#define CLK_OSCSEL 1
28#define CLK_FIN_PLL 2
29#define CLK_FOUT_APLL 3
30#define CLK_FOUT_MPLL 4
31#define CLK_FOUT_EPLL 5
32#define CLK_FOUT_G3D_PLL 6
33#define CLK_FOUT_ISP_PLL 7
34#define CLK_FOUT_DISP_PLL 8
35
36/* Muxes */
37#define CLK_MOUT_MPLL_USER_L 16
38#define CLK_MOUT_GDL 17
39#define CLK_MOUT_MPLL_USER_R 18
40#define CLK_MOUT_GDR 19
41#define CLK_MOUT_EBI 20
42#define CLK_MOUT_ACLK_200 21
43#define CLK_MOUT_ACLK_160 22
44#define CLK_MOUT_ACLK_100 23
45#define CLK_MOUT_ACLK_266 24
46#define CLK_MOUT_G3D_PLL 25
47#define CLK_MOUT_EPLL 26
48#define CLK_MOUT_EBI_1 27
49#define CLK_MOUT_ISP_PLL 28
50#define CLK_MOUT_DISP_PLL 29
51#define CLK_MOUT_MPLL_USER_T 30
52#define CLK_MOUT_ACLK_400_MCUISP 31
53#define CLK_MOUT_G3D_PLLSRC 32
54#define CLK_MOUT_CSIS1 33
55#define CLK_MOUT_CSIS0 34
56#define CLK_MOUT_CAM1 35
57#define CLK_MOUT_FIMC3_LCLK 36
58#define CLK_MOUT_FIMC2_LCLK 37
59#define CLK_MOUT_FIMC1_LCLK 38
60#define CLK_MOUT_FIMC0_LCLK 39
61#define CLK_MOUT_MFC 40
62#define CLK_MOUT_MFC_1 41
63#define CLK_MOUT_MFC_0 42
64#define CLK_MOUT_G3D 43
65#define CLK_MOUT_G3D_1 44
66#define CLK_MOUT_G3D_0 45
67#define CLK_MOUT_MIPI0 46
68#define CLK_MOUT_FIMD0 47
69#define CLK_MOUT_TSADC_ISP 48
70#define CLK_MOUT_UART_ISP 49
71#define CLK_MOUT_SPI1_ISP 50
72#define CLK_MOUT_SPI0_ISP 51
73#define CLK_MOUT_PWM_ISP 52
74#define CLK_MOUT_AUDIO0 53
75#define CLK_MOUT_TSADC 54
76#define CLK_MOUT_MMC2 55
77#define CLK_MOUT_MMC1 56
78#define CLK_MOUT_MMC0 57
79#define CLK_MOUT_UART3 58
80#define CLK_MOUT_UART2 59
81#define CLK_MOUT_UART1 60
82#define CLK_MOUT_UART0 61
83#define CLK_MOUT_SPI2 62
84#define CLK_MOUT_SPI1 63
85#define CLK_MOUT_SPI0 64
86#define CLK_MOUT_SPDIF 65
87#define CLK_MOUT_AUDIO2 66
88#define CLK_MOUT_AUDIO1 67
89#define CLK_MOUT_MPLL_USER_C 68
90#define CLK_MOUT_HPM 69
91#define CLK_MOUT_CORE 70
92#define CLK_MOUT_APLL 71
93#define CLK_MOUT_PXLASYNC_CSIS1_FIMC 72
94#define CLK_MOUT_PXLASYNC_CSIS0_FIMC 73
95#define CLK_MOUT_JPEG 74
96#define CLK_MOUT_JPEG1 75
97#define CLK_MOUT_JPEG0 76
98#define CLK_MOUT_ACLK_ISP0_300 77
99#define CLK_MOUT_ACLK_ISP0_400 78
100#define CLK_MOUT_ACLK_ISP0_300_USER 79
101#define CLK_MOUT_ACLK_ISP1_300 80
102#define CLK_MOUT_ACLK_ISP1_300_USER 81
103#define CLK_MOUT_HDMI 82
104
105/* Dividers */
106#define CLK_DIV_GPL 90
107#define CLK_DIV_GDL 91
108#define CLK_DIV_GPR 92
109#define CLK_DIV_GDR 93
110#define CLK_DIV_ACLK_400_MCUISP 94
111#define CLK_DIV_EBI 95
112#define CLK_DIV_ACLK_200 96
113#define CLK_DIV_ACLK_160 97
114#define CLK_DIV_ACLK_100 98
115#define CLK_DIV_ACLK_266 99
116#define CLK_DIV_CSIS1 100
117#define CLK_DIV_CSIS0 101
118#define CLK_DIV_CAM1 102
119#define CLK_DIV_FIMC3_LCLK 103
120#define CLK_DIV_FIMC2_LCLK 104
121#define CLK_DIV_FIMC1_LCLK 105
122#define CLK_DIV_FIMC0_LCLK 106
123#define CLK_DIV_TV_BLK 107
124#define CLK_DIV_MFC 108
125#define CLK_DIV_G3D 109
126#define CLK_DIV_MIPI0_PRE 110
127#define CLK_DIV_MIPI0 111
128#define CLK_DIV_FIMD0 112
129#define CLK_DIV_UART_ISP 113
130#define CLK_DIV_SPI1_ISP_PRE 114
131#define CLK_DIV_SPI1_ISP 115
132#define CLK_DIV_SPI0_ISP_PRE 116
133#define CLK_DIV_SPI0_ISP 117
134#define CLK_DIV_PWM_ISP 118
135#define CLK_DIV_PCM0 119
136#define CLK_DIV_AUDIO0 120
137#define CLK_DIV_TSADC_PRE 121
138#define CLK_DIV_TSADC 122
139#define CLK_DIV_MMC1_PRE 123
140#define CLK_DIV_MMC1 124
141#define CLK_DIV_MMC0_PRE 125
142#define CLK_DIV_MMC0 126
143#define CLK_DIV_MMC2_PRE 127
144#define CLK_DIV_MMC2 128
145#define CLK_DIV_UART3 129
146#define CLK_DIV_UART2 130
147#define CLK_DIV_UART1 131
148#define CLK_DIV_UART0 132
149#define CLK_DIV_SPI1_PRE 133
150#define CLK_DIV_SPI1 134
151#define CLK_DIV_SPI0_PRE 135
152#define CLK_DIV_SPI0 136
153#define CLK_DIV_SPI2_PRE 137
154#define CLK_DIV_SPI2 138
155#define CLK_DIV_PCM2 139
156#define CLK_DIV_AUDIO2 140
157#define CLK_DIV_PCM1 141
158#define CLK_DIV_AUDIO1 142
159#define CLK_DIV_I2S1 143
160#define CLK_DIV_PXLASYNC_CSIS1_FIMC 144
161#define CLK_DIV_PXLASYNC_CSIS0_FIMC 145
162#define CLK_DIV_JPEG 146
163#define CLK_DIV_CORE2 147
164#define CLK_DIV_APLL 148
165#define CLK_DIV_PCLK_DBG 149
166#define CLK_DIV_ATB 150
167#define CLK_DIV_PERIPH 151
168#define CLK_DIV_COREM1 152
169#define CLK_DIV_COREM0 153
170#define CLK_DIV_CORE 154
171#define CLK_DIV_HPM 155
172#define CLK_DIV_COPY 156
173
174/* Gates */
175#define CLK_ASYNC_G3D 180
176#define CLK_ASYNC_MFCL 181
177#define CLK_ASYNC_TVX 182
178#define CLK_PPMULEFT 183
179#define CLK_GPIO_LEFT 184
180#define CLK_PPMUIMAGE 185
181#define CLK_QEMDMA2 186
182#define CLK_QEROTATOR 187
183#define CLK_SMMUMDMA2 188
184#define CLK_SMMUROTATOR 189
185#define CLK_MDMA2 190
186#define CLK_ROTATOR 191
187#define CLK_ASYNC_ISPMX 192
188#define CLK_ASYNC_MAUDIOX 193
189#define CLK_ASYNC_MFCR 194
190#define CLK_ASYNC_FSYSD 195
191#define CLK_ASYNC_LCD0X 196
192#define CLK_ASYNC_CAMX 197
193#define CLK_PPMURIGHT 198
194#define CLK_GPIO_RIGHT 199
195#define CLK_ANTIRBK_APBIF 200
196#define CLK_EFUSE_WRITER_APBIF 201
197#define CLK_MONOCNT 202
198#define CLK_TZPC6 203
199#define CLK_PROVISIONKEY1 204
200#define CLK_PROVISIONKEY0 205
201#define CLK_CMU_ISPPART 206
202#define CLK_TMU_APBIF 207
203#define CLK_KEYIF 208
204#define CLK_RTC 209
205#define CLK_WDT 210
206#define CLK_MCT 211
207#define CLK_SECKEY 212
208#define CLK_HDMI_CEC 213
209#define CLK_TZPC5 214
210#define CLK_TZPC4 215
211#define CLK_TZPC3 216
212#define CLK_TZPC2 217
213#define CLK_TZPC1 218
214#define CLK_TZPC0 219
215#define CLK_CMU_COREPART 220
216#define CLK_CMU_TOPPART 221
217#define CLK_PMU_APBIF 222
218#define CLK_SYSREG 223
219#define CLK_CHIP_ID 224
220#define CLK_SMMUFIMC_LITE2 225
221#define CLK_FIMC_LITE2 226
222#define CLK_PIXELASYNCM1 227
223#define CLK_PIXELASYNCM0 228
224#define CLK_PPMUCAMIF 229
225#define CLK_SMMUJPEG 230
226#define CLK_SMMUFIMC3 231
227#define CLK_SMMUFIMC2 232
228#define CLK_SMMUFIMC1 233
229#define CLK_SMMUFIMC0 234
230#define CLK_JPEG 235
231#define CLK_CSIS1 236
232#define CLK_CSIS0 237
233#define CLK_FIMC3 238
234#define CLK_FIMC2 239
235#define CLK_FIMC1 240
236#define CLK_FIMC0 241
237#define CLK_PPMUTV 242
238#define CLK_SMMUTV 243
239#define CLK_HDMI 244
240#define CLK_MIXER 245
241#define CLK_VP 246
242#define CLK_PPMUMFC_R 247
243#define CLK_PPMUMFC_L 248
244#define CLK_SMMUMFC_R 249
245#define CLK_SMMUMFC_L 250
246#define CLK_MFC 251
247#define CLK_PPMUG3D 252
248#define CLK_G3D 253
249#define CLK_PPMULCD0 254
250#define CLK_SMMUFIMD0 255
251#define CLK_DSIM0 256
252#define CLK_SMIES 257
253#define CLK_MIE0 258
254#define CLK_FIMD0 259
255#define CLK_TSADC 260
256#define CLK_PPMUFILE 261
257#define CLK_NFCON 262
258#define CLK_USBDEVICE 263
259#define CLK_USBHOST 264
260#define CLK_SROMC 265
261#define CLK_SDMMC2 266
262#define CLK_SDMMC1 267
263#define CLK_SDMMC0 268
264#define CLK_PDMA1 269
265#define CLK_PDMA0 270
266#define CLK_SPDIF 271
267#define CLK_PWM 272
268#define CLK_PCM2 273
269#define CLK_PCM1 274
270#define CLK_I2S1 275
271#define CLK_SPI2 276
272#define CLK_SPI1 277
273#define CLK_SPI0 278
274#define CLK_I2CHDMI 279
275#define CLK_I2C7 280
276#define CLK_I2C6 281
277#define CLK_I2C5 282
278#define CLK_I2C4 283
279#define CLK_I2C3 284
280#define CLK_I2C2 285
281#define CLK_I2C1 286
282#define CLK_I2C0 287
283#define CLK_UART3 288
284#define CLK_UART2 289
285#define CLK_UART1 290
286#define CLK_UART0 291
287
288/* Special clocks */
289#define CLK_SCLK_PXLAYSNC_CSIS1_FIMC 330
290#define CLK_SCLK_PXLAYSNC_CSIS0_FIMC 331
291#define CLK_SCLK_JPEG 332
292#define CLK_SCLK_CSIS1 333
293#define CLK_SCLK_CSIS0 334
294#define CLK_SCLK_CAM1 335
295#define CLK_SCLK_FIMC3_LCLK 336
296#define CLK_SCLK_FIMC2_LCLK 337
297#define CLK_SCLK_FIMC1_LCLK 338
298#define CLK_SCLK_FIMC0_LCLK 339
299#define CLK_SCLK_PIXEL 340
300#define CLK_SCLK_HDMI 341
301#define CLK_SCLK_MIXER 342
302#define CLK_SCLK_MFC 343
303#define CLK_SCLK_G3D 344
304#define CLK_SCLK_MIPIDPHY4L 345
305#define CLK_SCLK_MIPI0 346
306#define CLK_SCLK_MDNIE0 347
307#define CLK_SCLK_FIMD0 348
308#define CLK_SCLK_PCM0 349
309#define CLK_SCLK_AUDIO0 350
310#define CLK_SCLK_TSADC 351
311#define CLK_SCLK_EBI 352
312#define CLK_SCLK_MMC2 353
313#define CLK_SCLK_MMC1 354
314#define CLK_SCLK_MMC0 355
315#define CLK_SCLK_I2S 356
316#define CLK_SCLK_PCM2 357
317#define CLK_SCLK_PCM1 358
318#define CLK_SCLK_AUDIO2 359
319#define CLK_SCLK_AUDIO1 360
320#define CLK_SCLK_SPDIF 361
321#define CLK_SCLK_SPI2 362
322#define CLK_SCLK_SPI1 363
323#define CLK_SCLK_SPI0 364
324#define CLK_SCLK_UART3 365
325#define CLK_SCLK_UART2 366
326#define CLK_SCLK_UART1 367
327#define CLK_SCLK_UART0 368
328#define CLK_SCLK_HDMIPHY 369
329
330/*
331 * Total number of clocks of main CMU.
332 * NOTE: Must be equal to last clock ID increased by one.
333 */
334#define CLK_NR_CLKS 370
335
336/*
337 * CMU DMC
338 */
339#define CLK_DMC_FOUT_MPLL 1
340#define CLK_DMC_FOUT_BPLL 2
341
342#define CLK_DMC_MOUT_MPLL 3
343#define CLK_DMC_MOUT_BPLL 4
344#define CLK_DMC_MOUT_DPHY 5
345#define CLK_DMC_MOUT_DMC_BUS 6
346
347#define CLK_DMC_DIV_DMC 7
348#define CLK_DMC_DIV_DPHY 8
349#define CLK_DMC_DIV_DMC_PRE 9
350#define CLK_DMC_DIV_DMCP 10
351#define CLK_DMC_DIV_DMCD 11
352#define CLK_DMC_DIV_MPLL_PRE 12
353
354/*
355 * Total number of clocks of CMU_DMC.
356 * NOTE: Must be equal to highest clock ID increased by one.
357 */
358#define NR_CLKS_DMC 13
359
360#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H */
diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h
new file mode 100644
index 000000000000..8e4681b07ae7
--- /dev/null
+++ b/include/dt-bindings/clock/exynos7-clk.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
3 * Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8*/
9
10#ifndef _DT_BINDINGS_CLOCK_EXYNOS7_H
11#define _DT_BINDINGS_CLOCK_EXYNOS7_H
12
13/* TOPC */
14#define DOUT_ACLK_PERIS 1
15#define DOUT_SCLK_BUS0_PLL 2
16#define DOUT_SCLK_BUS1_PLL 3
17#define DOUT_SCLK_CC_PLL 4
18#define DOUT_SCLK_MFC_PLL 5
19#define DOUT_ACLK_CCORE_133 6
20#define TOPC_NR_CLK 7
21
22/* TOP0 */
23#define DOUT_ACLK_PERIC1 1
24#define DOUT_ACLK_PERIC0 2
25#define CLK_SCLK_UART0 3
26#define CLK_SCLK_UART1 4
27#define CLK_SCLK_UART2 5
28#define CLK_SCLK_UART3 6
29#define TOP0_NR_CLK 7
30
31/* TOP1 */
32#define DOUT_ACLK_FSYS1_200 1
33#define DOUT_ACLK_FSYS0_200 2
34#define DOUT_SCLK_MMC2 3
35#define DOUT_SCLK_MMC1 4
36#define DOUT_SCLK_MMC0 5
37#define CLK_SCLK_MMC2 6
38#define CLK_SCLK_MMC1 7
39#define CLK_SCLK_MMC0 8
40#define TOP1_NR_CLK 9
41
42/* CCORE */
43#define PCLK_RTC 1
44#define CCORE_NR_CLK 2
45
46/* PERIC0 */
47#define PCLK_UART0 1
48#define SCLK_UART0 2
49#define PCLK_HSI2C0 3
50#define PCLK_HSI2C1 4
51#define PCLK_HSI2C4 5
52#define PCLK_HSI2C5 6
53#define PCLK_HSI2C9 7
54#define PCLK_HSI2C10 8
55#define PCLK_HSI2C11 9
56#define PCLK_PWM 10
57#define SCLK_PWM 11
58#define PCLK_ADCIF 12
59#define PERIC0_NR_CLK 13
60
61/* PERIC1 */
62#define PCLK_UART1 1
63#define PCLK_UART2 2
64#define PCLK_UART3 3
65#define SCLK_UART1 4
66#define SCLK_UART2 5
67#define SCLK_UART3 6
68#define PCLK_HSI2C2 7
69#define PCLK_HSI2C3 8
70#define PCLK_HSI2C6 9
71#define PCLK_HSI2C7 10
72#define PCLK_HSI2C8 11
73#define PERIC1_NR_CLK 12
74
75/* PERIS */
76#define PCLK_CHIPID 1
77#define SCLK_CHIPID 2
78#define PCLK_WDT 3
79#define PCLK_TMU 4
80#define SCLK_TMU 5
81#define PERIS_NR_CLK 6
82
83/* FSYS0 */
84#define ACLK_MMC2 1
85#define FSYS0_NR_CLK 2
86
87/* FSYS1 */
88#define ACLK_MMC1 1
89#define ACLK_MMC0 2
90#define FSYS1_NR_CLK 3
91
92#endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
new file mode 100644
index 000000000000..591f7fba89e2
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -0,0 +1,74 @@
1#ifndef __DTS_MARVELL_MMP2_CLOCK_H
2#define __DTS_MARVELL_MMP2_CLOCK_H
3
4/* fixed clocks and plls */
5#define MMP2_CLK_CLK32 1
6#define MMP2_CLK_VCTCXO 2
7#define MMP2_CLK_PLL1 3
8#define MMP2_CLK_PLL1_2 8
9#define MMP2_CLK_PLL1_4 9
10#define MMP2_CLK_PLL1_8 10
11#define MMP2_CLK_PLL1_16 11
12#define MMP2_CLK_PLL1_3 12
13#define MMP2_CLK_PLL1_6 13
14#define MMP2_CLK_PLL1_12 14
15#define MMP2_CLK_PLL1_20 15
16#define MMP2_CLK_PLL2 16
17#define MMP2_CLK_PLL2_2 17
18#define MMP2_CLK_PLL2_4 18
19#define MMP2_CLK_PLL2_8 19
20#define MMP2_CLK_PLL2_16 20
21#define MMP2_CLK_PLL2_3 21
22#define MMP2_CLK_PLL2_6 22
23#define MMP2_CLK_PLL2_12 23
24#define MMP2_CLK_VCTCXO_2 24
25#define MMP2_CLK_VCTCXO_4 25
26#define MMP2_CLK_UART_PLL 26
27#define MMP2_CLK_USB_PLL 27
28
29/* apb periphrals */
30#define MMP2_CLK_TWSI0 60
31#define MMP2_CLK_TWSI1 61
32#define MMP2_CLK_TWSI2 62
33#define MMP2_CLK_TWSI3 63
34#define MMP2_CLK_TWSI4 64
35#define MMP2_CLK_TWSI5 65
36#define MMP2_CLK_GPIO 66
37#define MMP2_CLK_KPC 67
38#define MMP2_CLK_RTC 68
39#define MMP2_CLK_PWM0 69
40#define MMP2_CLK_PWM1 70
41#define MMP2_CLK_PWM2 71
42#define MMP2_CLK_PWM3 72
43#define MMP2_CLK_UART0 73
44#define MMP2_CLK_UART1 74
45#define MMP2_CLK_UART2 75
46#define MMP2_CLK_UART3 76
47#define MMP2_CLK_SSP0 77
48#define MMP2_CLK_SSP1 78
49#define MMP2_CLK_SSP2 79
50#define MMP2_CLK_SSP3 80
51
52/* axi periphrals */
53#define MMP2_CLK_SDH0 101
54#define MMP2_CLK_SDH1 102
55#define MMP2_CLK_SDH2 103
56#define MMP2_CLK_SDH3 104
57#define MMP2_CLK_USB 105
58#define MMP2_CLK_DISP0 106
59#define MMP2_CLK_DISP0_MUX 107
60#define MMP2_CLK_DISP0_SPHY 108
61#define MMP2_CLK_DISP1 109
62#define MMP2_CLK_DISP1_MUX 110
63#define MMP2_CLK_CCIC_ARBITER 111
64#define MMP2_CLK_CCIC0 112
65#define MMP2_CLK_CCIC0_MIX 113
66#define MMP2_CLK_CCIC0_PHY 114
67#define MMP2_CLK_CCIC0_SPHY 115
68#define MMP2_CLK_CCIC1 116
69#define MMP2_CLK_CCIC1_MIX 117
70#define MMP2_CLK_CCIC1_PHY 118
71#define MMP2_CLK_CCIC1_SPHY 119
72
73#define MMP2_NR_CLKS 200
74#endif
diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h
new file mode 100644
index 000000000000..79630b9d74b8
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,pxa168.h
@@ -0,0 +1,57 @@
1#ifndef __DTS_MARVELL_PXA168_CLOCK_H
2#define __DTS_MARVELL_PXA168_CLOCK_H
3
4/* fixed clocks and plls */
5#define PXA168_CLK_CLK32 1
6#define PXA168_CLK_VCTCXO 2
7#define PXA168_CLK_PLL1 3
8#define PXA168_CLK_PLL1_2 8
9#define PXA168_CLK_PLL1_4 9
10#define PXA168_CLK_PLL1_8 10
11#define PXA168_CLK_PLL1_16 11
12#define PXA168_CLK_PLL1_6 12
13#define PXA168_CLK_PLL1_12 13
14#define PXA168_CLK_PLL1_24 14
15#define PXA168_CLK_PLL1_48 15
16#define PXA168_CLK_PLL1_96 16
17#define PXA168_CLK_PLL1_13 17
18#define PXA168_CLK_PLL1_13_1_5 18
19#define PXA168_CLK_PLL1_2_1_5 19
20#define PXA168_CLK_PLL1_3_16 20
21#define PXA168_CLK_UART_PLL 27
22
23/* apb periphrals */
24#define PXA168_CLK_TWSI0 60
25#define PXA168_CLK_TWSI1 61
26#define PXA168_CLK_TWSI2 62
27#define PXA168_CLK_TWSI3 63
28#define PXA168_CLK_GPIO 64
29#define PXA168_CLK_KPC 65
30#define PXA168_CLK_RTC 66
31#define PXA168_CLK_PWM0 67
32#define PXA168_CLK_PWM1 68
33#define PXA168_CLK_PWM2 69
34#define PXA168_CLK_PWM3 70
35#define PXA168_CLK_UART0 71
36#define PXA168_CLK_UART1 72
37#define PXA168_CLK_UART2 73
38#define PXA168_CLK_SSP0 74
39#define PXA168_CLK_SSP1 75
40#define PXA168_CLK_SSP2 76
41#define PXA168_CLK_SSP3 77
42#define PXA168_CLK_SSP4 78
43
44/* axi periphrals */
45#define PXA168_CLK_DFC 100
46#define PXA168_CLK_SDH0 101
47#define PXA168_CLK_SDH1 102
48#define PXA168_CLK_SDH2 103
49#define PXA168_CLK_USB 104
50#define PXA168_CLK_SPH 105
51#define PXA168_CLK_DISP0 106
52#define PXA168_CLK_CCIC0 107
53#define PXA168_CLK_CCIC0_PHY 108
54#define PXA168_CLK_CCIC0_SPHY 109
55
56#define PXA168_NR_CLKS 200
57#endif
diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h
new file mode 100644
index 000000000000..719cffb2bea2
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,pxa910.h
@@ -0,0 +1,54 @@
1#ifndef __DTS_MARVELL_PXA910_CLOCK_H
2#define __DTS_MARVELL_PXA910_CLOCK_H
3
4/* fixed clocks and plls */
5#define PXA910_CLK_CLK32 1
6#define PXA910_CLK_VCTCXO 2
7#define PXA910_CLK_PLL1 3
8#define PXA910_CLK_PLL1_2 8
9#define PXA910_CLK_PLL1_4 9
10#define PXA910_CLK_PLL1_8 10
11#define PXA910_CLK_PLL1_16 11
12#define PXA910_CLK_PLL1_6 12
13#define PXA910_CLK_PLL1_12 13
14#define PXA910_CLK_PLL1_24 14
15#define PXA910_CLK_PLL1_48 15
16#define PXA910_CLK_PLL1_96 16
17#define PXA910_CLK_PLL1_13 17
18#define PXA910_CLK_PLL1_13_1_5 18
19#define PXA910_CLK_PLL1_2_1_5 19
20#define PXA910_CLK_PLL1_3_16 20
21#define PXA910_CLK_UART_PLL 27
22
23/* apb periphrals */
24#define PXA910_CLK_TWSI0 60
25#define PXA910_CLK_TWSI1 61
26#define PXA910_CLK_TWSI2 62
27#define PXA910_CLK_TWSI3 63
28#define PXA910_CLK_GPIO 64
29#define PXA910_CLK_KPC 65
30#define PXA910_CLK_RTC 66
31#define PXA910_CLK_PWM0 67
32#define PXA910_CLK_PWM1 68
33#define PXA910_CLK_PWM2 69
34#define PXA910_CLK_PWM3 70
35#define PXA910_CLK_UART0 71
36#define PXA910_CLK_UART1 72
37#define PXA910_CLK_UART2 73
38#define PXA910_CLK_SSP0 74
39#define PXA910_CLK_SSP1 75
40
41/* axi periphrals */
42#define PXA910_CLK_DFC 100
43#define PXA910_CLK_SDH0 101
44#define PXA910_CLK_SDH1 102
45#define PXA910_CLK_SDH2 103
46#define PXA910_CLK_USB 104
47#define PXA910_CLK_SPH 105
48#define PXA910_CLK_DISP0 106
49#define PXA910_CLK_CCIC0 107
50#define PXA910_CLK_CCIC0_PHY 108
51#define PXA910_CLK_CCIC0_SPHY 109
52
53#define PXA910_NR_CLKS 200
54#endif
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
index 100a08c47692..f60ce72a2b2c 100644
--- a/include/dt-bindings/clock/rk3288-cru.h
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -71,6 +71,15 @@
71#define SCLK_HDMI_CEC 110 71#define SCLK_HDMI_CEC 110
72#define SCLK_HEVC_CABAC 111 72#define SCLK_HEVC_CABAC 111
73#define SCLK_HEVC_CORE 112 73#define SCLK_HEVC_CORE 112
74#define SCLK_I2S0_OUT 113
75#define SCLK_SDMMC_DRV 114
76#define SCLK_SDIO0_DRV 115
77#define SCLK_SDIO1_DRV 116
78#define SCLK_EMMC_DRV 117
79#define SCLK_SDMMC_SAMPLE 118
80#define SCLK_SDIO0_SAMPLE 119
81#define SCLK_SDIO1_SAMPLE 120
82#define SCLK_EMMC_SAMPLE 121
74 83
75#define DCLK_VOP0 190 84#define DCLK_VOP0 190
76#define DCLK_VOP1 191 85#define DCLK_VOP1 191
@@ -141,6 +150,10 @@
141#define PCLK_VIO2_H2P 361 150#define PCLK_VIO2_H2P 361
142#define PCLK_CPU 362 151#define PCLK_CPU 362
143#define PCLK_PERI 363 152#define PCLK_PERI 363
153#define PCLK_DDRUPCTL0 364
154#define PCLK_PUBL0 365
155#define PCLK_DDRUPCTL1 366
156#define PCLK_PUBL1 367
144 157
145/* hclk gates */ 158/* hclk gates */
146#define HCLK_GPS 448 159#define HCLK_GPS 448
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 2839c639f092..d936409520f8 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -176,7 +176,7 @@ struct clk_ops {
176 unsigned long *parent_rate); 176 unsigned long *parent_rate);
177 long (*determine_rate)(struct clk_hw *hw, unsigned long rate, 177 long (*determine_rate)(struct clk_hw *hw, unsigned long rate,
178 unsigned long *best_parent_rate, 178 unsigned long *best_parent_rate,
179 struct clk **best_parent_clk); 179 struct clk_hw **best_parent_hw);
180 int (*set_parent)(struct clk_hw *hw, u8 index); 180 int (*set_parent)(struct clk_hw *hw, u8 index);
181 u8 (*get_parent)(struct clk_hw *hw); 181 u8 (*get_parent)(struct clk_hw *hw);
182 int (*set_rate)(struct clk_hw *hw, unsigned long rate, 182 int (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -544,16 +544,14 @@ u8 __clk_get_num_parents(struct clk *clk);
544struct clk *__clk_get_parent(struct clk *clk); 544struct clk *__clk_get_parent(struct clk *clk);
545struct clk *clk_get_parent_by_index(struct clk *clk, u8 index); 545struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
546unsigned int __clk_get_enable_count(struct clk *clk); 546unsigned int __clk_get_enable_count(struct clk *clk);
547unsigned int __clk_get_prepare_count(struct clk *clk);
548unsigned long __clk_get_rate(struct clk *clk); 547unsigned long __clk_get_rate(struct clk *clk);
549unsigned long __clk_get_accuracy(struct clk *clk);
550unsigned long __clk_get_flags(struct clk *clk); 548unsigned long __clk_get_flags(struct clk *clk);
551bool __clk_is_prepared(struct clk *clk); 549bool __clk_is_prepared(struct clk *clk);
552bool __clk_is_enabled(struct clk *clk); 550bool __clk_is_enabled(struct clk *clk);
553struct clk *__clk_lookup(const char *name); 551struct clk *__clk_lookup(const char *name);
554long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, 552long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
555 unsigned long *best_parent_rate, 553 unsigned long *best_parent_rate,
556 struct clk **best_parent_p); 554 struct clk_hw **best_parent_p);
557 555
558/* 556/*
559 * FIXME clock api without lock protection 557 * FIXME clock api without lock protection
@@ -652,7 +650,7 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
652#endif /* platform dependent I/O accessors */ 650#endif /* platform dependent I/O accessors */
653 651
654#ifdef CONFIG_DEBUG_FS 652#ifdef CONFIG_DEBUG_FS
655struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode, 653struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
656 void *data, const struct file_operations *fops); 654 void *data, const struct file_operations *fops);
657#endif 655#endif
658 656
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 74e5341463c9..55ef529a0dbf 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -264,7 +264,7 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
264long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, 264long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
265 unsigned long rate, 265 unsigned long rate,
266 unsigned long *best_parent_rate, 266 unsigned long *best_parent_rate,
267 struct clk **best_parent_clk); 267 struct clk_hw **best_parent_clk);
268unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, 268unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
269 unsigned long parent_rate); 269 unsigned long parent_rate);
270long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, 270long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
@@ -273,7 +273,7 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
273long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, 273long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
274 unsigned long rate, 274 unsigned long rate,
275 unsigned long *best_parent_rate, 275 unsigned long *best_parent_rate,
276 struct clk **best_parent_clk); 276 struct clk_hw **best_parent_clk);
277u8 omap2_init_dpll_parent(struct clk_hw *hw); 277u8 omap2_init_dpll_parent(struct clk_hw *hw);
278unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate); 278unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
279long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, 279long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d5ad7b1118fc..a1c81f80978e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -186,6 +186,80 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
186# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) 186# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
187#endif 187#endif
188 188
189#include <uapi/linux/types.h>
190
191static __always_inline void data_access_exceeds_word_size(void)
192#ifdef __compiletime_warning
193__compiletime_warning("data access exceeds word size and won't be atomic")
194#endif
195;
196
197static __always_inline void data_access_exceeds_word_size(void)
198{
199}
200
201static __always_inline void __read_once_size(volatile void *p, void *res, int size)
202{
203 switch (size) {
204 case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
205 case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
206 case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
207#ifdef CONFIG_64BIT
208 case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
209#endif
210 default:
211 barrier();
212 __builtin_memcpy((void *)res, (const void *)p, size);
213 data_access_exceeds_word_size();
214 barrier();
215 }
216}
217
218static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
219{
220 switch (size) {
221 case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
222 case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
223 case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
224#ifdef CONFIG_64BIT
225 case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
226#endif
227 default:
228 barrier();
229 __builtin_memcpy((void *)p, (const void *)res, size);
230 data_access_exceeds_word_size();
231 barrier();
232 }
233}
234
235/*
236 * Prevent the compiler from merging or refetching reads or writes. The
237 * compiler is also forbidden from reordering successive instances of
238 * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
239 * compiler is aware of some particular ordering. One way to make the
240 * compiler aware of ordering is to put the two invocations of READ_ONCE,
241 * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
242 *
243 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
244 * data types like structs or unions. If the size of the accessed data
245 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
246 * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
247 * compile-time warning.
248 *
249 * Their two major use cases are: (1) Mediating communication between
250 * process-level code and irq/NMI handlers, all running on the same CPU,
251 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
252 * mutilate accesses that either do not require ordering or that interact
253 * with an explicit memory barrier or atomic instruction that provides the
254 * required ordering.
255 */
256
257#define READ_ONCE(x) \
258 ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
259
260#define ASSIGN_ONCE(val, x) \
261 ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
262
189#endif /* __KERNEL__ */ 263#endif /* __KERNEL__ */
190 264
191#endif /* __ASSEMBLY__ */ 265#endif /* __ASSEMBLY__ */
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index f1863dcd83ea..ce447f0f1bad 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -188,7 +188,7 @@ extern struct devfreq *devm_devfreq_add_device(struct device *dev,
188extern void devm_devfreq_remove_device(struct device *dev, 188extern void devm_devfreq_remove_device(struct device *dev,
189 struct devfreq *devfreq); 189 struct devfreq *devfreq);
190 190
191/* Supposed to be called by PM_SLEEP/PM_RUNTIME callbacks */ 191/* Supposed to be called by PM callbacks */
192extern int devfreq_suspend_device(struct devfreq *devfreq); 192extern int devfreq_suspend_device(struct devfreq *devfreq);
193extern int devfreq_resume_device(struct devfreq *devfreq); 193extern int devfreq_resume_device(struct devfreq *devfreq);
194 194
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 01aad3ed89ec..fab9b32ace8e 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -36,9 +36,6 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
36 36
37extern int migrate_prep(void); 37extern int migrate_prep(void);
38extern int migrate_prep_local(void); 38extern int migrate_prep_local(void);
39extern int migrate_vmas(struct mm_struct *mm,
40 const nodemask_t *from, const nodemask_t *to,
41 unsigned long flags);
42extern void migrate_page_copy(struct page *newpage, struct page *page); 39extern void migrate_page_copy(struct page *newpage, struct page *page);
43extern int migrate_huge_page_move_mapping(struct address_space *mapping, 40extern int migrate_huge_page_move_mapping(struct address_space *mapping,
44 struct page *newpage, struct page *page); 41 struct page *newpage, struct page *page);
@@ -57,13 +54,6 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
57static inline int migrate_prep(void) { return -ENOSYS; } 54static inline int migrate_prep(void) { return -ENOSYS; }
58static inline int migrate_prep_local(void) { return -ENOSYS; } 55static inline int migrate_prep_local(void) { return -ENOSYS; }
59 56
60static inline int migrate_vmas(struct mm_struct *mm,
61 const nodemask_t *from, const nodemask_t *to,
62 unsigned long flags)
63{
64 return -ENOSYS;
65}
66
67static inline void migrate_page_copy(struct page *newpage, 57static inline void migrate_page_copy(struct page *newpage,
68 struct page *page) {} 58 struct page *page) {}
69 59
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c0a67b894c4c..f80d0194c9bc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -286,8 +286,6 @@ struct vm_operations_struct {
286 */ 286 */
287 struct mempolicy *(*get_policy)(struct vm_area_struct *vma, 287 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
288 unsigned long addr); 288 unsigned long addr);
289 int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
290 const nodemask_t *to, unsigned long flags);
291#endif 289#endif
292 /* called by sys_remap_file_pages() to populate non-linear mapping */ 290 /* called by sys_remap_file_pages() to populate non-linear mapping */
293 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr, 291 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
diff --git a/include/linux/uio.h b/include/linux/uio.h
index a41e252396c0..1c5e453f7ea9 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -101,6 +101,11 @@ static inline size_t iov_iter_count(struct iov_iter *i)
101 return i->count; 101 return i->count;
102} 102}
103 103
104static inline bool iter_is_iovec(struct iov_iter *i)
105{
106 return !(i->type & (ITER_BVEC | ITER_KVEC));
107}
108
104/* 109/*
105 * Cap the iov_iter by given limit; note that the second argument is 110 * Cap the iov_iter by given limit; note that the second argument is
106 * *not* the new size - it's upper limit for such. Passing it a value 111 * *not* the new size - it's upper limit for such. Passing it a value
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 9d87a37aecad..dae99d7d2bc0 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -688,7 +688,6 @@ extern int sas_queuecommand(struct Scsi_Host * ,struct scsi_cmnd *);
688extern int sas_target_alloc(struct scsi_target *); 688extern int sas_target_alloc(struct scsi_target *);
689extern int sas_slave_configure(struct scsi_device *); 689extern int sas_slave_configure(struct scsi_device *);
690extern int sas_change_queue_depth(struct scsi_device *, int new_depth); 690extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
691extern int sas_change_queue_type(struct scsi_device *, int qt);
692extern int sas_bios_param(struct scsi_device *, 691extern int sas_bios_param(struct scsi_device *,
693 struct block_device *, 692 struct block_device *,
694 sector_t capacity, int *hsc); 693 sector_t capacity, int *hsc);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index e939d2b3757a..019e66858ce6 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -278,19 +278,6 @@ struct scsi_host_template {
278 int (* change_queue_depth)(struct scsi_device *, int); 278 int (* change_queue_depth)(struct scsi_device *, int);
279 279
280 /* 280 /*
281 * Fill in this function to allow the changing of tag types
282 * (this also allows the enabling/disabling of tag command
283 * queueing). An error should only be returned if something
284 * went wrong in the driver while trying to set the tag type.
285 * If the driver doesn't support the requested tag type, then
286 * it should set the closest type it does support without
287 * returning an error. Returns the actual tag type set.
288 *
289 * Status: OPTIONAL
290 */
291 int (* change_queue_type)(struct scsi_device *, int);
292
293 /*
294 * This function determines the BIOS parameters for a given 281 * This function determines the BIOS parameters for a given
295 * harddisk. These tend to be numbers that are made up by 282 * harddisk. These tend to be numbers that are made up by
296 * the host adapter. Parameters: 283 * the host adapter. Parameters:
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index fe4a70299419..9708b28bd2aa 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -6,46 +6,10 @@
6#include <scsi/scsi_device.h> 6#include <scsi/scsi_device.h>
7#include <scsi/scsi_host.h> 7#include <scsi/scsi_host.h>
8 8
9#define MSG_SIMPLE_TAG 0x20
10#define MSG_HEAD_TAG 0x21
11#define MSG_ORDERED_TAG 0x22
12#define MSG_ACA_TAG 0x24 /* unsupported */
13
14#define SCSI_NO_TAG (-1) /* identify no tag in use */ 9#define SCSI_NO_TAG (-1) /* identify no tag in use */
15 10
16 11
17#ifdef CONFIG_BLOCK 12#ifdef CONFIG_BLOCK
18
19int scsi_change_queue_type(struct scsi_device *sdev, int tag_type);
20
21/**
22 * scsi_get_tag_type - get the type of tag the device supports
23 * @sdev: the scsi device
24 */
25static inline int scsi_get_tag_type(struct scsi_device *sdev)
26{
27 if (!sdev->tagged_supported)
28 return 0;
29 if (sdev->simple_tags)
30 return MSG_SIMPLE_TAG;
31 return 0;
32}
33
34static inline void scsi_set_tag_type(struct scsi_device *sdev, int tag)
35{
36 switch (tag) {
37 case MSG_ORDERED_TAG:
38 case MSG_SIMPLE_TAG:
39 sdev->simple_tags = 1;
40 break;
41 case 0:
42 /* fall through */
43 default:
44 sdev->simple_tags = 0;
45 break;
46 }
47}
48
49static inline struct scsi_cmnd *scsi_mq_find_tag(struct Scsi_Host *shost, 13static inline struct scsi_cmnd *scsi_mq_find_tag(struct Scsi_Host *shost,
50 int unique_tag) 14 int unique_tag)
51{ 15{
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 9adc1bca1178..430cfaf92285 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -5,6 +5,15 @@
5#define TRANSPORT_PLUGIN_VHBA_PDEV 2 5#define TRANSPORT_PLUGIN_VHBA_PDEV 2
6#define TRANSPORT_PLUGIN_VHBA_VDEV 3 6#define TRANSPORT_PLUGIN_VHBA_VDEV 3
7 7
8struct target_backend_cits {
9 struct config_item_type tb_dev_cit;
10 struct config_item_type tb_dev_attrib_cit;
11 struct config_item_type tb_dev_pr_cit;
12 struct config_item_type tb_dev_wwn_cit;
13 struct config_item_type tb_dev_alua_tg_pt_gps_cit;
14 struct config_item_type tb_dev_stat_cit;
15};
16
8struct se_subsystem_api { 17struct se_subsystem_api {
9 struct list_head sub_api_list; 18 struct list_head sub_api_list;
10 19
@@ -44,6 +53,8 @@ struct se_subsystem_api {
44 int (*init_prot)(struct se_device *); 53 int (*init_prot)(struct se_device *);
45 int (*format_prot)(struct se_device *); 54 int (*format_prot)(struct se_device *);
46 void (*free_prot)(struct se_device *); 55 void (*free_prot)(struct se_device *);
56
57 struct target_backend_cits tb_cits;
47}; 58};
48 59
49struct sbc_ops { 60struct sbc_ops {
@@ -96,4 +107,36 @@ sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
96 107
97void array_free(void *array, int n); 108void array_free(void *array, int n);
98 109
110/* From target_core_configfs.c to setup default backend config_item_types */
111void target_core_setup_sub_cits(struct se_subsystem_api *);
112
113/* attribute helpers from target_core_device.c for backend drivers */
114int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
115int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
116int se_dev_set_unmap_granularity(struct se_device *, u32);
117int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
118int se_dev_set_max_write_same_len(struct se_device *, u32);
119int se_dev_set_emulate_model_alias(struct se_device *, int);
120int se_dev_set_emulate_dpo(struct se_device *, int);
121int se_dev_set_emulate_fua_write(struct se_device *, int);
122int se_dev_set_emulate_fua_read(struct se_device *, int);
123int se_dev_set_emulate_write_cache(struct se_device *, int);
124int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
125int se_dev_set_emulate_tas(struct se_device *, int);
126int se_dev_set_emulate_tpu(struct se_device *, int);
127int se_dev_set_emulate_tpws(struct se_device *, int);
128int se_dev_set_emulate_caw(struct se_device *, int);
129int se_dev_set_emulate_3pc(struct se_device *, int);
130int se_dev_set_pi_prot_type(struct se_device *, int);
131int se_dev_set_pi_prot_format(struct se_device *, int);
132int se_dev_set_enforce_pr_isids(struct se_device *, int);
133int se_dev_set_force_pr_aptpl(struct se_device *, int);
134int se_dev_set_is_nonrot(struct se_device *, int);
135int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
136int se_dev_set_queue_depth(struct se_device *, u32);
137int se_dev_set_max_sectors(struct se_device *, u32);
138int se_dev_set_fabric_max_sectors(struct se_device *, u32);
139int se_dev_set_optimal_sectors(struct se_device *, u32);
140int se_dev_set_block_size(struct se_device *, u32);
141
99#endif /* TARGET_CORE_BACKEND_H */ 142#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h
new file mode 100644
index 000000000000..3247d7530107
--- /dev/null
+++ b/include/target/target_core_backend_configfs.h
@@ -0,0 +1,120 @@
1#ifndef TARGET_CORE_BACKEND_CONFIGFS_H
2#define TARGET_CORE_BACKEND_CONFIGFS_H
3
4#include <target/configfs_macros.h>
5
6#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name) \
7static ssize_t _backend##_dev_show_attr_##_name( \
8 struct se_dev_attrib *da, \
9 char *page) \
10{ \
11 return snprintf(page, PAGE_SIZE, "%u\n", \
12 (u32)da->da_dev->dev_attrib._name); \
13}
14
15#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name) \
16static ssize_t _backend##_dev_store_attr_##_name( \
17 struct se_dev_attrib *da, \
18 const char *page, \
19 size_t count) \
20{ \
21 unsigned long val; \
22 int ret; \
23 \
24 ret = kstrtoul(page, 0, &val); \
25 if (ret < 0) { \
26 pr_err("kstrtoul() failed with ret: %d\n", ret); \
27 return -EINVAL; \
28 } \
29 ret = se_dev_set_##_name(da->da_dev, (u32)val); \
30 \
31 return (!ret) ? count : -EINVAL; \
32}
33
34#define DEF_TB_DEV_ATTRIB(_backend, _name) \
35DEF_TB_DEV_ATTRIB_SHOW(_backend, _name); \
36DEF_TB_DEV_ATTRIB_STORE(_backend, _name);
37
38#define DEF_TB_DEV_ATTRIB_RO(_backend, name) \
39DEF_TB_DEV_ATTRIB_SHOW(_backend, name);
40
41CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
42#define TB_DEV_ATTR(_backend, _name, _mode) \
43static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
44 __CONFIGFS_EATTR(_name, _mode, \
45 _backend##_dev_show_attr_##_name, \
46 _backend##_dev_store_attr_##_name);
47
48#define TB_DEV_ATTR_RO(_backend, _name) \
49static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
50 __CONFIGFS_EATTR_RO(_name, \
51 _backend##_dev_show_attr_##_name);
52
53/*
54 * Default list of target backend device attributes as defined by
55 * struct se_dev_attrib
56 */
57
58#define DEF_TB_DEFAULT_ATTRIBS(_backend) \
59 DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias); \
60 TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR); \
61 DEF_TB_DEV_ATTRIB(_backend, emulate_dpo); \
62 TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR); \
63 DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write); \
64 TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR); \
65 DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read); \
66 TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR); \
67 DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache); \
68 TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR); \
69 DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl); \
70 TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \
71 DEF_TB_DEV_ATTRIB(_backend, emulate_tas); \
72 TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR); \
73 DEF_TB_DEV_ATTRIB(_backend, emulate_tpu); \
74 TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR); \
75 DEF_TB_DEV_ATTRIB(_backend, emulate_tpws); \
76 TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR); \
77 DEF_TB_DEV_ATTRIB(_backend, emulate_caw); \
78 TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR); \
79 DEF_TB_DEV_ATTRIB(_backend, emulate_3pc); \
80 TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR); \
81 DEF_TB_DEV_ATTRIB(_backend, pi_prot_type); \
82 TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR); \
83 DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type); \
84 TB_DEV_ATTR_RO(_backend, hw_pi_prot_type); \
85 DEF_TB_DEV_ATTRIB(_backend, pi_prot_format); \
86 TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR); \
87 DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids); \
88 TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR); \
89 DEF_TB_DEV_ATTRIB(_backend, is_nonrot); \
90 TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR); \
91 DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord); \
92 TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR); \
93 DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl); \
94 TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR); \
95 DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size); \
96 TB_DEV_ATTR_RO(_backend, hw_block_size); \
97 DEF_TB_DEV_ATTRIB(_backend, block_size); \
98 TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \
99 DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \
100 TB_DEV_ATTR_RO(_backend, hw_max_sectors); \
101 DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \
102 TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \
103 DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \
104 TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \
105 DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
106 TB_DEV_ATTR_RO(_backend, hw_queue_depth); \
107 DEF_TB_DEV_ATTRIB(_backend, queue_depth); \
108 TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR); \
109 DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count); \
110 TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | S_IWUSR); \
111 DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count); \
112 TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \
113 DEF_TB_DEV_ATTRIB(_backend, unmap_granularity); \
114 TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR); \
115 DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment); \
116 TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \
117 DEF_TB_DEV_ATTRIB(_backend, max_write_same_len); \
118 TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR);
119
120#endif /* TARGET_CORE_BACKEND_CONFIGFS_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 23c518a0340c..397fb635766a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -476,6 +476,12 @@ struct se_dif_v1_tuple {
476 __be32 ref_tag; 476 __be32 ref_tag;
477}; 477};
478 478
479/* for sam_task_attr */
480#define TCM_SIMPLE_TAG 0x20
481#define TCM_HEAD_TAG 0x21
482#define TCM_ORDERED_TAG 0x22
483#define TCM_ACA_TAG 0x24
484
479struct se_cmd { 485struct se_cmd {
480 /* SAM response code being sent to initiator */ 486 /* SAM response code being sent to initiator */
481 u8 scsi_status; 487 u8 scsi_status;
diff --git a/include/trace/events/target.h b/include/trace/events/target.h
index 45403443dd82..04c3c6efdcc2 100644
--- a/include/trace/events/target.h
+++ b/include/trace/events/target.h
@@ -109,10 +109,10 @@
109 109
110#define show_task_attribute_name(val) \ 110#define show_task_attribute_name(val) \
111 __print_symbolic(val, \ 111 __print_symbolic(val, \
112 { MSG_SIMPLE_TAG, "SIMPLE" }, \ 112 { TCM_SIMPLE_TAG, "SIMPLE" }, \
113 { MSG_HEAD_TAG, "HEAD" }, \ 113 { TCM_HEAD_TAG, "HEAD" }, \
114 { MSG_ORDERED_TAG, "ORDERED" }, \ 114 { TCM_ORDERED_TAG, "ORDERED" }, \
115 { MSG_ACA_TAG, "ACA" } ) 115 { TCM_ACA_TAG, "ACA" } )
116 116
117#define show_scsi_status_name(val) \ 117#define show_scsi_status_name(val) \
118 __print_symbolic(val, \ 118 __print_symbolic(val, \
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 7dcfbe6771b1..b483d1909d3e 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -6,10 +6,6 @@
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/uio.h> 7#include <linux/uio.h>
8 8
9#ifndef __packed
10#define __packed __attribute__((packed))
11#endif
12
13#define TCMU_VERSION "1.0" 9#define TCMU_VERSION "1.0"
14 10
15/* 11/*
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 9b3565c41502..eb410083e8e0 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -395,8 +395,6 @@ retry:
395 case 0: 395 case 0:
396 goto out; 396 goto out;
397 case -EACCES: 397 case -EACCES:
398 flags |= MS_RDONLY;
399 goto retry;
400 case -EINVAL: 398 case -EINVAL:
401 continue; 399 continue;
402 } 400 }
@@ -419,6 +417,10 @@ retry:
419#endif 417#endif
420 panic("VFS: Unable to mount root fs on %s", b); 418 panic("VFS: Unable to mount root fs on %s", b);
421 } 419 }
420 if (!(flags & MS_RDONLY)) {
421 flags |= MS_RDONLY;
422 goto retry;
423 }
422 424
423 printk("List of all partitions:\n"); 425 printk("List of all partitions:\n");
424 printk_all_partitions(); 426 printk_all_partitions();
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 6e7708c2c21f..48b28d387c7f 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -94,7 +94,7 @@ config PM_STD_PARTITION
94config PM_SLEEP 94config PM_SLEEP
95 def_bool y 95 def_bool y
96 depends on SUSPEND || HIBERNATE_CALLBACKS 96 depends on SUSPEND || HIBERNATE_CALLBACKS
97 select PM_RUNTIME 97 select PM
98 98
99config PM_SLEEP_SMP 99config PM_SLEEP_SMP
100 def_bool y 100 def_bool y
@@ -130,23 +130,19 @@ config PM_WAKELOCKS_GC
130 depends on PM_WAKELOCKS 130 depends on PM_WAKELOCKS
131 default y 131 default y
132 132
133config PM_RUNTIME 133config PM
134 bool "Run-time PM core functionality" 134 bool "Device power management core functionality"
135 ---help--- 135 ---help---
136 Enable functionality allowing I/O devices to be put into energy-saving 136 Enable functionality allowing I/O devices to be put into energy-saving
137 (low power) states at run time (or autosuspended) after a specified 137 (low power) states, for example after a specified period of inactivity
138 period of inactivity and woken up in response to a hardware-generated 138 (autosuspended), and woken up in response to a hardware-generated
139 wake-up event or a driver's request. 139 wake-up event or a driver's request.
140 140
141 Hardware support is generally required for this functionality to work 141 Hardware support is generally required for this functionality to work
142 and the bus type drivers of the buses the devices are on are 142 and the bus type drivers of the buses the devices are on are
143 responsible for the actual handling of the autosuspend requests and 143 responsible for the actual handling of device suspend requests and
144 wake-up events. 144 wake-up events.
145 145
146config PM
147 def_bool y
148 depends on PM_SLEEP || PM_RUNTIME
149
150config PM_DEBUG 146config PM_DEBUG
151 bool "Power Management Debug Support" 147 bool "Power Management Debug Support"
152 depends on PM 148 depends on PM
diff --git a/mm/filemap.c b/mm/filemap.c
index e8905bc3cbd7..bd8543c6508f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2464,7 +2464,7 @@ ssize_t generic_perform_write(struct file *file,
2464 /* 2464 /*
2465 * Copies from kernel address space cannot fail (NFSD is a big user). 2465 * Copies from kernel address space cannot fail (NFSD is a big user).
2466 */ 2466 */
2467 if (segment_eq(get_fs(), KERNEL_DS)) 2467 if (!iter_is_iovec(i))
2468 flags |= AOP_FLAG_UNINTERRUPTIBLE; 2468 flags |= AOP_FLAG_UNINTERRUPTIBLE;
2469 2469
2470 do { 2470 do {
diff --git a/mm/gup.c b/mm/gup.c
index 0ca1df9075ab..a900759cc807 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -968,7 +968,7 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
968 968
969 pudp = pud_offset(&pgd, addr); 969 pudp = pud_offset(&pgd, addr);
970 do { 970 do {
971 pud_t pud = ACCESS_ONCE(*pudp); 971 pud_t pud = READ_ONCE(*pudp);
972 972
973 next = pud_addr_end(addr, end); 973 next = pud_addr_end(addr, end);
974 if (pud_none(pud)) 974 if (pud_none(pud))
diff --git a/mm/memory.c b/mm/memory.c
index d8aebc52265f..649e7d440bd7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3195,7 +3195,16 @@ static int handle_pte_fault(struct mm_struct *mm,
3195 pte_t entry; 3195 pte_t entry;
3196 spinlock_t *ptl; 3196 spinlock_t *ptl;
3197 3197
3198 entry = ACCESS_ONCE(*pte); 3198 /*
3199 * some architectures can have larger ptes than wordsize,
3200 * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and CONFIG_32BIT=y,
3201 * so READ_ONCE or ACCESS_ONCE cannot guarantee atomic accesses.
3202 * The code below just needs a consistent view for the ifs and
3203 * we later double check anyway with the ptl lock held. So here
3204 * a barrier will do.
3205 */
3206 entry = *pte;
3207 barrier();
3199 if (!pte_present(entry)) { 3208 if (!pte_present(entry)) {
3200 if (pte_none(entry)) { 3209 if (pte_none(entry)) {
3201 if (vma->vm_ops) { 3210 if (vma->vm_ops) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f22c55947181..0e0961b8c39c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1041,10 +1041,6 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1041 1041
1042 down_read(&mm->mmap_sem); 1042 down_read(&mm->mmap_sem);
1043 1043
1044 err = migrate_vmas(mm, from, to, flags);
1045 if (err)
1046 goto out;
1047
1048 /* 1044 /*
1049 * Find a 'source' bit set in 'tmp' whose corresponding 'dest' 1045 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1050 * bit in 'to' is not also set in 'tmp'. Clear the found 'source' 1046 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
@@ -1124,7 +1120,6 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1124 if (err < 0) 1120 if (err < 0)
1125 break; 1121 break;
1126 } 1122 }
1127out:
1128 up_read(&mm->mmap_sem); 1123 up_read(&mm->mmap_sem);
1129 if (err < 0) 1124 if (err < 0)
1130 return err; 1125 return err;
diff --git a/mm/migrate.c b/mm/migrate.c
index b1d02127e1be..344cdf692fc8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1536,27 +1536,6 @@ out:
1536 return err; 1536 return err;
1537} 1537}
1538 1538
1539/*
1540 * Call migration functions in the vma_ops that may prepare
1541 * memory in a vm for migration. migration functions may perform
1542 * the migration for vmas that do not have an underlying page struct.
1543 */
1544int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
1545 const nodemask_t *from, unsigned long flags)
1546{
1547 struct vm_area_struct *vma;
1548 int err = 0;
1549
1550 for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
1551 if (vma->vm_ops && vma->vm_ops->migrate) {
1552 err = vma->vm_ops->migrate(vma, to, from, flags);
1553 if (err)
1554 break;
1555 }
1556 }
1557 return err;
1558}
1559
1560#ifdef CONFIG_NUMA_BALANCING 1539#ifdef CONFIG_NUMA_BALANCING
1561/* 1540/*
1562 * Returns true if this is a safe migration target node for misplaced NUMA 1541 * Returns true if this is a safe migration target node for misplaced NUMA
diff --git a/mm/rmap.c b/mm/rmap.c
index 45ba250babd8..c5bc241127b2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -583,7 +583,8 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
583 * without holding anon_vma lock for write. So when looking for a 583 * without holding anon_vma lock for write. So when looking for a
584 * genuine pmde (in which to find pte), test present and !THP together. 584 * genuine pmde (in which to find pte), test present and !THP together.
585 */ 585 */
586 pmde = ACCESS_ONCE(*pmd); 586 pmde = *pmd;
587 barrier();
587 if (!pmd_present(pmde) || pmd_trans_huge(pmde)) 588 if (!pmd_present(pmde) || pmd_trans_huge(pmde))
588 pmd = NULL; 589 pmd = NULL;
589out: 590out:
diff --git a/mm/shmem.c b/mm/shmem.c
index 185836ba53ef..73ba1df7c8ba 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1536,7 +1536,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
1536 * holes of a sparse file, we actually need to allocate those pages, 1536 * holes of a sparse file, we actually need to allocate those pages,
1537 * and even mark them dirty, so it cannot exceed the max_blocks limit. 1537 * and even mark them dirty, so it cannot exceed the max_blocks limit.
1538 */ 1538 */
1539 if (segment_eq(get_fs(), KERNEL_DS)) 1539 if (!iter_is_iovec(to))
1540 sgp = SGP_DIRTY; 1540 sgp = SGP_DIRTY;
1541 1541
1542 index = *ppos >> PAGE_CACHE_SHIFT; 1542 index = *ppos >> PAGE_CACHE_SHIFT;
diff --git a/net/socket.c b/net/socket.c
index 70bbde65e4ca..a2c33a4dc7ba 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -372,7 +372,6 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
372 path.mnt = mntget(sock_mnt); 372 path.mnt = mntget(sock_mnt);
373 373
374 d_instantiate(path.dentry, SOCK_INODE(sock)); 374 d_instantiate(path.dentry, SOCK_INODE(sock));
375 SOCK_INODE(sock)->i_fop = &socket_file_ops;
376 375
377 file = alloc_file(&path, FMODE_READ | FMODE_WRITE, 376 file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
378 &socket_file_ops); 377 &socket_file_ops);
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 5374b1bdf02f..edd2794569db 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -185,6 +185,18 @@ modbuiltin := -f $(srctree)/scripts/Makefile.modbuiltin obj
185# $(Q)$(MAKE) $(dtbinst)=dir 185# $(Q)$(MAKE) $(dtbinst)=dir
186dtbinst := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.dtbinst obj 186dtbinst := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.dtbinst obj
187 187
188###
189# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.clean obj=
190# Usage:
191# $(Q)$(MAKE) $(clean)=dir
192clean := -f $(srctree)/scripts/Makefile.clean obj
193
194###
195# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.headersinst obj=
196# Usage:
197# $(Q)$(MAKE) $(hdr-inst)=dir
198hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
199
188# Prefix -I with $(srctree) if it is not an absolute path. 200# Prefix -I with $(srctree) if it is not an absolute path.
189# skip if -I has no parameter 201# skip if -I has no parameter
190addtree = $(if $(patsubst -I%,%,$(1)), \ 202addtree = $(if $(patsubst -I%,%,$(1)), \
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index b1c668dc6815..1bca180db8ad 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -7,10 +7,7 @@ src := $(obj)
7PHONY := __clean 7PHONY := __clean
8__clean: 8__clean:
9 9
10# Shorthand for $(Q)$(MAKE) scripts/Makefile.clean obj=dir 10include scripts/Kbuild.include
11# Usage:
12# $(Q)$(MAKE) $(clean)=dir
13clean := -f $(srctree)/scripts/Makefile.clean obj
14 11
15# The filename Kbuild has precedence over Makefile 12# The filename Kbuild has precedence over Makefile
16kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src)) 13kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
@@ -91,11 +88,6 @@ PHONY += $(subdir-ymn)
91$(subdir-ymn): 88$(subdir-ymn):
92 $(Q)$(MAKE) $(clean)=$@ 89 $(Q)$(MAKE) $(clean)=$@
93 90
94# If quiet is set, only print short version of command
95
96cmd = @$(if $($(quiet)cmd_$(1)),echo ' $($(quiet)cmd_$(1))' &&) $(cmd_$(1))
97
98
99# Declare the contents of the .PHONY variable as phony. We keep that 91# Declare the contents of the .PHONY variable as phony. We keep that
100# information in a variable se we can use it in if_changed and friends. 92# information in a variable se we can use it in if_changed and friends.
101 93
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index 8ccf83056a7a..1106d6ca3a38 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -122,7 +122,6 @@ $(check-file): scripts/headers_check.pl $(output-files) FORCE
122endif 122endif
123 123
124# Recursion 124# Recursion
125hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
126.PHONY: $(subdirs) 125.PHONY: $(subdirs)
127$(subdirs): 126$(subdirs):
128 $(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@ 127 $(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@
diff --git a/scripts/coccinelle/misc/bugon.cocci b/scripts/coccinelle/misc/bugon.cocci
index 556456ca761c..3b7eec24fb5a 100644
--- a/scripts/coccinelle/misc/bugon.cocci
+++ b/scripts/coccinelle/misc/bugon.cocci
@@ -8,7 +8,7 @@
8// Confidence: High 8// Confidence: High
9// Copyright: (C) 2014 Himangi Saraogi. GPLv2. 9// Copyright: (C) 2014 Himangi Saraogi. GPLv2.
10// Comments: 10// Comments:
11// Options: --no-includes, --include-headers 11// Options: --no-includes --include-headers
12 12
13virtual patch 13virtual patch
14virtual context 14virtual context
diff --git a/scripts/headers.sh b/scripts/headers.sh
index 95ece06599a5..d4dc4de5cea1 100755
--- a/scripts/headers.sh
+++ b/scripts/headers.sh
@@ -19,8 +19,6 @@ for arch in ${archs}; do
19 case ${arch} in 19 case ${arch} in
20 um) # no userspace export 20 um) # no userspace export
21 ;; 21 ;;
22 cris) # headers export are known broken
23 ;;
24 *) 22 *)
25 if [ -d ${srctree}/arch/${arch} ]; then 23 if [ -d ${srctree}/arch/${arch} ]; then
26 do_command $1 ${arch} 24 do_command $1 ${arch}
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index a26cc5d2a9b0..72c9dba84c5d 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -548,7 +548,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
548{ 548{
549 int i, j; 549 int i, j;
550 struct menu *submenu[8], *menu, *location = NULL; 550 struct menu *submenu[8], *menu, *location = NULL;
551 struct jump_key *jump; 551 struct jump_key *jump = NULL;
552 552
553 str_printf(r, _("Prompt: %s\n"), _(prop->text)); 553 str_printf(r, _("Prompt: %s\n"), _(prop->text));
554 menu = prop->menu->parent; 554 menu = prop->menu->parent;
@@ -586,7 +586,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
586 str_printf(r, _(" Location:\n")); 586 str_printf(r, _(" Location:\n"));
587 for (j = 4; --i >= 0; j += 2) { 587 for (j = 4; --i >= 0; j += 2) {
588 menu = submenu[i]; 588 menu = submenu[i];
589 if (head && location && menu == location) 589 if (jump && menu == location)
590 jump->offset = strlen(r->s); 590 jump->offset = strlen(r->s);
591 str_printf(r, "%*c-> %s", j, ' ', 591 str_printf(r, "%*c-> %s", j, ' ',
592 _(menu_get_prompt(menu))); 592 _(menu_get_prompt(menu)));
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 13957602f7ca..d9ab94b17de0 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -117,6 +117,7 @@ echo 'mv vmlinux.bz2 $RPM_BUILD_ROOT'"/boot/vmlinux-$KERNELRELEASE.bz2"
117echo 'mv vmlinux.orig vmlinux' 117echo 'mv vmlinux.orig vmlinux'
118echo "%endif" 118echo "%endif"
119 119
120if ! $PREBUILT; then
120echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/{build,source}" 121echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/{build,source}"
121echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE" 122echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE"
122echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=firmware --exclude .config.old --exclude .missing-syscalls.d\"" 123echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=firmware --exclude .config.old --exclude .missing-syscalls.d\""
@@ -124,6 +125,7 @@ echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNEL
124echo 'cd $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE" 125echo 'cd $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE"
125echo "ln -sf /usr/src/kernels/$KERNELRELEASE build" 126echo "ln -sf /usr/src/kernels/$KERNELRELEASE build"
126echo "ln -sf /usr/src/kernels/$KERNELRELEASE source" 127echo "ln -sf /usr/src/kernels/$KERNELRELEASE source"
128fi
127 129
128echo "" 130echo ""
129echo "%clean" 131echo "%clean"
@@ -151,9 +153,11 @@ echo "%files headers"
151echo '%defattr (-, root, root)' 153echo '%defattr (-, root, root)'
152echo "/usr/include" 154echo "/usr/include"
153echo "" 155echo ""
156if ! $PREBUILT; then
154echo "%files devel" 157echo "%files devel"
155echo '%defattr (-, root, root)' 158echo '%defattr (-, root, root)'
156echo "/usr/src/kernels/$KERNELRELEASE" 159echo "/usr/src/kernels/$KERNELRELEASE"
157echo "/lib/modules/$KERNELRELEASE/build" 160echo "/lib/modules/$KERNELRELEASE/build"
158echo "/lib/modules/$KERNELRELEASE/source" 161echo "/lib/modules/$KERNELRELEASE/source"
159echo "" 162echo ""
163fi
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 9bc556b15a92..67ade0775a5b 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -19,7 +19,7 @@ static int hw_rule_rate(struct snd_pcm_hw_params *params,
19 .min = UINT_MAX, .max = 0, .integer = 1 19 .min = UINT_MAX, .max = 0, .integer = 1
20 }; 20 };
21 struct snd_oxfw_stream_formation formation; 21 struct snd_oxfw_stream_formation formation;
22 unsigned int i, err; 22 int i, err;
23 23
24 for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) { 24 for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
25 if (formats[i] == NULL) 25 if (formats[i] == NULL)
@@ -47,7 +47,7 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
47 const struct snd_interval *r = 47 const struct snd_interval *r =
48 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE); 48 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
49 struct snd_oxfw_stream_formation formation; 49 struct snd_oxfw_stream_formation formation;
50 unsigned int i, j, err; 50 int i, j, err;
51 unsigned int count, list[SND_OXFW_STREAM_FORMAT_ENTRIES] = {0}; 51 unsigned int count, list[SND_OXFW_STREAM_FORMAT_ENTRIES] = {0};
52 52
53 count = 0; 53 count = 0;
@@ -80,7 +80,7 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
80static void limit_channels_and_rates(struct snd_pcm_hardware *hw, u8 **formats) 80static void limit_channels_and_rates(struct snd_pcm_hardware *hw, u8 **formats)
81{ 81{
82 struct snd_oxfw_stream_formation formation; 82 struct snd_oxfw_stream_formation formation;
83 unsigned int i, err; 83 int i, err;
84 84
85 hw->channels_min = UINT_MAX; 85 hw->channels_min = UINT_MAX;
86 hw->channels_max = 0; 86 hw->channels_max = 0;
diff --git a/sound/firewire/oxfw/oxfw-proc.c b/sound/firewire/oxfw/oxfw-proc.c
index 604808e5526d..8ba4f9f262b8 100644
--- a/sound/firewire/oxfw/oxfw-proc.c
+++ b/sound/firewire/oxfw/oxfw-proc.c
@@ -15,7 +15,7 @@ static void proc_read_formation(struct snd_info_entry *entry,
15 struct snd_oxfw_stream_formation formation, curr; 15 struct snd_oxfw_stream_formation formation, curr;
16 u8 *format; 16 u8 *format;
17 char flag; 17 char flag;
18 unsigned int i, err; 18 int i, err;
19 19
20 /* Show input. */ 20 /* Show input. */
21 err = snd_oxfw_stream_get_current_formation(oxfw, 21 err = snd_oxfw_stream_get_current_formation(oxfw,
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index b77cf80f1678..bda845afb470 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -61,7 +61,8 @@ static int set_stream_format(struct snd_oxfw *oxfw, struct amdtp_stream *s,
61 u8 **formats; 61 u8 **formats;
62 struct snd_oxfw_stream_formation formation; 62 struct snd_oxfw_stream_formation formation;
63 enum avc_general_plug_dir dir; 63 enum avc_general_plug_dir dir;
64 unsigned int i, err, len; 64 unsigned int len;
65 int i, err;
65 66
66 if (s == &oxfw->tx_stream) { 67 if (s == &oxfw->tx_stream) {
67 formats = oxfw->tx_stream_formats; 68 formats = oxfw->tx_stream_formats;
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index cf1d0b55e827..60e5cad0531a 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -43,7 +43,7 @@ static bool detect_loud_models(struct fw_unit *unit)
43 err = fw_csr_string(unit->directory, CSR_MODEL, 43 err = fw_csr_string(unit->directory, CSR_MODEL,
44 model, sizeof(model)); 44 model, sizeof(model));
45 if (err < 0) 45 if (err < 0)
46 return err; 46 return false;
47 47
48 for (i = 0; i < ARRAY_SIZE(models); i++) { 48 for (i = 0; i < ARRAY_SIZE(models); i++) {
49 if (strcmp(models[i], model) == 0) 49 if (strcmp(models[i], model) == 0)
diff --git a/sound/pci/asihpi/hpi_internal.h b/sound/pci/asihpi/hpi_internal.h
index 48380ce2c81b..aeea679b2281 100644
--- a/sound/pci/asihpi/hpi_internal.h
+++ b/sound/pci/asihpi/hpi_internal.h
@@ -1367,9 +1367,9 @@ struct hpi_control_cache_single {
1367struct hpi_control_cache_pad { 1367struct hpi_control_cache_pad {
1368 struct hpi_control_cache_info i; 1368 struct hpi_control_cache_info i;
1369 u32 field_valid_flags; 1369 u32 field_valid_flags;
1370 u8 c_channel[8]; 1370 u8 c_channel[40];
1371 u8 c_artist[40]; 1371 u8 c_artist[100];
1372 u8 c_title[40]; 1372 u8 c_title[100];
1373 u8 c_comment[200]; 1373 u8 c_comment[200];
1374 u32 pTY; 1374 u32 pTY;
1375 u32 pI; 1375 u32 pI;
diff --git a/sound/pci/asihpi/hpi_version.h b/sound/pci/asihpi/hpi_version.h
index e9146e53bd50..6623ab110038 100644
--- a/sound/pci/asihpi/hpi_version.h
+++ b/sound/pci/asihpi/hpi_version.h
@@ -11,13 +11,13 @@ Production releases have even minor version.
11/* Use single digits for versions less that 10 to avoid octal. */ 11/* Use single digits for versions less that 10 to avoid octal. */
12/* *** HPI_VER is the only edit required to update version *** */ 12/* *** HPI_VER is the only edit required to update version *** */
13/** HPI version */ 13/** HPI version */
14#define HPI_VER HPI_VERSION_CONSTRUCTOR(4, 10, 1) 14#define HPI_VER HPI_VERSION_CONSTRUCTOR(4, 14, 3)
15 15
16/** HPI version string in dotted decimal format */ 16/** HPI version string in dotted decimal format */
17#define HPI_VER_STRING "4.10.01" 17#define HPI_VER_STRING "4.14.03"
18 18
19/** Library version as documented in hpi-api-versions.txt */ 19/** Library version as documented in hpi-api-versions.txt */
20#define HPI_LIB_VER HPI_VERSION_CONSTRUCTOR(10, 2, 0) 20#define HPI_LIB_VER HPI_VERSION_CONSTRUCTOR(10, 4, 0)
21 21
22/** Construct hpi version number from major, minor, release numbers */ 22/** Construct hpi version number from major, minor, release numbers */
23#define HPI_VERSION_CONSTRUCTOR(maj, min, r) ((maj << 16) + (min << 8) + r) 23#define HPI_VERSION_CONSTRUCTOR(maj, min, r) ((maj << 16) + (min << 8) + r)
diff --git a/sound/pci/asihpi/hpidspcd.c b/sound/pci/asihpi/hpidspcd.c
index ac9163770013..3603c24f34d2 100644
--- a/sound/pci/asihpi/hpidspcd.c
+++ b/sound/pci/asihpi/hpidspcd.c
@@ -1,8 +1,9 @@
1/***********************************************************************/ 1/***********************************************************************
2/**
3 2
4 AudioScience HPI driver 3 AudioScience HPI driver
5 Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com> 4 Functions for reading DSP code using hotplug firmware loader
5
6 Copyright (C) 1997-2014 AudioScience Inc. <support@audioscience.com>
6 7
7 This program is free software; you can redistribute it and/or modify 8 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as 9 it under the terms of version 2 of the GNU General Public License as
@@ -17,11 +18,7 @@
17 along with this program; if not, write to the Free Software 18 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 20
20\file 21***********************************************************************/
21Functions for reading DSP code using
22hotplug firmware loader from individual dsp code files
23*/
24/***********************************************************************/
25#define SOURCEFILE_NAME "hpidspcd.c" 22#define SOURCEFILE_NAME "hpidspcd.c"
26#include "hpidspcd.h" 23#include "hpidspcd.h"
27#include "hpidebug.h" 24#include "hpidebug.h"
@@ -68,17 +65,18 @@ short hpi_dsp_code_open(u32 adapter, void *os_data, struct dsp_code *dsp_code,
68 goto error2; 65 goto error2;
69 } 66 }
70 67
71 if ((header.version >> 9) != (HPI_VER >> 9)) { 68 if (HPI_VER_MAJOR(header.version) != HPI_VER_MAJOR(HPI_VER)) {
72 /* Consider even and subsequent odd minor versions to be compatible */ 69 /* Major version change probably means Host-DSP protocol change */
73 dev_err(&dev->dev, "Incompatible firmware version DSP image %X != Driver %X\n", 70 dev_err(&dev->dev,
71 "Incompatible firmware version DSP image %X != Driver %X\n",
74 header.version, HPI_VER); 72 header.version, HPI_VER);
75 goto error2; 73 goto error2;
76 } 74 }
77 75
78 if (header.version != HPI_VER) { 76 if (header.version != HPI_VER) {
79 dev_info(&dev->dev, 77 dev_warn(&dev->dev,
80 "Firmware: release version mismatch DSP image %X != Driver %X\n", 78 "Firmware version mismatch: DSP image %X != Driver %X\n",
81 header.version, HPI_VER); 79 header.version, HPI_VER);
82 } 80 }
83 81
84 HPI_DEBUG_LOG(DEBUG, "dsp code %s opened\n", fw_name); 82 HPI_DEBUG_LOG(DEBUG, "dsp code %s opened\n", fw_name);
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 63b69f750d8e..b680b4ec6331 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3218,12 +3218,13 @@ static int create_input_ctls(struct hda_codec *codec)
3218 } 3218 }
3219 3219
3220 /* add stereo mix when explicitly enabled via hint */ 3220 /* add stereo mix when explicitly enabled via hint */
3221 if (mixer && spec->add_stereo_mix_input && 3221 if (mixer && spec->add_stereo_mix_input == HDA_HINT_STEREO_MIX_ENABLE) {
3222 snd_hda_get_bool_hint(codec, "add_stereo_mix_input") > 0) {
3223 err = parse_capture_source(codec, mixer, CFG_IDX_MIX, num_adcs, 3222 err = parse_capture_source(codec, mixer, CFG_IDX_MIX, num_adcs,
3224 "Stereo Mix", 0); 3223 "Stereo Mix", 0);
3225 if (err < 0) 3224 if (err < 0)
3226 return err; 3225 return err;
3226 else
3227 spec->suppress_auto_mic = 1;
3227 } 3228 }
3228 3229
3229 return 0; 3230 return 0;
@@ -4542,9 +4543,8 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
4542 4543
4543 /* add stereo mix if available and not enabled yet */ 4544 /* add stereo mix if available and not enabled yet */
4544 if (!spec->auto_mic && spec->mixer_nid && 4545 if (!spec->auto_mic && spec->mixer_nid &&
4545 spec->add_stereo_mix_input && 4546 spec->add_stereo_mix_input == HDA_HINT_STEREO_MIX_AUTO &&
4546 spec->input_mux.num_items > 1 && 4547 spec->input_mux.num_items > 1) {
4547 snd_hda_get_bool_hint(codec, "add_stereo_mix_input") < 0) {
4548 err = parse_capture_source(codec, spec->mixer_nid, 4548 err = parse_capture_source(codec, spec->mixer_nid,
4549 CFG_IDX_MIX, spec->num_all_adcs, 4549 CFG_IDX_MIX, spec->num_all_adcs,
4550 "Stereo Mix", 0); 4550 "Stereo Mix", 0);
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 61dd5153f512..3d852660443a 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -222,7 +222,7 @@ struct hda_gen_spec {
222 unsigned int vmaster_mute_enum:1; /* add vmaster mute mode enum */ 222 unsigned int vmaster_mute_enum:1; /* add vmaster mute mode enum */
223 unsigned int indep_hp:1; /* independent HP supported */ 223 unsigned int indep_hp:1; /* independent HP supported */
224 unsigned int prefer_hp_amp:1; /* enable HP amp for speaker if any */ 224 unsigned int prefer_hp_amp:1; /* enable HP amp for speaker if any */
225 unsigned int add_stereo_mix_input:1; /* add aamix as a capture src */ 225 unsigned int add_stereo_mix_input:2; /* add aamix as a capture src */
226 unsigned int add_jack_modes:1; /* add i/o jack mode enum ctls */ 226 unsigned int add_jack_modes:1; /* add i/o jack mode enum ctls */
227 unsigned int power_down_unused:1; /* power down unused widgets */ 227 unsigned int power_down_unused:1; /* power down unused widgets */
228 unsigned int dac_min_mute:1; /* minimal = mute for DACs */ 228 unsigned int dac_min_mute:1; /* minimal = mute for DACs */
@@ -291,6 +291,13 @@ struct hda_gen_spec {
291 struct hda_jack_callback *cb); 291 struct hda_jack_callback *cb);
292}; 292};
293 293
294/* values for add_stereo_mix_input flag */
295enum {
296 HDA_HINT_STEREO_MIX_DISABLE, /* No stereo mix input */
297 HDA_HINT_STEREO_MIX_ENABLE, /* Add stereo mix input */
298 HDA_HINT_STEREO_MIX_AUTO, /* Add only if auto-mic is disabled */
299};
300
294int snd_hda_gen_spec_init(struct hda_gen_spec *spec); 301int snd_hda_gen_spec_init(struct hda_gen_spec *spec);
295 302
296int snd_hda_gen_init(struct hda_codec *codec); 303int snd_hda_gen_init(struct hda_codec *codec);
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index bef721592c3a..ccc962a1699f 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -468,7 +468,7 @@ int snd_hda_get_bool_hint(struct hda_codec *codec, const char *key)
468EXPORT_SYMBOL_GPL(snd_hda_get_bool_hint); 468EXPORT_SYMBOL_GPL(snd_hda_get_bool_hint);
469 469
470/** 470/**
471 * snd_hda_get_bool_hint - Get a boolean hint value 471 * snd_hda_get_int_hint - Get an integer hint value
472 * @codec: the HDA codec 472 * @codec: the HDA codec
473 * @key: the hint key string 473 * @key: the hint key string
474 * @valp: pointer to store a value 474 * @valp: pointer to store a value
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index c81b715d6c98..a9d78e275138 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -195,7 +195,8 @@ static int ad198x_parse_auto_config(struct hda_codec *codec, bool indep_hp)
195 codec->no_sticky_stream = 1; 195 codec->no_sticky_stream = 1;
196 196
197 spec->gen.indep_hp = indep_hp; 197 spec->gen.indep_hp = indep_hp;
198 spec->gen.add_stereo_mix_input = 1; 198 if (!spec->gen.add_stereo_mix_input)
199 spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
199 200
200 err = snd_hda_parse_pin_defcfg(codec, cfg, NULL, 0); 201 err = snd_hda_parse_pin_defcfg(codec, cfg, NULL, 0);
201 if (err < 0) 202 if (err < 0)
@@ -256,6 +257,18 @@ static void ad1986a_fixup_eapd(struct hda_codec *codec,
256 } 257 }
257} 258}
258 259
260/* enable stereo-mix input for avoiding regression on KDE (bko#88251) */
261static void ad1986a_fixup_eapd_mix_in(struct hda_codec *codec,
262 const struct hda_fixup *fix, int action)
263{
264 struct ad198x_spec *spec = codec->spec;
265
266 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
267 ad1986a_fixup_eapd(codec, fix, action);
268 spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_ENABLE;
269 }
270}
271
259enum { 272enum {
260 AD1986A_FIXUP_INV_JACK_DETECT, 273 AD1986A_FIXUP_INV_JACK_DETECT,
261 AD1986A_FIXUP_ULTRA, 274 AD1986A_FIXUP_ULTRA,
@@ -264,6 +277,8 @@ enum {
264 AD1986A_FIXUP_LAPTOP, 277 AD1986A_FIXUP_LAPTOP,
265 AD1986A_FIXUP_LAPTOP_IMIC, 278 AD1986A_FIXUP_LAPTOP_IMIC,
266 AD1986A_FIXUP_EAPD, 279 AD1986A_FIXUP_EAPD,
280 AD1986A_FIXUP_EAPD_MIX_IN,
281 AD1986A_FIXUP_EASYNOTE,
267}; 282};
268 283
269static const struct hda_fixup ad1986a_fixups[] = { 284static const struct hda_fixup ad1986a_fixups[] = {
@@ -328,6 +343,30 @@ static const struct hda_fixup ad1986a_fixups[] = {
328 .type = HDA_FIXUP_FUNC, 343 .type = HDA_FIXUP_FUNC,
329 .v.func = ad1986a_fixup_eapd, 344 .v.func = ad1986a_fixup_eapd,
330 }, 345 },
346 [AD1986A_FIXUP_EAPD_MIX_IN] = {
347 .type = HDA_FIXUP_FUNC,
348 .v.func = ad1986a_fixup_eapd_mix_in,
349 },
350 [AD1986A_FIXUP_EASYNOTE] = {
351 .type = HDA_FIXUP_PINS,
352 .v.pins = (const struct hda_pintbl[]) {
353 { 0x1a, 0x0421402f }, /* headphone */
354 { 0x1b, 0x90170110 }, /* speaker */
355 { 0x1c, 0x411111f0 }, /* N/A */
356 { 0x1d, 0x90a70130 }, /* int mic */
357 { 0x1e, 0x411111f0 }, /* N/A */
358 { 0x1f, 0x04a19040 }, /* mic */
359 { 0x20, 0x411111f0 }, /* N/A */
360 { 0x21, 0x411111f0 }, /* N/A */
361 { 0x22, 0x411111f0 }, /* N/A */
362 { 0x23, 0x411111f0 }, /* N/A */
363 { 0x24, 0x411111f0 }, /* N/A */
364 { 0x25, 0x411111f0 }, /* N/A */
365 {}
366 },
367 .chained = true,
368 .chain_id = AD1986A_FIXUP_EAPD_MIX_IN,
369 },
331}; 370};
332 371
333static const struct snd_pci_quirk ad1986a_fixup_tbl[] = { 372static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
@@ -341,6 +380,7 @@ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
341 SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP), 380 SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
342 SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG), 381 SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
343 SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA), 382 SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
383 SND_PCI_QUIRK(0x1631, 0xc022, "PackardBell EasyNote MX65", AD1986A_FIXUP_EASYNOTE),
344 SND_PCI_QUIRK(0x17aa, 0x2066, "Lenovo N100", AD1986A_FIXUP_INV_JACK_DETECT), 384 SND_PCI_QUIRK(0x17aa, 0x2066, "Lenovo N100", AD1986A_FIXUP_INV_JACK_DETECT),
345 SND_PCI_QUIRK(0x17aa, 0x1011, "Lenovo M55", AD1986A_FIXUP_3STACK), 385 SND_PCI_QUIRK(0x17aa, 0x1011, "Lenovo M55", AD1986A_FIXUP_3STACK),
346 SND_PCI_QUIRK(0x17aa, 0x1017, "Lenovo A60", AD1986A_FIXUP_3STACK), 386 SND_PCI_QUIRK(0x17aa, 0x1017, "Lenovo A60", AD1986A_FIXUP_3STACK),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index e9ebc7bd752c..fd3ed18670e9 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -855,14 +855,14 @@ static int patch_conexant_auto(struct hda_codec *codec)
855 case 0x14f15045: 855 case 0x14f15045:
856 codec->single_adc_amp = 1; 856 codec->single_adc_amp = 1;
857 spec->gen.mixer_nid = 0x17; 857 spec->gen.mixer_nid = 0x17;
858 spec->gen.add_stereo_mix_input = 1; 858 spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
859 snd_hda_pick_fixup(codec, cxt5045_fixup_models, 859 snd_hda_pick_fixup(codec, cxt5045_fixup_models,
860 cxt5045_fixups, cxt_fixups); 860 cxt5045_fixups, cxt_fixups);
861 break; 861 break;
862 case 0x14f15047: 862 case 0x14f15047:
863 codec->pin_amp_workaround = 1; 863 codec->pin_amp_workaround = 1;
864 spec->gen.mixer_nid = 0x19; 864 spec->gen.mixer_nid = 0x19;
865 spec->gen.add_stereo_mix_input = 1; 865 spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
866 snd_hda_pick_fixup(codec, cxt5047_fixup_models, 866 snd_hda_pick_fixup(codec, cxt5047_fixup_models,
867 cxt5047_fixups, cxt_fixups); 867 cxt5047_fixups, cxt_fixups);
868 break; 868 break;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 9dc9cf8c90e9..5f13d2d18079 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -47,7 +47,9 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
47 47
48#define is_haswell(codec) ((codec)->vendor_id == 0x80862807) 48#define is_haswell(codec) ((codec)->vendor_id == 0x80862807)
49#define is_broadwell(codec) ((codec)->vendor_id == 0x80862808) 49#define is_broadwell(codec) ((codec)->vendor_id == 0x80862808)
50#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec)) 50#define is_skylake(codec) ((codec)->vendor_id == 0x80862809)
51#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
52 || is_skylake(codec))
51 53
52#define is_valleyview(codec) ((codec)->vendor_id == 0x80862882) 54#define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
53#define is_cherryview(codec) ((codec)->vendor_id == 0x80862883) 55#define is_cherryview(codec) ((codec)->vendor_id == 0x80862883)
@@ -3365,6 +3367,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
3365{ .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi }, 3367{ .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
3366{ .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi }, 3368{ .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
3367{ .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi }, 3369{ .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi },
3370{ .id = 0x80862809, .name = "Skylake HDMI", .patch = patch_generic_hdmi },
3368{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi }, 3371{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
3369{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi }, 3372{ .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi },
3370{ .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi }, 3373{ .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi },
@@ -3425,6 +3428,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862805");
3425MODULE_ALIAS("snd-hda-codec-id:80862806"); 3428MODULE_ALIAS("snd-hda-codec-id:80862806");
3426MODULE_ALIAS("snd-hda-codec-id:80862807"); 3429MODULE_ALIAS("snd-hda-codec-id:80862807");
3427MODULE_ALIAS("snd-hda-codec-id:80862808"); 3430MODULE_ALIAS("snd-hda-codec-id:80862808");
3431MODULE_ALIAS("snd-hda-codec-id:80862809");
3428MODULE_ALIAS("snd-hda-codec-id:80862880"); 3432MODULE_ALIAS("snd-hda-codec-id:80862880");
3429MODULE_ALIAS("snd-hda-codec-id:80862882"); 3433MODULE_ALIAS("snd-hda-codec-id:80862882");
3430MODULE_ALIAS("snd-hda-codec-id:80862883"); 3434MODULE_ALIAS("snd-hda-codec-id:80862883");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a722067c491c..65f1f4e18ea5 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -321,10 +321,12 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
321 break; 321 break;
322 case 0x10ec0233: 322 case 0x10ec0233:
323 case 0x10ec0255: 323 case 0x10ec0255:
324 case 0x10ec0256:
324 case 0x10ec0282: 325 case 0x10ec0282:
325 case 0x10ec0283: 326 case 0x10ec0283:
326 case 0x10ec0286: 327 case 0x10ec0286:
327 case 0x10ec0288: 328 case 0x10ec0288:
329 case 0x10ec0298:
328 alc_update_coef_idx(codec, 0x10, 1<<9, 0); 330 alc_update_coef_idx(codec, 0x10, 1<<9, 0);
329 break; 331 break;
330 case 0x10ec0285: 332 case 0x10ec0285:
@@ -2659,7 +2661,9 @@ enum {
2659 ALC269_TYPE_ALC284, 2661 ALC269_TYPE_ALC284,
2660 ALC269_TYPE_ALC285, 2662 ALC269_TYPE_ALC285,
2661 ALC269_TYPE_ALC286, 2663 ALC269_TYPE_ALC286,
2664 ALC269_TYPE_ALC298,
2662 ALC269_TYPE_ALC255, 2665 ALC269_TYPE_ALC255,
2666 ALC269_TYPE_ALC256,
2663}; 2667};
2664 2668
2665/* 2669/*
@@ -2686,7 +2690,9 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
2686 case ALC269_TYPE_ALC282: 2690 case ALC269_TYPE_ALC282:
2687 case ALC269_TYPE_ALC283: 2691 case ALC269_TYPE_ALC283:
2688 case ALC269_TYPE_ALC286: 2692 case ALC269_TYPE_ALC286:
2693 case ALC269_TYPE_ALC298:
2689 case ALC269_TYPE_ALC255: 2694 case ALC269_TYPE_ALC255:
2695 case ALC269_TYPE_ALC256:
2690 ssids = alc269_ssids; 2696 ssids = alc269_ssids;
2691 break; 2697 break;
2692 default: 2698 default:
@@ -4829,6 +4835,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4829 SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK), 4835 SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
4830 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 4836 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4831 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 4837 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4838 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
4832 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 4839 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4833 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 4840 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4834 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 4841 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5417,9 +5424,15 @@ static int patch_alc269(struct hda_codec *codec)
5417 spec->codec_variant = ALC269_TYPE_ALC286; 5424 spec->codec_variant = ALC269_TYPE_ALC286;
5418 spec->shutup = alc286_shutup; 5425 spec->shutup = alc286_shutup;
5419 break; 5426 break;
5427 case 0x10ec0298:
5428 spec->codec_variant = ALC269_TYPE_ALC298;
5429 break;
5420 case 0x10ec0255: 5430 case 0x10ec0255:
5421 spec->codec_variant = ALC269_TYPE_ALC255; 5431 spec->codec_variant = ALC269_TYPE_ALC255;
5422 break; 5432 break;
5433 case 0x10ec0256:
5434 spec->codec_variant = ALC269_TYPE_ALC256;
5435 break;
5423 } 5436 }
5424 5437
5425 if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) { 5438 if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -6341,6 +6354,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
6341 { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 }, 6354 { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
6342 { .id = 0x10ec0235, .name = "ALC233", .patch = patch_alc269 }, 6355 { .id = 0x10ec0235, .name = "ALC233", .patch = patch_alc269 },
6343 { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 }, 6356 { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 },
6357 { .id = 0x10ec0256, .name = "ALC256", .patch = patch_alc269 },
6344 { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 }, 6358 { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
6345 { .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 }, 6359 { .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 },
6346 { .id = 0x10ec0267, .name = "ALC267", .patch = patch_alc268 }, 6360 { .id = 0x10ec0267, .name = "ALC267", .patch = patch_alc268 },
@@ -6360,6 +6374,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
6360 { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 }, 6374 { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
6361 { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 }, 6375 { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
6362 { .id = 0x10ec0293, .name = "ALC293", .patch = patch_alc269 }, 6376 { .id = 0x10ec0293, .name = "ALC293", .patch = patch_alc269 },
6377 { .id = 0x10ec0298, .name = "ALC298", .patch = patch_alc269 },
6363 { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660", 6378 { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
6364 .patch = patch_alc861 }, 6379 .patch = patch_alc861 },
6365 { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd }, 6380 { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 6c206b6c8d65..3de6d3d779c9 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -137,7 +137,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
137 spec->gen.indep_hp = 1; 137 spec->gen.indep_hp = 1;
138 spec->gen.keep_eapd_on = 1; 138 spec->gen.keep_eapd_on = 1;
139 spec->gen.pcm_playback_hook = via_playback_pcm_hook; 139 spec->gen.pcm_playback_hook = via_playback_pcm_hook;
140 spec->gen.add_stereo_mix_input = 1; 140 spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
141 return spec; 141 return spec;
142} 142}
143 143
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index b1cc2a4a7fc0..99ff35e2a25d 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -267,7 +267,7 @@ static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
267 if (!ssc_p->dir_mask) { 267 if (!ssc_p->dir_mask) {
268 if (ssc_p->initialized) { 268 if (ssc_p->initialized) {
269 /* Shutdown the SSC clock. */ 269 /* Shutdown the SSC clock. */
270 pr_debug("atmel_ssc_dau: Stopping clock\n"); 270 pr_debug("atmel_ssc_dai: Stopping clock\n");
271 clk_disable(ssc_p->ssc->clk); 271 clk_disable(ssc_p->ssc->clk);
272 272
273 free_irq(ssc_p->ssc->irq, ssc_p); 273 free_irq(ssc_p->ssc->irq, ssc_p);
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 883c5778b309..8349f982a586 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -520,6 +520,8 @@ config SND_SOC_RT5670
520 520
521config SND_SOC_RT5677 521config SND_SOC_RT5677
522 tristate 522 tristate
523 select REGMAP_I2C
524 select REGMAP_IRQ
523 525
524config SND_SOC_RT5677_SPI 526config SND_SOC_RT5677_SPI
525 tristate 527 tristate
diff --git a/sound/soc/codecs/pcm512x-i2c.c b/sound/soc/codecs/pcm512x-i2c.c
index 4d62230bd378..d0547fa275fc 100644
--- a/sound/soc/codecs/pcm512x-i2c.c
+++ b/sound/soc/codecs/pcm512x-i2c.c
@@ -24,8 +24,13 @@ static int pcm512x_i2c_probe(struct i2c_client *i2c,
24 const struct i2c_device_id *id) 24 const struct i2c_device_id *id)
25{ 25{
26 struct regmap *regmap; 26 struct regmap *regmap;
27 struct regmap_config config = pcm512x_regmap;
27 28
28 regmap = devm_regmap_init_i2c(i2c, &pcm512x_regmap); 29 /* msb needs to be set to enable auto-increment of addresses */
30 config.read_flag_mask = 0x80;
31 config.write_flag_mask = 0x80;
32
33 regmap = devm_regmap_init_i2c(i2c, &config);
29 if (IS_ERR(regmap)) 34 if (IS_ERR(regmap))
30 return PTR_ERR(regmap); 35 return PTR_ERR(regmap);
31 36
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index a7789a8726e3..27141e2df878 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -2209,6 +2209,10 @@ static int rt5645_jack_detect(struct snd_soc_codec *codec)
2209 int gpio_state, jack_type = 0; 2209 int gpio_state, jack_type = 0;
2210 unsigned int val; 2210 unsigned int val;
2211 2211
2212 if (!gpio_is_valid(rt5645->pdata.hp_det_gpio)) {
2213 dev_err(codec->dev, "invalid gpio\n");
2214 return -EINVAL;
2215 }
2212 gpio_state = gpio_get_value(rt5645->pdata.hp_det_gpio); 2216 gpio_state = gpio_get_value(rt5645->pdata.hp_det_gpio);
2213 2217
2214 dev_dbg(codec->dev, "gpio = %d(%d)\n", rt5645->pdata.hp_det_gpio, 2218 dev_dbg(codec->dev, "gpio = %d(%d)\n", rt5645->pdata.hp_det_gpio,
diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c
index b8a782c0d4cd..619525200705 100644
--- a/sound/soc/intel/sst-haswell-pcm.c
+++ b/sound/soc/intel/sst-haswell-pcm.c
@@ -998,7 +998,7 @@ static int hsw_pcm_dev_remove(struct platform_device *pdev)
998 return 0; 998 return 0;
999} 999}
1000 1000
1001#ifdef CONFIG_PM_RUNTIME 1001#ifdef CONFIG_PM
1002 1002
1003static int hsw_pcm_runtime_idle(struct device *dev) 1003static int hsw_pcm_runtime_idle(struct device *dev)
1004{ 1004{
@@ -1057,7 +1057,7 @@ static int hsw_pcm_runtime_resume(struct device *dev)
1057#define hsw_pcm_runtime_resume NULL 1057#define hsw_pcm_runtime_resume NULL
1058#endif 1058#endif
1059 1059
1060#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PM_RUNTIME) 1060#ifdef CONFIG_PM
1061 1061
1062static void hsw_pcm_complete(struct device *dev) 1062static void hsw_pcm_complete(struct device *dev)
1063{ 1063{
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
index 31124aa4434e..3abc29e8a928 100644
--- a/sound/soc/intel/sst/sst_acpi.c
+++ b/sound/soc/intel/sst/sst_acpi.c
@@ -43,7 +43,7 @@
43#include "sst.h" 43#include "sst.h"
44 44
45struct sst_machines { 45struct sst_machines {
46 char codec_id[32]; 46 char *codec_id;
47 char board[32]; 47 char board[32];
48 char machine[32]; 48 char machine[32];
49 void (*machine_quirk)(void); 49 void (*machine_quirk)(void);
@@ -277,16 +277,16 @@ int sst_acpi_probe(struct platform_device *pdev)
277 dev_dbg(dev, "ACPI device id: %x\n", dev_id); 277 dev_dbg(dev, "ACPI device id: %x\n", dev_id);
278 278
279 plat_dev = platform_device_register_data(dev, mach->pdata->platform, -1, NULL, 0); 279 plat_dev = platform_device_register_data(dev, mach->pdata->platform, -1, NULL, 0);
280 if (plat_dev == NULL) { 280 if (IS_ERR(plat_dev)) {
281 dev_err(dev, "Failed to create machine device: %s\n", mach->pdata->platform); 281 dev_err(dev, "Failed to create machine device: %s\n", mach->pdata->platform);
282 return -ENODEV; 282 return PTR_ERR(plat_dev);
283 } 283 }
284 284
285 /* Create platform device for sst machine driver */ 285 /* Create platform device for sst machine driver */
286 mdev = platform_device_register_data(dev, mach->machine, -1, NULL, 0); 286 mdev = platform_device_register_data(dev, mach->machine, -1, NULL, 0);
287 if (mdev == NULL) { 287 if (IS_ERR(mdev)) {
288 dev_err(dev, "Failed to create machine device: %s\n", mach->machine); 288 dev_err(dev, "Failed to create machine device: %s\n", mach->machine);
289 return -ENODEV; 289 return PTR_ERR(mdev);
290 } 290 }
291 291
292 ret = sst_alloc_drv_context(&ctx, dev, dev_id); 292 ret = sst_alloc_drv_context(&ctx, dev, dev_id);
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index b1a7c5bce4a1..b5a80c528d86 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -1261,6 +1261,8 @@ static int samsung_i2s_probe(struct platform_device *pdev)
1261 ret = -ENOMEM; 1261 ret = -ENOMEM;
1262 goto err; 1262 goto err;
1263 } 1263 }
1264
1265 sec_dai->variant_regs = pri_dai->variant_regs;
1264 sec_dai->dma_playback.dma_addr = regs_base + I2STXDS; 1266 sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
1265 sec_dai->dma_playback.ch_name = "tx-sec"; 1267 sec_dai->dma_playback.ch_name = "tx-sec";
1266 1268
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 1994d41348f8..b703cb3cda19 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -333,8 +333,11 @@ static struct usbmix_name_map gamecom780_map[] = {
333 {} 333 {}
334}; 334};
335 335
336static const struct usbmix_name_map kef_x300a_map[] = { 336/* some (all?) SCMS USB3318 devices are affected by a firmware lock up
337 { 10, NULL }, /* firmware locks up (?) when we try to access this FU */ 337 * when anything attempts to access FU 10 (control)
338 */
339static const struct usbmix_name_map scms_usb3318_map[] = {
340 { 10, NULL },
338 { 0 } 341 { 0 }
339}; 342};
340 343
@@ -434,8 +437,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
434 .map = ebox44_map, 437 .map = ebox44_map,
435 }, 438 },
436 { 439 {
440 /* KEF X300A */
437 .id = USB_ID(0x27ac, 0x1000), 441 .id = USB_ID(0x27ac, 0x1000),
438 .map = kef_x300a_map, 442 .map = scms_usb3318_map,
443 },
444 {
445 /* Arcam rPAC */
446 .id = USB_ID(0x25c4, 0x0003),
447 .map = scms_usb3318_map,
439 }, 448 },
440 { 0 } /* terminator */ 449 { 0 } /* terminator */
441}; 450};
diff --git a/sound/usb/mixer_scarlett.c b/sound/usb/mixer_scarlett.c
index 9109652b88b9..7438e7c4a842 100644
--- a/sound/usb/mixer_scarlett.c
+++ b/sound/usb/mixer_scarlett.c
@@ -655,7 +655,7 @@ static struct scarlett_device_info s6i6_info = {
655 .names = NULL 655 .names = NULL
656 }, 656 },
657 657
658 .num_controls = 0, 658 .num_controls = 9,
659 .controls = { 659 .controls = {
660 { .num = 0, .type = SCARLETT_OUTPUTS, .name = "Monitor" }, 660 { .num = 0, .type = SCARLETT_OUTPUTS, .name = "Monitor" },
661 { .num = 1, .type = SCARLETT_OUTPUTS, .name = "Headphone" }, 661 { .num = 1, .type = SCARLETT_OUTPUTS, .name = "Headphone" },
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 4dbfb3d18ee2..a7398412310b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1245,8 +1245,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1245 1245
1246 /* XMOS based USB DACs */ 1246 /* XMOS based USB DACs */
1247 switch (chip->usb_id) { 1247 switch (chip->usb_id) {
1248 /* iFi Audio micro/nano iDSD */ 1248 case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
1249 case USB_ID(0x20b1, 0x3008): 1249 case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
1250 case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
1250 if (fp->altsetting == 2) 1251 if (fp->altsetting == 2)
1251 return SNDRV_PCM_FMTBIT_DSD_U32_BE; 1252 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1252 break; 1253 break;