author	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-07 16:39:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-05-07 16:39:22 -0400
commit	f678d6da749983791850876e3421e7c48a0a7127 (patch)
tree	553f818ef8e73bf9d6b1e53bdf623240c1279ffb
parent	2310673c3c12e4b7f8a31c41f67f701d24b0de86 (diff)
parent	aad14ad3cf3a63bd258b65e18d49c3eb8472d344 (diff)
Merge tag 'char-misc-5.2-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc update part 2 from Greg KH:
 "Here is the "real" big set of char/misc driver patches for 5.2-rc1

  Loads of different driver subsystem stuff in here, all over the
  places:
   - thunderbolt driver updates
   - habanalabs driver updates
   - nvmem driver updates
   - extcon driver updates
   - intel_th driver updates
   - mei driver updates
   - coresight driver updates
   - soundwire driver cleanups and updates
   - fastrpc driver updates
   - other minor driver updates
   - chardev minor fixups

  Feels like this tree is getting to be a dumping ground of "small
  driver subsystems" these days. Which is fine with me, if it makes
  things easier for those subsystem maintainers.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-5.2-rc1-part2' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (255 commits)
  intel_th: msu: Add current window tracking
  intel_th: msu: Add a sysfs attribute to trigger window switch
  intel_th: msu: Correct the block wrap detection
  intel_th: Add switch triggering support
  intel_th: gth: Factor out trace start/stop
  intel_th: msu: Factor out pipeline draining
  intel_th: msu: Switch over to scatterlist
  intel_th: msu: Replace open-coded list_{first,last,next}_entry variants
  intel_th: Only report useful IRQs to subdevices
  intel_th: msu: Start handling IRQs
  intel_th: pci: Use MSI interrupt signalling
  intel_th: Communicate IRQ via resource
  intel_th: Add "rtit" source device
  intel_th: Skip subdevices if their MMIO is missing
  intel_th: Rework resource passing between glue layers and core
  intel_th: SPDX-ify the documentation
  intel_th: msu: Fix single mode with IOMMU
  coresight: funnel: Support static funnel
  dt-bindings: arm: coresight: Unify funnel DT binding
  coresight: replicator: Add new device id for static replicator
  ...
-rw-r--r--  Documentation/ABI/stable/sysfs-bus-nvmem | 2
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc | 8
-rw-r--r--  Documentation/ABI/testing/sysfs-class-mei | 15
-rw-r--r--  Documentation/devicetree/bindings/arm/coresight.txt | 60
-rw-r--r--  Documentation/devicetree/bindings/gnss/u-blox.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt | 47
-rw-r--r--  Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/nvmem/imx-ocotp.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/nvmem/st,stm32-romem.txt | 31
-rw-r--r--  Documentation/trace/intel_th.rst | 2
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  drivers/android/binder.c | 12
-rw-r--r--  drivers/char/hpet.c | 2
-rw-r--r--  drivers/extcon/Kconfig | 9
-rw-r--r--  drivers/extcon/Makefile | 1
-rw-r--r--  drivers/extcon/devres.c | 2
-rw-r--r--  drivers/extcon/extcon-arizona.c | 10
-rw-r--r--  drivers/extcon/extcon-intel-cht-wc.c | 81
-rw-r--r--  drivers/extcon/extcon-intel-mrfld.c | 284
-rw-r--r--  drivers/extcon/extcon-intel.h | 20
-rw-r--r--  drivers/firmware/google/vpd.c | 4
-rw-r--r--  drivers/gnss/ubx.c | 1
-rw-r--r--  drivers/hwtracing/coresight/Kconfig | 9
-rw-r--r--  drivers/hwtracing/coresight/Makefile | 1
-rw-r--r--  drivers/hwtracing/coresight/coresight-catu.c | 7
-rw-r--r--  drivers/hwtracing/coresight/coresight-catu.h | 5
-rw-r--r--  drivers/hwtracing/coresight/coresight-dynamic-replicator.c | 255
-rw-r--r--  drivers/hwtracing/coresight/coresight-etb10.c | 97
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm-perf.c | 37
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 114
-rw-r--r--  drivers/hwtracing/coresight/coresight-funnel.c | 116
-rw-r--r--  drivers/hwtracing/coresight/coresight-replicator.c | 238
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etf.c | 82
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c | 266
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c | 17
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.h | 12
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpiu.c | 18
-rw-r--r--  drivers/hwtracing/coresight/coresight.c | 29
-rw-r--r--  drivers/hwtracing/intel_th/acpi.c | 10
-rw-r--r--  drivers/hwtracing/intel_th/core.c | 139
-rw-r--r--  drivers/hwtracing/intel_th/gth.c | 125
-rw-r--r--  drivers/hwtracing/intel_th/gth.h | 19
-rw-r--r--  drivers/hwtracing/intel_th/intel_th.h | 30
-rw-r--r--  drivers/hwtracing/intel_th/msu.c | 407
-rw-r--r--  drivers/hwtracing/intel_th/msu.h | 10
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 32
-rw-r--r--  drivers/interconnect/core.c | 13
-rw-r--r--  drivers/misc/Kconfig | 8
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/aspeed-p2a-ctrl.c | 444
-rw-r--r--  drivers/misc/cardreader/rts5260.c | 4
-rw-r--r--  drivers/misc/fastrpc.c | 235
-rw-r--r--  drivers/misc/genwqe/card_debugfs.c | 4
-rw-r--r--  drivers/misc/habanalabs/Makefile | 2
-rw-r--r--  drivers/misc/habanalabs/command_buffer.c | 13
-rw-r--r--  drivers/misc/habanalabs/command_submission.c | 22
-rw-r--r--  drivers/misc/habanalabs/context.c | 4
-rw-r--r--  drivers/misc/habanalabs/debugfs.c | 96
-rw-r--r--  drivers/misc/habanalabs/device.c | 93
-rw-r--r--  drivers/misc/habanalabs/firmware_if.c | 322
-rw-r--r--  drivers/misc/habanalabs/goya/Makefile | 3
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 1209
-rw-r--r--  drivers/misc/habanalabs/goya/goyaP.h | 81
-rw-r--r--  drivers/misc/habanalabs/goya/goya_coresight.c | 628
-rw-r--r--  drivers/misc/habanalabs/goya/goya_security.c | 15
-rw-r--r--  drivers/misc/habanalabs/habanalabs.h | 220
-rw-r--r--  drivers/misc/habanalabs/habanalabs_drv.c | 9
-rw-r--r--  drivers/misc/habanalabs/habanalabs_ioctl.c | 139
-rw-r--r--  drivers/misc/habanalabs/hw_queue.c | 46
-rw-r--r--  drivers/misc/habanalabs/include/armcp_if.h | 2
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h | 12
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h | 3
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h | 306
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h | 1
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya.h | 4
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya_async_events.h | 9
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya_coresight.h | 199
-rw-r--r--  drivers/misc/habanalabs/include/goya/goya_fw_if.h | 2
-rw-r--r--  drivers/misc/habanalabs/include/hl_boot_if.h | 3
-rw-r--r--  drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h | 16
-rw-r--r--  drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h | 23
-rw-r--r--  drivers/misc/habanalabs/irq.c | 14
-rw-r--r--  drivers/misc/habanalabs/memory.c | 197
-rw-r--r--  drivers/misc/habanalabs/mmu.c | 600
-rw-r--r--  drivers/misc/habanalabs/pci.c | 408
-rw-r--r--  drivers/misc/kgdbts.c | 4
-rw-r--r--  drivers/misc/mei/Kconfig | 12
-rw-r--r--  drivers/misc/mei/Makefile | 2
-rw-r--r--  drivers/misc/mei/bus-fixup.c | 14
-rw-r--r--  drivers/misc/mei/bus.c | 13
-rw-r--r--  drivers/misc/mei/client.c | 16
-rw-r--r--  drivers/misc/mei/client.h | 14
-rw-r--r--  drivers/misc/mei/debugfs.c | 15
-rw-r--r--  drivers/misc/mei/dma-ring.c | 2
-rw-r--r--  drivers/misc/mei/hbm.c | 15
-rw-r--r--  drivers/misc/mei/hbm.h | 14
-rw-r--r--  drivers/misc/mei/hdcp/Kconfig | 13
-rw-r--r--  drivers/misc/mei/hdcp/Makefile | 2
-rw-r--r--  drivers/misc/mei/hdcp/mei_hdcp.c | 2
-rw-r--r--  drivers/misc/mei/hdcp/mei_hdcp.h | 2
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 68
-rw-r--r--  drivers/misc/mei/hw-me.c | 14
-rw-r--r--  drivers/misc/mei/hw-me.h | 16
-rw-r--r--  drivers/misc/mei/hw-txe-regs.h | 63
-rw-r--r--  drivers/misc/mei/hw-txe.c | 14
-rw-r--r--  drivers/misc/mei/hw-txe.h | 14
-rw-r--r--  drivers/misc/mei/hw.h | 14
-rw-r--r--  drivers/misc/mei/init.c | 34
-rw-r--r--  drivers/misc/mei/interrupt.c | 15
-rw-r--r--  drivers/misc/mei/main.c | 80
-rw-r--r--  drivers/misc/mei/mei-trace.c | 14
-rw-r--r--  drivers/misc/mei/mei-trace.h | 14
-rw-r--r--  drivers/misc/mei/mei_dev.h | 17
-rw-r--r--  drivers/misc/mei/pci-me.c | 15
-rw-r--r--  drivers/misc/mei/pci-txe.c | 14
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c | 1
-rw-r--r--  drivers/net/thunderbolt.c | 3
-rw-r--r--  drivers/nfc/mei_phy.c | 18
-rw-r--r--  drivers/nfc/microread/mei.c | 17
-rw-r--r--  drivers/nfc/pn544/mei.c | 15
-rw-r--r--  drivers/nvmem/Kconfig | 24
-rw-r--r--  drivers/nvmem/Makefile | 5
-rw-r--r--  drivers/nvmem/core.c | 316
-rw-r--r--  drivers/nvmem/imx-iim.c | 4
-rw-r--r--  drivers/nvmem/imx-ocotp.c | 11
-rw-r--r--  drivers/nvmem/mxs-ocotp.c | 4
-rw-r--r--  drivers/nvmem/nvmem-sysfs.c | 256
-rw-r--r--  drivers/nvmem/nvmem.h | 62
-rw-r--r--  drivers/nvmem/stm32-romem.c | 202
-rw-r--r--  drivers/nvmem/sunxi_sid.c | 115
-rw-r--r--  drivers/parport/ieee1284.c | 2
-rw-r--r--  drivers/parport/parport_cs.c | 5
-rw-r--r--  drivers/slimbus/qcom-ngd-ctrl.c | 4
-rw-r--r--  drivers/soundwire/Kconfig | 4
-rw-r--r--  drivers/soundwire/bus.c | 152
-rw-r--r--  drivers/soundwire/bus.h | 16
-rw-r--r--  drivers/soundwire/bus_type.c | 4
-rw-r--r--  drivers/soundwire/cadence_master.c | 100
-rw-r--r--  drivers/soundwire/cadence_master.h | 22
-rw-r--r--  drivers/soundwire/intel.c | 138
-rw-r--r--  drivers/soundwire/intel.h | 4
-rw-r--r--  drivers/soundwire/intel_init.c | 15
-rw-r--r--  drivers/soundwire/mipi_disco.c | 122
-rw-r--r--  drivers/soundwire/slave.c | 10
-rw-r--r--  drivers/soundwire/stream.c | 285
-rw-r--r--  drivers/thunderbolt/Makefile | 4
-rw-r--r--  drivers/thunderbolt/cap.c | 85
-rw-r--r--  drivers/thunderbolt/ctl.c | 2
-rw-r--r--  drivers/thunderbolt/icm.c | 65
-rw-r--r--  drivers/thunderbolt/lc.c | 179
-rw-r--r--  drivers/thunderbolt/nhi.c | 3
-rw-r--r--  drivers/thunderbolt/path.c | 420
-rw-r--r--  drivers/thunderbolt/property.c | 16
-rw-r--r--  drivers/thunderbolt/switch.c | 557
-rw-r--r--  drivers/thunderbolt/tb.c | 608
-rw-r--r--  drivers/thunderbolt/tb.h | 227
-rw-r--r--  drivers/thunderbolt/tb_msgs.h | 11
-rw-r--r--  drivers/thunderbolt/tb_regs.h | 50
-rw-r--r--  drivers/thunderbolt/tunnel.c | 691
-rw-r--r--  drivers/thunderbolt/tunnel.h | 78
-rw-r--r--  drivers/thunderbolt/tunnel_pci.c | 226
-rw-r--r--  drivers/thunderbolt/tunnel_pci.h | 31
-rw-r--r--  drivers/thunderbolt/xdomain.c | 170
-rw-r--r--  drivers/uio/uio_fsl_elbc_gpcm.c | 4
-rw-r--r--  drivers/virt/vboxguest/vboxguest_core.c | 31
-rw-r--r--  drivers/w1/masters/ds2482.c | 18
-rw-r--r--  drivers/w1/slaves/w1_ds2408.c | 76
-rw-r--r--  drivers/w1/w1_io.c | 3
-rw-r--r--  fs/char_dev.c | 78
-rw-r--r--  include/linux/coresight-pmu.h | 2
-rw-r--r--  include/linux/coresight.h | 7
-rw-r--r--  include/linux/mei_cl_bus.h | 3
-rw-r--r--  include/linux/nvmem-consumer.h | 7
-rw-r--r--  include/linux/soundwire/sdw.h | 16
-rw-r--r--  include/linux/soundwire/sdw_intel.h | 6
-rw-r--r--  include/linux/soundwire/sdw_registers.h | 5
-rw-r--r--  include/linux/soundwire/sdw_type.h | 6
-rw-r--r--  include/linux/thunderbolt.h | 8
-rw-r--r--  include/linux/vmw_vmci_defs.h | 35
-rw-r--r--  include/uapi/linux/aspeed-p2a-ctrl.h | 62
-rw-r--r--  include/uapi/linux/mei.h | 67
-rw-r--r--  include/uapi/misc/habanalabs.h | 161
-rw-r--r--  lib/siphash.c | 36
-rw-r--r--  tools/include/linux/coresight-pmu.h | 2
274 files changed, 10394 insertions(+), 4319 deletions(-)
diff --git a/Documentation/ABI/stable/sysfs-bus-nvmem b/Documentation/ABI/stable/sysfs-bus-nvmem
index 5923ab4620c5..9ffba8576f7b 100644
--- a/Documentation/ABI/stable/sysfs-bus-nvmem
+++ b/Documentation/ABI/stable/sysfs-bus-nvmem
@@ -6,6 +6,8 @@ Description:
 		This file allows user to read/write the raw NVMEM contents.
 		Permissions for write to this file depends on the nvmem
 		provider configuration.
+		Note: This file is only present if CONFIG_NVMEM_SYSFS
+		is enabled
 
 		ex:
 		  hexdump /sys/bus/nvmem/devices/qfprom0/nvmem
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
index b940c5d91cf7..f54ae244f3f1 100644
--- a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
+++ b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
@@ -30,4 +30,12 @@ Description: (RW) Configure MSC buffer size for "single" or "multi" modes.
 		there are no active users and tracing is not enabled) and then
 		allocates a new one.
 
+What:		/sys/bus/intel_th/devices/<intel_th_id>-msc<msc-id>/win_switch
+Date:		May 2019
+KernelVersion:	5.2
+Contact:	Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description:	(RW) Trigger window switch for the MSC's buffer, in
+		multi-window mode. In "multi" mode, accepts writes of "1", thereby
+		triggering a window switch for the buffer. Returns an error in any
+		other operating mode or attempts to write something other than "1".
 
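For illustration, a minimal user-space sketch of the win_switch protocol described above; the "0-msc0" instance name is a hypothetical example, and an MSC already configured in "multi" mode is assumed:

	/* Sketch: trigger an MSC window switch via the new sysfs attribute. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical device instance; real names vary by platform. */
		const char *path = "/sys/bus/intel_th/devices/0-msc0/win_switch";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open win_switch");
			return 1;
		}
		/* The attribute accepts only "1", and only in "multi" mode. */
		if (write(fd, "1", 1) != 1)
			perror("write win_switch");
		close(fd);
		return 0;
	}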
diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
index 17d7444a2397..a92d844f806e 100644
--- a/Documentation/ABI/testing/sysfs-class-mei
+++ b/Documentation/ABI/testing/sysfs-class-mei
@@ -65,3 +65,18 @@ Description: Display the ME firmware version.
 		<platform>:<major>.<minor>.<milestone>.<build_no>.
 		There can be up to three such blocks for different
 		FW components.
+
+What:		/sys/class/mei/meiN/dev_state
+Date:		Mar 2019
+KernelVersion:	5.1
+Contact:	Tomas Winkler <tomas.winkler@intel.com>
+Description:	Display the ME device state.
+
+		The device state can have following values:
+		INITIALIZING
+		INIT_CLIENTS
+		ENABLED
+		RESETTING
+		DISABLED
+		POWER_DOWN
+		POWER_UP
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index f8aff65ab921..8a88ddebc1a2 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -8,7 +8,8 @@ through the intermediate links connecting the source to the currently selected
 sink. Each CoreSight component device should use these properties to describe
 its hardware characteristcs.
 
-* Required properties for all components *except* non-configurable replicators:
+* Required properties for all components *except* non-configurable replicators
+  and non-configurable funnels:
 
 	* compatible: These have to be supplemented with "arm,primecell" as
 	  drivers are using the AMBA bus interface. Possible values include:
@@ -24,8 +25,10 @@ its hardware characteristcs.
 	  discovered at boot time when the device is probed.
 	  "arm,coresight-tmc", "arm,primecell";
 
-	- Trace Funnel:
-	  "arm,coresight-funnel", "arm,primecell";
+	- Trace Programmable Funnel:
+	  "arm,coresight-dynamic-funnel", "arm,primecell";
+	  "arm,coresight-funnel", "arm,primecell"; (OBSOLETE. For
+	   backward compatibility and will be removed)
 
 	- Embedded Trace Macrocell (version 3.x) and
 	  Program Flow Trace Macrocell:
@@ -65,11 +68,17 @@ its hardware characteristcs.
 	  "stm-stimulus-base", each corresponding to the areas defined in "reg".
 
 * Required properties for devices that don't show up on the AMBA bus, such as
-  non-configurable replicators:
+  non-configurable replicators and non-configurable funnels:
 
 	* compatible: Currently supported value is (note the absence of the
 	  AMBA markee):
-	- "arm,coresight-replicator"
+	- Coresight Non-configurable Replicator:
+	  "arm,coresight-static-replicator";
+	  "arm,coresight-replicator"; (OBSOLETE. For backward
+	   compatibility and will be removed)
+
+	- Coresight Non-configurable Funnel:
+	  "arm,coresight-static-funnel";
 
 	* port or ports: see "Graph bindings for Coresight" below.
 
@@ -169,7 +178,7 @@ Example:
 		/* non-configurable replicators don't show up on the
 		 * AMBA bus. As such no need to add "arm,primecell".
 		 */
-		compatible = "arm,coresight-replicator";
+		compatible = "arm,coresight-static-replicator";
 
 		out-ports {
 			#address-cells = <1>;
@@ -200,8 +209,45 @@ Example:
 		};
 	};
 
+	funnel {
+		/*
+		 * non-configurable funnel don't show up on the AMBA
+		 * bus. As such no need to add "arm,primecell".
+		 */
+		compatible = "arm,coresight-static-funnel";
+		clocks = <&crg_ctrl HI3660_PCLK>;
+		clock-names = "apb_pclk";
+
+		out-ports {
+			port {
+				combo_funnel_out: endpoint {
+					remote-endpoint = <&top_funnel_in>;
+				};
+			};
+		};
+
+		in-ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				combo_funnel_in0: endpoint {
+					remote-endpoint = <&cluster0_etf_out>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				combo_funnel_in1: endpoint {
+					remote-endpoint = <&cluster1_etf_out>;
+				};
+			};
+		};
+	};
+
 	funnel@20040000 {
-		compatible = "arm,coresight-funnel", "arm,primecell";
+		compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
 		reg = <0 0x20040000 0 0x1000>;
 
 		clocks = <&oscclk6a>;
diff --git a/Documentation/devicetree/bindings/gnss/u-blox.txt b/Documentation/devicetree/bindings/gnss/u-blox.txt
index e475659cb85f..7cdefd058fe0 100644
--- a/Documentation/devicetree/bindings/gnss/u-blox.txt
+++ b/Documentation/devicetree/bindings/gnss/u-blox.txt
@@ -9,6 +9,7 @@ Required properties:
 
 - compatible	: Must be one of
 
+		  "u-blox,neo-6m"
 		  "u-blox,neo-8"
 		  "u-blox,neo-m8"
 
diff --git a/Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt b/Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt
new file mode 100644
index 000000000000..854bd67ffec6
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt
@@ -0,0 +1,47 @@
+======================================================================
+Device tree bindings for Aspeed AST2400/AST2500 PCI-to-AHB Bridge Control Driver
+======================================================================
+
+The bridge is available on platforms with the VGA enabled on the Aspeed device.
+In this case, the host has access to a 64KiB window into all of the BMC's
+memory. The BMC can disable this bridge. If the bridge is enabled, the host
+has read access to all the regions of memory, however the host only has read
+and write access depending on a register controlled by the BMC.
+
+Required properties:
+===================
+
+ - compatible: must be one of:
+	- "aspeed,ast2400-p2a-ctrl"
+	- "aspeed,ast2500-p2a-ctrl"
+
+Optional properties:
+===================
+
+- memory-region: A phandle to a reserved_memory region to be used for the PCI
+		 to AHB mapping
+
+The p2a-control node should be the child of a syscon node with the required
+property:
+
+- compatible : Should be one of the following:
+	"aspeed,ast2400-scu", "syscon", "simple-mfd"
+	"aspeed,g4-scu", "syscon", "simple-mfd"
+	"aspeed,ast2500-scu", "syscon", "simple-mfd"
+	"aspeed,g5-scu", "syscon", "simple-mfd"
+
+Example
+===================
+
+g4 Example
+----------
+
+syscon: scu@1e6e2000 {
+	compatible = "aspeed,ast2400-scu", "syscon", "simple-mfd";
+	reg = <0x1e6e2000 0x1a8>;
+
+	p2a: p2a-control {
+		compatible = "aspeed,ast2400-p2a-ctrl";
+		memory-region = <&reserved_memory>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt b/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
index 99c4ba6a3f61..cfb18b4ef8f7 100644
--- a/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
+++ b/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
@@ -8,11 +8,12 @@ Required properties:
8 "allwinner,sun8i-h3-sid" 8 "allwinner,sun8i-h3-sid"
9 "allwinner,sun50i-a64-sid" 9 "allwinner,sun50i-a64-sid"
10 "allwinner,sun50i-h5-sid" 10 "allwinner,sun50i-h5-sid"
11 "allwinner,sun50i-h6-sid"
11 12
12- reg: Should contain registers location and length 13- reg: Should contain registers location and length
13 14
14= Data cells = 15= Data cells =
15Are child nodes of qfprom, bindings of which as described in 16Are child nodes of sunxi-sid, bindings of which as described in
16bindings/nvmem/nvmem.txt 17bindings/nvmem/nvmem.txt
17 18
18Example for sun4i: 19Example for sun4i:
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
index 7a999a135e56..68f7d6fdd140 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
@@ -1,7 +1,8 @@
1Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings 1Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
2 2
3This binding represents the on-chip eFuse OTP controller found on 3This binding represents the on-chip eFuse OTP controller found on
4i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ and i.MX6SLL SoCs. 4i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ, i.MX6SLL,
5i.MX7D/S, i.MX7ULP and i.MX8MQ SoCs.
5 6
6Required properties: 7Required properties:
7- compatible: should be one of 8- compatible: should be one of
@@ -13,6 +14,7 @@ Required properties:
13 "fsl,imx7d-ocotp" (i.MX7D/S), 14 "fsl,imx7d-ocotp" (i.MX7D/S),
14 "fsl,imx6sll-ocotp" (i.MX6SLL), 15 "fsl,imx6sll-ocotp" (i.MX6SLL),
15 "fsl,imx7ulp-ocotp" (i.MX7ULP), 16 "fsl,imx7ulp-ocotp" (i.MX7ULP),
17 "fsl,imx8mq-ocotp" (i.MX8MQ),
16 followed by "syscon". 18 followed by "syscon".
17- #address-cells : Should be 1 19- #address-cells : Should be 1
18- #size-cells : Should be 1 20- #size-cells : Should be 1
diff --git a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.txt b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.txt
new file mode 100644
index 000000000000..142a51d5a9be
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.txt
@@ -0,0 +1,31 @@
+STMicroelectronics STM32 Factory-programmed data device tree bindings
+
+This represents STM32 Factory-programmed read only non-volatile area: locked
+flash, OTP, read-only HW regs... This contains various information such as:
+analog calibration data for temperature sensor (e.g. TS_CAL1, TS_CAL2),
+internal vref (VREFIN_CAL), unique device ID...
+
+Required properties:
+- compatible:		Should be one of:
+			"st,stm32f4-otp"
+			"st,stm32mp15-bsec"
+- reg:			Offset and length of factory-programmed area.
+- #address-cells:	Should be '<1>'.
+- #size-cells:		Should be '<1>'.
+
+Optional Data cells:
+- Must be child nodes as described in nvmem.txt.
+
+Example on stm32f4:
+	romem: nvmem@1fff7800 {
+		compatible = "st,stm32f4-otp";
+		reg = <0x1fff7800 0x400>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		/* Data cells: ts_cal1 at 0x1fff7a2c */
+		ts_cal1: calib@22c {
+			reg = <0x22c 0x2>;
+		};
+		...
+	};
diff --git a/Documentation/trace/intel_th.rst b/Documentation/trace/intel_th.rst
index 19e2d633f3c7..baa12eb09ef4 100644
--- a/Documentation/trace/intel_th.rst
+++ b/Documentation/trace/intel_th.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 =======================
 Intel(R) Trace Hub (TH)
 =======================
diff --git a/MAINTAINERS b/MAINTAINERS
index 978563bcbeac..fbb6e45018f5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8068,6 +8068,7 @@ F: drivers/gpio/gpio-intel-mid.c
 
 INTERCONNECT API
 M:	Georgi Djakov <georgi.djakov@linaro.org>
+L:	linux-pm@vger.kernel.org
 S:	Maintained
 F:	Documentation/interconnect/
 F:	Documentation/devicetree/bindings/interconnect/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 4b9c7ca492e6..6f0712f0767c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3121,6 +3121,7 @@ static void binder_transaction(struct binder_proc *proc,
 
 	if (target_node && target_node->txn_security_ctx) {
 		u32 secid;
+		size_t added_size;
 
 		security_task_getsecid(proc->tsk, &secid);
 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
@@ -3130,7 +3131,15 @@ static void binder_transaction(struct binder_proc *proc,
 			return_error_line = __LINE__;
 			goto err_get_secctx_failed;
 		}
-		extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
+		added_size = ALIGN(secctx_sz, sizeof(u64));
+		extra_buffers_size += added_size;
+		if (extra_buffers_size < added_size) {
+			/* integer overflow of extra_buffers_size */
+			return_error = BR_FAILED_REPLY;
+			return_error_param = EINVAL;
+			return_error_line = __LINE__;
+			goto err_bad_extra_size;
+		}
 	}
 
 	trace_binder_transaction(reply, t, target_node);
@@ -3480,6 +3489,7 @@ err_copy_data_failed:
 	t->buffer->transaction = NULL;
 	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
 err_binder_alloc_buf_failed:
+err_bad_extra_size:
 	if (secctx)
 		security_release_secctx(secctx, secctx_sz);
 err_get_secctx_failed:
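The check added above relies on the fact that unsigned addition wraps: when a size_t sum overflows, the truncated result is smaller than either operand. A standalone sketch of the same idiom, in plain C with a hypothetical helper name:

	#include <stdbool.h>
	#include <stddef.h>

	/* Returns false when a + b would overflow size_t. */
	static bool size_add_ok(size_t a, size_t b, size_t *sum)
	{
		*sum = a + b;		/* wraps modulo SIZE_MAX + 1 on overflow */
		return *sum >= a;	/* a wrapped sum is smaller than each operand */
	}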
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index d0ad85900b79..3a1e6b3ccd10 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -973,6 +973,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
 	if (ACPI_SUCCESS(status)) {
 		hdp->hd_phys_address = addr.address.minimum;
 		hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
+		if (!hdp->hd_address)
+			return AE_ERROR;
 
 		if (hpet_is_known(hdp)) {
 			iounmap(hdp->hd_address);
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 540e8cd16ee6..de06fafb52ff 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -30,7 +30,7 @@ config EXTCON_ARIZONA
30 30
31config EXTCON_AXP288 31config EXTCON_AXP288
32 tristate "X-Power AXP288 EXTCON support" 32 tristate "X-Power AXP288 EXTCON support"
33 depends on MFD_AXP20X && USB_SUPPORT && X86 33 depends on MFD_AXP20X && USB_SUPPORT && X86 && ACPI
34 select USB_ROLE_SWITCH 34 select USB_ROLE_SWITCH
35 help 35 help
36 Say Y here to enable support for USB peripheral detection 36 Say Y here to enable support for USB peripheral detection
@@ -60,6 +60,13 @@ config EXTCON_INTEL_CHT_WC
60 Say Y here to enable extcon support for charger detection / control 60 Say Y here to enable extcon support for charger detection / control
61 on the Intel Cherrytrail Whiskey Cove PMIC. 61 on the Intel Cherrytrail Whiskey Cove PMIC.
62 62
63config EXTCON_INTEL_MRFLD
64 tristate "Intel Merrifield Basin Cove PMIC extcon driver"
65 depends on INTEL_SOC_PMIC_MRFLD
66 help
67 Say Y here to enable extcon support for charger detection / control
68 on the Intel Merrifield Basin Cove PMIC.
69
63config EXTCON_MAX14577 70config EXTCON_MAX14577
64 tristate "Maxim MAX14577/77836 EXTCON Support" 71 tristate "Maxim MAX14577/77836 EXTCON Support"
65 depends on MFD_MAX14577 72 depends on MFD_MAX14577
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 261ce4cfe209..d3941a735df3 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o
 obj-$(CONFIG_EXTCON_GPIO)	+= extcon-gpio.o
 obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
 obj-$(CONFIG_EXTCON_INTEL_CHT_WC) += extcon-intel-cht-wc.o
+obj-$(CONFIG_EXTCON_INTEL_MRFLD) += extcon-intel-mrfld.o
 obj-$(CONFIG_EXTCON_MAX14577)	+= extcon-max14577.o
 obj-$(CONFIG_EXTCON_MAX3355)	+= extcon-max3355.o
 obj-$(CONFIG_EXTCON_MAX77693)	+= extcon-max77693.o
diff --git a/drivers/extcon/devres.c b/drivers/extcon/devres.c
index f599aeddf8e5..f487d877ab5d 100644
--- a/drivers/extcon/devres.c
+++ b/drivers/extcon/devres.c
@@ -205,7 +205,7 @@ EXPORT_SYMBOL(devm_extcon_register_notifier);
 
 /**
  * devm_extcon_unregister_notifier()
-			- Resource-managed extcon_unregister_notifier()
+ *			- Resource-managed extcon_unregister_notifier()
  * @dev:	the device owning the extcon device being created
  * @edev:	the extcon device
  * @id:		the unique id among the extcon enumeration
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index da0e9bc4262f..9327479c719c 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1726,6 +1726,16 @@ static int arizona_extcon_remove(struct platform_device *pdev)
 	struct arizona_extcon_info *info = platform_get_drvdata(pdev);
 	struct arizona *arizona = info->arizona;
 	int jack_irq_rise, jack_irq_fall;
+	bool change;
+
+	regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
+				 ARIZONA_MICD_ENA, 0,
+				 &change);
+
+	if (change) {
+		regulator_disable(info->micvdd);
+		pm_runtime_put(info->dev);
+	}
 
 	gpiod_put(info->micd_pol_gpio);
 
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 5ef215297101..9d32150e68db 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -17,6 +17,8 @@
 #include <linux/regmap.h>
 #include <linux/slab.h>
 
+#include "extcon-intel.h"
+
 #define CHT_WC_PHYCTRL			0x5e07
 
 #define CHT_WC_CHGRCTRL0		0x5e16
@@ -29,7 +31,15 @@
 #define CHT_WC_CHGRCTRL0_DBPOFF		BIT(6)
 #define CHT_WC_CHGRCTRL0_CHR_WDT_NOKICK	BIT(7)
 
 #define CHT_WC_CHGRCTRL1		0x5e17
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_100	BIT(0)
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_150	BIT(1)
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_500	BIT(2)
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_900	BIT(3)
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_1500 BIT(4)
+#define CHT_WC_CHGRCTRL1_FTEMP_EVENT	BIT(5)
+#define CHT_WC_CHGRCTRL1_OTGMODE	BIT(6)
+#define CHT_WC_CHGRCTRL1_DBPEN		BIT(7)
 
 #define CHT_WC_USBSRC			0x5e29
 #define CHT_WC_USBSRC_STS_MASK		GENMASK(1, 0)
@@ -48,6 +58,13 @@
 #define CHT_WC_USBSRC_TYPE_OTHER	8
 #define CHT_WC_USBSRC_TYPE_DCP_EXTPHY	9
 
+#define CHT_WC_CHGDISCTRL		0x5e2f
+#define CHT_WC_CHGDISCTRL_OUT		BIT(0)
+/* 0 - open drain, 1 - regular push-pull output */
+#define CHT_WC_CHGDISCTRL_DRV		BIT(4)
+/* 0 - pin is controlled by SW, 1 - by HW */
+#define CHT_WC_CHGDISCTRL_FN		BIT(6)
+
 #define CHT_WC_PWRSRC_IRQ		0x6e03
 #define CHT_WC_PWRSRC_IRQ_MASK		0x6e0f
 #define CHT_WC_PWRSRC_STS		0x6e1e
@@ -65,15 +82,6 @@
 #define CHT_WC_VBUS_GPIO_CTLO_DRV_OD	BIT(4)
 #define CHT_WC_VBUS_GPIO_CTLO_DIR_OUT	BIT(5)
 
-enum cht_wc_usb_id {
-	USB_ID_OTG,
-	USB_ID_GND,
-	USB_ID_FLOAT,
-	USB_RID_A,
-	USB_RID_B,
-	USB_RID_C,
-};
-
 enum cht_wc_mux_select {
 	MUX_SEL_PMIC = 0,
 	MUX_SEL_SOC,
@@ -101,9 +109,9 @@ static int cht_wc_extcon_get_id(struct cht_wc_extcon_data *ext, int pwrsrc_sts)
 {
 	switch ((pwrsrc_sts & CHT_WC_PWRSRC_USBID_MASK) >> CHT_WC_PWRSRC_USBID_SHIFT) {
 	case CHT_WC_PWRSRC_RID_GND:
-		return USB_ID_GND;
+		return INTEL_USB_ID_GND;
 	case CHT_WC_PWRSRC_RID_FLOAT:
-		return USB_ID_FLOAT;
+		return INTEL_USB_ID_FLOAT;
 	case CHT_WC_PWRSRC_RID_ACA:
 	default:
 		/*
@@ -111,7 +119,7 @@ static int cht_wc_extcon_get_id(struct cht_wc_extcon_data *ext, int pwrsrc_sts)
 		 * the USBID GPADC channel here and determine ACA role
 		 * based on that.
 		 */
-		return USB_ID_FLOAT;
+		return INTEL_USB_ID_FLOAT;
 	}
 }
 
@@ -198,6 +206,30 @@ static void cht_wc_extcon_set_5v_boost(struct cht_wc_extcon_data *ext,
 		dev_err(ext->dev, "Error writing Vbus GPIO CTLO: %d\n", ret);
 }
 
+static void cht_wc_extcon_set_otgmode(struct cht_wc_extcon_data *ext,
+				      bool enable)
+{
+	unsigned int val = enable ? CHT_WC_CHGRCTRL1_OTGMODE : 0;
+	int ret;
+
+	ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL1,
+				 CHT_WC_CHGRCTRL1_OTGMODE, val);
+	if (ret)
+		dev_err(ext->dev, "Error updating CHGRCTRL1 reg: %d\n", ret);
+}
+
+static void cht_wc_extcon_enable_charging(struct cht_wc_extcon_data *ext,
+					  bool enable)
+{
+	unsigned int val = enable ? 0 : CHT_WC_CHGDISCTRL_OUT;
+	int ret;
+
+	ret = regmap_update_bits(ext->regmap, CHT_WC_CHGDISCTRL,
+				 CHT_WC_CHGDISCTRL_OUT, val);
+	if (ret)
+		dev_err(ext->dev, "Error updating CHGDISCTRL reg: %d\n", ret);
+}
+
 /* Small helper to sync EXTCON_CHG_USB_SDP and EXTCON_USB state */
 static void cht_wc_extcon_set_state(struct cht_wc_extcon_data *ext,
 				    unsigned int cable, bool state)
@@ -221,11 +253,17 @@ static void cht_wc_extcon_pwrsrc_event(struct cht_wc_extcon_data *ext)
 	}
 
 	id = cht_wc_extcon_get_id(ext, pwrsrc_sts);
-	if (id == USB_ID_GND) {
+	if (id == INTEL_USB_ID_GND) {
+		cht_wc_extcon_enable_charging(ext, false);
+		cht_wc_extcon_set_otgmode(ext, true);
+
 		/* The 5v boost causes a false VBUS / SDP detect, skip */
 		goto charger_det_done;
 	}
 
+	cht_wc_extcon_set_otgmode(ext, false);
+	cht_wc_extcon_enable_charging(ext, true);
+
 	/* Plugged into a host/charger or not connected? */
 	if (!(pwrsrc_sts & CHT_WC_PWRSRC_VBUS)) {
 		/* Route D+ and D- to PMIC for future charger detection */
@@ -248,7 +286,7 @@ set_state:
 		ext->previous_cable = cable;
 	}
 
-	ext->usb_host = ((id == USB_ID_GND) || (id == USB_RID_A));
+	ext->usb_host = ((id == INTEL_USB_ID_GND) || (id == INTEL_USB_RID_A));
 	extcon_set_state_sync(ext->edev, EXTCON_USB_HOST, ext->usb_host);
 }
 
@@ -278,6 +316,14 @@ static int cht_wc_extcon_sw_control(struct cht_wc_extcon_data *ext, bool enable)
 {
 	int ret, mask, val;
 
+	val = enable ? 0 : CHT_WC_CHGDISCTRL_FN;
+	ret = regmap_update_bits(ext->regmap, CHT_WC_CHGDISCTRL,
+				 CHT_WC_CHGDISCTRL_FN, val);
+	if (ret)
+		dev_err(ext->dev,
+			"Error setting sw control for CHGDIS pin: %d\n",
+			ret);
+
 	mask = CHT_WC_CHGRCTRL0_SWCONTROL | CHT_WC_CHGRCTRL0_CCSM_OFF;
 	val = enable ? mask : 0;
 	ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL0, mask, val);
@@ -329,7 +375,10 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
 	/* Enable sw control */
 	ret = cht_wc_extcon_sw_control(ext, true);
 	if (ret)
-		return ret;
+		goto disable_sw_control;
+
+	/* Disable charging by external battery charger */
+	cht_wc_extcon_enable_charging(ext, false);
 
 	/* Register extcon device */
 	ret = devm_extcon_dev_register(ext->dev, ext->edev);
diff --git a/drivers/extcon/extcon-intel-mrfld.c b/drivers/extcon/extcon-intel-mrfld.c
new file mode 100644
index 000000000000..f47016fb28a8
--- /dev/null
+++ b/drivers/extcon/extcon-intel-mrfld.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * extcon driver for Basin Cove PMIC
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/extcon-provider.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/mfd/intel_soc_pmic_mrfld.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "extcon-intel.h"
+
+#define BCOVE_USBIDCTRL			0x19
+#define BCOVE_USBIDCTRL_ID		BIT(0)
+#define BCOVE_USBIDCTRL_ACA		BIT(1)
+#define BCOVE_USBIDCTRL_ALL	(BCOVE_USBIDCTRL_ID | BCOVE_USBIDCTRL_ACA)
+
+#define BCOVE_USBIDSTS			0x1a
+#define BCOVE_USBIDSTS_GND		BIT(0)
+#define BCOVE_USBIDSTS_RARBRC_MASK	GENMASK(2, 1)
+#define BCOVE_USBIDSTS_RARBRC_SHIFT	1
+#define BCOVE_USBIDSTS_NO_ACA		0
+#define BCOVE_USBIDSTS_R_ID_A		1
+#define BCOVE_USBIDSTS_R_ID_B		2
+#define BCOVE_USBIDSTS_R_ID_C		3
+#define BCOVE_USBIDSTS_FLOAT		BIT(3)
+#define BCOVE_USBIDSTS_SHORT		BIT(4)
+
+#define BCOVE_CHGRIRQ_ALL	(BCOVE_CHGRIRQ_VBUSDET | BCOVE_CHGRIRQ_DCDET | \
+				 BCOVE_CHGRIRQ_BATTDET | BCOVE_CHGRIRQ_USBIDDET)
+
+#define BCOVE_CHGRCTRL0			0x4b
+#define BCOVE_CHGRCTRL0_CHGRRESET	BIT(0)
+#define BCOVE_CHGRCTRL0_EMRGCHREN	BIT(1)
+#define BCOVE_CHGRCTRL0_EXTCHRDIS	BIT(2)
+#define BCOVE_CHGRCTRL0_SWCONTROL	BIT(3)
+#define BCOVE_CHGRCTRL0_TTLCK		BIT(4)
+#define BCOVE_CHGRCTRL0_BIT_5		BIT(5)
+#define BCOVE_CHGRCTRL0_BIT_6		BIT(6)
+#define BCOVE_CHGRCTRL0_CHR_WDT_NOKICK	BIT(7)
+
+struct mrfld_extcon_data {
+	struct device *dev;
+	struct regmap *regmap;
+	struct extcon_dev *edev;
+	unsigned int status;
+	unsigned int id;
+};
+
+static const unsigned int mrfld_extcon_cable[] = {
+	EXTCON_USB,
+	EXTCON_USB_HOST,
+	EXTCON_CHG_USB_SDP,
+	EXTCON_CHG_USB_CDP,
+	EXTCON_CHG_USB_DCP,
+	EXTCON_CHG_USB_ACA,
+	EXTCON_NONE,
+};
+
+static int mrfld_extcon_clear(struct mrfld_extcon_data *data, unsigned int reg,
+			      unsigned int mask)
+{
+	return regmap_update_bits(data->regmap, reg, mask, 0x00);
+}
+
+static int mrfld_extcon_set(struct mrfld_extcon_data *data, unsigned int reg,
+			    unsigned int mask)
+{
+	return regmap_update_bits(data->regmap, reg, mask, 0xff);
+}
+
+static int mrfld_extcon_sw_control(struct mrfld_extcon_data *data, bool enable)
+{
+	unsigned int mask = BCOVE_CHGRCTRL0_SWCONTROL;
+	struct device *dev = data->dev;
+	int ret;
+
+	if (enable)
+		ret = mrfld_extcon_set(data, BCOVE_CHGRCTRL0, mask);
+	else
+		ret = mrfld_extcon_clear(data, BCOVE_CHGRCTRL0, mask);
+	if (ret)
+		dev_err(dev, "can't set SW control: %d\n", ret);
+	return ret;
+}
+
+static int mrfld_extcon_get_id(struct mrfld_extcon_data *data)
+{
+	struct regmap *regmap = data->regmap;
+	unsigned int id;
+	bool ground;
+	int ret;
+
+	ret = regmap_read(regmap, BCOVE_USBIDSTS, &id);
+	if (ret)
+		return ret;
+
+	if (id & BCOVE_USBIDSTS_FLOAT)
+		return INTEL_USB_ID_FLOAT;
+
+	switch ((id & BCOVE_USBIDSTS_RARBRC_MASK) >> BCOVE_USBIDSTS_RARBRC_SHIFT) {
+	case BCOVE_USBIDSTS_R_ID_A:
+		return INTEL_USB_RID_A;
+	case BCOVE_USBIDSTS_R_ID_B:
+		return INTEL_USB_RID_B;
+	case BCOVE_USBIDSTS_R_ID_C:
+		return INTEL_USB_RID_C;
+	}
+
+	/*
+	 * PMIC A0 reports USBIDSTS_GND = 1 for ID_GND,
+	 * but PMIC B0 reports USBIDSTS_GND = 0 for ID_GND.
+	 * Thus we must check this bit at last.
+	 */
+	ground = id & BCOVE_USBIDSTS_GND;
+	switch ('A' + BCOVE_MAJOR(data->id)) {
+	case 'A':
+		return ground ? INTEL_USB_ID_GND : INTEL_USB_ID_FLOAT;
+	case 'B':
+		return ground ? INTEL_USB_ID_FLOAT : INTEL_USB_ID_GND;
+	}
+
+	/* Unknown or unsupported type */
+	return INTEL_USB_ID_FLOAT;
+}
+
+static int mrfld_extcon_role_detect(struct mrfld_extcon_data *data)
+{
+	unsigned int id;
+	bool usb_host;
+	int ret;
+
+	ret = mrfld_extcon_get_id(data);
+	if (ret < 0)
+		return ret;
+
+	id = ret;
+
+	usb_host = (id == INTEL_USB_ID_GND) || (id == INTEL_USB_RID_A);
+	extcon_set_state_sync(data->edev, EXTCON_USB_HOST, usb_host);
+
+	return 0;
+}
+
+static int mrfld_extcon_cable_detect(struct mrfld_extcon_data *data)
+{
+	struct regmap *regmap = data->regmap;
+	unsigned int status, change;
+	int ret;
+
+	/*
+	 * It seems SCU firmware clears the content of BCOVE_CHGRIRQ1
+	 * and makes it useless for OS. Instead we compare a previously
+	 * stored status to the current one, provided by BCOVE_SCHGRIRQ1.
+	 */
+	ret = regmap_read(regmap, BCOVE_SCHGRIRQ1, &status);
+	if (ret)
+		return ret;
+
+	change = status ^ data->status;
+	if (!change)
+		return -ENODATA;
+
+	if (change & BCOVE_CHGRIRQ_USBIDDET) {
+		ret = mrfld_extcon_role_detect(data);
+		if (ret)
+			return ret;
+	}
+
+	data->status = status;
+
+	return 0;
+}
+
+static irqreturn_t mrfld_extcon_interrupt(int irq, void *dev_id)
+{
+	struct mrfld_extcon_data *data = dev_id;
+	int ret;
+
+	ret = mrfld_extcon_cable_detect(data);
+
+	mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
+
+	return ret ? IRQ_NONE : IRQ_HANDLED;
+}
+
+static int mrfld_extcon_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+	struct regmap *regmap = pmic->regmap;
+	struct mrfld_extcon_data *data;
+	unsigned int id;
+	int irq, ret;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->dev = dev;
+	data->regmap = regmap;
+
+	data->edev = devm_extcon_dev_allocate(dev, mrfld_extcon_cable);
+	if (IS_ERR(data->edev))
+		return -ENOMEM;
+
+	ret = devm_extcon_dev_register(dev, data->edev);
+	if (ret < 0) {
+		dev_err(dev, "can't register extcon device: %d\n", ret);
+		return ret;
+	}
+
+	ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_extcon_interrupt,
+					IRQF_ONESHOT | IRQF_SHARED, pdev->name,
+					data);
+	if (ret) {
+		dev_err(dev, "can't register IRQ handler: %d\n", ret);
+		return ret;
+	}
+
+	ret = regmap_read(regmap, BCOVE_ID, &id);
+	if (ret) {
+		dev_err(dev, "can't read PMIC ID: %d\n", ret);
+		return ret;
+	}
+
+	data->id = id;
+
+	ret = mrfld_extcon_sw_control(data, true);
+	if (ret)
+		return ret;
+
+	/* Get initial state */
+	mrfld_extcon_role_detect(data);
+
+	mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
+	mrfld_extcon_clear(data, BCOVE_MCHGRIRQ1, BCOVE_CHGRIRQ_ALL);
+
+	mrfld_extcon_set(data, BCOVE_USBIDCTRL, BCOVE_USBIDCTRL_ALL);
+
+	platform_set_drvdata(pdev, data);
+
+	return 0;
+}
+
+static int mrfld_extcon_remove(struct platform_device *pdev)
+{
+	struct mrfld_extcon_data *data = platform_get_drvdata(pdev);
+
+	mrfld_extcon_sw_control(data, false);
+
+	return 0;
+}
+
+static const struct platform_device_id mrfld_extcon_id_table[] = {
+	{ .name = "mrfld_bcove_pwrsrc" },
+	{}
+};
+MODULE_DEVICE_TABLE(platform, mrfld_extcon_id_table);
+
+static struct platform_driver mrfld_extcon_driver = {
+	.driver = {
+		.name	= "mrfld_bcove_pwrsrc",
+	},
+	.probe		= mrfld_extcon_probe,
+	.remove		= mrfld_extcon_remove,
+	.id_table	= mrfld_extcon_id_table,
+};
+module_platform_driver(mrfld_extcon_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("extcon driver for Intel Merrifield Basin Cove PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon-intel.h b/drivers/extcon/extcon-intel.h
new file mode 100644
index 000000000000..0ad645ec7b33
--- /dev/null
+++ b/drivers/extcon/extcon-intel.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for Intel extcon hardware
+ *
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __EXTCON_INTEL_H__
+#define __EXTCON_INTEL_H__
+
+enum extcon_intel_usb_id {
+	INTEL_USB_ID_OTG,
+	INTEL_USB_ID_GND,
+	INTEL_USB_ID_FLOAT,
+	INTEL_USB_RID_A,
+	INTEL_USB_RID_B,
+	INTEL_USB_RID_C,
+};
+
+#endif	/* __EXTCON_INTEL_H__ */
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index c0c0b4e4e281..f240946ed701 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -254,7 +254,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
 
 static int vpd_sections_init(phys_addr_t physaddr)
 {
-	struct vpd_cbmem __iomem *temp;
+	struct vpd_cbmem *temp;
 	struct vpd_cbmem header;
 	int ret = 0;
 
@@ -262,7 +262,7 @@ static int vpd_sections_init(phys_addr_t physaddr)
 	if (!temp)
 		return -ENOMEM;
 
-	memcpy_fromio(&header, temp, sizeof(struct vpd_cbmem));
+	memcpy(&header, temp, sizeof(struct vpd_cbmem));
 	memunmap(temp);
 
 	if (header.magic != VPD_CBMEM_MAGIC)
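The change above follows from memremap() returning an ordinary kernel pointer, unlike ioremap(): plain memcpy() is the matching accessor and the __iomem annotation was misleading. A minimal sketch of the pattern, with a hypothetical helper around caller-supplied bounds:

	#include <linux/io.h>
	#include <linux/string.h>

	/* Copy a firmware-provided region out of normal RAM. */
	static int copy_fw_region(phys_addr_t pa, void *out, size_t len)
	{
		void *virt = memremap(pa, len, MEMREMAP_WB);	/* plain pointer */

		if (!virt)
			return -ENOMEM;
		memcpy(out, virt, len);	/* not memcpy_fromio(): no __iomem here */
		memunmap(virt);
		return 0;
	}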
diff --git a/drivers/gnss/ubx.c b/drivers/gnss/ubx.c
index 12568aebb7f6..7b05bc40532e 100644
--- a/drivers/gnss/ubx.c
+++ b/drivers/gnss/ubx.c
@@ -130,6 +130,7 @@ static void ubx_remove(struct serdev_device *serdev)
 
 #ifdef CONFIG_OF
 static const struct of_device_id ubx_of_match[] = {
+	{ .compatible = "u-blox,neo-6m" },
 	{ .compatible = "u-blox,neo-8" },
 	{ .compatible = "u-blox,neo-m8" },
 	{},
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index ad34380cac49..18e8d03321d6 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -75,20 +75,13 @@ config CORESIGHT_SOURCE_ETM4X
 	bool "CoreSight Embedded Trace Macrocell 4.x driver"
 	depends on ARM64
 	select CORESIGHT_LINKS_AND_SINKS
+	select PID_IN_CONTEXTIDR
 	help
 	  This driver provides support for the ETM4.x tracer module, tracing the
 	  instructions that a processor is executing. This is primarily useful
 	  for instruction level tracing. Depending on the implemented version
 	  data tracing may also be available.
 
-config CORESIGHT_DYNAMIC_REPLICATOR
-	bool "CoreSight Programmable Replicator driver"
-	depends on CORESIGHT_LINKS_AND_SINKS
-	help
-	  This enables support for dynamic CoreSight replicator link driver.
-	  The programmable ATB replicator allows independent filtering of the
-	  trace data based on the traceid.
-
 config CORESIGHT_STM
 	bool "CoreSight System Trace Macrocell driver"
 	depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 41870ded51a3..3b435aa42af5 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
15 coresight-etm3x-sysfs.o 15 coresight-etm3x-sysfs.o
16obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \ 16obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
17 coresight-etm4x-sysfs.o 17 coresight-etm4x-sysfs.o
18obj-$(CONFIG_CORESIGHT_DYNAMIC_REPLICATOR) += coresight-dynamic-replicator.o
19obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o 18obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
20obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o 19obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
21obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o 20obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 170fbb66bda2..4ea68a3522e9 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -485,12 +485,12 @@ static int catu_disable(struct coresight_device *csdev, void *__unused)
485 return rc; 485 return rc;
486} 486}
487 487
488const struct coresight_ops_helper catu_helper_ops = { 488static const struct coresight_ops_helper catu_helper_ops = {
489 .enable = catu_enable, 489 .enable = catu_enable,
490 .disable = catu_disable, 490 .disable = catu_disable,
491}; 491};
492 492
493const struct coresight_ops catu_ops = { 493static const struct coresight_ops catu_ops = {
494 .helper_ops = &catu_helper_ops, 494 .helper_ops = &catu_helper_ops,
495}; 495};
496 496
@@ -557,8 +557,9 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
557 drvdata->csdev = coresight_register(&catu_desc); 557 drvdata->csdev = coresight_register(&catu_desc);
558 if (IS_ERR(drvdata->csdev)) 558 if (IS_ERR(drvdata->csdev))
559 ret = PTR_ERR(drvdata->csdev); 559 ret = PTR_ERR(drvdata->csdev);
560 else
561 pm_runtime_put(&adev->dev);
560out: 562out:
561 pm_runtime_put(&adev->dev);
562 return ret; 563 return ret;
563} 564}
564 565
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
index 1b281f0dcccc..1d2ad183fd92 100644
--- a/drivers/hwtracing/coresight/coresight-catu.h
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -109,11 +109,6 @@ static inline bool coresight_is_catu_device(struct coresight_device *csdev)
109 return true; 109 return true;
110} 110}
111 111
112#ifdef CONFIG_CORESIGHT_CATU
113extern const struct etr_buf_operations etr_catu_buf_ops; 112extern const struct etr_buf_operations etr_catu_buf_ops;
114#else
115/* Dummy declaration for the CATU ops */
116static const struct etr_buf_operations etr_catu_buf_ops;
117#endif
118 113
119#endif 114#endif
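
The block deleted above is a classic header pitfall: defining a dummy "static const struct etr_buf_operations" in a header hands every includer its own private, unused copy instead of one shared object. The replacement, visible in the coresight-tmc-etr.c hunk further down, keeps the single extern declaration and instead lets the mode table carry a NULL slot when CATU is compiled out — roughly:

    /* Mirrors the tmc-etr.c hunk later in this diff. */
    static const struct etr_buf_operations *etr_buf_ops[] = {
            [ETR_MODE_CATU] = IS_ENABLED(CONFIG_CORESIGHT_CATU)
                              ? &etr_catu_buf_ops : NULL,
    };

    /* ...at the price that every lookup must tolerate the NULL slot: */
    if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
            rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf, node, pages);
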
diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
deleted file mode 100644
index 299667b887fc..000000000000
--- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
+++ /dev/null
@@ -1,255 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
4 */
5
6#include <linux/amba/bus.h>
7#include <linux/clk.h>
8#include <linux/coresight.h>
9#include <linux/device.h>
10#include <linux/err.h>
11#include <linux/init.h>
12#include <linux/io.h>
13#include <linux/kernel.h>
14#include <linux/of.h>
15#include <linux/pm_runtime.h>
16#include <linux/slab.h>
17
18#include "coresight-priv.h"
19
20#define REPLICATOR_IDFILTER0 0x000
21#define REPLICATOR_IDFILTER1 0x004
22
23/**
24 * struct replicator_state - specifics associated to a replicator component
25 * @base: memory mapped base address for this component.
26 * @dev: the device entity associated with this component
27 * @atclk: optional clock for the core parts of the replicator.
28 * @csdev: component vitals needed by the framework
29 */
30struct replicator_state {
31 void __iomem *base;
32 struct device *dev;
33 struct clk *atclk;
34 struct coresight_device *csdev;
35};
36
37/*
38 * replicator_reset : Reset the replicator configuration to sane values.
39 */
40static void replicator_reset(struct replicator_state *drvdata)
41{
42 CS_UNLOCK(drvdata->base);
43
44 if (!coresight_claim_device_unlocked(drvdata->base)) {
45 writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
46 writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
47 coresight_disclaim_device_unlocked(drvdata->base);
48 }
49
50 CS_LOCK(drvdata->base);
51}
52
53static int replicator_enable(struct coresight_device *csdev, int inport,
54 int outport)
55{
56 int rc = 0;
57 u32 reg;
58 struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
59
60 switch (outport) {
61 case 0:
62 reg = REPLICATOR_IDFILTER0;
63 break;
64 case 1:
65 reg = REPLICATOR_IDFILTER1;
66 break;
67 default:
68 WARN_ON(1);
69 return -EINVAL;
70 }
71
72 CS_UNLOCK(drvdata->base);
73
74 if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
75 (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
76 rc = coresight_claim_device_unlocked(drvdata->base);
77
78 /* Ensure that the outport is enabled. */
79 if (!rc) {
80 writel_relaxed(0x00, drvdata->base + reg);
81 dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
82 }
83
84 CS_LOCK(drvdata->base);
85
86 return rc;
87}
88
89static void replicator_disable(struct coresight_device *csdev, int inport,
90 int outport)
91{
92 u32 reg;
93 struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
94
95 switch (outport) {
96 case 0:
97 reg = REPLICATOR_IDFILTER0;
98 break;
99 case 1:
100 reg = REPLICATOR_IDFILTER1;
101 break;
102 default:
103 WARN_ON(1);
104 return;
105 }
106
107 CS_UNLOCK(drvdata->base);
108
109 /* disable the flow of ATB data through port */
110 writel_relaxed(0xff, drvdata->base + reg);
111
112 if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
113 (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
114 coresight_disclaim_device_unlocked(drvdata->base);
115 CS_LOCK(drvdata->base);
116
117 dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
118}
119
120static const struct coresight_ops_link replicator_link_ops = {
121 .enable = replicator_enable,
122 .disable = replicator_disable,
123};
124
125static const struct coresight_ops replicator_cs_ops = {
126 .link_ops = &replicator_link_ops,
127};
128
129#define coresight_replicator_reg(name, offset) \
130 coresight_simple_reg32(struct replicator_state, name, offset)
131
132coresight_replicator_reg(idfilter0, REPLICATOR_IDFILTER0);
133coresight_replicator_reg(idfilter1, REPLICATOR_IDFILTER1);
134
135static struct attribute *replicator_mgmt_attrs[] = {
136 &dev_attr_idfilter0.attr,
137 &dev_attr_idfilter1.attr,
138 NULL,
139};
140
141static const struct attribute_group replicator_mgmt_group = {
142 .attrs = replicator_mgmt_attrs,
143 .name = "mgmt",
144};
145
146static const struct attribute_group *replicator_groups[] = {
147 &replicator_mgmt_group,
148 NULL,
149};
150
151static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
152{
153 int ret;
154 struct device *dev = &adev->dev;
155 struct resource *res = &adev->res;
156 struct coresight_platform_data *pdata = NULL;
157 struct replicator_state *drvdata;
158 struct coresight_desc desc = { 0 };
159 struct device_node *np = adev->dev.of_node;
160 void __iomem *base;
161
162 if (np) {
163 pdata = of_get_coresight_platform_data(dev, np);
164 if (IS_ERR(pdata))
165 return PTR_ERR(pdata);
166 adev->dev.platform_data = pdata;
167 }
168
169 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
170 if (!drvdata)
171 return -ENOMEM;
172
173 drvdata->dev = &adev->dev;
174 drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
175 if (!IS_ERR(drvdata->atclk)) {
176 ret = clk_prepare_enable(drvdata->atclk);
177 if (ret)
178 return ret;
179 }
180
181 /* Validity for the resource is already checked by the AMBA core */
182 base = devm_ioremap_resource(dev, res);
183 if (IS_ERR(base))
184 return PTR_ERR(base);
185
186 drvdata->base = base;
187 dev_set_drvdata(dev, drvdata);
188 pm_runtime_put(&adev->dev);
189
190 desc.type = CORESIGHT_DEV_TYPE_LINK;
191 desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
192 desc.ops = &replicator_cs_ops;
193 desc.pdata = adev->dev.platform_data;
194 desc.dev = &adev->dev;
195 desc.groups = replicator_groups;
196 drvdata->csdev = coresight_register(&desc);
197
198 if (!IS_ERR(drvdata->csdev)) {
199 replicator_reset(drvdata);
200 return 0;
201 }
202 return PTR_ERR(drvdata->csdev);
203}
204
205#ifdef CONFIG_PM
206static int replicator_runtime_suspend(struct device *dev)
207{
208 struct replicator_state *drvdata = dev_get_drvdata(dev);
209
210 if (drvdata && !IS_ERR(drvdata->atclk))
211 clk_disable_unprepare(drvdata->atclk);
212
213 return 0;
214}
215
216static int replicator_runtime_resume(struct device *dev)
217{
218 struct replicator_state *drvdata = dev_get_drvdata(dev);
219
220 if (drvdata && !IS_ERR(drvdata->atclk))
221 clk_prepare_enable(drvdata->atclk);
222
223 return 0;
224}
225#endif
226
227static const struct dev_pm_ops replicator_dev_pm_ops = {
228 SET_RUNTIME_PM_OPS(replicator_runtime_suspend,
229 replicator_runtime_resume,
230 NULL)
231};
232
233static const struct amba_id replicator_ids[] = {
234 {
235 .id = 0x000bb909,
236 .mask = 0x000fffff,
237 },
238 {
239 /* Coresight SoC-600 */
240 .id = 0x000bb9ec,
241 .mask = 0x000fffff,
242 },
243 { 0, 0 },
244};
245
246static struct amba_driver replicator_driver = {
247 .drv = {
248 .name = "coresight-dynamic-replicator",
249 .pm = &replicator_dev_pm_ops,
250 .suppress_bind_attrs = true,
251 },
252 .probe = replicator_probe,
253 .id_table = replicator_ids,
254};
255builtin_amba_driver(replicator_driver);
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 105782ea64c7..4ee4c80a4354 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -5,6 +5,7 @@
5 * Description: CoreSight Embedded Trace Buffer driver 5 * Description: CoreSight Embedded Trace Buffer driver
6 */ 6 */
7 7
8#include <linux/atomic.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
9#include <linux/init.h> 10#include <linux/init.h>
10#include <linux/types.h> 11#include <linux/types.h>
@@ -71,6 +72,8 @@
71 * @miscdev: specifics to handle "/dev/xyz.etb" entry. 72 * @miscdev: specifics to handle "/dev/xyz.etb" entry.
72 * @spinlock: only one at a time pls. 73 * @spinlock: only one at a time pls.
73 * @reading: synchronise user space access to etb buffer. 74 * @reading: synchronise user space access to etb buffer.
75 * @pid: Process ID of the process being monitored by the session
76 * that is using this component.
74 * @buf: area of memory where ETB buffer content gets sent. 77 * @buf: area of memory where ETB buffer content gets sent.
75 * @mode: this ETB is being used. 78 * @mode: this ETB is being used.
76 * @buffer_depth: size of @buf. 79 * @buffer_depth: size of @buf.
@@ -84,6 +87,7 @@ struct etb_drvdata {
84 struct miscdevice miscdev; 87 struct miscdevice miscdev;
85 spinlock_t spinlock; 88 spinlock_t spinlock;
86 local_t reading; 89 local_t reading;
90 pid_t pid;
87 u8 *buf; 91 u8 *buf;
88 u32 mode; 92 u32 mode;
89 u32 buffer_depth; 93 u32 buffer_depth;
@@ -93,17 +97,9 @@ struct etb_drvdata {
93static int etb_set_buffer(struct coresight_device *csdev, 97static int etb_set_buffer(struct coresight_device *csdev,
94 struct perf_output_handle *handle); 98 struct perf_output_handle *handle);
95 99
96static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata) 100static inline unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
97{ 101{
98 u32 depth = 0; 102 return readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
99
100 pm_runtime_get_sync(drvdata->dev);
101
102 /* RO registers don't need locking */
103 depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
104
105 pm_runtime_put(drvdata->dev);
106 return depth;
107} 103}
108 104
109static void __etb_enable_hw(struct etb_drvdata *drvdata) 105static void __etb_enable_hw(struct etb_drvdata *drvdata)
@@ -159,14 +155,15 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
159 goto out; 155 goto out;
160 } 156 }
161 157
162 /* Nothing to do, the tracer is already enabled. */ 158 if (drvdata->mode == CS_MODE_DISABLED) {
163 if (drvdata->mode == CS_MODE_SYSFS) 159 ret = etb_enable_hw(drvdata);
164 goto out; 160 if (ret)
161 goto out;
165 162
166 ret = etb_enable_hw(drvdata);
167 if (!ret)
168 drvdata->mode = CS_MODE_SYSFS; 163 drvdata->mode = CS_MODE_SYSFS;
164 }
169 165
166 atomic_inc(csdev->refcnt);
170out: 167out:
171 spin_unlock_irqrestore(&drvdata->spinlock, flags); 168 spin_unlock_irqrestore(&drvdata->spinlock, flags);
172 return ret; 169 return ret;
@@ -175,29 +172,52 @@ out:
175static int etb_enable_perf(struct coresight_device *csdev, void *data) 172static int etb_enable_perf(struct coresight_device *csdev, void *data)
176{ 173{
177 int ret = 0; 174 int ret = 0;
175 pid_t pid;
178 unsigned long flags; 176 unsigned long flags;
179 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 177 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
178 struct perf_output_handle *handle = data;
180 179
181 spin_lock_irqsave(&drvdata->spinlock, flags); 180 spin_lock_irqsave(&drvdata->spinlock, flags);
182 181
 183 /* No need to continue if the component is already in use. */ 182 /* No need to continue if the component is already in use by sysFS. */
184 if (drvdata->mode != CS_MODE_DISABLED) { 183 if (drvdata->mode == CS_MODE_SYSFS) {
184 ret = -EBUSY;
185 goto out;
186 }
187
188 /* Get a handle on the pid of the process to monitor */
189 pid = task_pid_nr(handle->event->owner);
190
191 if (drvdata->pid != -1 && drvdata->pid != pid) {
185 ret = -EBUSY; 192 ret = -EBUSY;
186 goto out; 193 goto out;
187 } 194 }
188 195
189 /* 196 /*
197 * No HW configuration is needed if the sink is already in
198 * use for this session.
199 */
200 if (drvdata->pid == pid) {
201 atomic_inc(csdev->refcnt);
202 goto out;
203 }
204
205 /*
190 * We don't have an internal state to clean up if we fail to setup 206 * We don't have an internal state to clean up if we fail to setup
191 * the perf buffer. So we can perform the step before we turn the 207 * the perf buffer. So we can perform the step before we turn the
192 * ETB on and leave without cleaning up. 208 * ETB on and leave without cleaning up.
193 */ 209 */
194 ret = etb_set_buffer(csdev, (struct perf_output_handle *)data); 210 ret = etb_set_buffer(csdev, handle);
195 if (ret) 211 if (ret)
196 goto out; 212 goto out;
197 213
198 ret = etb_enable_hw(drvdata); 214 ret = etb_enable_hw(drvdata);
199 if (!ret) 215 if (!ret) {
216 /* Associate with monitored process. */
217 drvdata->pid = pid;
200 drvdata->mode = CS_MODE_PERF; 218 drvdata->mode = CS_MODE_PERF;
219 atomic_inc(csdev->refcnt);
220 }
201 221
202out: 222out:
203 spin_unlock_irqrestore(&drvdata->spinlock, flags); 223 spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -325,27 +345,35 @@ static void etb_disable_hw(struct etb_drvdata *drvdata)
325 coresight_disclaim_device(drvdata->base); 345 coresight_disclaim_device(drvdata->base);
326} 346}
327 347
328static void etb_disable(struct coresight_device *csdev) 348static int etb_disable(struct coresight_device *csdev)
329{ 349{
330 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 350 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
331 unsigned long flags; 351 unsigned long flags;
332 352
333 spin_lock_irqsave(&drvdata->spinlock, flags); 353 spin_lock_irqsave(&drvdata->spinlock, flags);
334 354
335 /* Disable the ETB only if it needs to */ 355 if (atomic_dec_return(csdev->refcnt)) {
336 if (drvdata->mode != CS_MODE_DISABLED) { 356 spin_unlock_irqrestore(&drvdata->spinlock, flags);
337 etb_disable_hw(drvdata); 357 return -EBUSY;
338 drvdata->mode = CS_MODE_DISABLED;
339 } 358 }
359
360 /* Complain if we (somehow) got out of sync */
361 WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
362 etb_disable_hw(drvdata);
363 /* Dissociate from monitored process. */
364 drvdata->pid = -1;
365 drvdata->mode = CS_MODE_DISABLED;
340 spin_unlock_irqrestore(&drvdata->spinlock, flags); 366 spin_unlock_irqrestore(&drvdata->spinlock, flags);
341 367
342 dev_dbg(drvdata->dev, "ETB disabled\n"); 368 dev_dbg(drvdata->dev, "ETB disabled\n");
369 return 0;
343} 370}
344 371
345static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu, 372static void *etb_alloc_buffer(struct coresight_device *csdev,
346 void **pages, int nr_pages, bool overwrite) 373 struct perf_event *event, void **pages,
374 int nr_pages, bool overwrite)
347{ 375{
348 int node; 376 int node, cpu = event->cpu;
349 struct cs_buffers *buf; 377 struct cs_buffers *buf;
350 378
351 if (cpu == -1) 379 if (cpu == -1)
@@ -404,7 +432,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
404 const u32 *barrier; 432 const u32 *barrier;
405 u32 read_ptr, write_ptr, capacity; 433 u32 read_ptr, write_ptr, capacity;
406 u32 status, read_data; 434 u32 status, read_data;
407 unsigned long offset, to_read; 435 unsigned long offset, to_read = 0, flags;
408 struct cs_buffers *buf = sink_config; 436 struct cs_buffers *buf = sink_config;
409 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 437 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
410 438
@@ -413,6 +441,12 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
413 441
414 capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS; 442 capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
415 443
444 spin_lock_irqsave(&drvdata->spinlock, flags);
445
446 /* Don't do anything if another tracer is using this sink */
447 if (atomic_read(csdev->refcnt) != 1)
448 goto out;
449
416 __etb_disable_hw(drvdata); 450 __etb_disable_hw(drvdata);
417 CS_UNLOCK(drvdata->base); 451 CS_UNLOCK(drvdata->base);
418 452
@@ -523,6 +557,8 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
523 } 557 }
524 __etb_enable_hw(drvdata); 558 __etb_enable_hw(drvdata);
525 CS_LOCK(drvdata->base); 559 CS_LOCK(drvdata->base);
560out:
561 spin_unlock_irqrestore(&drvdata->spinlock, flags);
526 562
527 return to_read; 563 return to_read;
528} 564}
@@ -720,7 +756,6 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
720 spin_lock_init(&drvdata->spinlock); 756 spin_lock_init(&drvdata->spinlock);
721 757
722 drvdata->buffer_depth = etb_get_buffer_depth(drvdata); 758 drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
723 pm_runtime_put(&adev->dev);
724 759
725 if (drvdata->buffer_depth & 0x80000000) 760 if (drvdata->buffer_depth & 0x80000000)
726 return -EINVAL; 761 return -EINVAL;
@@ -730,6 +765,9 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
730 if (!drvdata->buf) 765 if (!drvdata->buf)
731 return -ENOMEM; 766 return -ENOMEM;
732 767
768 /* This device is not associated with a session */
769 drvdata->pid = -1;
770
733 desc.type = CORESIGHT_DEV_TYPE_SINK; 771 desc.type = CORESIGHT_DEV_TYPE_SINK;
734 desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER; 772 desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
735 desc.ops = &etb_cs_ops; 773 desc.ops = &etb_cs_ops;
@@ -747,6 +785,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
747 if (ret) 785 if (ret)
748 goto err_misc_register; 786 goto err_misc_register;
749 787
788 pm_runtime_put(&adev->dev);
750 return 0; 789 return 0;
751 790
752err_misc_register: 791err_misc_register:
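
Taken together, the etb10 hunks replace the old single-owner model with a reference count on the sink plus the pid of the owning perf session, so several ETMs tracing for the same session can share one ETB. A condensed sketch of that contract, assuming the surrounding driver context (helper names hypothetical, locking and error handling elided):

    static int sink_enable_for(struct coresight_device *csdev,
                               struct etb_drvdata *drvdata, pid_t pid)
    {
            /* A different session already owns the sink. */
            if (drvdata->pid != -1 && drvdata->pid != pid)
                    return -EBUSY;

            /* The first user of a session programs the hardware... */
            if (drvdata->pid == -1) {
                    etb_enable_hw(drvdata);
                    drvdata->pid = pid;
            }

            /* ...and every user, first or not, takes a reference. */
            atomic_inc(csdev->refcnt);
            return 0;
    }

    static int sink_disable(struct coresight_device *csdev,
                            struct etb_drvdata *drvdata)
    {
            if (atomic_dec_return(csdev->refcnt))
                    return -EBUSY;          /* other users remain */

            etb_disable_hw(drvdata);        /* last one out stops the HW */
            drvdata->pid = -1;
            return 0;
    }
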
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 4d5a2b9f9d6a..3c6294432748 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -29,6 +29,7 @@ static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
29 29
30/* ETMv3.5/PTM's ETMCR is 'config' */ 30/* ETMv3.5/PTM's ETMCR is 'config' */
31PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC)); 31PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
32PMU_FORMAT_ATTR(contextid, "config:" __stringify(ETM_OPT_CTXTID));
32PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS)); 33PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
33PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK)); 34PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
34/* Sink ID - same for all ETMs */ 35/* Sink ID - same for all ETMs */
@@ -36,6 +37,7 @@ PMU_FORMAT_ATTR(sinkid, "config2:0-31");
36 37
37static struct attribute *etm_config_formats_attr[] = { 38static struct attribute *etm_config_formats_attr[] = {
38 &format_attr_cycacc.attr, 39 &format_attr_cycacc.attr,
40 &format_attr_contextid.attr,
39 &format_attr_timestamp.attr, 41 &format_attr_timestamp.attr,
40 &format_attr_retstack.attr, 42 &format_attr_retstack.attr,
41 &format_attr_sinkid.attr, 43 &format_attr_sinkid.attr,
@@ -118,23 +120,34 @@ out:
118 return ret; 120 return ret;
119} 121}
120 122
123static void free_sink_buffer(struct etm_event_data *event_data)
124{
125 int cpu;
126 cpumask_t *mask = &event_data->mask;
127 struct coresight_device *sink;
128
129 if (WARN_ON(cpumask_empty(mask)))
130 return;
131
132 if (!event_data->snk_config)
133 return;
134
135 cpu = cpumask_first(mask);
136 sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
137 sink_ops(sink)->free_buffer(event_data->snk_config);
138}
139
121static void free_event_data(struct work_struct *work) 140static void free_event_data(struct work_struct *work)
122{ 141{
123 int cpu; 142 int cpu;
124 cpumask_t *mask; 143 cpumask_t *mask;
125 struct etm_event_data *event_data; 144 struct etm_event_data *event_data;
126 struct coresight_device *sink;
127 145
128 event_data = container_of(work, struct etm_event_data, work); 146 event_data = container_of(work, struct etm_event_data, work);
129 mask = &event_data->mask; 147 mask = &event_data->mask;
130 148
131 /* Free the sink buffers, if there are any */ 149 /* Free the sink buffers, if there are any */
132 if (event_data->snk_config && !WARN_ON(cpumask_empty(mask))) { 150 free_sink_buffer(event_data);
133 cpu = cpumask_first(mask);
134 sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
135 if (sink_ops(sink)->free_buffer)
136 sink_ops(sink)->free_buffer(event_data->snk_config);
137 }
138 151
139 for_each_cpu(cpu, mask) { 152 for_each_cpu(cpu, mask) {
140 struct list_head **ppath; 153 struct list_head **ppath;
@@ -213,7 +226,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
213 sink = coresight_get_enabled_sink(true); 226 sink = coresight_get_enabled_sink(true);
214 } 227 }
215 228
216 if (!sink || !sink_ops(sink)->alloc_buffer) 229 if (!sink)
217 goto err; 230 goto err;
218 231
219 mask = &event_data->mask; 232 mask = &event_data->mask;
@@ -259,9 +272,12 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
259 if (cpu >= nr_cpu_ids) 272 if (cpu >= nr_cpu_ids)
260 goto err; 273 goto err;
261 274
275 if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
276 goto err;
277
262 /* Allocate the sink buffer for this session */ 278 /* Allocate the sink buffer for this session */
263 event_data->snk_config = 279 event_data->snk_config =
264 sink_ops(sink)->alloc_buffer(sink, cpu, pages, 280 sink_ops(sink)->alloc_buffer(sink, event, pages,
265 nr_pages, overwrite); 281 nr_pages, overwrite);
266 if (!event_data->snk_config) 282 if (!event_data->snk_config)
267 goto err; 283 goto err;
@@ -566,7 +582,8 @@ static int __init etm_perf_init(void)
566{ 582{
567 int ret; 583 int ret;
568 584
569 etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE; 585 etm_pmu.capabilities = (PERF_PMU_CAP_EXCLUSIVE |
586 PERF_PMU_CAP_ITRACE);
570 587
571 etm_pmu.attr_groups = etm_pmu_attr_groups; 588 etm_pmu.attr_groups = etm_pmu_attr_groups;
572 etm_pmu.task_ctx_nr = perf_sw_context; 589 etm_pmu.task_ctx_nr = perf_sw_context;
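
With the new format attribute, context ID tracing becomes a per-event knob: user space sets the ETM_OPT_CTXTID bit of the event's config word — spelled cs_etm/contextid/ in perf's event syntax, assuming the PMU is registered under its usual cs_etm name — and PERF_PMU_CAP_ITRACE advertises the PMU to the perf core as an instruction-trace source. The free_sink_buffer() refactor and the explicit alloc_buffer/free_buffer check also make the sink ops contract symmetric: a sink only qualifies for a perf session if it can both allocate and free its buffer.
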
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 08ce37c9475d..8bb0092c7ec2 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -138,8 +138,11 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
138 drvdata->base + TRCCNTVRn(i)); 138 drvdata->base + TRCCNTVRn(i));
139 } 139 }
140 140
141 /* Resource selector pair 0 is always implemented and reserved */ 141 /*
142 for (i = 0; i < drvdata->nr_resource * 2; i++) 142 * Resource selector pair 0 is always implemented and reserved. As
143 * such start at 2.
144 */
145 for (i = 2; i < drvdata->nr_resource * 2; i++)
143 writel_relaxed(config->res_ctrl[i], 146 writel_relaxed(config->res_ctrl[i],
144 drvdata->base + TRCRSCTLRn(i)); 147 drvdata->base + TRCRSCTLRn(i));
145 148
@@ -201,6 +204,91 @@ static void etm4_enable_hw_smp_call(void *info)
201 arg->rc = etm4_enable_hw(arg->drvdata); 204 arg->rc = etm4_enable_hw(arg->drvdata);
202} 205}
203 206
207/*
208 * The goal of function etm4_config_timestamp_event() is to configure a
209 * counter that will tell the tracer to emit a timestamp packet when it
 210 * reaches zero. This is done in order to get a more fine-grained idea
211 * of when instructions are executed so that they can be correlated
212 * with execution on other CPUs.
213 *
 214 * To do this the counter itself is configured to self-reload and
 215 * TRCRSCTLR1 (always true) is used to get the counter to decrement. From
216 * there a resource selector is configured with the counter and the
217 * timestamp control register to use the resource selector to trigger the
218 * event that will insert a timestamp packet in the stream.
219 */
220static int etm4_config_timestamp_event(struct etmv4_drvdata *drvdata)
221{
222 int ctridx, ret = -EINVAL;
223 int counter, rselector;
224 u32 val = 0;
225 struct etmv4_config *config = &drvdata->config;
226
227 /* No point in trying if we don't have at least one counter */
228 if (!drvdata->nr_cntr)
229 goto out;
230
231 /* Find a counter that hasn't been initialised */
232 for (ctridx = 0; ctridx < drvdata->nr_cntr; ctridx++)
233 if (config->cntr_val[ctridx] == 0)
234 break;
235
236 /* All the counters have been configured already, bail out */
237 if (ctridx == drvdata->nr_cntr) {
238 pr_debug("%s: no available counter found\n", __func__);
239 ret = -ENOSPC;
240 goto out;
241 }
242
243 /*
244 * Searching for an available resource selector to use, starting at
 245 * '2' since every implementation has at least 2 resource selectors.
246 * ETMIDR4 gives the number of resource selector _pairs_,
247 * hence multiply by 2.
248 */
249 for (rselector = 2; rselector < drvdata->nr_resource * 2; rselector++)
250 if (!config->res_ctrl[rselector])
251 break;
252
253 if (rselector == drvdata->nr_resource * 2) {
254 pr_debug("%s: no available resource selector found\n",
255 __func__);
256 ret = -ENOSPC;
257 goto out;
258 }
259
260 /* Remember what counter we used */
261 counter = 1 << ctridx;
262
263 /*
264 * Initialise original and reload counter value to the smallest
265 * possible value in order to get as much precision as we can.
266 */
267 config->cntr_val[ctridx] = 1;
268 config->cntrldvr[ctridx] = 1;
269
270 /* Set the trace counter control register */
271 val = 0x1 << 16 | /* Bit 16, reload counter automatically */
272 0x0 << 7 | /* Select single resource selector */
273 0x1; /* Resource selector 1, i.e always true */
274
275 config->cntr_ctrl[ctridx] = val;
276
277 val = 0x2 << 16 | /* Group 0b0010 - Counter and sequencers */
278 counter << 0; /* Counter to use */
279
280 config->res_ctrl[rselector] = val;
281
282 val = 0x0 << 7 | /* Select single resource selector */
283 rselector; /* Resource selector */
284
285 config->ts_ctrl = val;
286
287 ret = 0;
288out:
289 return ret;
290}
291
204static int etm4_parse_event_config(struct etmv4_drvdata *drvdata, 292static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
205 struct perf_event *event) 293 struct perf_event *event)
206{ 294{
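
For concreteness, here is what etm4_config_timestamp_event() ends up programming on a minimal implementation, taking ctridx == 0 and rselector == 2 (the first free counter and the first usable selector):

    counter      = 1 << 0           = 0x1
    cntr_ctrl[0] = 0x1 << 16 | 0x1  = 0x10001   self-reload, decrement on
                                                resource selector 1 (always true)
    res_ctrl[2]  = 0x2 << 16 | 0x1  = 0x20001   group 0b0010 (counters and
                                                sequencers), watching counter 0
    ts_ctrl      = 0x0 << 7 | 2     = 0x2       emit a timestamp packet whenever
                                                resource selector 2 fires
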
@@ -236,9 +324,29 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
236 /* TRM: Must program this for cycacc to work */ 324 /* TRM: Must program this for cycacc to work */
237 config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT; 325 config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
238 } 326 }
239 if (attr->config & BIT(ETM_OPT_TS)) 327 if (attr->config & BIT(ETM_OPT_TS)) {
328 /*
329 * Configure timestamps to be emitted at regular intervals in
330 * order to correlate instructions executed on different CPUs
331 * (CPU-wide trace scenarios).
332 */
333 ret = etm4_config_timestamp_event(drvdata);
334
335 /*
336 * No need to go further if timestamp intervals can't
337 * be configured.
338 */
339 if (ret)
340 goto out;
341
240 /* bit[11], Global timestamp tracing bit */ 342 /* bit[11], Global timestamp tracing bit */
241 config->cfg |= BIT(11); 343 config->cfg |= BIT(11);
344 }
345
346 if (attr->config & BIT(ETM_OPT_CTXTID))
347 /* bit[6], Context ID tracing bit */
348 config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);
349
242 /* return stack - enable if selected and supported */ 350 /* return stack - enable if selected and supported */
243 if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack) 351 if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
244 /* bit[12], Return stack enable bit */ 352 /* bit[12], Return stack enable bit */
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 927925151509..16b0c0e1e43a 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -12,6 +12,8 @@
12#include <linux/err.h> 12#include <linux/err.h>
13#include <linux/fs.h> 13#include <linux/fs.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/of.h>
16#include <linux/platform_device.h>
15#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
16#include <linux/coresight.h> 18#include <linux/coresight.h>
17#include <linux/amba/bus.h> 19#include <linux/amba/bus.h>
@@ -43,7 +45,7 @@ struct funnel_drvdata {
43 unsigned long priority; 45 unsigned long priority;
44}; 46};
45 47
46static int funnel_enable_hw(struct funnel_drvdata *drvdata, int port) 48static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
47{ 49{
48 u32 functl; 50 u32 functl;
49 int rc = 0; 51 int rc = 0;
@@ -71,17 +73,19 @@ done:
71static int funnel_enable(struct coresight_device *csdev, int inport, 73static int funnel_enable(struct coresight_device *csdev, int inport,
72 int outport) 74 int outport)
73{ 75{
74 int rc; 76 int rc = 0;
75 struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 77 struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
76 78
77 rc = funnel_enable_hw(drvdata, inport); 79 if (drvdata->base)
80 rc = dynamic_funnel_enable_hw(drvdata, inport);
78 81
79 if (!rc) 82 if (!rc)
80 dev_dbg(drvdata->dev, "FUNNEL inport %d enabled\n", inport); 83 dev_dbg(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
81 return rc; 84 return rc;
82} 85}
83 86
84static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport) 87static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,
88 int inport)
85{ 89{
86 u32 functl; 90 u32 functl;
87 91
@@ -103,7 +107,8 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
103{ 107{
104 struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 108 struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
105 109
106 funnel_disable_hw(drvdata, inport); 110 if (drvdata->base)
111 dynamic_funnel_disable_hw(drvdata, inport);
107 112
108 dev_dbg(drvdata->dev, "FUNNEL inport %d disabled\n", inport); 113 dev_dbg(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
109} 114}
@@ -177,54 +182,70 @@ static struct attribute *coresight_funnel_attrs[] = {
177}; 182};
178ATTRIBUTE_GROUPS(coresight_funnel); 183ATTRIBUTE_GROUPS(coresight_funnel);
179 184
180static int funnel_probe(struct amba_device *adev, const struct amba_id *id) 185static int funnel_probe(struct device *dev, struct resource *res)
181{ 186{
182 int ret; 187 int ret;
183 void __iomem *base; 188 void __iomem *base;
184 struct device *dev = &adev->dev;
185 struct coresight_platform_data *pdata = NULL; 189 struct coresight_platform_data *pdata = NULL;
186 struct funnel_drvdata *drvdata; 190 struct funnel_drvdata *drvdata;
187 struct resource *res = &adev->res;
188 struct coresight_desc desc = { 0 }; 191 struct coresight_desc desc = { 0 };
189 struct device_node *np = adev->dev.of_node; 192 struct device_node *np = dev->of_node;
190 193
191 if (np) { 194 if (np) {
192 pdata = of_get_coresight_platform_data(dev, np); 195 pdata = of_get_coresight_platform_data(dev, np);
193 if (IS_ERR(pdata)) 196 if (IS_ERR(pdata))
194 return PTR_ERR(pdata); 197 return PTR_ERR(pdata);
195 adev->dev.platform_data = pdata; 198 dev->platform_data = pdata;
196 } 199 }
197 200
201 if (of_device_is_compatible(np, "arm,coresight-funnel"))
202 pr_warn_once("Uses OBSOLETE CoreSight funnel binding\n");
203
198 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); 204 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
199 if (!drvdata) 205 if (!drvdata)
200 return -ENOMEM; 206 return -ENOMEM;
201 207
202 drvdata->dev = &adev->dev; 208 drvdata->dev = dev;
203 drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */ 209 drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
204 if (!IS_ERR(drvdata->atclk)) { 210 if (!IS_ERR(drvdata->atclk)) {
205 ret = clk_prepare_enable(drvdata->atclk); 211 ret = clk_prepare_enable(drvdata->atclk);
206 if (ret) 212 if (ret)
207 return ret; 213 return ret;
208 } 214 }
209 dev_set_drvdata(dev, drvdata);
210 215
211 /* Validity for the resource is already checked by the AMBA core */ 216 /*
212 base = devm_ioremap_resource(dev, res); 217 * Map the device base for dynamic-funnel, which has been
213 if (IS_ERR(base)) 218 * validated by AMBA core.
214 return PTR_ERR(base); 219 */
220 if (res) {
221 base = devm_ioremap_resource(dev, res);
222 if (IS_ERR(base)) {
223 ret = PTR_ERR(base);
224 goto out_disable_clk;
225 }
226 drvdata->base = base;
227 desc.groups = coresight_funnel_groups;
228 }
215 229
216 drvdata->base = base; 230 dev_set_drvdata(dev, drvdata);
217 pm_runtime_put(&adev->dev);
218 231
219 desc.type = CORESIGHT_DEV_TYPE_LINK; 232 desc.type = CORESIGHT_DEV_TYPE_LINK;
220 desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG; 233 desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
221 desc.ops = &funnel_cs_ops; 234 desc.ops = &funnel_cs_ops;
222 desc.pdata = pdata; 235 desc.pdata = pdata;
223 desc.dev = dev; 236 desc.dev = dev;
224 desc.groups = coresight_funnel_groups;
225 drvdata->csdev = coresight_register(&desc); 237 drvdata->csdev = coresight_register(&desc);
238 if (IS_ERR(drvdata->csdev)) {
239 ret = PTR_ERR(drvdata->csdev);
240 goto out_disable_clk;
241 }
242
243 pm_runtime_put(dev);
226 244
227 return PTR_ERR_OR_ZERO(drvdata->csdev); 245out_disable_clk:
246 if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
247 clk_disable_unprepare(drvdata->atclk);
248 return ret;
228} 249}
229 250
230#ifdef CONFIG_PM 251#ifdef CONFIG_PM
@@ -253,7 +274,48 @@ static const struct dev_pm_ops funnel_dev_pm_ops = {
253 SET_RUNTIME_PM_OPS(funnel_runtime_suspend, funnel_runtime_resume, NULL) 274 SET_RUNTIME_PM_OPS(funnel_runtime_suspend, funnel_runtime_resume, NULL)
254}; 275};
255 276
256static const struct amba_id funnel_ids[] = { 277static int static_funnel_probe(struct platform_device *pdev)
278{
279 int ret;
280
281 pm_runtime_get_noresume(&pdev->dev);
282 pm_runtime_set_active(&pdev->dev);
283 pm_runtime_enable(&pdev->dev);
284
 285 /* Static funnels do not have a programming base */
286 ret = funnel_probe(&pdev->dev, NULL);
287
288 if (ret) {
289 pm_runtime_put_noidle(&pdev->dev);
290 pm_runtime_disable(&pdev->dev);
291 }
292
293 return ret;
294}
295
296static const struct of_device_id static_funnel_match[] = {
297 {.compatible = "arm,coresight-static-funnel"},
298 {}
299};
300
301static struct platform_driver static_funnel_driver = {
302 .probe = static_funnel_probe,
303 .driver = {
304 .name = "coresight-static-funnel",
305 .of_match_table = static_funnel_match,
306 .pm = &funnel_dev_pm_ops,
307 .suppress_bind_attrs = true,
308 },
309};
310builtin_platform_driver(static_funnel_driver);
311
312static int dynamic_funnel_probe(struct amba_device *adev,
313 const struct amba_id *id)
314{
315 return funnel_probe(&adev->dev, &adev->res);
316}
317
318static const struct amba_id dynamic_funnel_ids[] = {
257 { 319 {
258 .id = 0x000bb908, 320 .id = 0x000bb908,
259 .mask = 0x000fffff, 321 .mask = 0x000fffff,
@@ -266,14 +328,14 @@ static const struct amba_id funnel_ids[] = {
266 { 0, 0}, 328 { 0, 0},
267}; 329};
268 330
269static struct amba_driver funnel_driver = { 331static struct amba_driver dynamic_funnel_driver = {
270 .drv = { 332 .drv = {
271 .name = "coresight-funnel", 333 .name = "coresight-dynamic-funnel",
272 .owner = THIS_MODULE, 334 .owner = THIS_MODULE,
273 .pm = &funnel_dev_pm_ops, 335 .pm = &funnel_dev_pm_ops,
274 .suppress_bind_attrs = true, 336 .suppress_bind_attrs = true,
275 }, 337 },
276 .probe = funnel_probe, 338 .probe = dynamic_funnel_probe,
277 .id_table = funnel_ids, 339 .id_table = dynamic_funnel_ids,
278}; 340};
279builtin_amba_driver(funnel_driver); 341builtin_amba_driver(dynamic_funnel_driver);
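
The funnel rework folds both variants into a single probe path: a programmable ("dynamic") funnel still arrives via the AMBA bus with an MMIO resource, while a purely hard-wired ("static") funnel is a platform device probed with res == NULL. A NULL drvdata->base then marks the component as non-programmable, and every register accessor is gated on it — condensed from the hunks above:

    static int funnel_enable(struct coresight_device *csdev, int inport,
                             int outport)
    {
            int rc = 0;
            struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

            /* Only dynamic funnels have a FUNCTL register to program. */
            if (drvdata->base)
                    rc = dynamic_funnel_enable_hw(drvdata, inport);
            return rc;
    }

The replicator diff that follows applies exactly the same split, down to the warn-once for the obsolete compatible string.
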
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index feac98315471..8c9ce74498e1 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -1,10 +1,11 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. 3 * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
4 * 4 *
5 * Description: CoreSight Replicator driver 5 * Description: CoreSight Replicator driver
6 */ 6 */
7 7
8#include <linux/amba/bus.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
9#include <linux/device.h> 10#include <linux/device.h>
10#include <linux/platform_device.h> 11#include <linux/platform_device.h>
@@ -18,25 +19,117 @@
18 19
19#include "coresight-priv.h" 20#include "coresight-priv.h"
20 21
22#define REPLICATOR_IDFILTER0 0x000
23#define REPLICATOR_IDFILTER1 0x004
24
21/** 25/**
22 * struct replicator_drvdata - specifics associated to a replicator component 26 * struct replicator_drvdata - specifics associated to a replicator component
27 * @base: memory mapped base address for this component. Also indicates
28 * whether this one is programmable or not.
23 * @dev: the device entity associated with this component 29 * @dev: the device entity associated with this component
24 * @atclk: optional clock for the core parts of the replicator. 30 * @atclk: optional clock for the core parts of the replicator.
25 * @csdev: component vitals needed by the framework 31 * @csdev: component vitals needed by the framework
26 */ 32 */
27struct replicator_drvdata { 33struct replicator_drvdata {
34 void __iomem *base;
28 struct device *dev; 35 struct device *dev;
29 struct clk *atclk; 36 struct clk *atclk;
30 struct coresight_device *csdev; 37 struct coresight_device *csdev;
31}; 38};
32 39
40static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
41{
42 CS_UNLOCK(drvdata->base);
43
44 if (!coresight_claim_device_unlocked(drvdata->base)) {
45 writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
46 writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
47 coresight_disclaim_device_unlocked(drvdata->base);
48 }
49
50 CS_LOCK(drvdata->base);
51}
52
53/*
54 * replicator_reset : Reset the replicator configuration to sane values.
55 */
56static inline void replicator_reset(struct replicator_drvdata *drvdata)
57{
58 if (drvdata->base)
59 dynamic_replicator_reset(drvdata);
60}
61
62static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
63 int inport, int outport)
64{
65 int rc = 0;
66 u32 reg;
67
68 switch (outport) {
69 case 0:
70 reg = REPLICATOR_IDFILTER0;
71 break;
72 case 1:
73 reg = REPLICATOR_IDFILTER1;
74 break;
75 default:
76 WARN_ON(1);
77 return -EINVAL;
78 }
79
80 CS_UNLOCK(drvdata->base);
81
82 if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
83 (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
84 rc = coresight_claim_device_unlocked(drvdata->base);
85
86 /* Ensure that the outport is enabled. */
87 if (!rc)
88 writel_relaxed(0x00, drvdata->base + reg);
89 CS_LOCK(drvdata->base);
90
91 return rc;
92}
93
33static int replicator_enable(struct coresight_device *csdev, int inport, 94static int replicator_enable(struct coresight_device *csdev, int inport,
34 int outport) 95 int outport)
35{ 96{
97 int rc = 0;
36 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 98 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
37 99
38 dev_dbg(drvdata->dev, "REPLICATOR enabled\n"); 100 if (drvdata->base)
39 return 0; 101 rc = dynamic_replicator_enable(drvdata, inport, outport);
102 if (!rc)
103 dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
104 return rc;
105}
106
107static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,
108 int inport, int outport)
109{
110 u32 reg;
111
112 switch (outport) {
113 case 0:
114 reg = REPLICATOR_IDFILTER0;
115 break;
116 case 1:
117 reg = REPLICATOR_IDFILTER1;
118 break;
119 default:
120 WARN_ON(1);
121 return;
122 }
123
124 CS_UNLOCK(drvdata->base);
125
126 /* disable the flow of ATB data through port */
127 writel_relaxed(0xff, drvdata->base + reg);
128
129 if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
130 (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
131 coresight_disclaim_device_unlocked(drvdata->base);
132 CS_LOCK(drvdata->base);
40} 133}
41 134
42static void replicator_disable(struct coresight_device *csdev, int inport, 135static void replicator_disable(struct coresight_device *csdev, int inport,
@@ -44,6 +137,8 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
44{ 137{
45 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 138 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
46 139
140 if (drvdata->base)
141 dynamic_replicator_disable(drvdata, inport, outport);
47 dev_dbg(drvdata->dev, "REPLICATOR disabled\n"); 142 dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
48} 143}
49 144
@@ -56,58 +151,110 @@ static const struct coresight_ops replicator_cs_ops = {
56 .link_ops = &replicator_link_ops, 151 .link_ops = &replicator_link_ops,
57}; 152};
58 153
59static int replicator_probe(struct platform_device *pdev) 154#define coresight_replicator_reg(name, offset) \
155 coresight_simple_reg32(struct replicator_drvdata, name, offset)
156
157coresight_replicator_reg(idfilter0, REPLICATOR_IDFILTER0);
158coresight_replicator_reg(idfilter1, REPLICATOR_IDFILTER1);
159
160static struct attribute *replicator_mgmt_attrs[] = {
161 &dev_attr_idfilter0.attr,
162 &dev_attr_idfilter1.attr,
163 NULL,
164};
165
166static const struct attribute_group replicator_mgmt_group = {
167 .attrs = replicator_mgmt_attrs,
168 .name = "mgmt",
169};
170
171static const struct attribute_group *replicator_groups[] = {
172 &replicator_mgmt_group,
173 NULL,
174};
175
176static int replicator_probe(struct device *dev, struct resource *res)
60{ 177{
61 int ret; 178 int ret = 0;
62 struct device *dev = &pdev->dev;
63 struct coresight_platform_data *pdata = NULL; 179 struct coresight_platform_data *pdata = NULL;
64 struct replicator_drvdata *drvdata; 180 struct replicator_drvdata *drvdata;
65 struct coresight_desc desc = { 0 }; 181 struct coresight_desc desc = { 0 };
66 struct device_node *np = pdev->dev.of_node; 182 struct device_node *np = dev->of_node;
183 void __iomem *base;
67 184
68 if (np) { 185 if (np) {
69 pdata = of_get_coresight_platform_data(dev, np); 186 pdata = of_get_coresight_platform_data(dev, np);
70 if (IS_ERR(pdata)) 187 if (IS_ERR(pdata))
71 return PTR_ERR(pdata); 188 return PTR_ERR(pdata);
72 pdev->dev.platform_data = pdata; 189 dev->platform_data = pdata;
73 } 190 }
74 191
192 if (of_device_is_compatible(np, "arm,coresight-replicator"))
193 pr_warn_once("Uses OBSOLETE CoreSight replicator binding\n");
194
75 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); 195 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
76 if (!drvdata) 196 if (!drvdata)
77 return -ENOMEM; 197 return -ENOMEM;
78 198
79 drvdata->dev = &pdev->dev; 199 drvdata->dev = dev;
80 drvdata->atclk = devm_clk_get(&pdev->dev, "atclk"); /* optional */ 200 drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
81 if (!IS_ERR(drvdata->atclk)) { 201 if (!IS_ERR(drvdata->atclk)) {
82 ret = clk_prepare_enable(drvdata->atclk); 202 ret = clk_prepare_enable(drvdata->atclk);
83 if (ret) 203 if (ret)
84 return ret; 204 return ret;
85 } 205 }
86 pm_runtime_get_noresume(&pdev->dev); 206
87 pm_runtime_set_active(&pdev->dev); 207 /*
88 pm_runtime_enable(&pdev->dev); 208 * Map the device base for dynamic-replicator, which has been
89 platform_set_drvdata(pdev, drvdata); 209 * validated by AMBA core
210 */
211 if (res) {
212 base = devm_ioremap_resource(dev, res);
213 if (IS_ERR(base)) {
214 ret = PTR_ERR(base);
215 goto out_disable_clk;
216 }
217 drvdata->base = base;
218 desc.groups = replicator_groups;
219 }
220
221 dev_set_drvdata(dev, drvdata);
90 222
91 desc.type = CORESIGHT_DEV_TYPE_LINK; 223 desc.type = CORESIGHT_DEV_TYPE_LINK;
92 desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT; 224 desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
93 desc.ops = &replicator_cs_ops; 225 desc.ops = &replicator_cs_ops;
94 desc.pdata = pdev->dev.platform_data; 226 desc.pdata = dev->platform_data;
95 desc.dev = &pdev->dev; 227 desc.dev = dev;
96 drvdata->csdev = coresight_register(&desc); 228 drvdata->csdev = coresight_register(&desc);
97 if (IS_ERR(drvdata->csdev)) { 229 if (IS_ERR(drvdata->csdev)) {
98 ret = PTR_ERR(drvdata->csdev); 230 ret = PTR_ERR(drvdata->csdev);
99 goto out_disable_pm; 231 goto out_disable_clk;
100 } 232 }
101 233
102 pm_runtime_put(&pdev->dev); 234 replicator_reset(drvdata);
103 235 pm_runtime_put(dev);
104 return 0;
105 236
106out_disable_pm: 237out_disable_clk:
107 if (!IS_ERR(drvdata->atclk)) 238 if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
108 clk_disable_unprepare(drvdata->atclk); 239 clk_disable_unprepare(drvdata->atclk);
109 pm_runtime_put_noidle(&pdev->dev); 240 return ret;
110 pm_runtime_disable(&pdev->dev); 241}
242
243static int static_replicator_probe(struct platform_device *pdev)
244{
245 int ret;
246
247 pm_runtime_get_noresume(&pdev->dev);
248 pm_runtime_set_active(&pdev->dev);
249 pm_runtime_enable(&pdev->dev);
250
 251 /* Static replicators do not have a programming base */
252 ret = replicator_probe(&pdev->dev, NULL);
253
254 if (ret) {
255 pm_runtime_put_noidle(&pdev->dev);
256 pm_runtime_disable(&pdev->dev);
257 }
111 258
112 return ret; 259 return ret;
113} 260}
@@ -139,18 +286,49 @@ static const struct dev_pm_ops replicator_dev_pm_ops = {
139 replicator_runtime_resume, NULL) 286 replicator_runtime_resume, NULL)
140}; 287};
141 288
142static const struct of_device_id replicator_match[] = { 289static const struct of_device_id static_replicator_match[] = {
143 {.compatible = "arm,coresight-replicator"}, 290 {.compatible = "arm,coresight-replicator"},
291 {.compatible = "arm,coresight-static-replicator"},
144 {} 292 {}
145}; 293};
146 294
147static struct platform_driver replicator_driver = { 295static struct platform_driver static_replicator_driver = {
148 .probe = replicator_probe, 296 .probe = static_replicator_probe,
149 .driver = { 297 .driver = {
150 .name = "coresight-replicator", 298 .name = "coresight-static-replicator",
151 .of_match_table = replicator_match, 299 .of_match_table = static_replicator_match,
300 .pm = &replicator_dev_pm_ops,
301 .suppress_bind_attrs = true,
302 },
303};
304builtin_platform_driver(static_replicator_driver);
305
306static int dynamic_replicator_probe(struct amba_device *adev,
307 const struct amba_id *id)
308{
309 return replicator_probe(&adev->dev, &adev->res);
310}
311
312static const struct amba_id dynamic_replicator_ids[] = {
313 {
314 .id = 0x000bb909,
315 .mask = 0x000fffff,
316 },
317 {
318 /* Coresight SoC-600 */
319 .id = 0x000bb9ec,
320 .mask = 0x000fffff,
321 },
322 { 0, 0 },
323};
324
325static struct amba_driver dynamic_replicator_driver = {
326 .drv = {
327 .name = "coresight-dynamic-replicator",
152 .pm = &replicator_dev_pm_ops, 328 .pm = &replicator_dev_pm_ops,
153 .suppress_bind_attrs = true, 329 .suppress_bind_attrs = true,
154 }, 330 },
331 .probe = dynamic_replicator_probe,
332 .id_table = dynamic_replicator_ids,
155}; 333};
156builtin_platform_driver(replicator_driver); 334builtin_amba_driver(dynamic_replicator_driver);
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index a5f053f2db2c..2527b5d3b65e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -4,6 +4,7 @@
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5 */ 5 */
6 6
7#include <linux/atomic.h>
7#include <linux/circ_buf.h> 8#include <linux/circ_buf.h>
8#include <linux/coresight.h> 9#include <linux/coresight.h>
9#include <linux/perf_event.h> 10#include <linux/perf_event.h>
@@ -180,8 +181,10 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
180 * sink is already enabled no memory is needed and the HW need not be 181 * sink is already enabled no memory is needed and the HW need not be
181 * touched. 182 * touched.
182 */ 183 */
183 if (drvdata->mode == CS_MODE_SYSFS) 184 if (drvdata->mode == CS_MODE_SYSFS) {
185 atomic_inc(csdev->refcnt);
184 goto out; 186 goto out;
187 }
185 188
186 /* 189 /*
187 * If drvdata::buf isn't NULL, memory was allocated for a previous 190 * If drvdata::buf isn't NULL, memory was allocated for a previous
@@ -200,11 +203,13 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
200 } 203 }
201 204
202 ret = tmc_etb_enable_hw(drvdata); 205 ret = tmc_etb_enable_hw(drvdata);
203 if (!ret) 206 if (!ret) {
204 drvdata->mode = CS_MODE_SYSFS; 207 drvdata->mode = CS_MODE_SYSFS;
205 else 208 atomic_inc(csdev->refcnt);
209 } else {
206 /* Free up the buffer if we failed to enable */ 210 /* Free up the buffer if we failed to enable */
207 used = false; 211 used = false;
212 }
208out: 213out:
209 spin_unlock_irqrestore(&drvdata->spinlock, flags); 214 spin_unlock_irqrestore(&drvdata->spinlock, flags);
210 215
@@ -218,6 +223,7 @@ out:
218static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data) 223static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
219{ 224{
220 int ret = 0; 225 int ret = 0;
226 pid_t pid;
221 unsigned long flags; 227 unsigned long flags;
222 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 228 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
223 struct perf_output_handle *handle = data; 229 struct perf_output_handle *handle = data;
@@ -228,19 +234,42 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
228 if (drvdata->reading) 234 if (drvdata->reading)
229 break; 235 break;
230 /* 236 /*
231 * In Perf mode there can be only one writer per sink. There 237 * No need to continue if the ETB/ETF is already operated
232 * is also no need to continue if the ETB/ETF is already 238 * from sysFS.
233 * operated from sysFS.
234 */ 239 */
235 if (drvdata->mode != CS_MODE_DISABLED) 240 if (drvdata->mode == CS_MODE_SYSFS) {
241 ret = -EBUSY;
242 break;
243 }
244
245 /* Get a handle on the pid of the process to monitor */
246 pid = task_pid_nr(handle->event->owner);
247
248 if (drvdata->pid != -1 && drvdata->pid != pid) {
249 ret = -EBUSY;
236 break; 250 break;
251 }
237 252
238 ret = tmc_set_etf_buffer(csdev, handle); 253 ret = tmc_set_etf_buffer(csdev, handle);
239 if (ret) 254 if (ret)
240 break; 255 break;
256
257 /*
258 * No HW configuration is needed if the sink is already in
259 * use for this session.
260 */
261 if (drvdata->pid == pid) {
262 atomic_inc(csdev->refcnt);
263 break;
264 }
265
241 ret = tmc_etb_enable_hw(drvdata); 266 ret = tmc_etb_enable_hw(drvdata);
242 if (!ret) 267 if (!ret) {
268 /* Associate with monitored process. */
269 drvdata->pid = pid;
243 drvdata->mode = CS_MODE_PERF; 270 drvdata->mode = CS_MODE_PERF;
271 atomic_inc(csdev->refcnt);
272 }
244 } while (0); 273 } while (0);
245 spin_unlock_irqrestore(&drvdata->spinlock, flags); 274 spin_unlock_irqrestore(&drvdata->spinlock, flags);
246 275
@@ -273,26 +302,34 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev,
273 return 0; 302 return 0;
274} 303}
275 304
276static void tmc_disable_etf_sink(struct coresight_device *csdev) 305static int tmc_disable_etf_sink(struct coresight_device *csdev)
277{ 306{
278 unsigned long flags; 307 unsigned long flags;
279 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 308 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
280 309
281 spin_lock_irqsave(&drvdata->spinlock, flags); 310 spin_lock_irqsave(&drvdata->spinlock, flags);
311
282 if (drvdata->reading) { 312 if (drvdata->reading) {
283 spin_unlock_irqrestore(&drvdata->spinlock, flags); 313 spin_unlock_irqrestore(&drvdata->spinlock, flags);
284 return; 314 return -EBUSY;
285 } 315 }
286 316
287 /* Disable the TMC only if it needs to */ 317 if (atomic_dec_return(csdev->refcnt)) {
288 if (drvdata->mode != CS_MODE_DISABLED) { 318 spin_unlock_irqrestore(&drvdata->spinlock, flags);
289 tmc_etb_disable_hw(drvdata); 319 return -EBUSY;
290 drvdata->mode = CS_MODE_DISABLED;
291 } 320 }
292 321
322 /* Complain if we (somehow) got out of sync */
323 WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
324 tmc_etb_disable_hw(drvdata);
325 /* Dissociate from monitored process. */
326 drvdata->pid = -1;
327 drvdata->mode = CS_MODE_DISABLED;
328
293 spin_unlock_irqrestore(&drvdata->spinlock, flags); 329 spin_unlock_irqrestore(&drvdata->spinlock, flags);
294 330
295 dev_dbg(drvdata->dev, "TMC-ETB/ETF disabled\n"); 331 dev_dbg(drvdata->dev, "TMC-ETB/ETF disabled\n");
332 return 0;
296} 333}
297 334
298static int tmc_enable_etf_link(struct coresight_device *csdev, 335static int tmc_enable_etf_link(struct coresight_device *csdev,
@@ -337,10 +374,11 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
337 dev_dbg(drvdata->dev, "TMC-ETF disabled\n"); 374 dev_dbg(drvdata->dev, "TMC-ETF disabled\n");
338} 375}
339 376
340static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu, 377static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
341 void **pages, int nr_pages, bool overwrite) 378 struct perf_event *event, void **pages,
379 int nr_pages, bool overwrite)
342{ 380{
343 int node; 381 int node, cpu = event->cpu;
344 struct cs_buffers *buf; 382 struct cs_buffers *buf;
345 383
346 if (cpu == -1) 384 if (cpu == -1)
@@ -400,7 +438,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
400 u32 *buf_ptr; 438 u32 *buf_ptr;
401 u64 read_ptr, write_ptr; 439 u64 read_ptr, write_ptr;
402 u32 status; 440 u32 status;
403 unsigned long offset, to_read; 441 unsigned long offset, to_read = 0, flags;
404 struct cs_buffers *buf = sink_config; 442 struct cs_buffers *buf = sink_config;
405 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 443 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
406 444
@@ -411,6 +449,12 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
411 if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF)) 449 if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
412 return 0; 450 return 0;
413 451
452 spin_lock_irqsave(&drvdata->spinlock, flags);
453
454 /* Don't do anything if another tracer is using this sink */
455 if (atomic_read(csdev->refcnt) != 1)
456 goto out;
457
414 CS_UNLOCK(drvdata->base); 458 CS_UNLOCK(drvdata->base);
415 459
416 tmc_flush_and_stop(drvdata); 460 tmc_flush_and_stop(drvdata);
@@ -504,6 +548,8 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
504 to_read = buf->nr_pages << PAGE_SHIFT; 548 to_read = buf->nr_pages << PAGE_SHIFT;
505 } 549 }
506 CS_LOCK(drvdata->base); 550 CS_LOCK(drvdata->base);
551out:
552 spin_unlock_irqrestore(&drvdata->spinlock, flags);
507 553
508 return to_read; 554 return to_read;
509} 555}
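
The refcnt check that tmc_update_etf_buffer() now performs under the spinlock is the flip side of sink sharing: when several tracers feed one ETF, the buffer may only be flushed and the hardware restarted once a single user remains. Any other caller takes the out path instead and, because to_read is now initialised to 0, reports an empty read rather than draining a buffer that other sessions are still writing to.
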
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index f684283890d3..df6e4b0b84e9 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -4,10 +4,15 @@
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org> 4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
5 */ 5 */
6 6
7#include <linux/atomic.h>
7#include <linux/coresight.h> 8#include <linux/coresight.h>
8#include <linux/dma-mapping.h> 9#include <linux/dma-mapping.h>
9#include <linux/iommu.h> 10#include <linux/iommu.h>
11#include <linux/idr.h>
12#include <linux/mutex.h>
13#include <linux/refcount.h>
10#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/types.h>
11#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
12#include "coresight-catu.h" 17#include "coresight-catu.h"
13#include "coresight-etm-perf.h" 18#include "coresight-etm-perf.h"
@@ -23,14 +28,18 @@ struct etr_flat_buf {
23 28
24/* 29/*
25 * etr_perf_buffer - Perf buffer used for ETR 30 * etr_perf_buffer - Perf buffer used for ETR
 31 * @drvdata - The ETR drvdata this buffer has been allocated for.
26 * @etr_buf - Actual buffer used by the ETR 32 * @etr_buf - Actual buffer used by the ETR
33 * @pid - The PID this etr_perf_buffer belongs to.
 27 * @snapshot - Perf session mode 34
28 * @head - handle->head at the beginning of the session. 35 * @head - handle->head at the beginning of the session.
29 * @nr_pages - Number of pages in the ring buffer. 36 * @nr_pages - Number of pages in the ring buffer.
30 * @pages - Array of Pages in the ring buffer. 37 * @pages - Array of Pages in the ring buffer.
31 */ 38 */
32struct etr_perf_buffer { 39struct etr_perf_buffer {
40 struct tmc_drvdata *drvdata;
33 struct etr_buf *etr_buf; 41 struct etr_buf *etr_buf;
42 pid_t pid;
34 bool snapshot; 43 bool snapshot;
35 unsigned long head; 44 unsigned long head;
36 int nr_pages; 45 int nr_pages;
@@ -772,7 +781,8 @@ static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
772static const struct etr_buf_operations *etr_buf_ops[] = { 781static const struct etr_buf_operations *etr_buf_ops[] = {
773 [ETR_MODE_FLAT] = &etr_flat_buf_ops, 782 [ETR_MODE_FLAT] = &etr_flat_buf_ops,
774 [ETR_MODE_ETR_SG] = &etr_sg_buf_ops, 783 [ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
775 [ETR_MODE_CATU] = &etr_catu_buf_ops, 784 [ETR_MODE_CATU] = IS_ENABLED(CONFIG_CORESIGHT_CATU)
785 ? &etr_catu_buf_ops : NULL,
776}; 786};
777 787
778static inline int tmc_etr_mode_alloc_buf(int mode, 788static inline int tmc_etr_mode_alloc_buf(int mode,
@@ -786,7 +796,7 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
786 case ETR_MODE_FLAT: 796 case ETR_MODE_FLAT:
787 case ETR_MODE_ETR_SG: 797 case ETR_MODE_ETR_SG:
788 case ETR_MODE_CATU: 798 case ETR_MODE_CATU:
789 if (etr_buf_ops[mode]->alloc) 799 if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
790 rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf, 800 rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
791 node, pages); 801 node, pages);
792 if (!rc) 802 if (!rc)
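Because CATU support is now compile-time optional, etr_buf_ops[] carries a NULL slot when CONFIG_CORESIGHT_CATU is off, and the dispatcher checks the slot before the method. The same pattern in self-contained C (HAVE_CATU plays the role of IS_ENABLED(), and -1 is only a stand-in for the error code):

    #include <stddef.h>
    #include <stdio.h>

    #define HAVE_CATU 0     /* flip to 1 to model CONFIG_CORESIGHT_CATU=y */

    struct buf_ops {
            int (*alloc)(void);
    };

    static int catu_alloc(void)
    {
            return 0;
    }

    static const struct buf_ops catu_ops = { .alloc = catu_alloc };

    static const struct buf_ops *ops_table[] = {
            [0] = HAVE_CATU ? &catu_ops : NULL,
    };

    static int mode_alloc(int mode)
    {
            /* Guard both the table slot and the method before dispatching. */
            if (ops_table[mode] && ops_table[mode]->alloc)
                    return ops_table[mode]->alloc();
            return -1;      /* -EINVAL stand-in */
    }

    int main(void)
    {
            printf("alloc: %d\n", mode_alloc(0));
            return 0;
    }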
@@ -1124,8 +1134,10 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
1124 * sink is already enabled no memory is needed and the HW need not be 1134 * sink is already enabled no memory is needed and the HW need not be
1125 * touched, even if the buffer size has changed. 1135 * touched, even if the buffer size has changed.
1126 */ 1136 */
1127 if (drvdata->mode == CS_MODE_SYSFS) 1137 if (drvdata->mode == CS_MODE_SYSFS) {
1138 atomic_inc(csdev->refcnt);
1128 goto out; 1139 goto out;
1140 }
1129 1141
1130 /* 1142 /*
1131 * If we don't have a buffer or it doesn't match the requested size, 1143 * If we don't have a buffer or it doesn't match the requested size,
@@ -1138,8 +1150,10 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
1138 } 1150 }
1139 1151
1140 ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf); 1152 ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
1141 if (!ret) 1153 if (!ret) {
1142 drvdata->mode = CS_MODE_SYSFS; 1154 drvdata->mode = CS_MODE_SYSFS;
1155 atomic_inc(csdev->refcnt);
1156 }
1143out: 1157out:
1144 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1158 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1145 1159
@@ -1154,23 +1168,23 @@ out:
1154} 1168}
1155 1169
1156/* 1170/*
1157 * tmc_etr_setup_perf_buf: Allocate ETR buffer for use by perf. 1171 * alloc_etr_buf: Allocate ETR buffer for use by perf.
1158 * The size of the hardware buffer is dependent on the size configured 1172 * The size of the hardware buffer is dependent on the size configured
1159 * via sysfs and the perf ring buffer size. We prefer to allocate the 1173 * via sysfs and the perf ring buffer size. We prefer to allocate the
1160 * largest possible size, scaling down the size by half until it 1174 * largest possible size, scaling down the size by half until it
1161 * reaches a minimum limit (1M), beyond which we give up. 1175 * reaches a minimum limit (1M), beyond which we give up.
1162 */ 1176 */
1163static struct etr_perf_buffer * 1177static struct etr_buf *
1164tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, int node, int nr_pages, 1178alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
1165 void **pages, bool snapshot) 1179 int nr_pages, void **pages, bool snapshot)
1166{ 1180{
1181 int node, cpu = event->cpu;
1167 struct etr_buf *etr_buf; 1182 struct etr_buf *etr_buf;
1168 struct etr_perf_buffer *etr_perf;
1169 unsigned long size; 1183 unsigned long size;
1170 1184
1171 etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node); 1185 if (cpu == -1)
1172 if (!etr_perf) 1186 cpu = smp_processor_id();
1173 return ERR_PTR(-ENOMEM); 1187 node = cpu_to_node(cpu);
1174 1188
1175 /* 1189 /*
1176 * Try to match the perf ring buffer size if it is larger 1190 * Try to match the perf ring buffer size if it is larger
@@ -1195,32 +1209,160 @@ tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, int node, int nr_pages,
1195 size /= 2; 1209 size /= 2;
1196 } while (size >= TMC_ETR_PERF_MIN_BUF_SIZE); 1210 } while (size >= TMC_ETR_PERF_MIN_BUF_SIZE);
1197 1211
1212 return ERR_PTR(-ENOMEM);
1213
1214done:
1215 return etr_buf;
1216}
1217
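alloc_etr_buf() keeps the existing sizing strategy: start from the preferred size and halve on allocation failure until the 1M floor, then give up. A compact sketch of that back-off loop, with a try_alloc() wired to fail above 4M purely to exercise the retry:

    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_BUF (1UL << 20)   /* 1M floor, as TMC_ETR_PERF_MIN_BUF_SIZE */

    /* Pretend allocations above 4M fail, to exercise the back-off. */
    static void *try_alloc(unsigned long size)
    {
            return size > (4UL << 20) ? NULL : malloc(size);
    }

    static void *alloc_backoff(unsigned long size)
    {
            do {
                    void *buf = try_alloc(size);

                    if (buf) {
                            printf("got %lu bytes\n", size);
                            return buf;
                    }
                    size /= 2;      /* scale down and retry */
            } while (size >= MIN_BUF);

            return NULL;
    }

    int main(void)
    {
            free(alloc_backoff(16UL << 20));  /* 16M -> 8M -> 4M succeeds */
            return 0;
    }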
1218static struct etr_buf *
1219get_perf_etr_buf_cpu_wide(struct tmc_drvdata *drvdata,
1220 struct perf_event *event, int nr_pages,
1221 void **pages, bool snapshot)
1222{
1223 int ret;
1224 pid_t pid = task_pid_nr(event->owner);
1225 struct etr_buf *etr_buf;
1226
1227retry:
1228 /*
1229 * An etr_perf_buffer is associated with an event and holds a reference
1230 * to the AUX ring buffer that was created for that event. In CPU-wide
1231 * N:1 mode multiple events (one per CPU), each with its own AUX ring
1232 * buffer, share a sink. As such an etr_perf_buffer is created for each
1233 * event but a single etr_buf associated with the ETR is shared between
1234 * them. The last event in a trace session will copy the content of the
 1235 * etr_buf to its AUX ring buffer. Ring buffers associated with other
 1236 * events are simply not used and freed as events are destroyed. We still
1237 * need to allocate a ring buffer for each event since we don't know
1238 * which event will be last.
1239 */
1240
1241 /*
1242 * The first thing to do here is check if an etr_buf has already been
1243 * allocated for this session. If so it is shared with this event,
1244 * otherwise it is created.
1245 */
1246 mutex_lock(&drvdata->idr_mutex);
1247 etr_buf = idr_find(&drvdata->idr, pid);
1248 if (etr_buf) {
1249 refcount_inc(&etr_buf->refcount);
1250 mutex_unlock(&drvdata->idr_mutex);
1251 return etr_buf;
1252 }
1253
1254 /* If we made it here no buffer has been allocated, do so now. */
1255 mutex_unlock(&drvdata->idr_mutex);
1256
1257 etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
1258 if (IS_ERR(etr_buf))
1259 return etr_buf;
1260
1261 refcount_set(&etr_buf->refcount, 1);
1262
1263 /* Now that we have a buffer, add it to the IDR. */
1264 mutex_lock(&drvdata->idr_mutex);
1265 ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
1266 mutex_unlock(&drvdata->idr_mutex);
1267
1268 /* Another event with this session ID has allocated this buffer. */
1269 if (ret == -ENOSPC) {
1270 tmc_free_etr_buf(etr_buf);
1271 goto retry;
1272 }
1273
1274 /* The IDR can't allocate room for a new session, abandon ship. */
1275 if (ret == -ENOMEM) {
1276 tmc_free_etr_buf(etr_buf);
1277 return ERR_PTR(ret);
1278 }
1279
1280
1281 return etr_buf;
1282}
1283
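The CPU-wide path keys one shared etr_buf per session on the owner's PID: look up under idr_mutex, otherwise allocate outside the lock, insert, and retry the lookup when idr_alloc() reports -ENOSPC because another CPU won the race. A userspace model with a one-entry map shows the shape of that retry (the real IDR holds one buffer per concurrent session, not a single slot):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t idr_mutex = PTHREAD_MUTEX_INITIALIZER;
    static struct buf { int pid; int refs; } *slot; /* toy one-entry "IDR" */

    static struct buf *get_buf(int pid)
    {
            struct buf *b;

    retry:
            pthread_mutex_lock(&idr_mutex);
            if (slot && slot->pid == pid) { /* session already has a buffer */
                    slot->refs++;
                    pthread_mutex_unlock(&idr_mutex);
                    return slot;
            }
            pthread_mutex_unlock(&idr_mutex);

            b = calloc(1, sizeof(*b));      /* allocate outside the lock */
            if (!b)
                    return NULL;
            b->pid = pid;
            b->refs = 1;

            pthread_mutex_lock(&idr_mutex);
            if (slot && slot->pid == pid) { /* lost the race: drop ours, retry */
                    pthread_mutex_unlock(&idr_mutex);
                    free(b);
                    goto retry;
            }
            slot = b;
            pthread_mutex_unlock(&idr_mutex);
            return b;
    }

    int main(void)
    {
            get_buf(42);
            printf("refs after two gets: %d\n", get_buf(42)->refs); /* 2 */
            return 0;
    }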
1284static struct etr_buf *
1285get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata,
1286 struct perf_event *event, int nr_pages,
1287 void **pages, bool snapshot)
1288{
1289 struct etr_buf *etr_buf;
1290
1291 /*
1292 * In per-thread mode the etr_buf isn't shared, so just go ahead
1293 * with memory allocation.
1294 */
1295 etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
1296 if (IS_ERR(etr_buf))
1297 goto out;
1298
1299 refcount_set(&etr_buf->refcount, 1);
1300out:
1301 return etr_buf;
1302}
1303
1304static struct etr_buf *
1305get_perf_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
1306 int nr_pages, void **pages, bool snapshot)
1307{
1308 if (event->cpu == -1)
1309 return get_perf_etr_buf_per_thread(drvdata, event, nr_pages,
1310 pages, snapshot);
1311
1312 return get_perf_etr_buf_cpu_wide(drvdata, event, nr_pages,
1313 pages, snapshot);
1314}
1315
1316static struct etr_perf_buffer *
1317tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
1318 int nr_pages, void **pages, bool snapshot)
1319{
1320 int node, cpu = event->cpu;
1321 struct etr_buf *etr_buf;
1322 struct etr_perf_buffer *etr_perf;
1323
1324 if (cpu == -1)
1325 cpu = smp_processor_id();
1326 node = cpu_to_node(cpu);
1327
1328 etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
1329 if (!etr_perf)
1330 return ERR_PTR(-ENOMEM);
1331
1332 etr_buf = get_perf_etr_buf(drvdata, event, nr_pages, pages, snapshot);
1333 if (!IS_ERR(etr_buf))
1334 goto done;
1335
1198 kfree(etr_perf); 1336 kfree(etr_perf);
1199 return ERR_PTR(-ENOMEM); 1337 return ERR_PTR(-ENOMEM);
1200 1338
1201done: 1339done:
1340 /*
1341 * Keep a reference to the ETR this buffer has been allocated for
1342 * in order to have access to the IDR in tmc_free_etr_buffer().
1343 */
1344 etr_perf->drvdata = drvdata;
1202 etr_perf->etr_buf = etr_buf; 1345 etr_perf->etr_buf = etr_buf;
1346
1203 return etr_perf; 1347 return etr_perf;
1204} 1348}
1205 1349
1206 1350
1207static void *tmc_alloc_etr_buffer(struct coresight_device *csdev, 1351static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
1208 int cpu, void **pages, int nr_pages, 1352 struct perf_event *event, void **pages,
1209 bool snapshot) 1353 int nr_pages, bool snapshot)
1210{ 1354{
1211 struct etr_perf_buffer *etr_perf; 1355 struct etr_perf_buffer *etr_perf;
1212 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 1356 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1213 1357
1214 if (cpu == -1) 1358 etr_perf = tmc_etr_setup_perf_buf(drvdata, event,
1215 cpu = smp_processor_id();
1216
1217 etr_perf = tmc_etr_setup_perf_buf(drvdata, cpu_to_node(cpu),
1218 nr_pages, pages, snapshot); 1359 nr_pages, pages, snapshot);
1219 if (IS_ERR(etr_perf)) { 1360 if (IS_ERR(etr_perf)) {
1220 dev_dbg(drvdata->dev, "Unable to allocate ETR buffer\n"); 1361 dev_dbg(drvdata->dev, "Unable to allocate ETR buffer\n");
1221 return NULL; 1362 return NULL;
1222 } 1363 }
1223 1364
1365 etr_perf->pid = task_pid_nr(event->owner);
1224 etr_perf->snapshot = snapshot; 1366 etr_perf->snapshot = snapshot;
1225 etr_perf->nr_pages = nr_pages; 1367 etr_perf->nr_pages = nr_pages;
1226 etr_perf->pages = pages; 1368 etr_perf->pages = pages;
@@ -1231,9 +1373,33 @@ static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
1231static void tmc_free_etr_buffer(void *config) 1373static void tmc_free_etr_buffer(void *config)
1232{ 1374{
1233 struct etr_perf_buffer *etr_perf = config; 1375 struct etr_perf_buffer *etr_perf = config;
1376 struct tmc_drvdata *drvdata = etr_perf->drvdata;
1377 struct etr_buf *buf, *etr_buf = etr_perf->etr_buf;
1378
1379 if (!etr_buf)
1380 goto free_etr_perf_buffer;
1381
1382 mutex_lock(&drvdata->idr_mutex);
1383 /* If we are not the last one to use the buffer, don't touch it. */
1384 if (!refcount_dec_and_test(&etr_buf->refcount)) {
1385 mutex_unlock(&drvdata->idr_mutex);
1386 goto free_etr_perf_buffer;
1387 }
1388
1389 /* We are the last one, remove from the IDR and free the buffer. */
1390 buf = idr_remove(&drvdata->idr, etr_perf->pid);
1391 mutex_unlock(&drvdata->idr_mutex);
1392
1393 /*
1394 * Something went very wrong if the buffer associated with this ID
1395 * is not the same in the IDR. Leak to avoid use after free.
1396 */
1397 if (buf && WARN_ON(buf != etr_buf))
1398 goto free_etr_perf_buffer;
1399
1400 tmc_free_etr_buf(etr_perf->etr_buf);
1234 1401
1235 if (etr_perf->etr_buf) 1402free_etr_perf_buffer:
1236 tmc_free_etr_buf(etr_perf->etr_buf);
1237 kfree(etr_perf); 1403 kfree(etr_perf);
1238} 1404}
1239 1405
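Freeing mirrors allocation: refcount_dec_and_test() under the IDR mutex decides whether this event was the last user, and only then is the entry removed, with a sanity check that deliberately leaks on an ID/buffer mismatch rather than risk a use-after-free. A standalone model of that teardown, reusing the same toy one-entry map as in the allocation sketch:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t idr_mutex = PTHREAD_MUTEX_INITIALIZER;
    static struct buf { int pid; int refs; } *slot; /* toy one-entry "IDR" */

    static void put_buf(struct buf *b)
    {
            struct buf *found;

            pthread_mutex_lock(&idr_mutex);
            if (--b->refs) {                /* not the last user: hands off */
                    pthread_mutex_unlock(&idr_mutex);
                    return;
            }
            found = slot;                   /* last user: detach, then free */
            slot = NULL;
            pthread_mutex_unlock(&idr_mutex);

            if (found != b) {               /* map out of sync: leak, no UAF */
                    fprintf(stderr, "map mismatch, leaking buffer\n");
                    return;
            }
            free(b);
    }

    int main(void)
    {
            struct buf *b = calloc(1, sizeof(*b));

            b->pid = 42;
            b->refs = 2;
            slot = b;
            put_buf(b);                     /* drops to 1, nothing freed */
            put_buf(b);                     /* last ref: removed and freed */
            printf("slot now %p\n", (void *)slot);
            return 0;
    }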
@@ -1308,6 +1474,13 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
1308 struct etr_buf *etr_buf = etr_perf->etr_buf; 1474 struct etr_buf *etr_buf = etr_perf->etr_buf;
1309 1475
1310 spin_lock_irqsave(&drvdata->spinlock, flags); 1476 spin_lock_irqsave(&drvdata->spinlock, flags);
1477
1478 /* Don't do anything if another tracer is using this sink */
1479 if (atomic_read(csdev->refcnt) != 1) {
1480 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1481 goto out;
1482 }
1483
1311 if (WARN_ON(drvdata->perf_data != etr_perf)) { 1484 if (WARN_ON(drvdata->perf_data != etr_perf)) {
1312 lost = true; 1485 lost = true;
1313 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1486 spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1347,17 +1520,15 @@ out:
1347static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data) 1520static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
1348{ 1521{
1349 int rc = 0; 1522 int rc = 0;
1523 pid_t pid;
1350 unsigned long flags; 1524 unsigned long flags;
1351 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 1525 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1352 struct perf_output_handle *handle = data; 1526 struct perf_output_handle *handle = data;
1353 struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle); 1527 struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);
1354 1528
1355 spin_lock_irqsave(&drvdata->spinlock, flags); 1529 spin_lock_irqsave(&drvdata->spinlock, flags);
1356 /* 1530 /* Don't use this sink if it is already claimed by sysFS */
1357 * There can be only one writer per sink in perf mode. If the sink 1531 if (drvdata->mode == CS_MODE_SYSFS) {
1358 * is already open in SYSFS mode, we can't use it.
1359 */
1360 if (drvdata->mode != CS_MODE_DISABLED || WARN_ON(drvdata->perf_data)) {
1361 rc = -EBUSY; 1532 rc = -EBUSY;
1362 goto unlock_out; 1533 goto unlock_out;
1363 } 1534 }
@@ -1367,11 +1538,34 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
1367 goto unlock_out; 1538 goto unlock_out;
1368 } 1539 }
1369 1540
1541 /* Get a handle on the pid of the process to monitor */
1542 pid = etr_perf->pid;
1543
1544 /* Do not proceed if this device is associated with another session */
1545 if (drvdata->pid != -1 && drvdata->pid != pid) {
1546 rc = -EBUSY;
1547 goto unlock_out;
1548 }
1549
1370 etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf); 1550 etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);
1371 drvdata->perf_data = etr_perf; 1551 drvdata->perf_data = etr_perf;
1552
1553 /*
1554 * No HW configuration is needed if the sink is already in
1555 * use for this session.
1556 */
1557 if (drvdata->pid == pid) {
1558 atomic_inc(csdev->refcnt);
1559 goto unlock_out;
1560 }
1561
1372 rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf); 1562 rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
1373 if (!rc) 1563 if (!rc) {
1564 /* Associate with monitored process. */
1565 drvdata->pid = pid;
1374 drvdata->mode = CS_MODE_PERF; 1566 drvdata->mode = CS_MODE_PERF;
1567 atomic_inc(csdev->refcnt);
1568 }
1375 1569
1376unlock_out: 1570unlock_out:
1377 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1571 spin_unlock_irqrestore(&drvdata->spinlock, flags);
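Perf-mode enable now arbitrates by session rather than flatly refusing a busy sink: a different owning PID gets -EBUSY, while a second event from the same session skips hardware programming and only takes a reference. The decision table in miniature (pid -1 means unclaimed, matching the drvdata->pid initialisation added in probe):

    #include <stdio.h>

    #define EBUSY 16

    static int owner_pid = -1;      /* -1: sink not bound to any session */
    static int refcnt;

    static int sink_enable(int pid)
    {
            if (owner_pid != -1 && owner_pid != pid)
                    return -EBUSY;  /* claimed by another session */

            if (owner_pid == pid) { /* same session: no HW work needed */
                    refcnt++;
                    return 0;
            }

            /* First user: program the hardware, then record the owner. */
            owner_pid = pid;
            refcnt++;
            return 0;
    }

    int main(void)
    {
            printf("pid 100: %d\n", sink_enable(100)); /* 0, owns the sink */
            printf("pid 100: %d\n", sink_enable(100)); /* 0, refcnt bumped */
            printf("pid 200: %d\n", sink_enable(200)); /* -16 (EBUSY) */
            printf("refcnt=%d owner=%d\n", refcnt, owner_pid);
            return 0;
    }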
@@ -1392,26 +1586,34 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev,
1392 return -EINVAL; 1586 return -EINVAL;
1393} 1587}
1394 1588
1395static void tmc_disable_etr_sink(struct coresight_device *csdev) 1589static int tmc_disable_etr_sink(struct coresight_device *csdev)
1396{ 1590{
1397 unsigned long flags; 1591 unsigned long flags;
1398 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 1592 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1399 1593
1400 spin_lock_irqsave(&drvdata->spinlock, flags); 1594 spin_lock_irqsave(&drvdata->spinlock, flags);
1595
1401 if (drvdata->reading) { 1596 if (drvdata->reading) {
1402 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1597 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1403 return; 1598 return -EBUSY;
1404 } 1599 }
1405 1600
1406 /* Disable the TMC only if it needs to */ 1601 if (atomic_dec_return(csdev->refcnt)) {
1407 if (drvdata->mode != CS_MODE_DISABLED) { 1602 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1408 tmc_etr_disable_hw(drvdata); 1603 return -EBUSY;
1409 drvdata->mode = CS_MODE_DISABLED;
1410 } 1604 }
1411 1605
1606 /* Complain if we (somehow) got out of sync */
1607 WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
1608 tmc_etr_disable_hw(drvdata);
1609 /* Dissociate from monitored process. */
1610 drvdata->pid = -1;
1611 drvdata->mode = CS_MODE_DISABLED;
1612
1412 spin_unlock_irqrestore(&drvdata->spinlock, flags); 1613 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1413 1614
1414 dev_dbg(drvdata->dev, "TMC-ETR disabled\n"); 1615 dev_dbg(drvdata->dev, "TMC-ETR disabled\n");
1616 return 0;
1415} 1617}
1416 1618
1417static const struct coresight_ops_sink tmc_etr_sink_ops = { 1619static const struct coresight_ops_sink tmc_etr_sink_ops = {
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 2a02da3d630f..3f718729d741 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -8,10 +8,12 @@
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/device.h> 10#include <linux/device.h>
11#include <linux/idr.h>
11#include <linux/io.h> 12#include <linux/io.h>
12#include <linux/err.h> 13#include <linux/err.h>
13#include <linux/fs.h> 14#include <linux/fs.h>
14#include <linux/miscdevice.h> 15#include <linux/miscdevice.h>
16#include <linux/mutex.h>
15#include <linux/property.h> 17#include <linux/property.h>
16#include <linux/uaccess.h> 18#include <linux/uaccess.h>
17#include <linux/slab.h> 19#include <linux/slab.h>
@@ -340,6 +342,8 @@ static inline bool tmc_etr_can_use_sg(struct tmc_drvdata *drvdata)
340static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata, 342static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
341 u32 devid, void *dev_caps) 343 u32 devid, void *dev_caps)
342{ 344{
345 int rc;
346
343 u32 dma_mask = 0; 347 u32 dma_mask = 0;
344 348
345 /* Set the unadvertised capabilities */ 349 /* Set the unadvertised capabilities */
@@ -369,7 +373,10 @@ static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
369 dma_mask = 40; 373 dma_mask = 40;
370 } 374 }
371 375
372 return dma_set_mask_and_coherent(drvdata->dev, DMA_BIT_MASK(dma_mask)); 376 rc = dma_set_mask_and_coherent(drvdata->dev, DMA_BIT_MASK(dma_mask));
377 if (rc)
378 dev_err(drvdata->dev, "Failed to setup DMA mask: %d\n", rc);
379 return rc;
373} 380}
374 381
375static int tmc_probe(struct amba_device *adev, const struct amba_id *id) 382static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
@@ -415,6 +422,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
415 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); 422 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
416 drvdata->config_type = BMVAL(devid, 6, 7); 423 drvdata->config_type = BMVAL(devid, 6, 7);
417 drvdata->memwidth = tmc_get_memwidth(devid); 424 drvdata->memwidth = tmc_get_memwidth(devid);
425 /* This device is not associated with a session */
426 drvdata->pid = -1;
418 427
419 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { 428 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
420 if (np) 429 if (np)
@@ -427,8 +436,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
427 drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4; 436 drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
428 } 437 }
429 438
430 pm_runtime_put(&adev->dev);
431
432 desc.pdata = pdata; 439 desc.pdata = pdata;
433 desc.dev = dev; 440 desc.dev = dev;
434 desc.groups = coresight_tmc_groups; 441 desc.groups = coresight_tmc_groups;
@@ -447,6 +454,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
447 coresight_get_uci_data(id)); 454 coresight_get_uci_data(id));
448 if (ret) 455 if (ret)
449 goto out; 456 goto out;
457 idr_init(&drvdata->idr);
458 mutex_init(&drvdata->idr_mutex);
450 break; 459 break;
451 case TMC_CONFIG_TYPE_ETF: 460 case TMC_CONFIG_TYPE_ETF:
452 desc.type = CORESIGHT_DEV_TYPE_LINKSINK; 461 desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
@@ -471,6 +480,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
471 ret = misc_register(&drvdata->miscdev); 480 ret = misc_register(&drvdata->miscdev);
472 if (ret) 481 if (ret)
473 coresight_unregister(drvdata->csdev); 482 coresight_unregister(drvdata->csdev);
483 else
484 pm_runtime_put(&adev->dev);
474out: 485out:
475 return ret; 486 return ret;
476} 487}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 487c53701e9c..503f1b3a3741 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -8,7 +8,10 @@
8#define _CORESIGHT_TMC_H 8#define _CORESIGHT_TMC_H
9 9
10#include <linux/dma-mapping.h> 10#include <linux/dma-mapping.h>
11#include <linux/idr.h>
11#include <linux/miscdevice.h> 12#include <linux/miscdevice.h>
13#include <linux/mutex.h>
14#include <linux/refcount.h>
12 15
13#define TMC_RSZ 0x004 16#define TMC_RSZ 0x004
14#define TMC_STS 0x00c 17#define TMC_STS 0x00c
@@ -133,6 +136,7 @@ struct etr_buf_operations;
133 136
134/** 137/**
135 * struct etr_buf - Details of the buffer used by ETR 138 * struct etr_buf - Details of the buffer used by ETR
 139 * @refcount : Number of sources currently using this etr_buf.
136 * @mode : Mode of the ETR buffer, contiguous, Scatter Gather etc. 140 * @mode : Mode of the ETR buffer, contiguous, Scatter Gather etc.
137 * @full : Trace data overflow 141 * @full : Trace data overflow
138 * @size : Size of the buffer. 142 * @size : Size of the buffer.
@@ -143,6 +147,7 @@ struct etr_buf_operations;
143 * @private : Backend specific information for the buf 147 * @private : Backend specific information for the buf
144 */ 148 */
145struct etr_buf { 149struct etr_buf {
150 refcount_t refcount;
146 enum etr_mode mode; 151 enum etr_mode mode;
147 bool full; 152 bool full;
148 ssize_t size; 153 ssize_t size;
@@ -160,6 +165,8 @@ struct etr_buf {
160 * @csdev: component vitals needed by the framework. 165 * @csdev: component vitals needed by the framework.
161 * @miscdev: specifics to handle "/dev/xyz.tmc" entry. 166 * @miscdev: specifics to handle "/dev/xyz.tmc" entry.
162 * @spinlock: only one at a time pls. 167 * @spinlock: only one at a time pls.
168 * @pid: Process ID of the process being monitored by the session
169 * that is using this component.
163 * @buf: Snapshot of the trace data for ETF/ETB. 170 * @buf: Snapshot of the trace data for ETF/ETB.
164 * @etr_buf: details of buffer used in TMC-ETR 171 * @etr_buf: details of buffer used in TMC-ETR
165 * @len: size of the available trace for ETF/ETB. 172 * @len: size of the available trace for ETF/ETB.
@@ -170,6 +177,8 @@ struct etr_buf {
170 * @trigger_cntr: amount of words to store after a trigger. 177 * @trigger_cntr: amount of words to store after a trigger.
171 * @etr_caps: Bitmask of capabilities of the TMC ETR, inferred from the 178 * @etr_caps: Bitmask of capabilities of the TMC ETR, inferred from the
172 * device configuration register (DEVID) 179 * device configuration register (DEVID)
180 * @idr: Holds etr_bufs allocated for this ETR.
181 * @idr_mutex: Access serialisation for idr.
173 * @perf_data: PERF buffer for ETR. 182 * @perf_data: PERF buffer for ETR.
174 * @sysfs_data: SYSFS buffer for ETR. 183 * @sysfs_data: SYSFS buffer for ETR.
175 */ 184 */
@@ -179,6 +188,7 @@ struct tmc_drvdata {
179 struct coresight_device *csdev; 188 struct coresight_device *csdev;
180 struct miscdevice miscdev; 189 struct miscdevice miscdev;
181 spinlock_t spinlock; 190 spinlock_t spinlock;
191 pid_t pid;
182 bool reading; 192 bool reading;
183 union { 193 union {
184 char *buf; /* TMC ETB */ 194 char *buf; /* TMC ETB */
@@ -191,6 +201,8 @@ struct tmc_drvdata {
191 enum tmc_mem_intf_width memwidth; 201 enum tmc_mem_intf_width memwidth;
192 u32 trigger_cntr; 202 u32 trigger_cntr;
193 u32 etr_caps; 203 u32 etr_caps;
204 struct idr idr;
205 struct mutex idr_mutex;
194 struct etr_buf *sysfs_buf; 206 struct etr_buf *sysfs_buf;
195 void *perf_data; 207 void *perf_data;
196}; 208};
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index b2f72a1fa402..63d9af31f57f 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -5,6 +5,7 @@
5 * Description: CoreSight Trace Port Interface Unit driver 5 * Description: CoreSight Trace Port Interface Unit driver
6 */ 6 */
7 7
8#include <linux/atomic.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
9#include <linux/init.h> 10#include <linux/init.h>
10#include <linux/device.h> 11#include <linux/device.h>
@@ -73,7 +74,7 @@ static int tpiu_enable(struct coresight_device *csdev, u32 mode, void *__unused)
73 struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 74 struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
74 75
75 tpiu_enable_hw(drvdata); 76 tpiu_enable_hw(drvdata);
76 77 atomic_inc(csdev->refcnt);
77 dev_dbg(drvdata->dev, "TPIU enabled\n"); 78 dev_dbg(drvdata->dev, "TPIU enabled\n");
78 return 0; 79 return 0;
79} 80}
@@ -94,13 +95,17 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
94 CS_LOCK(drvdata->base); 95 CS_LOCK(drvdata->base);
95} 96}
96 97
97static void tpiu_disable(struct coresight_device *csdev) 98static int tpiu_disable(struct coresight_device *csdev)
98{ 99{
99 struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 100 struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
100 101
102 if (atomic_dec_return(csdev->refcnt))
103 return -EBUSY;
104
101 tpiu_disable_hw(drvdata); 105 tpiu_disable_hw(drvdata);
102 106
103 dev_dbg(drvdata->dev, "TPIU disabled\n"); 107 dev_dbg(drvdata->dev, "TPIU disabled\n");
108 return 0;
104} 109}
105 110
106static const struct coresight_ops_sink tpiu_sink_ops = { 111static const struct coresight_ops_sink tpiu_sink_ops = {
@@ -153,8 +158,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
153 /* Disable tpiu to support older devices */ 158 /* Disable tpiu to support older devices */
154 tpiu_disable_hw(drvdata); 159 tpiu_disable_hw(drvdata);
155 160
156 pm_runtime_put(&adev->dev);
157
158 desc.type = CORESIGHT_DEV_TYPE_SINK; 161 desc.type = CORESIGHT_DEV_TYPE_SINK;
159 desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PORT; 162 desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PORT;
160 desc.ops = &tpiu_cs_ops; 163 desc.ops = &tpiu_cs_ops;
@@ -162,7 +165,12 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
162 desc.dev = dev; 165 desc.dev = dev;
163 drvdata->csdev = coresight_register(&desc); 166 drvdata->csdev = coresight_register(&desc);
164 167
165 return PTR_ERR_OR_ZERO(drvdata->csdev); 168 if (!IS_ERR(drvdata->csdev)) {
169 pm_runtime_put(&adev->dev);
170 return 0;
171 }
172
173 return PTR_ERR(drvdata->csdev);
166} 174}
167 175
168#ifdef CONFIG_PM 176#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 29cef898afba..4b130281236a 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -225,26 +225,28 @@ static int coresight_enable_sink(struct coresight_device *csdev,
225 * We need to make sure the "new" session is compatible with the 225 * We need to make sure the "new" session is compatible with the
226 * existing "mode" of operation. 226 * existing "mode" of operation.
227 */ 227 */
228 if (sink_ops(csdev)->enable) { 228 if (!sink_ops(csdev)->enable)
229 ret = sink_ops(csdev)->enable(csdev, mode, data); 229 return -EINVAL;
230 if (ret)
231 return ret;
232 csdev->enable = true;
233 }
234 230
235 atomic_inc(csdev->refcnt); 231 ret = sink_ops(csdev)->enable(csdev, mode, data);
232 if (ret)
233 return ret;
234 csdev->enable = true;
236 235
237 return 0; 236 return 0;
238} 237}
239 238
240static void coresight_disable_sink(struct coresight_device *csdev) 239static void coresight_disable_sink(struct coresight_device *csdev)
241{ 240{
242 if (atomic_dec_return(csdev->refcnt) == 0) { 241 int ret;
243 if (sink_ops(csdev)->disable) { 242
244 sink_ops(csdev)->disable(csdev); 243 if (!sink_ops(csdev)->disable)
245 csdev->enable = false; 244 return;
246 } 245
247 } 246 ret = sink_ops(csdev)->disable(csdev);
247 if (ret)
248 return;
249 csdev->enable = false;
248} 250}
249 251
250static int coresight_enable_link(struct coresight_device *csdev, 252static int coresight_enable_link(struct coresight_device *csdev,
@@ -973,7 +975,6 @@ static void coresight_device_release(struct device *dev)
973{ 975{
974 struct coresight_device *csdev = to_coresight_device(dev); 976 struct coresight_device *csdev = to_coresight_device(dev);
975 977
976 kfree(csdev->conns);
977 kfree(csdev->refcnt); 978 kfree(csdev->refcnt);
978 kfree(csdev); 979 kfree(csdev);
979} 980}
diff --git a/drivers/hwtracing/intel_th/acpi.c b/drivers/hwtracing/intel_th/acpi.c
index 87bc3744755f..87f9024e4bbb 100644
--- a/drivers/hwtracing/intel_th/acpi.c
+++ b/drivers/hwtracing/intel_th/acpi.c
@@ -37,15 +37,21 @@ MODULE_DEVICE_TABLE(acpi, intel_th_acpi_ids);
37static int intel_th_acpi_probe(struct platform_device *pdev) 37static int intel_th_acpi_probe(struct platform_device *pdev)
38{ 38{
39 struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); 39 struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
40 struct resource resource[TH_MMIO_END];
40 const struct acpi_device_id *id; 41 const struct acpi_device_id *id;
41 struct intel_th *th; 42 struct intel_th *th;
43 int i, r;
42 44
43 id = acpi_match_device(intel_th_acpi_ids, &pdev->dev); 45 id = acpi_match_device(intel_th_acpi_ids, &pdev->dev);
44 if (!id) 46 if (!id)
45 return -ENODEV; 47 return -ENODEV;
46 48
47 th = intel_th_alloc(&pdev->dev, (void *)id->driver_data, 49 for (i = 0, r = 0; i < pdev->num_resources && r < TH_MMIO_END; i++)
48 pdev->resource, pdev->num_resources, -1); 50 if (pdev->resource[i].flags &
51 (IORESOURCE_IRQ | IORESOURCE_MEM))
52 resource[r++] = pdev->resource[i];
53
54 th = intel_th_alloc(&pdev->dev, (void *)id->driver_data, resource, r);
49 if (IS_ERR(th)) 55 if (IS_ERR(th))
50 return PTR_ERR(th); 56 return PTR_ERR(th);
51 57
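The ACPI glue stops passing pdev->resource through wholesale; it copies only the memory and IRQ resources into a fixed array capped at TH_MMIO_END before calling intel_th_alloc(). The same filter reduced to plain C (struct resource shrunk to a flags word; RES_MEM/RES_IRQ are illustrative, not the kernel's IORESOURCE_* values):

    #include <stdio.h>

    #define RES_MEM  0x1
    #define RES_IRQ  0x2
    #define TH_MMIO_END 3

    struct resource { unsigned long flags; };

    int main(void)
    {
            struct resource pdev_res[] = {
                    { RES_MEM }, { 0x8 /* e.g. a bus number, skipped */ },
                    { RES_MEM }, { RES_IRQ }, { RES_MEM /* over the cap */ },
            };
            struct resource resource[TH_MMIO_END];
            int n = sizeof(pdev_res) / sizeof(pdev_res[0]);
            int i, r;

            /* Keep MEM and IRQ entries only, never more than TH_MMIO_END. */
            for (i = 0, r = 0; i < n && r < TH_MMIO_END; i++)
                    if (pdev_res[i].flags & (RES_IRQ | RES_MEM))
                            resource[r++] = pdev_res[i];

            printf("passed %d of %d resources on\n", r, n);
            return 0;
    }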
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 7c1acc2f801c..033dce563c99 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -430,9 +430,9 @@ static const struct intel_th_subdevice {
430 .nres = 1, 430 .nres = 1,
431 .res = { 431 .res = {
432 { 432 {
433 /* Handle TSCU from GTH driver */ 433 /* Handle TSCU and CTS from GTH driver */
434 .start = REG_GTH_OFFSET, 434 .start = REG_GTH_OFFSET,
435 .end = REG_TSCU_OFFSET + REG_TSCU_LENGTH - 1, 435 .end = REG_CTS_OFFSET + REG_CTS_LENGTH - 1,
436 .flags = IORESOURCE_MEM, 436 .flags = IORESOURCE_MEM,
437 }, 437 },
438 }, 438 },
@@ -491,7 +491,7 @@ static const struct intel_th_subdevice {
491 .flags = IORESOURCE_MEM, 491 .flags = IORESOURCE_MEM,
492 }, 492 },
493 { 493 {
494 .start = 1, /* use resource[1] */ 494 .start = TH_MMIO_SW,
495 .end = 0, 495 .end = 0,
496 .flags = IORESOURCE_MEM, 496 .flags = IORESOURCE_MEM,
497 }, 497 },
@@ -501,6 +501,24 @@ static const struct intel_th_subdevice {
501 .type = INTEL_TH_SOURCE, 501 .type = INTEL_TH_SOURCE,
502 }, 502 },
503 { 503 {
504 .nres = 2,
505 .res = {
506 {
507 .start = REG_STH_OFFSET,
508 .end = REG_STH_OFFSET + REG_STH_LENGTH - 1,
509 .flags = IORESOURCE_MEM,
510 },
511 {
512 .start = TH_MMIO_RTIT,
513 .end = 0,
514 .flags = IORESOURCE_MEM,
515 },
516 },
517 .id = -1,
518 .name = "rtit",
519 .type = INTEL_TH_SOURCE,
520 },
521 {
504 .nres = 1, 522 .nres = 1,
505 .res = { 523 .res = {
506 { 524 {
@@ -584,7 +602,6 @@ intel_th_subdevice_alloc(struct intel_th *th,
584 struct intel_th_device *thdev; 602 struct intel_th_device *thdev;
585 struct resource res[3]; 603 struct resource res[3];
586 unsigned int req = 0; 604 unsigned int req = 0;
587 bool is64bit = false;
588 int r, err; 605 int r, err;
589 606
590 thdev = intel_th_device_alloc(th, subdev->type, subdev->name, 607 thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
@@ -594,18 +611,12 @@ intel_th_subdevice_alloc(struct intel_th *th,
594 611
595 thdev->drvdata = th->drvdata; 612 thdev->drvdata = th->drvdata;
596 613
597 for (r = 0; r < th->num_resources; r++)
598 if (th->resource[r].flags & IORESOURCE_MEM_64) {
599 is64bit = true;
600 break;
601 }
602
603 memcpy(res, subdev->res, 614 memcpy(res, subdev->res,
604 sizeof(struct resource) * subdev->nres); 615 sizeof(struct resource) * subdev->nres);
605 616
606 for (r = 0; r < subdev->nres; r++) { 617 for (r = 0; r < subdev->nres; r++) {
607 struct resource *devres = th->resource; 618 struct resource *devres = th->resource;
608 int bar = 0; /* cut subdevices' MMIO from resource[0] */ 619 int bar = TH_MMIO_CONFIG;
609 620
610 /* 621 /*
611 * Take .end == 0 to mean 'take the whole bar', 622 * Take .end == 0 to mean 'take the whole bar',
@@ -614,8 +625,9 @@ intel_th_subdevice_alloc(struct intel_th *th,
614 */ 625 */
615 if (!res[r].end && res[r].flags == IORESOURCE_MEM) { 626 if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
616 bar = res[r].start; 627 bar = res[r].start;
617 if (is64bit) 628 err = -ENODEV;
618 bar *= 2; 629 if (bar >= th->num_resources)
630 goto fail_put_device;
619 res[r].start = 0; 631 res[r].start = 0;
620 res[r].end = resource_size(&devres[bar]) - 1; 632 res[r].end = resource_size(&devres[bar]) - 1;
621 } 633 }
@@ -627,7 +639,12 @@ intel_th_subdevice_alloc(struct intel_th *th,
627 dev_dbg(th->dev, "%s:%d @ %pR\n", 639 dev_dbg(th->dev, "%s:%d @ %pR\n",
628 subdev->name, r, &res[r]); 640 subdev->name, r, &res[r]);
629 } else if (res[r].flags & IORESOURCE_IRQ) { 641 } else if (res[r].flags & IORESOURCE_IRQ) {
630 res[r].start = th->irq; 642 /*
643 * Only pass on the IRQ if we have useful interrupts:
644 * the ones that can be configured via MINTCTL.
645 */
646 if (INTEL_TH_CAP(th, has_mintctl) && th->irq != -1)
647 res[r].start = th->irq;
631 } 648 }
632 } 649 }
633 650
@@ -758,8 +775,13 @@ static int intel_th_populate(struct intel_th *th)
758 775
759 thdev = intel_th_subdevice_alloc(th, subdev); 776 thdev = intel_th_subdevice_alloc(th, subdev);
760 /* note: caller should free subdevices from th::thdev[] */ 777 /* note: caller should free subdevices from th::thdev[] */
761 if (IS_ERR(thdev)) 778 if (IS_ERR(thdev)) {
779 /* ENODEV for individual subdevices is allowed */
780 if (PTR_ERR(thdev) == -ENODEV)
781 continue;
782
762 return PTR_ERR(thdev); 783 return PTR_ERR(thdev);
784 }
763 785
764 th->thdev[th->num_thdevs++] = thdev; 786 th->thdev[th->num_thdevs++] = thdev;
765 } 787 }
@@ -809,26 +831,40 @@ static const struct file_operations intel_th_output_fops = {
809 .llseek = noop_llseek, 831 .llseek = noop_llseek,
810}; 832};
811 833
834static irqreturn_t intel_th_irq(int irq, void *data)
835{
836 struct intel_th *th = data;
837 irqreturn_t ret = IRQ_NONE;
838 struct intel_th_driver *d;
839 int i;
840
841 for (i = 0; i < th->num_thdevs; i++) {
842 if (th->thdev[i]->type != INTEL_TH_OUTPUT)
843 continue;
844
845 d = to_intel_th_driver(th->thdev[i]->dev.driver);
846 if (d && d->irq)
847 ret |= d->irq(th->thdev[i]);
848 }
849
850 if (ret == IRQ_NONE)
851 pr_warn_ratelimited("nobody cared for irq\n");
852
853 return ret;
854}
855
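With MSI signalling in place, the core owns one handler that fans the interrupt out to every INTEL_TH_OUTPUT subdevice whose driver registered an ->irq() callback, OR-ing the irqreturn_t results and rate-limit-warning when nobody claims it. A userspace model of the fan-out (the devs array stands in for th->thdev[], and the enum values are illustrative):

    #include <stdio.h>

    enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };

    struct dev {
            int is_output;
            enum irqreturn (*irq)(struct dev *);
    };

    static enum irqreturn msu_irq(struct dev *d)
    {
            (void)d;
            return IRQ_HANDLED;
    }

    static enum irqreturn demux(struct dev *devs, int n)
    {
            enum irqreturn ret = IRQ_NONE;
            int i;

            for (i = 0; i < n; i++) {
                    if (!devs[i].is_output || !devs[i].irq)
                            continue;   /* sources/switch take no IRQ */
                    ret |= devs[i].irq(&devs[i]);
            }
            if (ret == IRQ_NONE)
                    fprintf(stderr, "nobody cared for irq\n");
            return ret;
    }

    int main(void)
    {
            struct dev devs[] = {
                    { .is_output = 0 },                 /* e.g. the GTH */
                    { .is_output = 1, .irq = msu_irq }, /* e.g. an MSC */
            };

            printf("demux -> %d\n", demux(devs, 2));
            return 0;
    }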
812/** 856/**
813 * intel_th_alloc() - allocate a new Intel TH device and its subdevices 857 * intel_th_alloc() - allocate a new Intel TH device and its subdevices
814 * @dev: parent device 858 * @dev: parent device
815 * @devres: parent's resources 859 * @devres: resources indexed by th_mmio_idx
816 * @ndevres: number of resources
817 * @irq: irq number 860 * @irq: irq number
818 */ 861 */
819struct intel_th * 862struct intel_th *
820intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, 863intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
821 struct resource *devres, unsigned int ndevres, int irq) 864 struct resource *devres, unsigned int ndevres)
822{ 865{
866 int err, r, nr_mmios = 0;
823 struct intel_th *th; 867 struct intel_th *th;
824 int err, r;
825
826 if (irq == -1)
827 for (r = 0; r < ndevres; r++)
828 if (devres[r].flags & IORESOURCE_IRQ) {
829 irq = devres[r].start;
830 break;
831 }
832 868
833 th = kzalloc(sizeof(*th), GFP_KERNEL); 869 th = kzalloc(sizeof(*th), GFP_KERNEL);
834 if (!th) 870 if (!th)
@@ -846,12 +882,32 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
846 err = th->major; 882 err = th->major;
847 goto err_ida; 883 goto err_ida;
848 } 884 }
885 th->irq = -1;
849 th->dev = dev; 886 th->dev = dev;
850 th->drvdata = drvdata; 887 th->drvdata = drvdata;
851 888
852 th->resource = devres; 889 for (r = 0; r < ndevres; r++)
853 th->num_resources = ndevres; 890 switch (devres[r].flags & IORESOURCE_TYPE_BITS) {
854 th->irq = irq; 891 case IORESOURCE_MEM:
892 th->resource[nr_mmios++] = devres[r];
893 break;
894 case IORESOURCE_IRQ:
895 err = devm_request_irq(dev, devres[r].start,
896 intel_th_irq, IRQF_SHARED,
897 dev_name(dev), th);
898 if (err)
899 goto err_chrdev;
900
901 if (th->irq == -1)
902 th->irq = devres[r].start;
903 break;
904 default:
905 dev_warn(dev, "Unknown resource type %lx\n",
906 devres[r].flags);
907 break;
908 }
909
910 th->num_resources = nr_mmios;
855 911
856 dev_set_drvdata(dev, th); 912 dev_set_drvdata(dev, th);
857 913
@@ -868,6 +924,10 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
868 924
869 return th; 925 return th;
870 926
927err_chrdev:
928 __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
929 "intel_th/output");
930
871err_ida: 931err_ida:
872 ida_simple_remove(&intel_th_ida, th->id); 932 ida_simple_remove(&intel_th_ida, th->id);
873 933
@@ -928,6 +988,27 @@ int intel_th_trace_enable(struct intel_th_device *thdev)
928EXPORT_SYMBOL_GPL(intel_th_trace_enable); 988EXPORT_SYMBOL_GPL(intel_th_trace_enable);
929 989
930/** 990/**
991 * intel_th_trace_switch() - execute a switch sequence
992 * @thdev: output device that requests tracing switch
993 */
994int intel_th_trace_switch(struct intel_th_device *thdev)
995{
996 struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent);
997 struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
998
999 if (WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH))
1000 return -EINVAL;
1001
1002 if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT))
1003 return -EINVAL;
1004
1005 hubdrv->trig_switch(hub, &thdev->output);
1006
1007 return 0;
1008}
1009EXPORT_SYMBOL_GPL(intel_th_trace_switch);
1010
1011/**
931 * intel_th_trace_disable() - disable tracing for an output device 1012 * intel_th_trace_disable() - disable tracing for an output device
932 * @thdev: output device that requests tracing be disabled 1013 * @thdev: output device that requests tracing be disabled
933 */ 1014 */
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index edc52d75e6bd..fa9d34af87ac 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -308,6 +308,11 @@ static int intel_th_gth_reset(struct gth_device *gth)
308 iowrite32(0, gth->base + REG_GTH_SCR); 308 iowrite32(0, gth->base + REG_GTH_SCR);
309 iowrite32(0xfc, gth->base + REG_GTH_SCR2); 309 iowrite32(0xfc, gth->base + REG_GTH_SCR2);
310 310
311 /* setup CTS for single trigger */
312 iowrite32(CTS_EVENT_ENABLE_IF_ANYTHING, gth->base + REG_CTS_C0S0_EN);
313 iowrite32(CTS_ACTION_CONTROL_SET_STATE(CTS_STATE_IDLE) |
314 CTS_ACTION_CONTROL_TRIGGER, gth->base + REG_CTS_C0S0_ACT);
315
311 return 0; 316 return 0;
312} 317}
313 318
@@ -457,6 +462,68 @@ static int intel_th_output_attributes(struct gth_device *gth)
457} 462}
458 463
459/** 464/**
465 * intel_th_gth_stop() - stop tracing to an output device
466 * @gth: GTH device
467 * @output: output device's descriptor
468 * @capture_done: set when no more traces will be captured
469 *
 470 * This will stop tracing using the force storeEn off signal and wait for
 471 * the pipelines of the corresponding output port to drain.
472 */
473static void intel_th_gth_stop(struct gth_device *gth,
474 struct intel_th_output *output,
475 bool capture_done)
476{
477 struct intel_th_device *outdev =
478 container_of(output, struct intel_th_device, output);
479 struct intel_th_driver *outdrv =
480 to_intel_th_driver(outdev->dev.driver);
481 unsigned long count;
482 u32 reg;
483 u32 scr2 = 0xfc | (capture_done ? 1 : 0);
484
485 iowrite32(0, gth->base + REG_GTH_SCR);
486 iowrite32(scr2, gth->base + REG_GTH_SCR2);
487
488 /* wait on pipeline empty for the given port */
489 for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
490 count && !(reg & BIT(output->port)); count--) {
491 reg = ioread32(gth->base + REG_GTH_STAT);
492 cpu_relax();
493 }
494
495 if (!count)
496 dev_dbg(gth->dev, "timeout waiting for GTH[%d] PLE\n",
497 output->port);
498
 499 /* wait on output pipeline empty */
500 if (outdrv->wait_empty)
501 outdrv->wait_empty(outdev);
502
503 /* clear force capture done for next captures */
504 iowrite32(0xfc, gth->base + REG_GTH_SCR2);
505}
506
507/**
508 * intel_th_gth_start() - start tracing to an output device
509 * @gth: GTH device
510 * @output: output device's descriptor
511 *
512 * This will start tracing using force storeEn signal.
513 */
514static void intel_th_gth_start(struct gth_device *gth,
515 struct intel_th_output *output)
516{
517 u32 scr = 0xfc0000;
518
519 if (output->multiblock)
520 scr |= 0xff;
521
522 iowrite32(scr, gth->base + REG_GTH_SCR);
523 iowrite32(0, gth->base + REG_GTH_SCR2);
524}
525
526/**
460 * intel_th_gth_disable() - disable tracing to an output device 527 * intel_th_gth_disable() - disable tracing to an output device
461 * @thdev: GTH device 528 * @thdev: GTH device
462 * @output: output device's descriptor 529 * @output: output device's descriptor
@@ -469,7 +536,6 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
469 struct intel_th_output *output) 536 struct intel_th_output *output)
470{ 537{
471 struct gth_device *gth = dev_get_drvdata(&thdev->dev); 538 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
472 unsigned long count;
473 int master; 539 int master;
474 u32 reg; 540 u32 reg;
475 541
@@ -482,22 +548,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
482 } 548 }
483 spin_unlock(&gth->gth_lock); 549 spin_unlock(&gth->gth_lock);
484 550
485 iowrite32(0, gth->base + REG_GTH_SCR); 551 intel_th_gth_stop(gth, output, true);
486 iowrite32(0xfd, gth->base + REG_GTH_SCR2);
487
488 /* wait on pipeline empty for the given port */
489 for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
490 count && !(reg & BIT(output->port)); count--) {
491 reg = ioread32(gth->base + REG_GTH_STAT);
492 cpu_relax();
493 }
494
495 /* clear force capture done for next captures */
496 iowrite32(0xfc, gth->base + REG_GTH_SCR2);
497
498 if (!count)
499 dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n",
500 output->port);
501 552
502 reg = ioread32(gth->base + REG_GTH_SCRPD0); 553 reg = ioread32(gth->base + REG_GTH_SCRPD0);
503 reg &= ~output->scratchpad; 554 reg &= ~output->scratchpad;
@@ -526,8 +577,8 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
526{ 577{
527 struct gth_device *gth = dev_get_drvdata(&thdev->dev); 578 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
528 struct intel_th *th = to_intel_th(thdev); 579 struct intel_th *th = to_intel_th(thdev);
529 u32 scr = 0xfc0000, scrpd;
530 int master; 580 int master;
581 u32 scrpd;
531 582
532 spin_lock(&gth->gth_lock); 583 spin_lock(&gth->gth_lock);
533 for_each_set_bit(master, gth->output[output->port].master, 584 for_each_set_bit(master, gth->output[output->port].master,
@@ -535,9 +586,6 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
535 gth_master_set(gth, master, output->port); 586 gth_master_set(gth, master, output->port);
536 } 587 }
537 588
538 if (output->multiblock)
539 scr |= 0xff;
540
541 output->active = true; 589 output->active = true;
542 spin_unlock(&gth->gth_lock); 590 spin_unlock(&gth->gth_lock);
543 591
@@ -548,8 +596,38 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
548 scrpd |= output->scratchpad; 596 scrpd |= output->scratchpad;
549 iowrite32(scrpd, gth->base + REG_GTH_SCRPD0); 597 iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
550 598
551 iowrite32(scr, gth->base + REG_GTH_SCR); 599 intel_th_gth_start(gth, output);
552 iowrite32(0, gth->base + REG_GTH_SCR2); 600}
601
602/**
603 * intel_th_gth_switch() - execute a switch sequence
604 * @thdev: GTH device
605 * @output: output device's descriptor
606 *
607 * This will execute a switch sequence that will trigger a switch window
608 * when tracing to MSC in multi-block mode.
609 */
610static void intel_th_gth_switch(struct intel_th_device *thdev,
611 struct intel_th_output *output)
612{
613 struct gth_device *gth = dev_get_drvdata(&thdev->dev);
614 unsigned long count;
615 u32 reg;
616
617 /* trigger */
618 iowrite32(0, gth->base + REG_CTS_CTL);
619 iowrite32(CTS_CTL_SEQUENCER_ENABLE, gth->base + REG_CTS_CTL);
620 /* wait on trigger status */
621 for (reg = 0, count = CTS_TRIG_WAITLOOP_DEPTH;
622 count && !(reg & BIT(4)); count--) {
623 reg = ioread32(gth->base + REG_CTS_STAT);
624 cpu_relax();
625 }
626 if (!count)
627 dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n");
628
629 intel_th_gth_stop(gth, output, false);
630 intel_th_gth_start(gth, output);
553} 631}
554 632
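The PLE wait factored into intel_th_gth_stop() and the CTS trigger wait above share one bounded-poll idiom: spin on a status register with cpu_relax(), stop after a fixed depth, and log rather than hang on timeout. A self-contained rendering (read_status() fakes a register that asserts after a few reads; WAITLOOP_DEPTH mirrors CTS_TRIG_WAITLOOP_DEPTH):

    #include <stdio.h>

    #define WAITLOOP_DEPTH 10000
    #define TRIG_BIT (1u << 4)

    /* Fake status register: asserts the trigger bit after a few reads. */
    static unsigned int read_status(void)
    {
            static int reads;

            return ++reads > 3 ? TRIG_BIT : 0;
    }

    static int wait_for_trigger(void)
    {
            unsigned int reg = 0;
            unsigned long count;

            for (count = WAITLOOP_DEPTH; count && !(reg & TRIG_BIT); count--)
                    reg = read_status();    /* cpu_relax() would sit here */

            if (!count) {
                    fprintf(stderr, "timeout waiting for CTS trigger\n");
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            printf("wait: %d\n", wait_for_trigger());
            return 0;
    }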
555/** 633/**
@@ -735,6 +813,7 @@ static struct intel_th_driver intel_th_gth_driver = {
735 .unassign = intel_th_gth_unassign, 813 .unassign = intel_th_gth_unassign,
736 .set_output = intel_th_gth_set_output, 814 .set_output = intel_th_gth_set_output,
737 .enable = intel_th_gth_enable, 815 .enable = intel_th_gth_enable,
816 .trig_switch = intel_th_gth_switch,
738 .disable = intel_th_gth_disable, 817 .disable = intel_th_gth_disable,
739 .driver = { 818 .driver = {
740 .name = "gth", 819 .name = "gth",
diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h
index 6f2b0b930875..bfcc0fd01177 100644
--- a/drivers/hwtracing/intel_th/gth.h
+++ b/drivers/hwtracing/intel_th/gth.h
@@ -49,6 +49,12 @@ enum {
49 REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */ 49 REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */
50 REG_TSCU_TSUCTRL = 0x2000, /* TSCU control register */ 50 REG_TSCU_TSUCTRL = 0x2000, /* TSCU control register */
51 REG_TSCU_TSCUSTAT = 0x2004, /* TSCU status register */ 51 REG_TSCU_TSCUSTAT = 0x2004, /* TSCU status register */
52
53 /* Common Capture Sequencer (CTS) registers */
54 REG_CTS_C0S0_EN = 0x30c0, /* clause_event_enable_c0s0 */
55 REG_CTS_C0S0_ACT = 0x3180, /* clause_action_control_c0s0 */
56 REG_CTS_STAT = 0x32a0, /* cts_status */
57 REG_CTS_CTL = 0x32a4, /* cts_control */
52}; 58};
53 59
54/* waiting for Pipeline Empty bit(s) to assert for GTH */ 60/* waiting for Pipeline Empty bit(s) to assert for GTH */
@@ -57,4 +63,17 @@ enum {
57#define TSUCTRL_CTCRESYNC BIT(0) 63#define TSUCTRL_CTCRESYNC BIT(0)
58#define TSCUSTAT_CTCSYNCING BIT(1) 64#define TSCUSTAT_CTCSYNCING BIT(1)
59 65
66/* waiting for Trigger status to assert for CTS */
67#define CTS_TRIG_WAITLOOP_DEPTH 10000
68
69#define CTS_EVENT_ENABLE_IF_ANYTHING BIT(31)
70#define CTS_ACTION_CONTROL_STATE_OFF 27
71#define CTS_ACTION_CONTROL_SET_STATE(x) \
72 (((x) & 0x1f) << CTS_ACTION_CONTROL_STATE_OFF)
73#define CTS_ACTION_CONTROL_TRIGGER BIT(4)
74
75#define CTS_STATE_IDLE 0x10u
76
77#define CTS_CTL_SEQUENCER_ENABLE BIT(0)
78
60#endif /* __INTEL_TH_GTH_H__ */ 79#endif /* __INTEL_TH_GTH_H__ */
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 780206dc9012..0df480072b6c 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -8,6 +8,8 @@
8#ifndef __INTEL_TH_H__ 8#ifndef __INTEL_TH_H__
9#define __INTEL_TH_H__ 9#define __INTEL_TH_H__
10 10
11#include <linux/irqreturn.h>
12
11/* intel_th_device device types */ 13/* intel_th_device device types */
12enum { 14enum {
13 /* Devices that generate trace data */ 15 /* Devices that generate trace data */
@@ -18,6 +20,8 @@ enum {
18 INTEL_TH_SWITCH, 20 INTEL_TH_SWITCH,
19}; 21};
20 22
23struct intel_th_device;
24
21/** 25/**
22 * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices 26 * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices
23 * @port: output port number, assigned by the switch 27 * @port: output port number, assigned by the switch
@@ -25,6 +29,7 @@ enum {
25 * @scratchpad: scratchpad bits to flag when this output is enabled 29 * @scratchpad: scratchpad bits to flag when this output is enabled
26 * @multiblock: true for multiblock output configuration 30 * @multiblock: true for multiblock output configuration
27 * @active: true when this output is enabled 31 * @active: true when this output is enabled
32 * @wait_empty: wait for device pipeline to be empty
28 * 33 *
29 * Output port descriptor, used by switch driver to tell which output 34 * Output port descriptor, used by switch driver to tell which output
30 * port this output device corresponds to. Filled in at output device's 35 * port this output device corresponds to. Filled in at output device's
@@ -42,10 +47,12 @@ struct intel_th_output {
42/** 47/**
43 * struct intel_th_drvdata - describes hardware capabilities and quirks 48 * struct intel_th_drvdata - describes hardware capabilities and quirks
44 * @tscu_enable: device needs SW to enable time stamping unit 49 * @tscu_enable: device needs SW to enable time stamping unit
50 * @has_mintctl: device has interrupt control (MINTCTL) register
45 * @host_mode_only: device can only operate in 'host debugger' mode 51 * @host_mode_only: device can only operate in 'host debugger' mode
46 */ 52 */
47struct intel_th_drvdata { 53struct intel_th_drvdata {
48 unsigned int tscu_enable : 1, 54 unsigned int tscu_enable : 1,
55 has_mintctl : 1,
49 host_mode_only : 1; 56 host_mode_only : 1;
50}; 57};
51 58
@@ -157,10 +164,13 @@ struct intel_th_driver {
157 struct intel_th_device *othdev); 164 struct intel_th_device *othdev);
158 void (*enable)(struct intel_th_device *thdev, 165 void (*enable)(struct intel_th_device *thdev,
159 struct intel_th_output *output); 166 struct intel_th_output *output);
167 void (*trig_switch)(struct intel_th_device *thdev,
168 struct intel_th_output *output);
160 void (*disable)(struct intel_th_device *thdev, 169 void (*disable)(struct intel_th_device *thdev,
161 struct intel_th_output *output); 170 struct intel_th_output *output);
162 /* output ops */ 171 /* output ops */
163 void (*irq)(struct intel_th_device *thdev); 172 irqreturn_t (*irq)(struct intel_th_device *thdev);
173 void (*wait_empty)(struct intel_th_device *thdev);
164 int (*activate)(struct intel_th_device *thdev); 174 int (*activate)(struct intel_th_device *thdev);
165 void (*deactivate)(struct intel_th_device *thdev); 175 void (*deactivate)(struct intel_th_device *thdev);
166 /* file_operations for those who want a device node */ 176 /* file_operations for those who want a device node */
@@ -213,21 +223,23 @@ static inline struct intel_th *to_intel_th(struct intel_th_device *thdev)
213 223
214struct intel_th * 224struct intel_th *
215intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, 225intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
216 struct resource *devres, unsigned int ndevres, int irq); 226 struct resource *devres, unsigned int ndevres);
217void intel_th_free(struct intel_th *th); 227void intel_th_free(struct intel_th *th);
218 228
219int intel_th_driver_register(struct intel_th_driver *thdrv); 229int intel_th_driver_register(struct intel_th_driver *thdrv);
220void intel_th_driver_unregister(struct intel_th_driver *thdrv); 230void intel_th_driver_unregister(struct intel_th_driver *thdrv);
221 231
222int intel_th_trace_enable(struct intel_th_device *thdev); 232int intel_th_trace_enable(struct intel_th_device *thdev);
233int intel_th_trace_switch(struct intel_th_device *thdev);
223int intel_th_trace_disable(struct intel_th_device *thdev); 234int intel_th_trace_disable(struct intel_th_device *thdev);
224int intel_th_set_output(struct intel_th_device *thdev, 235int intel_th_set_output(struct intel_th_device *thdev,
225 unsigned int master); 236 unsigned int master);
226int intel_th_output_enable(struct intel_th *th, unsigned int otype); 237int intel_th_output_enable(struct intel_th *th, unsigned int otype);
227 238
228enum { 239enum th_mmio_idx {
229 TH_MMIO_CONFIG = 0, 240 TH_MMIO_CONFIG = 0,
230 TH_MMIO_SW = 2, 241 TH_MMIO_SW = 1,
242 TH_MMIO_RTIT = 2,
231 TH_MMIO_END, 243 TH_MMIO_END,
232}; 244};
233 245
@@ -237,6 +249,9 @@ enum {
237#define TH_CONFIGURABLE_MASTERS 256 249#define TH_CONFIGURABLE_MASTERS 256
238#define TH_MSC_MAX 2 250#define TH_MSC_MAX 2
239 251
252/* Maximum IRQ vectors */
253#define TH_NVEC_MAX 8
254
240/** 255/**
241 * struct intel_th - Intel TH controller 256 * struct intel_th - Intel TH controller
242 * @dev: driver core's device 257 * @dev: driver core's device
@@ -244,7 +259,7 @@ enum {
244 * @hub: "switch" subdevice (GTH) 259 * @hub: "switch" subdevice (GTH)
245 * @resource: resources of the entire controller 260 * @resource: resources of the entire controller
246 * @num_thdevs: number of devices in the @thdev array 261 * @num_thdevs: number of devices in the @thdev array
247 * @num_resources: number or resources in the @resource array 262 * @num_resources: number of resources in the @resource array
248 * @irq: irq number 263 * @irq: irq number
249 * @id: this Intel TH controller's device ID in the system 264 * @id: this Intel TH controller's device ID in the system
250 * @major: device node major for output devices 265 * @major: device node major for output devices
@@ -256,7 +271,7 @@ struct intel_th {
256 struct intel_th_device *hub; 271 struct intel_th_device *hub;
257 struct intel_th_drvdata *drvdata; 272 struct intel_th_drvdata *drvdata;
258 273
259 struct resource *resource; 274 struct resource resource[TH_MMIO_END];
260 int (*activate)(struct intel_th *); 275 int (*activate)(struct intel_th *);
261 void (*deactivate)(struct intel_th *); 276 void (*deactivate)(struct intel_th *);
262 unsigned int num_thdevs; 277 unsigned int num_thdevs;
@@ -296,6 +311,9 @@ enum {
296 REG_TSCU_OFFSET = 0x2000, 311 REG_TSCU_OFFSET = 0x2000,
297 REG_TSCU_LENGTH = 0x1000, 312 REG_TSCU_LENGTH = 0x1000,
298 313
314 REG_CTS_OFFSET = 0x3000,
315 REG_CTS_LENGTH = 0x1000,
316
299 /* Software Trace Hub (STH) [0x4000..0x4fff] */ 317 /* Software Trace Hub (STH) [0x4000..0x4fff] */
300 REG_STH_OFFSET = 0x4000, 318 REG_STH_OFFSET = 0x4000,
301 REG_STH_LENGTH = 0x2000, 319 REG_STH_LENGTH = 0x2000,
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index ba7aaf421f36..81bb54fa3ce8 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -29,28 +29,18 @@
29#define msc_dev(x) (&(x)->thdev->dev) 29#define msc_dev(x) (&(x)->thdev->dev)
30 30
31/** 31/**
32 * struct msc_block - multiblock mode block descriptor
33 * @bdesc: pointer to hardware descriptor (beginning of the block)
34 * @addr: physical address of the block
35 */
36struct msc_block {
37 struct msc_block_desc *bdesc;
38 dma_addr_t addr;
39};
40
41/**
42 * struct msc_window - multiblock mode window descriptor 32 * struct msc_window - multiblock mode window descriptor
43 * @entry: window list linkage (msc::win_list) 33 * @entry: window list linkage (msc::win_list)
44 * @pgoff: page offset into the buffer that this window starts at 34 * @pgoff: page offset into the buffer that this window starts at
45 * @nr_blocks: number of blocks (pages) in this window 35 * @nr_blocks: number of blocks (pages) in this window
46 * @block: array of block descriptors 36 * @sgt: array of block descriptors
47 */ 37 */
48struct msc_window { 38struct msc_window {
49 struct list_head entry; 39 struct list_head entry;
50 unsigned long pgoff; 40 unsigned long pgoff;
51 unsigned int nr_blocks; 41 unsigned int nr_blocks;
52 struct msc *msc; 42 struct msc *msc;
53 struct msc_block block[0]; 43 struct sg_table sgt;
54}; 44};
55 45
56/** 46/**
@@ -84,6 +74,8 @@ struct msc_iter {
84 * @reg_base: register window base address 74 * @reg_base: register window base address
85 * @thdev: intel_th_device pointer 75 * @thdev: intel_th_device pointer
86 * @win_list: list of windows in multiblock mode 76 * @win_list: list of windows in multiblock mode
77 * @single_sgt: single mode buffer
78 * @cur_win: current window
87 * @nr_pages: total number of pages allocated for this buffer 79 * @nr_pages: total number of pages allocated for this buffer
88 * @single_sz: amount of data in single mode 80 * @single_sz: amount of data in single mode
89 * @single_wrap: single mode wrap occurred 81 * @single_wrap: single mode wrap occurred
@@ -101,9 +93,12 @@ struct msc_iter {
101 */ 93 */
102struct msc { 94struct msc {
103 void __iomem *reg_base; 95 void __iomem *reg_base;
96 void __iomem *msu_base;
104 struct intel_th_device *thdev; 97 struct intel_th_device *thdev;
105 98
106 struct list_head win_list; 99 struct list_head win_list;
100 struct sg_table single_sgt;
101 struct msc_window *cur_win;
107 unsigned long nr_pages; 102 unsigned long nr_pages;
108 unsigned long single_sz; 103 unsigned long single_sz;
109 unsigned int single_wrap : 1; 104 unsigned int single_wrap : 1;
@@ -120,7 +115,8 @@ struct msc {
120 115
121 /* config */ 116 /* config */
122 unsigned int enabled : 1, 117 unsigned int enabled : 1,
123 wrap : 1; 118 wrap : 1,
119 do_irq : 1;
124 unsigned int mode; 120 unsigned int mode;
125 unsigned int burst_len; 121 unsigned int burst_len;
126 unsigned int index; 122 unsigned int index;
@@ -139,6 +135,49 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
139 return false; 135 return false;
140} 136}
141 137
138static inline struct msc_block_desc *
139msc_win_block(struct msc_window *win, unsigned int block)
140{
141 return sg_virt(&win->sgt.sgl[block]);
142}
143
144static inline dma_addr_t
145msc_win_baddr(struct msc_window *win, unsigned int block)
146{
147 return sg_dma_address(&win->sgt.sgl[block]);
148}
149
150static inline unsigned long
151msc_win_bpfn(struct msc_window *win, unsigned int block)
152{
153 return msc_win_baddr(win, block) >> PAGE_SHIFT;
154}
155
156/**
157 * msc_is_last_win() - check if a window is the last one for a given MSC
158 * @win: window
159 * Return: true if @win is the last window in MSC's multiblock buffer
160 */
161static inline bool msc_is_last_win(struct msc_window *win)
162{
163 return win->entry.next == &win->msc->win_list;
164}
165
166/**
167 * msc_next_window() - return next window in the multiblock buffer
168 * @win: current window
169 *
170 * Return: window following the current one
171 */
172static struct msc_window *msc_next_window(struct msc_window *win)
173{
174 if (msc_is_last_win(win))
175 return list_first_entry(&win->msc->win_list, struct msc_window,
176 entry);
177
178 return list_next_entry(win, entry);
179}
180
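msc_win_block() and friends index win->sgt.sgl as a flat array, which only holds because the whole table fits in one scatterlist allocation (the SG_MAX_SINGLE_ALLOC check added to msc_buffer_win_alloc() below enforces this). Were windows ever to outgrow a single chunk, lookups would have to walk the chained iterator instead; a hedged equivalent:

	/* Hedged sketch: chain-safe lookup of block @n, equivalent to
	 * msc_win_block() but valid for multi-chunk sg_tables too.
	 */
	static void *msc_win_block_safe(struct msc_window *win, unsigned int n)
	{
		struct scatterlist *sg;
		unsigned int i;

		for_each_sg(win->sgt.sgl, sg, win->nr_blocks, i)
			if (i == n)
				return sg_virt(sg);

		return NULL;
	}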
142/** 181/**
143 * msc_oldest_window() - locate the window with oldest data 182 * msc_oldest_window() - locate the window with oldest data
144 * @msc: MSC device 183 * @msc: MSC device
@@ -150,9 +189,7 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
150 */ 189 */
151static struct msc_window *msc_oldest_window(struct msc *msc) 190static struct msc_window *msc_oldest_window(struct msc *msc)
152{ 191{
153 struct msc_window *win; 192 struct msc_window *win, *next = msc_next_window(msc->cur_win);
154 u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA);
155 unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT;
156 unsigned int found = 0; 193 unsigned int found = 0;
157 194
158 if (list_empty(&msc->win_list)) 195 if (list_empty(&msc->win_list))
@@ -164,18 +201,18 @@ static struct msc_window *msc_oldest_window(struct msc *msc)
164 * something like 2, in which case we're good 201 * something like 2, in which case we're good
165 */ 202 */
166 list_for_each_entry(win, &msc->win_list, entry) { 203 list_for_each_entry(win, &msc->win_list, entry) {
167 if (win->block[0].addr == win_addr) 204 if (win == next)
168 found++; 205 found++;
169 206
170 /* skip the empty ones */ 207 /* skip the empty ones */
171 if (msc_block_is_empty(win->block[0].bdesc)) 208 if (msc_block_is_empty(msc_win_block(win, 0)))
172 continue; 209 continue;
173 210
174 if (found) 211 if (found)
175 return win; 212 return win;
176 } 213 }
177 214
178 return list_entry(msc->win_list.next, struct msc_window, entry); 215 return list_first_entry(&msc->win_list, struct msc_window, entry);
179} 216}
180 217
181/** 218/**
@@ -187,7 +224,7 @@ static struct msc_window *msc_oldest_window(struct msc *msc)
187static unsigned int msc_win_oldest_block(struct msc_window *win) 224static unsigned int msc_win_oldest_block(struct msc_window *win)
188{ 225{
189 unsigned int blk; 226 unsigned int blk;
190 struct msc_block_desc *bdesc = win->block[0].bdesc; 227 struct msc_block_desc *bdesc = msc_win_block(win, 0);
191 228
192 /* without wrapping, first block is the oldest */ 229 /* without wrapping, first block is the oldest */
193 if (!msc_block_wrapped(bdesc)) 230 if (!msc_block_wrapped(bdesc))
@@ -198,7 +235,7 @@ static unsigned int msc_win_oldest_block(struct msc_window *win)
198 * oldest data for this window. 235 * oldest data for this window.
199 */ 236 */
200 for (blk = 0; blk < win->nr_blocks; blk++) { 237 for (blk = 0; blk < win->nr_blocks; blk++) {
201 bdesc = win->block[blk].bdesc; 238 bdesc = msc_win_block(win, blk);
202 239
203 if (msc_block_last_written(bdesc)) 240 if (msc_block_last_written(bdesc))
204 return blk; 241 return blk;
@@ -207,34 +244,9 @@ static unsigned int msc_win_oldest_block(struct msc_window *win)
207 return 0; 244 return 0;
208} 245}
209 246
210/**
211 * msc_is_last_win() - check if a window is the last one for a given MSC
212 * @win: window
213 * Return: true if @win is the last window in MSC's multiblock buffer
214 */
215static inline bool msc_is_last_win(struct msc_window *win)
216{
217 return win->entry.next == &win->msc->win_list;
218}
219
220/**
221 * msc_next_window() - return next window in the multiblock buffer
222 * @win: current window
223 *
224 * Return: window following the current one
225 */
226static struct msc_window *msc_next_window(struct msc_window *win)
227{
228 if (msc_is_last_win(win))
229 return list_entry(win->msc->win_list.next, struct msc_window,
230 entry);
231
232 return list_entry(win->entry.next, struct msc_window, entry);
233}
234
235static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter) 247static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
236{ 248{
237 return iter->win->block[iter->block].bdesc; 249 return msc_win_block(iter->win, iter->block);
238} 250}
239 251
240static void msc_iter_init(struct msc_iter *iter) 252static void msc_iter_init(struct msc_iter *iter)
@@ -467,13 +479,47 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
467 offsetof(struct msc_block_desc, hw_tag); 479 offsetof(struct msc_block_desc, hw_tag);
468 480
469 for (blk = 0; blk < win->nr_blocks; blk++) { 481 for (blk = 0; blk < win->nr_blocks; blk++) {
470 struct msc_block_desc *bdesc = win->block[blk].bdesc; 482 struct msc_block_desc *bdesc = msc_win_block(win, blk);
471 483
472 memset(&bdesc->hw_tag, 0, hw_sz); 484 memset(&bdesc->hw_tag, 0, hw_sz);
473 } 485 }
474 } 486 }
475} 487}
476 488
489static int intel_th_msu_init(struct msc *msc)
490{
491 u32 mintctl, msusts;
492
493 if (!msc->do_irq)
494 return 0;
495
496 mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
497 mintctl |= msc->index ? M1BLIE : M0BLIE;
498 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
499 if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
500 dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
501 msc->do_irq = 0;
502 return 0;
503 }
504
505 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
506 iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
507
508 return 0;
509}
510
511static void intel_th_msu_deinit(struct msc *msc)
512{
513 u32 mintctl;
514
515 if (!msc->do_irq)
516 return;
517
518 mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
519 mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
520 iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
521}
522
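intel_th_msu_init() probes for usable interrupts with a write-then-read-back test: on hardware without MINTCTL the enable bit simply does not stick, so a mismatch downgrades the driver to polling (do_irq = 0). The MSUSTS read/write-back that follows clears any stale write-1-to-clear status bits. The probe pattern in isolation, as a hedged sketch:

	/* Hedged sketch: detect an unimplemented/read-only MMIO register
	 * by checking whether a written bit survives a read-back.
	 */
	static bool reg_accepts_writes(void __iomem *reg, u32 bit)
	{
		iowrite32(ioread32(reg) | bit, reg);

		return !!(ioread32(reg) & bit);
	}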
477/** 523/**
478 * msc_configure() - set up MSC hardware 524 * msc_configure() - set up MSC hardware
479 * @msc: the MSC device to configure 525 * @msc: the MSC device to configure
@@ -531,23 +577,14 @@ static int msc_configure(struct msc *msc)
531 */ 577 */
532static void msc_disable(struct msc *msc) 578static void msc_disable(struct msc *msc)
533{ 579{
534 unsigned long count;
535 u32 reg; 580 u32 reg;
536 581
537 lockdep_assert_held(&msc->buf_mutex); 582 lockdep_assert_held(&msc->buf_mutex);
538 583
539 intel_th_trace_disable(msc->thdev); 584 intel_th_trace_disable(msc->thdev);
540 585
541 for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
542 count && !(reg & MSCSTS_PLE); count--) {
543 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
544 cpu_relax();
545 }
546
547 if (!count)
548 dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
549
550 if (msc->mode == MSC_MODE_SINGLE) { 586 if (msc->mode == MSC_MODE_SINGLE) {
587 reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
551 msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT); 588 msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);
552 589
553 reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP); 590 reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
@@ -617,22 +654,45 @@ static void intel_th_msc_deactivate(struct intel_th_device *thdev)
617 */ 654 */
618static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size) 655static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
619{ 656{
657 unsigned long nr_pages = size >> PAGE_SHIFT;
620 unsigned int order = get_order(size); 658 unsigned int order = get_order(size);
621 struct page *page; 659 struct page *page;
660 int ret;
622 661
623 if (!size) 662 if (!size)
624 return 0; 663 return 0;
625 664
665 ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
666 if (ret)
667 goto err_out;
668
669 ret = -ENOMEM;
626 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); 670 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
627 if (!page) 671 if (!page)
628 return -ENOMEM; 672 goto err_free_sgt;
629 673
630 split_page(page, order); 674 split_page(page, order);
631 msc->nr_pages = size >> PAGE_SHIFT; 675 sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
676
677 ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
678 DMA_FROM_DEVICE);
679 if (ret < 0)
680 goto err_free_pages;
681
682 msc->nr_pages = nr_pages;
632 msc->base = page_address(page); 683 msc->base = page_address(page);
633 msc->base_addr = page_to_phys(page); 684 msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
634 685
635 return 0; 686 return 0;
687
688err_free_pages:
689 __free_pages(page, order);
690
691err_free_sgt:
692 sg_free_table(&msc->single_sgt);
693
694err_out:
695 return ret;
636} 696}
637 697
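Single mode used to hand the hardware a physical address straight from page_to_phys(), which bypasses any IOMMU; wrapping the contiguous allocation in a one-entry sg_table and taking base_addr from sg_dma_address() is the substance of the "msu: Fix single mode with IOMMU" commit in the merge log. The core of the pattern, reduced to a hedged sketch (using DMA_MAPPING_ERROR as the failure sentinel is an assumption of recent kernels):

	/* Hedged sketch: DMA-map one physically contiguous buffer via a
	 * single-entry scatterlist; @dev is the DMA-capable device.
	 */
	static dma_addr_t map_contig(struct device *dev, void *buf,
				     size_t size, struct sg_table *sgt)
	{
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			return DMA_MAPPING_ERROR;

		sg_set_buf(sgt->sgl, buf, size);

		if (dma_map_sg(dev, sgt->sgl, 1, DMA_FROM_DEVICE) <= 0) {
			sg_free_table(sgt);
			return DMA_MAPPING_ERROR;
		}

		return sg_dma_address(sgt->sgl);
	}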
638/** 698/**
@@ -643,6 +703,10 @@ static void msc_buffer_contig_free(struct msc *msc)
643{ 703{
644 unsigned long off; 704 unsigned long off;
645 705
706 dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
707 1, DMA_FROM_DEVICE);
708 sg_free_table(&msc->single_sgt);
709
646 for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) { 710 for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
647 struct page *page = virt_to_page(msc->base + off); 711 struct page *page = virt_to_page(msc->base + off);
648 712
@@ -669,6 +733,40 @@ static struct page *msc_buffer_contig_get_page(struct msc *msc,
669 return virt_to_page(msc->base + (pgoff << PAGE_SHIFT)); 733 return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
670} 734}
671 735
736static int __msc_buffer_win_alloc(struct msc_window *win,
737 unsigned int nr_blocks)
738{
739 struct scatterlist *sg_ptr;
740 void *block;
741 int i, ret;
742
743 ret = sg_alloc_table(&win->sgt, nr_blocks, GFP_KERNEL);
744 if (ret)
745 return -ENOMEM;
746
747 for_each_sg(win->sgt.sgl, sg_ptr, nr_blocks, i) {
748 block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
749 PAGE_SIZE, &sg_dma_address(sg_ptr),
750 GFP_KERNEL);
751 if (!block)
752 goto err_nomem;
753
754 sg_set_buf(sg_ptr, block, PAGE_SIZE);
755 }
756
757 return nr_blocks;
758
759err_nomem:
760 for (i--; i >= 0; i--)
761 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
762 msc_win_block(win, i),
763 msc_win_baddr(win, i));
764
765 sg_free_table(&win->sgt);
766
767 return -ENOMEM;
768}
769
672/** 770/**
673 * msc_buffer_win_alloc() - alloc a window for a multiblock mode 771 * msc_buffer_win_alloc() - alloc a window for a multiblock mode
674 * @msc: MSC device 772 * @msc: MSC device
@@ -682,44 +780,49 @@ static struct page *msc_buffer_contig_get_page(struct msc *msc,
682static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks) 780static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
683{ 781{
684 struct msc_window *win; 782 struct msc_window *win;
685 unsigned long size = PAGE_SIZE; 783 int ret = -ENOMEM, i;
686 int i, ret = -ENOMEM;
687 784
688 if (!nr_blocks) 785 if (!nr_blocks)
689 return 0; 786 return 0;
690 787
691 win = kzalloc(offsetof(struct msc_window, block[nr_blocks]), 788 /*
692 GFP_KERNEL); 789 * This limitation hold as long as we need random access to the
790 * block. When that changes, this can go away.
791 */
792 if (nr_blocks > SG_MAX_SINGLE_ALLOC)
793 return -EINVAL;
794
795 win = kzalloc(sizeof(*win), GFP_KERNEL);
693 if (!win) 796 if (!win)
694 return -ENOMEM; 797 return -ENOMEM;
695 798
799 win->msc = msc;
800
696 if (!list_empty(&msc->win_list)) { 801 if (!list_empty(&msc->win_list)) {
697 struct msc_window *prev = list_entry(msc->win_list.prev, 802 struct msc_window *prev = list_last_entry(&msc->win_list,
698 struct msc_window, entry); 803 struct msc_window,
804 entry);
699 805
806 /* This works as long as blocks are page-sized */
700 win->pgoff = prev->pgoff + prev->nr_blocks; 807 win->pgoff = prev->pgoff + prev->nr_blocks;
701 } 808 }
702 809
703 for (i = 0; i < nr_blocks; i++) { 810 ret = __msc_buffer_win_alloc(win, nr_blocks);
704 win->block[i].bdesc = 811 if (ret < 0)
705 dma_alloc_coherent(msc_dev(msc)->parent->parent, size, 812 goto err_nomem;
706 &win->block[i].addr, GFP_KERNEL);
707
708 if (!win->block[i].bdesc)
709 goto err_nomem;
710 813
711#ifdef CONFIG_X86 814#ifdef CONFIG_X86
815 for (i = 0; i < ret; i++)
712 /* Set the page as uncached */ 816 /* Set the page as uncached */
713 set_memory_uc((unsigned long)win->block[i].bdesc, 1); 817 set_memory_uc((unsigned long)msc_win_block(win, i), 1);
714#endif 818#endif
715 }
716 819
717 win->msc = msc; 820 win->nr_blocks = ret;
718 win->nr_blocks = nr_blocks;
719 821
720 if (list_empty(&msc->win_list)) { 822 if (list_empty(&msc->win_list)) {
721 msc->base = win->block[0].bdesc; 823 msc->base = msc_win_block(win, 0);
722 msc->base_addr = win->block[0].addr; 824 msc->base_addr = msc_win_baddr(win, 0);
825 msc->cur_win = win;
723 } 826 }
724 827
725 list_add_tail(&win->entry, &msc->win_list); 828 list_add_tail(&win->entry, &msc->win_list);
@@ -728,19 +831,25 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
728 return 0; 831 return 0;
729 832
730err_nomem: 833err_nomem:
731 for (i--; i >= 0; i--) {
732#ifdef CONFIG_X86
733 /* Reset the page to write-back before releasing */
734 set_memory_wb((unsigned long)win->block[i].bdesc, 1);
735#endif
736 dma_free_coherent(msc_dev(msc)->parent->parent, size,
737 win->block[i].bdesc, win->block[i].addr);
738 }
739 kfree(win); 834 kfree(win);
740 835
741 return ret; 836 return ret;
742} 837}
743 838
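The SG_MAX_SINGLE_ALLOC guard exists because, as noted above, msc_win_block() indexes sgt.sgl directly: all entries must live in one scatterlist page. A hedged back-of-the-envelope for what that caps a window at:

	/* Hedged sketch: upper bound on window size implied by the
	 * single-allocation scatterlist limit, with PAGE_SIZE blocks.
	 */
	static unsigned long msc_max_window_bytes(void)
	{
		return SG_MAX_SINGLE_ALLOC * PAGE_SIZE;
	}

With 4 KiB pages and a 32-byte struct scatterlist that works out to 128 page-sized blocks, roughly 512 KiB per window; both sizes are assumptions for illustration, not values taken from the driver.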
839static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
840{
841 int i;
842
843 for (i = 0; i < win->nr_blocks; i++) {
844 struct page *page = sg_page(&win->sgt.sgl[i]);
845
846 page->mapping = NULL;
847 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
848 msc_win_block(win, i), msc_win_baddr(win, i));
849 }
850 sg_free_table(&win->sgt);
851}
852
744/** 853/**
745 * msc_buffer_win_free() - free a window from MSC's window list 854 * msc_buffer_win_free() - free a window from MSC's window list
746 * @msc: MSC device 855 * @msc: MSC device
@@ -761,17 +870,13 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
761 msc->base_addr = 0; 870 msc->base_addr = 0;
762 } 871 }
763 872
764 for (i = 0; i < win->nr_blocks; i++) {
765 struct page *page = virt_to_page(win->block[i].bdesc);
766
767 page->mapping = NULL;
768#ifdef CONFIG_X86 873#ifdef CONFIG_X86
769 /* Reset the page to write-back before releasing */ 874 for (i = 0; i < win->nr_blocks; i++)
770 set_memory_wb((unsigned long)win->block[i].bdesc, 1); 875 /* Reset the page to write-back */
876 set_memory_wb((unsigned long)msc_win_block(win, i), 1);
771#endif 877#endif
772 dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE, 878
773 win->block[i].bdesc, win->block[i].addr); 879 __msc_buffer_win_free(msc, win);
774 }
775 880
776 kfree(win); 881 kfree(win);
777} 882}
@@ -798,19 +903,18 @@ static void msc_buffer_relink(struct msc *msc)
798 */ 903 */
799 if (msc_is_last_win(win)) { 904 if (msc_is_last_win(win)) {
800 sw_tag |= MSC_SW_TAG_LASTWIN; 905 sw_tag |= MSC_SW_TAG_LASTWIN;
801 next_win = list_entry(msc->win_list.next, 906 next_win = list_first_entry(&msc->win_list,
802 struct msc_window, entry); 907 struct msc_window, entry);
803 } else { 908 } else {
804 next_win = list_entry(win->entry.next, 909 next_win = list_next_entry(win, entry);
805 struct msc_window, entry);
806 } 910 }
807 911
808 for (blk = 0; blk < win->nr_blocks; blk++) { 912 for (blk = 0; blk < win->nr_blocks; blk++) {
809 struct msc_block_desc *bdesc = win->block[blk].bdesc; 913 struct msc_block_desc *bdesc = msc_win_block(win, blk);
810 914
811 memset(bdesc, 0, sizeof(*bdesc)); 915 memset(bdesc, 0, sizeof(*bdesc));
812 916
813 bdesc->next_win = next_win->block[0].addr >> PAGE_SHIFT; 917 bdesc->next_win = msc_win_bpfn(next_win, 0);
814 918
815 /* 919 /*
816 * Similarly to last window, last block should point 920 * Similarly to last window, last block should point
@@ -818,11 +922,9 @@ static void msc_buffer_relink(struct msc *msc)
818 */ 922 */
819 if (blk == win->nr_blocks - 1) { 923 if (blk == win->nr_blocks - 1) {
820 sw_tag |= MSC_SW_TAG_LASTBLK; 924 sw_tag |= MSC_SW_TAG_LASTBLK;
821 bdesc->next_blk = 925 bdesc->next_blk = msc_win_bpfn(win, 0);
822 win->block[0].addr >> PAGE_SHIFT;
823 } else { 926 } else {
824 bdesc->next_blk = 927 bdesc->next_blk = msc_win_bpfn(win, blk + 1);
825 win->block[blk + 1].addr >> PAGE_SHIFT;
826 } 928 }
827 929
828 bdesc->sw_tag = sw_tag; 930 bdesc->sw_tag = sw_tag;
@@ -997,7 +1099,7 @@ static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
997 1099
998found: 1100found:
999 pgoff -= win->pgoff; 1101 pgoff -= win->pgoff;
1000 return virt_to_page(win->block[pgoff].bdesc); 1102 return sg_page(&win->sgt.sgl[pgoff]);
1001} 1103}
1002 1104
1003/** 1105/**
@@ -1250,6 +1352,22 @@ static const struct file_operations intel_th_msc_fops = {
1250 .owner = THIS_MODULE, 1352 .owner = THIS_MODULE,
1251}; 1353};
1252 1354
1355static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
1356{
1357 struct msc *msc = dev_get_drvdata(&thdev->dev);
1358 unsigned long count;
1359 u32 reg;
1360
1361 for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
1362 count && !(reg & MSCSTS_PLE); count--) {
1363 reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
1364 cpu_relax();
1365 }
1366
1367 if (!count)
1368 dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
1369}
1370
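The PLE drain moves out of msc_disable() into a dedicated ->wait_empty() callback, still as a bounded cpu_relax() spin on a raw read. The same wait could be phrased with the iopoll helpers, sketched here under assumptions (the timing constants are illustrative, not from the driver):

	#include <linux/iopoll.h>

	/* Hedged sketch: wait for MSCSTS_PLE with readl_poll_timeout();
	 * 10us poll interval and 100ms timeout are made-up numbers.
	 */
	static int msc_wait_ple(struct msc *msc)
	{
		u32 reg;

		return readl_poll_timeout(msc->reg_base + REG_MSU_MSC0STS,
					  reg, reg & MSCSTS_PLE,
					  10, 100 * USEC_PER_MSEC);
	}

readl_poll_timeout() may sleep; if wait_empty can be reached from atomic context — the hand-rolled spin suggests it can — readl_poll_timeout_atomic() would be the variant to use.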
1253static int intel_th_msc_init(struct msc *msc) 1371static int intel_th_msc_init(struct msc *msc)
1254{ 1372{
1255 atomic_set(&msc->user_count, -1); 1373 atomic_set(&msc->user_count, -1);
@@ -1266,6 +1384,39 @@ static int intel_th_msc_init(struct msc *msc)
1266 return 0; 1384 return 0;
1267} 1385}
1268 1386
1387static void msc_win_switch(struct msc *msc)
1388{
1389 struct msc_window *last, *first;
1390
1391 first = list_first_entry(&msc->win_list, struct msc_window, entry);
1392 last = list_last_entry(&msc->win_list, struct msc_window, entry);
1393
1394 if (msc_is_last_win(msc->cur_win))
1395 msc->cur_win = first;
1396 else
1397 msc->cur_win = list_next_entry(msc->cur_win, entry);
1398
1399 msc->base = msc_win_block(msc->cur_win, 0);
1400 msc->base_addr = msc_win_baddr(msc->cur_win, 0);
1401
1402 intel_th_trace_switch(msc->thdev);
1403}
1404
1405static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
1406{
1407 struct msc *msc = dev_get_drvdata(&thdev->dev);
1408 u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
1409 u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
1410
1411 if (!(msusts & mask)) {
1412 if (msc->enabled)
1413 return IRQ_HANDLED;
1414 return IRQ_NONE;
1415 }
1416
1417 return IRQ_HANDLED;
1418}
1419
1269static const char * const msc_mode[] = { 1420static const char * const msc_mode[] = {
1270 [MSC_MODE_SINGLE] = "single", 1421 [MSC_MODE_SINGLE] = "single",
1271 [MSC_MODE_MULTI] = "multi", 1422 [MSC_MODE_MULTI] = "multi",
@@ -1440,10 +1591,38 @@ free_win:
1440 1591
1441static DEVICE_ATTR_RW(nr_pages); 1592static DEVICE_ATTR_RW(nr_pages);
1442 1593
1594static ssize_t
1595win_switch_store(struct device *dev, struct device_attribute *attr,
1596 const char *buf, size_t size)
1597{
1598 struct msc *msc = dev_get_drvdata(dev);
1599 unsigned long val;
1600 int ret;
1601
1602 ret = kstrtoul(buf, 10, &val);
1603 if (ret)
1604 return ret;
1605
1606 if (val != 1)
1607 return -EINVAL;
1608
1609 mutex_lock(&msc->buf_mutex);
1610 if (msc->mode != MSC_MODE_MULTI)
1611 ret = -ENOTSUPP;
1612 else
1613 msc_win_switch(msc);
1614 mutex_unlock(&msc->buf_mutex);
1615
1616 return ret ? ret : size;
1617}
1618
1619static DEVICE_ATTR_WO(win_switch);
1620
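From userspace the switch is driven by writing "1" to the new attribute; any other value yields -EINVAL, and modes other than multi return -ENOTSUPP. A hedged userspace sketch, with the sysfs path assumed for illustration:

	#include <fcntl.h>
	#include <unistd.h>

	/* Hedged sketch: trigger a window switch from userspace. */
	int trigger_win_switch(void)
	{
		int fd = open("/sys/bus/intel_th/devices/0-msc0/win_switch",
			      O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;

		n = write(fd, "1", 1);
		close(fd);

		return n == 1 ? 0 : -1;
	}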
1443static struct attribute *msc_output_attrs[] = { 1621static struct attribute *msc_output_attrs[] = {
1444 &dev_attr_wrap.attr, 1622 &dev_attr_wrap.attr,
1445 &dev_attr_mode.attr, 1623 &dev_attr_mode.attr,
1446 &dev_attr_nr_pages.attr, 1624 &dev_attr_nr_pages.attr,
1625 &dev_attr_win_switch.attr,
1447 NULL, 1626 NULL,
1448}; 1627};
1449 1628
@@ -1471,10 +1650,19 @@ static int intel_th_msc_probe(struct intel_th_device *thdev)
1471 if (!msc) 1650 if (!msc)
1472 return -ENOMEM; 1651 return -ENOMEM;
1473 1652
1653 res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
1654 if (!res)
1655 msc->do_irq = 1;
1656
1474 msc->index = thdev->id; 1657 msc->index = thdev->id;
1475 1658
1476 msc->thdev = thdev; 1659 msc->thdev = thdev;
1477 msc->reg_base = base + msc->index * 0x100; 1660 msc->reg_base = base + msc->index * 0x100;
1661 msc->msu_base = base;
1662
1663 err = intel_th_msu_init(msc);
1664 if (err)
1665 return err;
1478 1666
1479 err = intel_th_msc_init(msc); 1667 err = intel_th_msc_init(msc);
1480 if (err) 1668 if (err)
@@ -1491,6 +1679,7 @@ static void intel_th_msc_remove(struct intel_th_device *thdev)
1491 int ret; 1679 int ret;
1492 1680
1493 intel_th_msc_deactivate(thdev); 1681 intel_th_msc_deactivate(thdev);
1682 intel_th_msu_deinit(msc);
1494 1683
1495 /* 1684 /*
1496 * Buffers should not be used at this point except if the 1685 * Buffers should not be used at this point except if the
@@ -1504,6 +1693,8 @@ static void intel_th_msc_remove(struct intel_th_device *thdev)
1504static struct intel_th_driver intel_th_msc_driver = { 1693static struct intel_th_driver intel_th_msc_driver = {
1505 .probe = intel_th_msc_probe, 1694 .probe = intel_th_msc_probe,
1506 .remove = intel_th_msc_remove, 1695 .remove = intel_th_msc_remove,
1696 .irq = intel_th_msc_interrupt,
1697 .wait_empty = intel_th_msc_wait_empty,
1507 .activate = intel_th_msc_activate, 1698 .activate = intel_th_msc_activate,
1508 .deactivate = intel_th_msc_deactivate, 1699 .deactivate = intel_th_msc_deactivate,
1509 .fops = &intel_th_msc_fops, 1700 .fops = &intel_th_msc_fops,
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 9cc8aced6116..574c16004cb2 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -11,6 +11,7 @@
11enum { 11enum {
12 REG_MSU_MSUPARAMS = 0x0000, 12 REG_MSU_MSUPARAMS = 0x0000,
13 REG_MSU_MSUSTS = 0x0008, 13 REG_MSU_MSUSTS = 0x0008,
14 REG_MSU_MINTCTL = 0x0004, /* MSU-global interrupt control */
14 REG_MSU_MSC0CTL = 0x0100, /* MSC0 control */ 15 REG_MSU_MSC0CTL = 0x0100, /* MSC0 control */
15 REG_MSU_MSC0STS = 0x0104, /* MSC0 status */ 16 REG_MSU_MSC0STS = 0x0104, /* MSC0 status */
16 REG_MSU_MSC0BAR = 0x0108, /* MSC0 output base address */ 17 REG_MSU_MSC0BAR = 0x0108, /* MSC0 output base address */
@@ -28,6 +29,8 @@ enum {
28 29
29/* MSUSTS bits */ 30/* MSUSTS bits */
30#define MSUSTS_MSU_INT BIT(0) 31#define MSUSTS_MSU_INT BIT(0)
32#define MSUSTS_MSC0BLAST BIT(16)
33#define MSUSTS_MSC1BLAST BIT(24)
31 34
32/* MSCnCTL bits */ 35/* MSCnCTL bits */
33#define MSC_EN BIT(0) 36#define MSC_EN BIT(0)
@@ -36,6 +39,11 @@ enum {
36#define MSC_MODE (BIT(4) | BIT(5)) 39#define MSC_MODE (BIT(4) | BIT(5))
37#define MSC_LEN (BIT(8) | BIT(9) | BIT(10)) 40#define MSC_LEN (BIT(8) | BIT(9) | BIT(10))
38 41
42/* MINTCTL bits */
43#define MICDE BIT(0)
44#define M0BLIE BIT(16)
45#define M1BLIE BIT(24)
46
39/* MSC operating modes (MSC_MODE) */ 47/* MSC operating modes (MSC_MODE) */
40enum { 48enum {
41 MSC_MODE_SINGLE = 0, 49 MSC_MODE_SINGLE = 0,
@@ -87,7 +95,7 @@ static inline unsigned long msc_data_sz(struct msc_block_desc *bdesc)
87 95
88static inline bool msc_block_wrapped(struct msc_block_desc *bdesc) 96static inline bool msc_block_wrapped(struct msc_block_desc *bdesc)
89{ 97{
90 if (bdesc->hw_tag & MSC_HW_TAG_BLOCKWRAP) 98 if (bdesc->hw_tag & (MSC_HW_TAG_BLOCKWRAP | MSC_HW_TAG_WINWRAP))
91 return true; 99 return true;
92 100
93 return false; 101 return false;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 70f2cb90adc5..f1228708f2a2 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -17,7 +17,13 @@
17 17
18#define DRIVER_NAME "intel_th_pci" 18#define DRIVER_NAME "intel_th_pci"
19 19
20#define BAR_MASK (BIT(TH_MMIO_CONFIG) | BIT(TH_MMIO_SW)) 20enum {
21 TH_PCI_CONFIG_BAR = 0,
22 TH_PCI_STH_SW_BAR = 2,
23 TH_PCI_RTIT_BAR = 4,
24};
25
26#define BAR_MASK (BIT(TH_PCI_CONFIG_BAR) | BIT(TH_PCI_STH_SW_BAR))
21 27
22#define PCI_REG_NPKDSC 0x80 28#define PCI_REG_NPKDSC 0x80
23#define NPKDSC_TSACT BIT(5) 29#define NPKDSC_TSACT BIT(5)
@@ -66,8 +72,12 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
66 const struct pci_device_id *id) 72 const struct pci_device_id *id)
67{ 73{
68 struct intel_th_drvdata *drvdata = (void *)id->driver_data; 74 struct intel_th_drvdata *drvdata = (void *)id->driver_data;
75 struct resource resource[TH_MMIO_END + TH_NVEC_MAX] = {
76 [TH_MMIO_CONFIG] = pdev->resource[TH_PCI_CONFIG_BAR],
77 [TH_MMIO_SW] = pdev->resource[TH_PCI_STH_SW_BAR],
78 };
79 int err, r = TH_MMIO_SW + 1, i;
69 struct intel_th *th; 80 struct intel_th *th;
70 int err;
71 81
72 err = pcim_enable_device(pdev); 82 err = pcim_enable_device(pdev);
73 if (err) 83 if (err)
@@ -77,8 +87,19 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
77 if (err) 87 if (err)
78 return err; 88 return err;
79 89
80 th = intel_th_alloc(&pdev->dev, drvdata, pdev->resource, 90 if (pdev->resource[TH_PCI_RTIT_BAR].start) {
81 DEVICE_COUNT_RESOURCE, pdev->irq); 91 resource[TH_MMIO_RTIT] = pdev->resource[TH_PCI_RTIT_BAR];
92 r++;
93 }
94
95 err = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
96 if (err > 0)
97 for (i = 0; i < err; i++, r++) {
98 resource[r].flags = IORESOURCE_IRQ;
99 resource[r].start = pci_irq_vector(pdev, i);
100 }
101
102 th = intel_th_alloc(&pdev->dev, drvdata, resource, r);
82 if (IS_ERR(th)) 103 if (IS_ERR(th))
83 return PTR_ERR(th); 104 return PTR_ERR(th);
84 105
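pci_alloc_irq_vectors() replaces the bare pdev->irq: it tries MSI-X/MSI first and falls back to legacy INTx (PCI_IRQ_ALL_TYPES), returning how many vectors were granted, and pci_irq_vector() maps a vector index to its Linux IRQ number. Those numbers are what probe packs into IORESOURCE_IRQ entries, so intel_th_alloc() now learns about interrupts from the resource array instead of an irq argument. In isolation, hedged:

	/* Hedged sketch: allocate 1..8 vectors of any type and return
	 * the Linux IRQ number of the first one.
	 */
	static int get_first_irq(struct pci_dev *pdev)
	{
		int nvec = pci_alloc_irq_vectors(pdev, 1, 8,
						 PCI_IRQ_ALL_TYPES);

		if (nvec < 0)
			return nvec;

		return pci_irq_vector(pdev, 0);
	}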
@@ -95,10 +116,13 @@ static void intel_th_pci_remove(struct pci_dev *pdev)
95 struct intel_th *th = pci_get_drvdata(pdev); 116 struct intel_th *th = pci_get_drvdata(pdev);
96 117
97 intel_th_free(th); 118 intel_th_free(th);
119
120 pci_free_irq_vectors(pdev);
98} 121}
99 122
100static const struct intel_th_drvdata intel_th_2x = { 123static const struct intel_th_drvdata intel_th_2x = {
101 .tscu_enable = 1, 124 .tscu_enable = 1,
125 .has_mintctl = 1,
102}; 126};
103 127
104static const struct pci_device_id intel_th_pci_id_table[] = { 128static const struct pci_device_id intel_th_pci_id_table[] = {
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 6005a1c189f6..871eb4bc4efc 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -90,18 +90,7 @@ static int icc_summary_show(struct seq_file *s, void *data)
90 90
91 return 0; 91 return 0;
92} 92}
93 93DEFINE_SHOW_ATTRIBUTE(icc_summary);
94static int icc_summary_open(struct inode *inode, struct file *file)
95{
96 return single_open(file, icc_summary_show, inode->i_private);
97}
98
99static const struct file_operations icc_summary_fops = {
100 .open = icc_summary_open,
101 .read = seq_read,
102 .llseek = seq_lseek,
103 .release = single_release,
104};
105 94
106static struct icc_node *node_find(const int id) 95static struct icc_node *node_find(const int id)
107{ 96{
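DEFINE_SHOW_ATTRIBUTE(icc_summary) generates precisely the boilerplate this hunk deletes; its expansion from <linux/seq_file.h> is roughly the following, the one behavioral difference being that the macro also sets .owner, which the hand-rolled version had omitted:

	/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(icc_summary). */
	static int icc_summary_open(struct inode *inode, struct file *file)
	{
		return single_open(file, icc_summary_show, inode->i_private);
	}

	static const struct file_operations icc_summary_fops = {
		.owner   = THIS_MODULE,
		.open    = icc_summary_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};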
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 42ab8ec92a04..3209ee020b15 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -496,6 +496,14 @@ config VEXPRESS_SYSCFG
496 bus. System Configuration interface is one of the possible means 496 bus. System Configuration interface is one of the possible means
497 of generating transactions on this bus. 497 of generating transactions on this bus.
498 498
499config ASPEED_P2A_CTRL
500 depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
501 tristate "Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC bridge control"
502 help
503 Control Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC mappings through
504 ioctl()s, the driver also provides an interface for userspace mappings to
505 a pre-defined region.
506
499config ASPEED_LPC_CTRL 507config ASPEED_LPC_CTRL
500 depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON 508 depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
501 tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control" 509 tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d5b7d3404dc7..c36239573a5c 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
56obj-$(CONFIG_CXL_BASE) += cxl/ 56obj-$(CONFIG_CXL_BASE) += cxl/
57obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o 57obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
58obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o 58obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
59obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
59obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o 60obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
60obj-$(CONFIG_OCXL) += ocxl/ 61obj-$(CONFIG_OCXL) += ocxl/
61obj-y += cardreader/ 62obj-y += cardreader/
diff --git a/drivers/misc/aspeed-p2a-ctrl.c b/drivers/misc/aspeed-p2a-ctrl.c
new file mode 100644
index 000000000000..b60fbeaffcbd
--- /dev/null
+++ b/drivers/misc/aspeed-p2a-ctrl.c
@@ -0,0 +1,444 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright 2019 Google Inc
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 *
10 * Provides a simple driver to control the ASPEED P2A interface which allows
11 * the host to read and write to various regions of the BMC's memory.
12 */
13
14#include <linux/fs.h>
15#include <linux/io.h>
16#include <linux/mfd/syscon.h>
17#include <linux/miscdevice.h>
18#include <linux/mm.h>
19#include <linux/module.h>
20#include <linux/mutex.h>
21#include <linux/of_address.h>
22#include <linux/of_device.h>
23#include <linux/platform_device.h>
24#include <linux/regmap.h>
25#include <linux/slab.h>
26#include <linux/uaccess.h>
27
28#include <linux/aspeed-p2a-ctrl.h>
29
30#define DEVICE_NAME "aspeed-p2a-ctrl"
31
32/* SCU2C is a Misc. Control Register. */
33#define SCU2C 0x2c
34/* SCU180 is the PCIe Configuration Setting Control Register. */
35#define SCU180 0x180
36/* Bit 1 controls the P2A bridge, while bit 0 controls the entire VGA device
37 * on the PCI bus.
38 */
39#define SCU180_ENP2A BIT(1)
40
41/* The ast2400/2500 both have six ranges. */
42#define P2A_REGION_COUNT 6
43
44struct region {
45 u64 min;
46 u64 max;
47 u32 bit;
48};
49
50struct aspeed_p2a_model_data {
51 /* min, max, bit */
52 struct region regions[P2A_REGION_COUNT];
53};
54
55struct aspeed_p2a_ctrl {
56 struct miscdevice miscdev;
57 struct regmap *regmap;
58
59 const struct aspeed_p2a_model_data *config;
60
61 /* Access to these needs to be locked; they are touched from probe,
62 * the mapping ioctl, release, and remove.
63 */
64 struct mutex tracking;
65 u32 readers;
66 u32 readerwriters[P2A_REGION_COUNT];
67
68 phys_addr_t mem_base;
69 resource_size_t mem_size;
70};
71
72struct aspeed_p2a_user {
73 struct file *file;
74 struct aspeed_p2a_ctrl *parent;
75
76 /* The entire memory space is opened for reading once the bridge is
77 * enabled, therefore this needs only to be tracked once per user.
78 * If any user has it open for read, the bridge must stay enabled.
79 */
80 u32 read;
81
82 /* Each entry of the array corresponds to a P2A Region. If the user
83 * opens for read or readwrite, the reference goes up here. On
84 * release, this array is walked and references adjusted accordingly.
85 */
86 u32 readwrite[P2A_REGION_COUNT];
87};
88
89static void aspeed_p2a_enable_bridge(struct aspeed_p2a_ctrl *p2a_ctrl)
90{
91 regmap_update_bits(p2a_ctrl->regmap,
92 SCU180, SCU180_ENP2A, SCU180_ENP2A);
93}
94
95static void aspeed_p2a_disable_bridge(struct aspeed_p2a_ctrl *p2a_ctrl)
96{
97 regmap_update_bits(p2a_ctrl->regmap, SCU180, SCU180_ENP2A, 0);
98}
99
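regmap_update_bits(map, reg, mask, val) is a locked read-modify-write that touches only the bits in mask, which is why enabling writes SCU180_ENP2A under its own mask and disabling writes 0 under the same mask. Open-coded, minus regmap's locking and caching, it is roughly:

	/* Hedged sketch: the read-modify-write regmap_update_bits()
	 * performs, for a plain MMIO register.
	 */
	static void update_bits(void __iomem *reg, u32 mask, u32 val)
	{
		u32 tmp = readl(reg);

		tmp = (tmp & ~mask) | (val & mask);
		writel(tmp, reg);
	}

Going through regmap rather than raw MMIO matters here because the SCU is shared syscon space: regmap serializes these read-modify-writes against other SCU users.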
100static int aspeed_p2a_mmap(struct file *file, struct vm_area_struct *vma)
101{
102 unsigned long vsize;
103 pgprot_t prot;
104 struct aspeed_p2a_user *priv = file->private_data;
105 struct aspeed_p2a_ctrl *ctrl = priv->parent;
106
107 if (ctrl->mem_base == 0 && ctrl->mem_size == 0)
108 return -EINVAL;
109
110 vsize = vma->vm_end - vma->vm_start;
111 prot = vma->vm_page_prot;
112
113 if (vma->vm_pgoff + vsize > ctrl->mem_base + ctrl->mem_size)
114 return -EINVAL;
115
116 /* ast2400/2500 AHB accesses are not cache coherent */
117 prot = pgprot_noncached(prot);
118
119 if (remap_pfn_range(vma, vma->vm_start,
120 (ctrl->mem_base >> PAGE_SHIFT) + vma->vm_pgoff,
121 vsize, prot))
122 return -EAGAIN;
123
124 return 0;
125}
126
127static bool aspeed_p2a_region_acquire(struct aspeed_p2a_user *priv,
128 struct aspeed_p2a_ctrl *ctrl,
129 struct aspeed_p2a_ctrl_mapping *map)
130{
131 int i;
132 u64 base, end;
133 bool matched = false;
134
135 base = map->addr;
136 end = map->addr + (map->length - 1);
137
138 /* If the value is a legal u32, it will find a match. */
139 for (i = 0; i < P2A_REGION_COUNT; i++) {
140 const struct region *curr = &ctrl->config->regions[i];
141
142 /* If the top of this region is lower than your base, skip it.
143 */
144 if (curr->max < base)
145 continue;
146
147 /* If the bottom of this region is higher than your end, bail.
148 */
149 if (curr->min > end)
150 break;
151
152 /* Lock this and update it, so that if someone else is
153 * closing their file out, this'll preserve the increment.
154 */
155 mutex_lock(&ctrl->tracking);
156 ctrl->readerwriters[i] += 1;
157 mutex_unlock(&ctrl->tracking);
158
159 /* Track with the user, so when they close their file, we can
160 * decrement properly.
161 */
162 priv->readwrite[i] += 1;
163
164 /* Enable the region as read-write. */
165 regmap_update_bits(ctrl->regmap, SCU2C, curr->bit, 0);
166 matched = true;
167 }
168
169 return matched;
170}
171
172static long aspeed_p2a_ioctl(struct file *file, unsigned int cmd,
173 unsigned long data)
174{
175 struct aspeed_p2a_user *priv = file->private_data;
176 struct aspeed_p2a_ctrl *ctrl = priv->parent;
177 void __user *arg = (void __user *)data;
178 struct aspeed_p2a_ctrl_mapping map;
179
180 if (copy_from_user(&map, arg, sizeof(map)))
181 return -EFAULT;
182
183 switch (cmd) {
184 case ASPEED_P2A_CTRL_IOCTL_SET_WINDOW:
185 /* If they want a region to be read-only: since the entire
186 * region is read-only once enabled, we just need to track that this
187 * user wants to read from the bridge, and enable the bridge if it
188 * is not already enabled.
189 */
190 if (map.flags == ASPEED_P2A_CTRL_READ_ONLY) {
191 mutex_lock(&ctrl->tracking);
192 ctrl->readers += 1;
193 mutex_unlock(&ctrl->tracking);
194
195 /* Track with the user, so when they close their file,
196 * we can decrement properly.
197 */
198 priv->read += 1;
199 } else if (map.flags == ASPEED_P2A_CTRL_READWRITE) {
200 /* If we don't acquire any region, return an error. */
201 if (!aspeed_p2a_region_acquire(priv, ctrl, &map)) {
202 return -EINVAL;
203 }
204 } else {
205 /* Invalid map flags. */
206 return -EINVAL;
207 }
208
209 aspeed_p2a_enable_bridge(ctrl);
210 return 0;
211 case ASPEED_P2A_CTRL_IOCTL_GET_MEMORY_CONFIG:
212 /* This is a request for the memory-region and corresponding
213 * length that is used by the driver for mmap.
214 */
215
216 map.flags = 0;
217 map.addr = ctrl->mem_base;
218 map.length = ctrl->mem_size;
219
220 return copy_to_user(arg, &map, sizeof(map)) ? -EFAULT : 0;
221 }
222
223 return -EINVAL;
224}
225
226
227/*
228 * When a user opens this file, we create a structure to track their mappings.
229 *
230 * A user can map a region as read-only (bridge enabled), or read-write (bit
231 * flipped, and bridge enabled). Either way, this tracking is used, s.t. when
232 * they release the device references are handled.
233 *
234 * The bridge is not enabled until a user calls an ioctl to map a region;
235 * simply opening the device does not enable it.
236 */
237static int aspeed_p2a_open(struct inode *inode, struct file *file)
238{
239 struct aspeed_p2a_user *priv;
240
241 priv = kmalloc(sizeof(*priv), GFP_KERNEL);
242 if (!priv)
243 return -ENOMEM;
244
245 priv->file = file;
246 priv->read = 0;
247 memset(priv->readwrite, 0, sizeof(priv->readwrite));
248
249 /* The file's private_data is initialized to the p2a_ctrl. */
250 priv->parent = file->private_data;
251
252 /* Set the file's private_data to the user's data. */
253 file->private_data = priv;
254
255 return 0;
256}
257
258/*
259 * This will close the users mappings. It will go through what they had opened
260 * for readwrite, and decrement those counts. If at the end, this is the last
261 * user, it'll close the bridge.
262 */
263static int aspeed_p2a_release(struct inode *inode, struct file *file)
264{
265 int i;
266 u32 bits = 0;
267 bool open_regions = false;
268 struct aspeed_p2a_user *priv = file->private_data;
269
270 /* Lock others from changing these values until everything is updated
271 * in one pass.
272 */
273 mutex_lock(&priv->parent->tracking);
274
275 priv->parent->readers -= priv->read;
276
277 for (i = 0; i < P2A_REGION_COUNT; i++) {
278 priv->parent->readerwriters[i] -= priv->readwrite[i];
279
280 if (priv->parent->readerwriters[i] > 0)
281 open_regions = true;
282 else
283 bits |= priv->parent->config->regions[i].bit;
284 }
285
286 /* Setting a bit to 1 disables the region, so let's just OR with the
287 * above to disable any.
288 */
289
290 /* Note, if another user is trying to ioctl, they can't grab tracking,
291 * and therefore can't grab either register mutex.
292 * If another user is trying to close, they can't grab tracking either.
293 */
294 regmap_update_bits(priv->parent->regmap, SCU2C, bits, bits);
295
296 /* If there are no readers left and no regions remain open, disable
297 * the bridge.
298 */
299 if (!open_regions && priv->parent->readers == 0)
300 aspeed_p2a_disable_bridge(priv->parent);
301
302 mutex_unlock(&priv->parent->tracking);
303
304 kfree(priv);
305
306 return 0;
307}
308
309static const struct file_operations aspeed_p2a_ctrl_fops = {
310 .owner = THIS_MODULE,
311 .mmap = aspeed_p2a_mmap,
312 .unlocked_ioctl = aspeed_p2a_ioctl,
313 .open = aspeed_p2a_open,
314 .release = aspeed_p2a_release,
315};
316
317/* The regions are controlled by SCU2C */
318static void aspeed_p2a_disable_all(struct aspeed_p2a_ctrl *p2a_ctrl)
319{
320 int i;
321 u32 value = 0;
322
323 for (i = 0; i < P2A_REGION_COUNT; i++)
324 value |= p2a_ctrl->config->regions[i].bit;
325
326 regmap_update_bits(p2a_ctrl->regmap, SCU2C, value, value);
327
328 /* Disable the bridge. */
329 aspeed_p2a_disable_bridge(p2a_ctrl);
330}
331
332static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
333{
334 struct aspeed_p2a_ctrl *misc_ctrl;
335 struct device *dev;
336 struct resource resm;
337 struct device_node *node;
338 int rc = 0;
339
340 dev = &pdev->dev;
341
342 misc_ctrl = devm_kzalloc(dev, sizeof(*misc_ctrl), GFP_KERNEL);
343 if (!misc_ctrl)
344 return -ENOMEM;
345
346 mutex_init(&misc_ctrl->tracking);
347
348 /* optional. */
349 node = of_parse_phandle(dev->of_node, "memory-region", 0);
350 if (node) {
351 rc = of_address_to_resource(node, 0, &resm);
352 of_node_put(node);
353 if (rc) {
354 dev_err(dev, "Couldn't convert memory-region to resource\n");
355 return -ENODEV;
356 }
357
358 misc_ctrl->mem_size = resource_size(&resm);
359 misc_ctrl->mem_base = resm.start;
360 }
361
362 misc_ctrl->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
363 if (IS_ERR(misc_ctrl->regmap)) {
364 dev_err(dev, "Couldn't get regmap\n");
365 return -ENODEV;
366 }
367
368 misc_ctrl->config = of_device_get_match_data(dev);
369
370 dev_set_drvdata(&pdev->dev, misc_ctrl);
371
372 aspeed_p2a_disable_all(misc_ctrl);
373
374 misc_ctrl->miscdev.minor = MISC_DYNAMIC_MINOR;
375 misc_ctrl->miscdev.name = DEVICE_NAME;
376 misc_ctrl->miscdev.fops = &aspeed_p2a_ctrl_fops;
377 misc_ctrl->miscdev.parent = dev;
378
379 rc = misc_register(&misc_ctrl->miscdev);
380 if (rc)
381 dev_err(dev, "Unable to register device\n");
382
383 return rc;
384}
385
386static int aspeed_p2a_ctrl_remove(struct platform_device *pdev)
387{
388 struct aspeed_p2a_ctrl *p2a_ctrl = dev_get_drvdata(&pdev->dev);
389
390 misc_deregister(&p2a_ctrl->miscdev);
391
392 return 0;
393}
394
395#define SCU2C_DRAM BIT(25)
396#define SCU2C_SPI BIT(24)
397#define SCU2C_SOC BIT(23)
398#define SCU2C_FLASH BIT(22)
399
400static const struct aspeed_p2a_model_data ast2400_model_data = {
401 .regions = {
402 {0x00000000, 0x17FFFFFF, SCU2C_FLASH},
403 {0x18000000, 0x1FFFFFFF, SCU2C_SOC},
404 {0x20000000, 0x2FFFFFFF, SCU2C_FLASH},
405 {0x30000000, 0x3FFFFFFF, SCU2C_SPI},
406 {0x40000000, 0x5FFFFFFF, SCU2C_DRAM},
407 {0x60000000, 0xFFFFFFFF, SCU2C_SOC},
408 }
409};
410
411static const struct aspeed_p2a_model_data ast2500_model_data = {
412 .regions = {
413 {0x00000000, 0x0FFFFFFF, SCU2C_FLASH},
414 {0x10000000, 0x1FFFFFFF, SCU2C_SOC},
415 {0x20000000, 0x3FFFFFFF, SCU2C_FLASH},
416 {0x40000000, 0x5FFFFFFF, SCU2C_SOC},
417 {0x60000000, 0x7FFFFFFF, SCU2C_SPI},
418 {0x80000000, 0xFFFFFFFF, SCU2C_DRAM},
419 }
420};
421
422static const struct of_device_id aspeed_p2a_ctrl_match[] = {
423 { .compatible = "aspeed,ast2400-p2a-ctrl",
424 .data = &ast2400_model_data },
425 { .compatible = "aspeed,ast2500-p2a-ctrl",
426 .data = &ast2500_model_data },
427 { },
428};
429
430static struct platform_driver aspeed_p2a_ctrl_driver = {
431 .driver = {
432 .name = DEVICE_NAME,
433 .of_match_table = aspeed_p2a_ctrl_match,
434 },
435 .probe = aspeed_p2a_ctrl_probe,
436 .remove = aspeed_p2a_ctrl_remove,
437};
438
439module_platform_driver(aspeed_p2a_ctrl_driver);
440
441MODULE_DEVICE_TABLE(of, aspeed_p2a_ctrl_match);
442MODULE_LICENSE("GPL");
443MODULE_AUTHOR("Patrick Venture <venture@google.com>");
444MODULE_DESCRIPTION("Control for aspeed 2400/2500 P2A VGA HOST to BMC mappings");
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 52c95add56f0..4e285addbf2b 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -456,13 +456,13 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
456 pcr_dbg(pcr, "Set parameters for L1.2."); 456 pcr_dbg(pcr, "Set parameters for L1.2.");
457 rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL, 457 rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
458 0xFF, PCIE_L1_2_EN); 458 0xFF, PCIE_L1_2_EN);
459 rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL, 459 rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
460 RTS5260_DVCC_OCP_EN | 460 RTS5260_DVCC_OCP_EN |
461 RTS5260_DVCC_OCP_CL_EN, 461 RTS5260_DVCC_OCP_CL_EN,
462 RTS5260_DVCC_OCP_EN | 462 RTS5260_DVCC_OCP_EN |
463 RTS5260_DVCC_OCP_CL_EN); 463 RTS5260_DVCC_OCP_CL_EN);
464 464
465 rtsx_pci_write_register(pcr, PWR_FE_CTL, 465 rtsx_pci_write_register(pcr, PWR_FE_CTL,
466 0xFF, PCIE_L1_2_PD_FE_EN); 466 0xFF, PCIE_L1_2_PD_FE_EN);
467 } else if (lss_l1_1) { 467 } else if (lss_l1_1) {
468 pcr_dbg(pcr, "Set parameters for L1.1."); 468 pcr_dbg(pcr, "Set parameters for L1.1.");
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 36d0d5c9cfba..98603e235cf0 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/of_address.h> 13#include <linux/of_address.h>
14#include <linux/of.h> 14#include <linux/of.h>
15#include <linux/sort.h>
15#include <linux/of_platform.h> 16#include <linux/of_platform.h>
16#include <linux/rpmsg.h> 17#include <linux/rpmsg.h>
17#include <linux/scatterlist.h> 18#include <linux/scatterlist.h>
@@ -31,7 +32,7 @@
31#define FASTRPC_CTX_MAX (256) 32#define FASTRPC_CTX_MAX (256)
32#define FASTRPC_INIT_HANDLE 1 33#define FASTRPC_INIT_HANDLE 1
33#define FASTRPC_CTXID_MASK (0xFF0) 34#define FASTRPC_CTXID_MASK (0xFF0)
34#define INIT_FILELEN_MAX (2 * 1024 * 1024) 35#define INIT_FILELEN_MAX (64 * 1024 * 1024)
35#define INIT_MEMLEN_MAX (8 * 1024 * 1024) 36#define INIT_MEMLEN_MAX (8 * 1024 * 1024)
36#define FASTRPC_DEVICE_NAME "fastrpc" 37#define FASTRPC_DEVICE_NAME "fastrpc"
37 38
@@ -104,6 +105,15 @@ struct fastrpc_invoke_rsp {
104 int retval; /* invoke return value */ 105 int retval; /* invoke return value */
105}; 106};
106 107
108struct fastrpc_buf_overlap {
109 u64 start;
110 u64 end;
111 int raix;
112 u64 mstart;
113 u64 mend;
114 u64 offset;
115};
116
107struct fastrpc_buf { 117struct fastrpc_buf {
108 struct fastrpc_user *fl; 118 struct fastrpc_user *fl;
109 struct dma_buf *dmabuf; 119 struct dma_buf *dmabuf;
@@ -149,12 +159,14 @@ struct fastrpc_invoke_ctx {
149 struct kref refcount; 159 struct kref refcount;
150 struct list_head node; /* list of ctxs */ 160 struct list_head node; /* list of ctxs */
151 struct completion work; 161 struct completion work;
162 struct work_struct put_work;
152 struct fastrpc_msg msg; 163 struct fastrpc_msg msg;
153 struct fastrpc_user *fl; 164 struct fastrpc_user *fl;
154 struct fastrpc_remote_arg *rpra; 165 struct fastrpc_remote_arg *rpra;
155 struct fastrpc_map **maps; 166 struct fastrpc_map **maps;
156 struct fastrpc_buf *buf; 167 struct fastrpc_buf *buf;
157 struct fastrpc_invoke_args *args; 168 struct fastrpc_invoke_args *args;
169 struct fastrpc_buf_overlap *olaps;
158 struct fastrpc_channel_ctx *cctx; 170 struct fastrpc_channel_ctx *cctx;
159}; 171};
160 172
@@ -282,6 +294,7 @@ static void fastrpc_context_free(struct kref *ref)
282{ 294{
283 struct fastrpc_invoke_ctx *ctx; 295 struct fastrpc_invoke_ctx *ctx;
284 struct fastrpc_channel_ctx *cctx; 296 struct fastrpc_channel_ctx *cctx;
297 unsigned long flags;
285 int i; 298 int i;
286 299
287 ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount); 300 ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
@@ -293,11 +306,12 @@ static void fastrpc_context_free(struct kref *ref)
293 if (ctx->buf) 306 if (ctx->buf)
294 fastrpc_buf_free(ctx->buf); 307 fastrpc_buf_free(ctx->buf);
295 308
296 spin_lock(&cctx->lock); 309 spin_lock_irqsave(&cctx->lock, flags);
297 idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4); 310 idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
298 spin_unlock(&cctx->lock); 311 spin_unlock_irqrestore(&cctx->lock, flags);
299 312
300 kfree(ctx->maps); 313 kfree(ctx->maps);
314 kfree(ctx->olaps);
301 kfree(ctx); 315 kfree(ctx);
302} 316}
303 317
@@ -311,12 +325,70 @@ static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
311 kref_put(&ctx->refcount, fastrpc_context_free); 325 kref_put(&ctx->refcount, fastrpc_context_free);
312} 326}
313 327
328static void fastrpc_context_put_wq(struct work_struct *work)
329{
330 struct fastrpc_invoke_ctx *ctx =
331 container_of(work, struct fastrpc_invoke_ctx, put_work);
332
333 fastrpc_context_put(ctx);
334}
335
336#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
337static int olaps_cmp(const void *a, const void *b)
338{
339 struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
340 struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
341 /* sort with lowest starting buffer first */
342 int st = CMP(pa->start, pb->start);
343 /* sort with highest ending buffer first */
344 int ed = CMP(pb->end, pa->end);
345
346 return st == 0 ? ed : st;
347}
348
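olaps_cmp() feeds the kernel's sort() from <linux/sort.h>, which is a heapsort and therefore not stable; the explicit secondary key (larger end wins on equal starts) is what makes the ordering deterministic. The API in isolation, hedged:

	#include <linux/sort.h>

	/* Hedged sketch: sort an array of u64 ascending; a NULL
	 * swap_func selects the generic byte swap.
	 */
	static int u64_cmp(const void *a, const void *b)
	{
		const u64 *x = a, *y = b;

		return *x == *y ? 0 : (*x < *y ? -1 : 1);
	}

	static void sort_u64s(u64 *vals, size_t n)
	{
		sort(vals, n, sizeof(*vals), u64_cmp, NULL);
	}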
349static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
350{
351 u64 max_end = 0;
352 int i;
353
354 for (i = 0; i < ctx->nbufs; ++i) {
355 ctx->olaps[i].start = ctx->args[i].ptr;
356 ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
357 ctx->olaps[i].raix = i;
358 }
359
360 sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);
361
362 for (i = 0; i < ctx->nbufs; ++i) {
363 /* Falling inside previous range */
364 if (ctx->olaps[i].start < max_end) {
365 ctx->olaps[i].mstart = max_end;
366 ctx->olaps[i].mend = ctx->olaps[i].end;
367 ctx->olaps[i].offset = max_end - ctx->olaps[i].start;
368
369 if (ctx->olaps[i].end > max_end) {
370 max_end = ctx->olaps[i].end;
371 } else {
372 ctx->olaps[i].mend = 0;
373 ctx->olaps[i].mstart = 0;
374 }
375
376 } else {
377 ctx->olaps[i].mend = ctx->olaps[i].end;
378 ctx->olaps[i].mstart = ctx->olaps[i].start;
379 ctx->olaps[i].offset = 0;
380 max_end = ctx->olaps[i].end;
381 }
382 }
383}
384
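Worked through on concrete numbers (an illustration, not driver data): buffers spanning [0,100), [40,120) and [200,260) sort by start. The first is kept whole (mstart=0, mend=100) and pushes max_end to 100; the second starts inside that range, so it is trimmed to its non-overlapping tail [100,120) with offset 60, and max_end becomes 120; the third lies beyond max_end and is kept whole. A buffer falling entirely inside an earlier one ends up with mstart = mend = 0, so fastrpc_get_payload_size() charges it no space and fastrpc_get_args() points it into the already-allocated region via the recorded offset.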
314static struct fastrpc_invoke_ctx *fastrpc_context_alloc( 385static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
315 struct fastrpc_user *user, u32 kernel, u32 sc, 386 struct fastrpc_user *user, u32 kernel, u32 sc,
316 struct fastrpc_invoke_args *args) 387 struct fastrpc_invoke_args *args)
317{ 388{
318 struct fastrpc_channel_ctx *cctx = user->cctx; 389 struct fastrpc_channel_ctx *cctx = user->cctx;
319 struct fastrpc_invoke_ctx *ctx = NULL; 390 struct fastrpc_invoke_ctx *ctx = NULL;
391 unsigned long flags;
320 int ret; 392 int ret;
321 393
322 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 394 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -336,7 +408,15 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
336 kfree(ctx); 408 kfree(ctx);
337 return ERR_PTR(-ENOMEM); 409 return ERR_PTR(-ENOMEM);
338 } 410 }
411 ctx->olaps = kcalloc(ctx->nscalars,
412 sizeof(*ctx->olaps), GFP_KERNEL);
413 if (!ctx->olaps) {
414 kfree(ctx->maps);
415 kfree(ctx);
416 return ERR_PTR(-ENOMEM);
417 }
339 ctx->args = args; 418 ctx->args = args;
419 fastrpc_get_buff_overlaps(ctx);
340 } 420 }
341 421
342 ctx->sc = sc; 422 ctx->sc = sc;
@@ -345,20 +425,21 @@ static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
345 ctx->tgid = user->tgid; 425 ctx->tgid = user->tgid;
346 ctx->cctx = cctx; 426 ctx->cctx = cctx;
347 init_completion(&ctx->work); 427 init_completion(&ctx->work);
428 INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);
348 429
349 spin_lock(&user->lock); 430 spin_lock(&user->lock);
350 list_add_tail(&ctx->node, &user->pending); 431 list_add_tail(&ctx->node, &user->pending);
351 spin_unlock(&user->lock); 432 spin_unlock(&user->lock);
352 433
353 spin_lock(&cctx->lock); 434 spin_lock_irqsave(&cctx->lock, flags);
354 ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1, 435 ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
355 FASTRPC_CTX_MAX, GFP_ATOMIC); 436 FASTRPC_CTX_MAX, GFP_ATOMIC);
356 if (ret < 0) { 437 if (ret < 0) {
357 spin_unlock(&cctx->lock); 438 spin_unlock_irqrestore(&cctx->lock, flags);
358 goto err_idr; 439 goto err_idr;
359 } 440 }
360 ctx->ctxid = ret << 4; 441 ctx->ctxid = ret << 4;
361 spin_unlock(&cctx->lock); 442 spin_unlock_irqrestore(&cctx->lock, flags);
362 443
363 kref_init(&ctx->refcount); 444 kref_init(&ctx->refcount);
364 445
@@ -368,6 +449,7 @@ err_idr:
368 list_del(&ctx->node); 449 list_del(&ctx->node);
369 spin_unlock(&user->lock); 450 spin_unlock(&user->lock);
370 kfree(ctx->maps); 451 kfree(ctx->maps);
452 kfree(ctx->olaps);
371 kfree(ctx); 453 kfree(ctx);
372 454
373 return ERR_PTR(ret); 455 return ERR_PTR(ret);
@@ -586,8 +668,11 @@ static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
586 size = ALIGN(metalen, FASTRPC_ALIGN); 668 size = ALIGN(metalen, FASTRPC_ALIGN);
587 for (i = 0; i < ctx->nscalars; i++) { 669 for (i = 0; i < ctx->nscalars; i++) {
588 if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) { 670 if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
589 size = ALIGN(size, FASTRPC_ALIGN); 671
590 size += ctx->args[i].length; 672 if (ctx->olaps[i].offset == 0)
673 size = ALIGN(size, FASTRPC_ALIGN);
674
675 size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
591 } 676 }
592 } 677 }
593 678
@@ -625,12 +710,12 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
625 struct fastrpc_remote_arg *rpra; 710 struct fastrpc_remote_arg *rpra;
626 struct fastrpc_invoke_buf *list; 711 struct fastrpc_invoke_buf *list;
627 struct fastrpc_phy_page *pages; 712 struct fastrpc_phy_page *pages;
628 int inbufs, i, err = 0; 713 int inbufs, i, oix, err = 0;
629 u64 rlen, pkt_size; 714 u64 len, rlen, pkt_size;
715 u64 pg_start, pg_end;
630 uintptr_t args; 716 uintptr_t args;
631 int metalen; 717 int metalen;
632 718
633
634 inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); 719 inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
635 metalen = fastrpc_get_meta_size(ctx); 720 metalen = fastrpc_get_meta_size(ctx);
636 pkt_size = fastrpc_get_payload_size(ctx, metalen); 721 pkt_size = fastrpc_get_payload_size(ctx, metalen);
@@ -653,8 +738,11 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
653 rlen = pkt_size - metalen; 738 rlen = pkt_size - metalen;
654 ctx->rpra = rpra; 739 ctx->rpra = rpra;
655 740
656 for (i = 0; i < ctx->nbufs; ++i) { 741 for (oix = 0; oix < ctx->nbufs; ++oix) {
657 u64 len = ctx->args[i].length; 742 int mlen;
743
744 i = ctx->olaps[oix].raix;
745 len = ctx->args[i].length;
658 746
659 rpra[i].pv = 0; 747 rpra[i].pv = 0;
660 rpra[i].len = len; 748 rpra[i].len = len;
@@ -664,22 +752,45 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
664 if (!len) 752 if (!len)
665 continue; 753 continue;
666 754
667 pages[i].size = roundup(len, PAGE_SIZE);
668
669 if (ctx->maps[i]) { 755 if (ctx->maps[i]) {
756 struct vm_area_struct *vma = NULL;
757
670 rpra[i].pv = (u64) ctx->args[i].ptr; 758 rpra[i].pv = (u64) ctx->args[i].ptr;
671 pages[i].addr = ctx->maps[i]->phys; 759 pages[i].addr = ctx->maps[i]->phys;
760
761 vma = find_vma(current->mm, ctx->args[i].ptr);
762 if (vma)
763 pages[i].addr += ctx->args[i].ptr -
764 vma->vm_start;
765
766 pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
767 pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
768 PAGE_SHIFT;
769 pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
770
672 } else { 771 } else {
673 rlen -= ALIGN(args, FASTRPC_ALIGN) - args; 772
674 args = ALIGN(args, FASTRPC_ALIGN); 773 if (ctx->olaps[oix].offset == 0) {
675 if (rlen < len) 774 rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
775 args = ALIGN(args, FASTRPC_ALIGN);
776 }
777
778 mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;
779
780 if (rlen < mlen)
676 goto bail; 781 goto bail;
677 782
678 rpra[i].pv = args; 783 rpra[i].pv = args - ctx->olaps[oix].offset;
679 pages[i].addr = ctx->buf->phys + (pkt_size - rlen); 784 pages[i].addr = ctx->buf->phys -
785 ctx->olaps[oix].offset +
786 (pkt_size - rlen);
680 pages[i].addr = pages[i].addr & PAGE_MASK; 787 pages[i].addr = pages[i].addr & PAGE_MASK;
681 args = args + len; 788
682 rlen -= len; 789 pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
790 pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
791 pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
792 args = args + mlen;
793 rlen -= mlen;
683 } 794 }
684 795
685 if (i < inbufs && !ctx->maps[i]) { 796 if (i < inbufs && !ctx->maps[i]) {
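
Note what the new pages[i].size computation does: it counts whole pages between the first and the last page the buffer touches, which differs from the deleted roundup(len, PAGE_SIZE) as soon as the buffer does not start on a page boundary. A self-contained sketch (PAGE_SHIFT/PAGE_SIZE as in <asm/page.h>; masking with PAGE_MASK before the shift, as the driver does, does not change the resulting index):

    /* Bytes of page-granular backing needed for len bytes at ptr.
     * Example: 100 bytes starting 50 bytes before a page boundary
     * touch two pages, although roundup(100, PAGE_SIZE) is one. */
    static u64 span_size(u64 ptr, u64 len)
    {
        u64 pg_start = ptr >> PAGE_SHIFT;            /* first page */
        u64 pg_end = (ptr + len - 1) >> PAGE_SHIFT;  /* last page  */

        return (pg_end - pg_start + 1) * PAGE_SIZE;
    }

The mapped branch additionally adds the user pointer's offset inside its VMA to pages[i].addr, so the DSP sees the same page-internal offset the process does.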
@@ -782,6 +893,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
782 if (err) 893 if (err)
783 goto bail; 894 goto bail;
784 } 895 }
896
897 /* make sure that all CPU memory writes are seen by DSP */
898 dma_wmb();
785 /* Send invoke buffer to remote dsp */ 899 /* Send invoke buffer to remote dsp */
786 err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle); 900 err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
787 if (err) 901 if (err)
@@ -798,6 +912,8 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
798 goto bail; 912 goto bail;
799 913
800 if (ctx->nscalars) { 914 if (ctx->nscalars) {
915 /* make sure that all memory writes by DSP are seen by CPU */
916 dma_rmb();
801 /* populate all the output buffers with results */ 917 /* populate all the output buffers with results */
802 err = fastrpc_put_args(ctx, kernel); 918 err = fastrpc_put_args(ctx, kernel);
803 if (err) 919 if (err)
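
The dma_wmb()/dma_rmb() pair added in these two hunks brackets the round trip: every CPU store into the argument buffers must be observable by the DSP before the invoke message goes out, and every DSP store into the result buffers must be observable by the CPU before fastrpc_put_args() reads them back. In isolation the pattern looks like this (fragments, with a hypothetical shared descriptor):

    /* producer (CPU) side */
    desc->payload = value;   /* fill device-visible data          */
    dma_wmb();               /* order payload before the doorbell */
    desc->ready = 1;         /* device polls or is interrupted    */

    /* consumer (CPU) side */
    if (desc->done) {        /* device signalled completion   */
        dma_rmb();           /* order the flag before payload */
        result = desc->payload;
    }

dma_wmb()/dma_rmb() only order accesses to DMA-visible memory against each other and are cheaper than full wmb()/rmb(), which is presumably why the lighter variants are used here.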
@@ -843,12 +959,12 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
843 959
844 if (copy_from_user(&init, argp, sizeof(init))) { 960 if (copy_from_user(&init, argp, sizeof(init))) {
845 err = -EFAULT; 961 err = -EFAULT;
846 goto bail; 962 goto err;
847 } 963 }
848 964
849 if (init.filelen > INIT_FILELEN_MAX) { 965 if (init.filelen > INIT_FILELEN_MAX) {
850 err = -EINVAL; 966 err = -EINVAL;
851 goto bail; 967 goto err;
852 } 968 }
853 969
854 inbuf.pgid = fl->tgid; 970 inbuf.pgid = fl->tgid;
@@ -862,17 +978,15 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
862 if (init.filelen && init.filefd) { 978 if (init.filelen && init.filefd) {
863 err = fastrpc_map_create(fl, init.filefd, init.filelen, &map); 979 err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
864 if (err) 980 if (err)
865 goto bail; 981 goto err;
866 } 982 }
867 983
868 memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4), 984 memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
869 1024 * 1024); 985 1024 * 1024);
870 err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen, 986 err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
871 &imem); 987 &imem);
872 if (err) { 988 if (err)
873 fastrpc_map_put(map); 989 goto err_alloc;
874 goto bail;
875 }
876 990
877 fl->init_mem = imem; 991 fl->init_mem = imem;
878 args[0].ptr = (u64)(uintptr_t)&inbuf; 992 args[0].ptr = (u64)(uintptr_t)&inbuf;
@@ -908,13 +1022,24 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
908 1022
909 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, 1023 err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
910 sc, args); 1024 sc, args);
1025 if (err)
1026 goto err_invoke;
911 1027
912 if (err) { 1028 kfree(args);
1029
1030 return 0;
1031
1032err_invoke:
1033 fl->init_mem = NULL;
1034 fastrpc_buf_free(imem);
1035err_alloc:
1036 if (map) {
1037 spin_lock(&fl->lock);
1038 list_del(&map->node);
1039 spin_unlock(&fl->lock);
913 fastrpc_map_put(map); 1040 fastrpc_map_put(map);
914 fastrpc_buf_free(imem);
915 } 1041 }
916 1042err:
917bail:
918 kfree(args); 1043 kfree(args);
919 1044
920 return err; 1045 return err;
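
The restructured error handling replaces the single bail label, which could only clean up after the final invoke step and forced intermediate failures to free things inline (see the removed fastrpc_map_put() before the old goto bail), with the usual kernel goto ladder: each label undoes exactly the steps completed before the failure, and the map case now also unlinks the map from the user's list (where fastrpc_map_create() added it) before dropping the reference. The general shape, with hypothetical step/undo helpers:

    static int setup(void)
    {
        int err;

        err = step_a();         /* e.g. map the executable file  */
        if (err)
            goto err;
        err = step_b();         /* e.g. allocate the init buffer */
        if (err)
            goto err_a;
        err = step_c();         /* e.g. the remote invocation    */
        if (err)
            goto err_b;
        return 0;

    err_b:
        undo_b();
    err_a:
        undo_a();
    err:
        return err;
    }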
@@ -924,9 +1049,10 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
924 struct fastrpc_channel_ctx *cctx) 1049 struct fastrpc_channel_ctx *cctx)
925{ 1050{
926 struct fastrpc_session_ctx *session = NULL; 1051 struct fastrpc_session_ctx *session = NULL;
1052 unsigned long flags;
927 int i; 1053 int i;
928 1054
929 spin_lock(&cctx->lock); 1055 spin_lock_irqsave(&cctx->lock, flags);
930 for (i = 0; i < cctx->sesscount; i++) { 1056 for (i = 0; i < cctx->sesscount; i++) {
931 if (!cctx->session[i].used && cctx->session[i].valid) { 1057 if (!cctx->session[i].used && cctx->session[i].valid) {
932 cctx->session[i].used = true; 1058 cctx->session[i].used = true;
@@ -934,7 +1060,7 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
934 break; 1060 break;
935 } 1061 }
936 } 1062 }
937 spin_unlock(&cctx->lock); 1063 spin_unlock_irqrestore(&cctx->lock, flags);
938 1064
939 return session; 1065 return session;
940} 1066}
@@ -942,9 +1068,11 @@ static struct fastrpc_session_ctx *fastrpc_session_alloc(
942static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx, 1068static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
943 struct fastrpc_session_ctx *session) 1069 struct fastrpc_session_ctx *session)
944{ 1070{
945 spin_lock(&cctx->lock); 1071 unsigned long flags;
1072
1073 spin_lock_irqsave(&cctx->lock, flags);
946 session->used = false; 1074 session->used = false;
947 spin_unlock(&cctx->lock); 1075 spin_unlock_irqrestore(&cctx->lock, flags);
948} 1076}
949 1077
950static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl) 1078static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
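
Every cctx->lock site in the remainder of this patch gets the same treatment: spin_lock() becomes spin_lock_irqsave(). The likely motivation is that with context release now deferred to a worker and the rpmsg callback touching shared driver state, the lock can be contended from paths where the interrupt state is not known, so each critical section must save and restore the caller's IRQ flags rather than blindly re-enable interrupts. Canonical usage:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(lock);  /* also taken from atomic context */

    static void touch_shared_state(void)
    {
        unsigned long flags;   /* per-call, lives on this stack */

        spin_lock_irqsave(&lock, flags);      /* IRQs off, state saved */
        /* ... critical section, must not sleep ... */
        spin_unlock_irqrestore(&lock, flags); /* prior state restored  */
    }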
@@ -970,12 +1098,13 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
970 struct fastrpc_channel_ctx *cctx = fl->cctx; 1098 struct fastrpc_channel_ctx *cctx = fl->cctx;
971 struct fastrpc_invoke_ctx *ctx, *n; 1099 struct fastrpc_invoke_ctx *ctx, *n;
972 struct fastrpc_map *map, *m; 1100 struct fastrpc_map *map, *m;
1101 unsigned long flags;
973 1102
974 fastrpc_release_current_dsp_process(fl); 1103 fastrpc_release_current_dsp_process(fl);
975 1104
976 spin_lock(&cctx->lock); 1105 spin_lock_irqsave(&cctx->lock, flags);
977 list_del(&fl->user); 1106 list_del(&fl->user);
978 spin_unlock(&cctx->lock); 1107 spin_unlock_irqrestore(&cctx->lock, flags);
979 1108
980 if (fl->init_mem) 1109 if (fl->init_mem)
981 fastrpc_buf_free(fl->init_mem); 1110 fastrpc_buf_free(fl->init_mem);
@@ -1003,6 +1132,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
1003{ 1132{
1004 struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data); 1133 struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
1005 struct fastrpc_user *fl = NULL; 1134 struct fastrpc_user *fl = NULL;
1135 unsigned long flags;
1006 1136
1007 fl = kzalloc(sizeof(*fl), GFP_KERNEL); 1137 fl = kzalloc(sizeof(*fl), GFP_KERNEL);
1008 if (!fl) 1138 if (!fl)
@@ -1026,9 +1156,9 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
1026 return -EBUSY; 1156 return -EBUSY;
1027 } 1157 }
1028 1158
1029 spin_lock(&cctx->lock); 1159 spin_lock_irqsave(&cctx->lock, flags);
1030 list_add_tail(&fl->user, &cctx->users); 1160 list_add_tail(&fl->user, &cctx->users);
1031 spin_unlock(&cctx->lock); 1161 spin_unlock_irqrestore(&cctx->lock, flags);
1032 1162
1033 return 0; 1163 return 0;
1034} 1164}
@@ -1184,6 +1314,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
1184 struct fastrpc_session_ctx *sess; 1314 struct fastrpc_session_ctx *sess;
1185 struct device *dev = &pdev->dev; 1315 struct device *dev = &pdev->dev;
1186 int i, sessions = 0; 1316 int i, sessions = 0;
1317 unsigned long flags;
1187 int rc; 1318 int rc;
1188 1319
1189 cctx = dev_get_drvdata(dev->parent); 1320 cctx = dev_get_drvdata(dev->parent);
@@ -1192,7 +1323,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
1192 1323
1193 of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions); 1324 of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
1194 1325
1195 spin_lock(&cctx->lock); 1326 spin_lock_irqsave(&cctx->lock, flags);
1196 sess = &cctx->session[cctx->sesscount]; 1327 sess = &cctx->session[cctx->sesscount];
1197 sess->used = false; 1328 sess->used = false;
1198 sess->valid = true; 1329 sess->valid = true;
@@ -1213,7 +1344,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
1213 } 1344 }
1214 } 1345 }
1215 cctx->sesscount++; 1346 cctx->sesscount++;
1216 spin_unlock(&cctx->lock); 1347 spin_unlock_irqrestore(&cctx->lock, flags);
1217 rc = dma_set_mask(dev, DMA_BIT_MASK(32)); 1348 rc = dma_set_mask(dev, DMA_BIT_MASK(32));
1218 if (rc) { 1349 if (rc) {
1219 dev_err(dev, "32-bit DMA enable failed\n"); 1350 dev_err(dev, "32-bit DMA enable failed\n");
@@ -1227,16 +1358,17 @@ static int fastrpc_cb_remove(struct platform_device *pdev)
1227{ 1358{
1228 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent); 1359 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
1229 struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev); 1360 struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
1361 unsigned long flags;
1230 int i; 1362 int i;
1231 1363
1232 spin_lock(&cctx->lock); 1364 spin_lock_irqsave(&cctx->lock, flags);
1233 for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) { 1365 for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
1234 if (cctx->session[i].sid == sess->sid) { 1366 if (cctx->session[i].sid == sess->sid) {
1235 cctx->session[i].valid = false; 1367 cctx->session[i].valid = false;
1236 cctx->sesscount--; 1368 cctx->sesscount--;
1237 } 1369 }
1238 } 1370 }
1239 spin_unlock(&cctx->lock); 1371 spin_unlock_irqrestore(&cctx->lock, flags);
1240 1372
1241 return 0; 1373 return 0;
1242} 1374}
@@ -1318,11 +1450,12 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
1318{ 1450{
1319 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); 1451 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
1320 struct fastrpc_user *user; 1452 struct fastrpc_user *user;
1453 unsigned long flags;
1321 1454
1322 spin_lock(&cctx->lock); 1455 spin_lock_irqsave(&cctx->lock, flags);
1323 list_for_each_entry(user, &cctx->users, user) 1456 list_for_each_entry(user, &cctx->users, user)
1324 fastrpc_notify_users(user); 1457 fastrpc_notify_users(user);
1325 spin_unlock(&cctx->lock); 1458 spin_unlock_irqrestore(&cctx->lock, flags);
1326 1459
1327 misc_deregister(&cctx->miscdev); 1460 misc_deregister(&cctx->miscdev);
1328 of_platform_depopulate(&rpdev->dev); 1461 of_platform_depopulate(&rpdev->dev);
@@ -1354,7 +1487,13 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
1354 1487
1355 ctx->retval = rsp->retval; 1488 ctx->retval = rsp->retval;
1356 complete(&ctx->work); 1489 complete(&ctx->work);
1357 fastrpc_context_put(ctx); 1490
1491 /*
1492 * The DMA buffer associated with the context cannot be freed in
1493 * interrupt context so schedule it through a worker thread to
1494 * avoid a kernel BUG.
1495 */
1496 schedule_work(&ctx->put_work);
1358 1497
1359 return 0; 1498 return 0;
1360} 1499}
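
The comment above carries the reasoning: dropping the last context reference frees DMA buffers, which is not allowed in the rpmsg receive path. The context is therefore assumed (in a part of the patch not shown in this hunk) to carry a work_struct wired to the put routine, and the callback merely schedules it. The deferral pattern, with hypothetical names:

    #include <linux/workqueue.h>

    struct ctx {
        struct work_struct put_work;
        /* ... */
    };

    static void ctx_put_worker(struct work_struct *work)
    {
        struct ctx *c = container_of(work, struct ctx, put_work);

        ctx_put(c);   /* may free DMA memory; safe in process context */
    }

    /* at context-creation time */
    INIT_WORK(&c->put_work, ctx_put_worker);

    /* from atomic context, e.g. the rpmsg callback */
    schedule_work(&c->put_work);

Note that complete(&ctx->work) still runs synchronously, so the waiting invoker is released immediately; only the reference drop is deferred.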
diff --git a/drivers/misc/genwqe/card_debugfs.c b/drivers/misc/genwqe/card_debugfs.c
index 7c713e01d198..6f7e39f07811 100644
--- a/drivers/misc/genwqe/card_debugfs.c
+++ b/drivers/misc/genwqe/card_debugfs.c
@@ -227,7 +227,7 @@ static int ddcb_info_show(struct seq_file *s, void *unused)
227 seq_puts(s, "DDCB QUEUE:\n"); 227 seq_puts(s, "DDCB QUEUE:\n");
228 seq_printf(s, " ddcb_max: %d\n" 228 seq_printf(s, " ddcb_max: %d\n"
229 " ddcb_daddr: %016llx - %016llx\n" 229 " ddcb_daddr: %016llx - %016llx\n"
230 " ddcb_vaddr: %016llx\n" 230 " ddcb_vaddr: %p\n"
231 " ddcbs_in_flight: %u\n" 231 " ddcbs_in_flight: %u\n"
232 " ddcbs_max_in_flight: %u\n" 232 " ddcbs_max_in_flight: %u\n"
233 " ddcbs_completed: %u\n" 233 " ddcbs_completed: %u\n"
@@ -237,7 +237,7 @@ static int ddcb_info_show(struct seq_file *s, void *unused)
237 queue->ddcb_max, (long long)queue->ddcb_daddr, 237 queue->ddcb_max, (long long)queue->ddcb_daddr,
238 (long long)queue->ddcb_daddr + 238 (long long)queue->ddcb_daddr +
239 (queue->ddcb_max * DDCB_LENGTH), 239 (queue->ddcb_max * DDCB_LENGTH),
240 (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight, 240 queue->ddcb_vaddr, queue->ddcbs_in_flight,
241 queue->ddcbs_max_in_flight, queue->ddcbs_completed, 241 queue->ddcbs_max_in_flight, queue->ddcbs_completed,
242 queue->return_on_busy, queue->wait_on_busy, 242 queue->return_on_busy, queue->wait_on_busy,
243 cd->irqs_processed); 243 cd->irqs_processed);
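
The genwqe hunks fix a format-string mismatch rather than anything behavioral: ddcb_vaddr is a kernel virtual address, and printing it through a (long long) cast with %016llx both defeated printk's pointer handling and leaked the raw address into debugfs. Plain %p hashes the value by default (since v4.15); %px exists as the explicit opt-out where the raw address is genuinely wanted:

    seq_printf(s, " ddcb_vaddr: %p\n",  queue->ddcb_vaddr);  /* hashed     */
    seq_printf(s, " ddcb_vaddr: %px\n", queue->ddcb_vaddr);  /* raw; avoid */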
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
index c6592db59b25..f8e85243d672 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/misc/habanalabs/Makefile
@@ -6,7 +6,7 @@ obj-m := habanalabs.o
6 6
7habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \ 7habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \
8 command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \ 8 command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \
9 command_submission.o mmu.o 9 command_submission.o mmu.o firmware_if.o pci.o
10 10
11habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o 11habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o
12 12
diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c
index 85f75806a9a7..e495f44064fa 100644
--- a/drivers/misc/habanalabs/command_buffer.c
+++ b/drivers/misc/habanalabs/command_buffer.c
@@ -13,7 +13,7 @@
13 13
14static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) 14static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
15{ 15{
16 hdev->asic_funcs->dma_free_coherent(hdev, cb->size, 16 hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
17 (void *) (uintptr_t) cb->kernel_address, 17 (void *) (uintptr_t) cb->kernel_address,
18 cb->bus_address); 18 cb->bus_address);
19 kfree(cb); 19 kfree(cb);
@@ -66,10 +66,10 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
66 return NULL; 66 return NULL;
67 67
68 if (ctx_id == HL_KERNEL_ASID_ID) 68 if (ctx_id == HL_KERNEL_ASID_ID)
69 p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size, 69 p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
70 &cb->bus_address, GFP_ATOMIC); 70 &cb->bus_address, GFP_ATOMIC);
71 else 71 else
72 p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size, 72 p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
73 &cb->bus_address, 73 &cb->bus_address,
74 GFP_USER | __GFP_ZERO); 74 GFP_USER | __GFP_ZERO);
75 if (!p) { 75 if (!p) {
@@ -214,6 +214,13 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
214 u64 handle; 214 u64 handle;
215 int rc; 215 int rc;
216 216
217 if (hl_device_disabled_or_in_reset(hdev)) {
218 dev_warn_ratelimited(hdev->dev,
219 "Device is %s. Can't execute CB IOCTL\n",
220 atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
221 return -EBUSY;
222 }
223
217 switch (args->in.op) { 224 switch (args->in.op) {
218 case HL_CB_OP_CREATE: 225 case HL_CB_OP_CREATE:
219 rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size, 226 rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size,
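
hl_cb_ioctl() now performs the same early-out as hl_cs_ioctl() below: refuse work while the device is disabled or resetting, and use the rate-limited warning so a userspace retry loop cannot flood the log. Were more ioctls to grow this check, it would fold naturally into a helper (hypothetical sketch, not part of the patch):

    static int hl_check_device_usable(struct hl_device *hdev, const char *op)
    {
        if (hl_device_disabled_or_in_reset(hdev)) {
            dev_warn_ratelimited(hdev->dev,
                "Device is %s. Can't execute %s\n",
                atomic_read(&hdev->in_reset) ? "in_reset" : "disabled",
                op);
            return -EBUSY;
        }
        return 0;
    }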
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 19c84214a7ea..6fe785e26859 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -93,7 +93,6 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
93 parser.user_cb_size = job->user_cb_size; 93 parser.user_cb_size = job->user_cb_size;
94 parser.ext_queue = job->ext_queue; 94 parser.ext_queue = job->ext_queue;
95 job->patched_cb = NULL; 95 job->patched_cb = NULL;
96 parser.use_virt_addr = hdev->mmu_enable;
97 96
98 rc = hdev->asic_funcs->cs_parser(hdev, &parser); 97 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
99 if (job->ext_queue) { 98 if (job->ext_queue) {
@@ -261,7 +260,8 @@ static void cs_timedout(struct work_struct *work)
261 ctx_asid = cs->ctx->asid; 260 ctx_asid = cs->ctx->asid;
262 261
263 /* TODO: add information about last signaled seq and last emitted seq */ 262 /* TODO: add information about last signaled seq and last emitted seq */
264 dev_err(hdev->dev, "CS %d.%llu got stuck!\n", ctx_asid, cs->sequence); 263 dev_err(hdev->dev, "User %d command submission %llu got stuck!\n",
264 ctx_asid, cs->sequence);
265 265
266 cs_put(cs); 266 cs_put(cs);
267 267
@@ -600,20 +600,20 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
600 void __user *chunks; 600 void __user *chunks;
601 u32 num_chunks; 601 u32 num_chunks;
602 u64 cs_seq = ULONG_MAX; 602 u64 cs_seq = ULONG_MAX;
603 int rc, do_restore; 603 int rc, do_ctx_switch;
604 bool need_soft_reset = false; 604 bool need_soft_reset = false;
605 605
606 if (hl_device_disabled_or_in_reset(hdev)) { 606 if (hl_device_disabled_or_in_reset(hdev)) {
607 dev_warn(hdev->dev, 607 dev_warn_ratelimited(hdev->dev,
608 "Device is %s. Can't submit new CS\n", 608 "Device is %s. Can't submit new CS\n",
609 atomic_read(&hdev->in_reset) ? "in_reset" : "disabled"); 609 atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
610 rc = -EBUSY; 610 rc = -EBUSY;
611 goto out; 611 goto out;
612 } 612 }
613 613
614 do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0); 614 do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
615 615
616 if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) { 616 if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
617 long ret; 617 long ret;
618 618
619 chunks = (void __user *)(uintptr_t)args->in.chunks_restore; 619 chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
@@ -621,7 +621,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
621 621
622 mutex_lock(&hpriv->restore_phase_mutex); 622 mutex_lock(&hpriv->restore_phase_mutex);
623 623
624 if (do_restore) { 624 if (do_ctx_switch) {
625 rc = hdev->asic_funcs->context_switch(hdev, ctx->asid); 625 rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
626 if (rc) { 626 if (rc) {
627 dev_err_ratelimited(hdev->dev, 627 dev_err_ratelimited(hdev->dev,
@@ -677,18 +677,18 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
677 } 677 }
678 } 678 }
679 679
680 ctx->thread_restore_wait_token = 1; 680 ctx->thread_ctx_switch_wait_token = 1;
681 } else if (!ctx->thread_restore_wait_token) { 681 } else if (!ctx->thread_ctx_switch_wait_token) {
682 u32 tmp; 682 u32 tmp;
683 683
684 rc = hl_poll_timeout_memory(hdev, 684 rc = hl_poll_timeout_memory(hdev,
685 (u64) (uintptr_t) &ctx->thread_restore_wait_token, 685 (u64) (uintptr_t) &ctx->thread_ctx_switch_wait_token,
686 jiffies_to_usecs(hdev->timeout_jiffies), 686 jiffies_to_usecs(hdev->timeout_jiffies),
687 &tmp); 687 &tmp);
688 688
689 if (rc || !tmp) { 689 if (rc || !tmp) {
690 dev_err(hdev->dev, 690 dev_err(hdev->dev,
691 "restore phase hasn't finished in time\n"); 691 "context switch phase didn't finish in time\n");
692 rc = -ETIMEDOUT; 692 rc = -ETIMEDOUT;
693 goto out; 693 goto out;
694 } 694 }
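
The renames in this file (thread_restore_token -> thread_ctx_switch_token and friends) describe the mechanism more honestly: the atomic token lets exactly one thread per context win the right to perform the one-time context switch via atomic_cmpxchg(), while the losers wait on the second token, here via hl_poll_timeout_memory() with the device timeout. Stripped to its bones the scheme is:

    #include <linux/atomic.h>

    static atomic_t token = ATOMIC_INIT(1);
    static u32 done;

    void first_submission(void)
    {
        if (atomic_cmpxchg(&token, 1, 0) == 1) {
            do_one_time_switch();   /* hypothetical: winner only */
            WRITE_ONCE(done, 1);    /* releases the waiters      */
        } else {
            while (!READ_ONCE(done))     /* the driver polls with */
                cpu_relax();             /* a timeout, not forever */
        }
    }

On reset (see device.c below) the token is re-armed to 1 and the wait token cleared, so the first submission after a reset redoes the switch.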
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
index 619ace1c4ef7..4804cdcf4c48 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/context.c
@@ -106,8 +106,8 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
106 106
107 ctx->cs_sequence = 1; 107 ctx->cs_sequence = 1;
108 spin_lock_init(&ctx->cs_lock); 108 spin_lock_init(&ctx->cs_lock);
109 atomic_set(&ctx->thread_restore_token, 1); 109 atomic_set(&ctx->thread_ctx_switch_token, 1);
110 ctx->thread_restore_wait_token = 0; 110 ctx->thread_ctx_switch_wait_token = 0;
111 111
112 if (is_kernel_ctx) { 112 if (is_kernel_ctx) {
113 ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */ 113 ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 974a87789bd8..a4447699ff4e 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -505,22 +505,97 @@ err:
505 return -EINVAL; 505 return -EINVAL;
506} 506}
507 507
508static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
509 u64 *phys_addr)
510{
511 struct hl_ctx *ctx = hdev->user_ctx;
512 u64 hop_addr, hop_pte_addr, hop_pte;
513 int rc = 0;
514
515 if (!ctx) {
516 dev_err(hdev->dev, "no ctx available\n");
517 return -EINVAL;
518 }
519
520 mutex_lock(&ctx->mmu_lock);
521
522 /* hop 0 */
523 hop_addr = get_hop0_addr(ctx);
524 hop_pte_addr = get_hop0_pte_addr(ctx, hop_addr, virt_addr);
525 hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
526
527 /* hop 1 */
528 hop_addr = get_next_hop_addr(hop_pte);
529 if (hop_addr == ULLONG_MAX)
530 goto not_mapped;
531 hop_pte_addr = get_hop1_pte_addr(ctx, hop_addr, virt_addr);
532 hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
533
534 /* hop 2 */
535 hop_addr = get_next_hop_addr(hop_pte);
536 if (hop_addr == ULLONG_MAX)
537 goto not_mapped;
538 hop_pte_addr = get_hop2_pte_addr(ctx, hop_addr, virt_addr);
539 hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
540
541 /* hop 3 */
542 hop_addr = get_next_hop_addr(hop_pte);
543 if (hop_addr == ULLONG_MAX)
544 goto not_mapped;
545 hop_pte_addr = get_hop3_pte_addr(ctx, hop_addr, virt_addr);
546 hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
547
548 if (!(hop_pte & LAST_MASK)) {
549 /* hop 4 */
550 hop_addr = get_next_hop_addr(hop_pte);
551 if (hop_addr == ULLONG_MAX)
552 goto not_mapped;
553 hop_pte_addr = get_hop4_pte_addr(ctx, hop_addr, virt_addr);
554 hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
555 }
556
557 if (!(hop_pte & PAGE_PRESENT_MASK))
558 goto not_mapped;
559
560 *phys_addr = (hop_pte & PTE_PHYS_ADDR_MASK) | (virt_addr & OFFSET_MASK);
561
562 goto out;
563
564not_mapped:
565 dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
566 virt_addr);
567 rc = -EINVAL;
568out:
569 mutex_unlock(&ctx->mmu_lock);
570 return rc;
571}
572
508static ssize_t hl_data_read32(struct file *f, char __user *buf, 573static ssize_t hl_data_read32(struct file *f, char __user *buf,
509 size_t count, loff_t *ppos) 574 size_t count, loff_t *ppos)
510{ 575{
511 struct hl_dbg_device_entry *entry = file_inode(f)->i_private; 576 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
512 struct hl_device *hdev = entry->hdev; 577 struct hl_device *hdev = entry->hdev;
578 struct asic_fixed_properties *prop = &hdev->asic_prop;
513 char tmp_buf[32]; 579 char tmp_buf[32];
580 u64 addr = entry->addr;
514 u32 val; 581 u32 val;
515 ssize_t rc; 582 ssize_t rc;
516 583
517 if (*ppos) 584 if (*ppos)
518 return 0; 585 return 0;
519 586
520 rc = hdev->asic_funcs->debugfs_read32(hdev, entry->addr, &val); 587 if (addr >= prop->va_space_dram_start_address &&
588 addr < prop->va_space_dram_end_address &&
589 hdev->mmu_enable &&
590 hdev->dram_supports_virtual_memory) {
591 rc = device_va_to_pa(hdev, entry->addr, &addr);
592 if (rc)
593 return rc;
594 }
595
596 rc = hdev->asic_funcs->debugfs_read32(hdev, addr, &val);
521 if (rc) { 597 if (rc) {
522 dev_err(hdev->dev, "Failed to read from 0x%010llx\n", 598 dev_err(hdev->dev, "Failed to read from 0x%010llx\n", addr);
523 entry->addr);
524 return rc; 599 return rc;
525 } 600 }
526 601
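
device_va_to_pa() is a software walk of the device MMU's five-level page table: hop 0 is per-context, hops 1-4 are reached through get_next_hop_addr(), and a hop-3 PTE with LAST_MASK set terminates the walk early (a large-page mapping with no hop 4). Since every step has the same shape, the unrolled code above is equivalent to a loop; a sketch with a hypothetical hop_pte_addr() standing in for the per-hop get_hopN_pte_addr() helpers, and with the mmu_lock handling omitted:

    static int va_to_pa(struct hl_ctx *ctx, u64 va, u64 *pa)
    {
        struct hl_device *hdev = ctx->hdev;
        u64 hop_addr = get_hop0_addr(ctx);
        u64 pte;
        int hop;

        for (hop = 0; hop <= 4; hop++) {
            pte = hdev->asic_funcs->read_pte(hdev,
                    hop_pte_addr(ctx, hop, hop_addr, va));
            if (hop == 4 || (hop == 3 && (pte & LAST_MASK)))
                break;                   /* pte now maps the page */
            hop_addr = get_next_hop_addr(pte);
            if (hop_addr == ULLONG_MAX)
                return -EINVAL;          /* not mapped */
        }

        if (!(pte & PAGE_PRESENT_MASK))
            return -EINVAL;
        *pa = (pte & PTE_PHYS_ADDR_MASK) | (va & OFFSET_MASK);
        return 0;
    }

The debugfs read/write paths then call this only for addresses inside the DRAM virtual range, and only when the MMU is enabled and DRAM goes through it, so register and SRAM accesses keep their addresses untranslated.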
@@ -536,6 +611,8 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
536{ 611{
537 struct hl_dbg_device_entry *entry = file_inode(f)->i_private; 612 struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
538 struct hl_device *hdev = entry->hdev; 613 struct hl_device *hdev = entry->hdev;
614 struct asic_fixed_properties *prop = &hdev->asic_prop;
615 u64 addr = entry->addr;
539 u32 value; 616 u32 value;
540 ssize_t rc; 617 ssize_t rc;
541 618
@@ -543,10 +620,19 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
543 if (rc) 620 if (rc)
544 return rc; 621 return rc;
545 622
546 rc = hdev->asic_funcs->debugfs_write32(hdev, entry->addr, value); 623 if (addr >= prop->va_space_dram_start_address &&
624 addr < prop->va_space_dram_end_address &&
625 hdev->mmu_enable &&
626 hdev->dram_supports_virtual_memory) {
627 rc = device_va_to_pa(hdev, entry->addr, &addr);
628 if (rc)
629 return rc;
630 }
631
632 rc = hdev->asic_funcs->debugfs_write32(hdev, addr, value);
547 if (rc) { 633 if (rc) {
548 dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n", 634 dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n",
549 value, entry->addr); 635 value, addr);
550 return rc; 636 return rc;
551 } 637 }
552 638
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 77d51be66c7e..91a9e47a3482 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -5,11 +5,14 @@
5 * All Rights Reserved. 5 * All Rights Reserved.
6 */ 6 */
7 7
8#define pr_fmt(fmt) "habanalabs: " fmt
9
8#include "habanalabs.h" 10#include "habanalabs.h"
9 11
10#include <linux/pci.h> 12#include <linux/pci.h>
11#include <linux/sched/signal.h> 13#include <linux/sched/signal.h>
12#include <linux/hwmon.h> 14#include <linux/hwmon.h>
15#include <uapi/misc/habanalabs.h>
13 16
14#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10) 17#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10)
15 18
@@ -21,6 +24,20 @@ bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
21 return false; 24 return false;
22} 25}
23 26
27enum hl_device_status hl_device_status(struct hl_device *hdev)
28{
29 enum hl_device_status status;
30
31 if (hdev->disabled)
32 status = HL_DEVICE_STATUS_MALFUNCTION;
33 else if (atomic_read(&hdev->in_reset))
34 status = HL_DEVICE_STATUS_IN_RESET;
35 else
36 status = HL_DEVICE_STATUS_OPERATIONAL;
37
38 return status;
39};
40
24static void hpriv_release(struct kref *ref) 41static void hpriv_release(struct kref *ref)
25{ 42{
26 struct hl_fpriv *hpriv; 43 struct hl_fpriv *hpriv;
@@ -498,11 +515,8 @@ disable_device:
498 return rc; 515 return rc;
499} 516}
500 517
501static void hl_device_hard_reset_pending(struct work_struct *work) 518static void device_kill_open_processes(struct hl_device *hdev)
502{ 519{
503 struct hl_device_reset_work *device_reset_work =
504 container_of(work, struct hl_device_reset_work, reset_work);
505 struct hl_device *hdev = device_reset_work->hdev;
506 u16 pending_total, pending_cnt; 520 u16 pending_total, pending_cnt;
507 struct task_struct *task = NULL; 521 struct task_struct *task = NULL;
508 522
@@ -537,6 +551,12 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
537 } 551 }
538 } 552 }
539 553
554 /* We killed the open users, but because the driver cleans up after the
555 * user contexts are closed (e.g. mmu mappings), we need to wait again
556 * to make sure the cleaning phase is finished before continuing with
557 * the reset
558 */
559
540 pending_cnt = pending_total; 560 pending_cnt = pending_total;
541 561
542 while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) { 562 while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
@@ -552,6 +572,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
552 572
553 mutex_unlock(&hdev->fd_open_cnt_lock); 573 mutex_unlock(&hdev->fd_open_cnt_lock);
554 574
575}
576
577static void device_hard_reset_pending(struct work_struct *work)
578{
579 struct hl_device_reset_work *device_reset_work =
580 container_of(work, struct hl_device_reset_work, reset_work);
581 struct hl_device *hdev = device_reset_work->hdev;
582
583 device_kill_open_processes(hdev);
584
555 hl_device_reset(hdev, true, true); 585 hl_device_reset(hdev, true, true);
556 586
557 kfree(device_reset_work); 587 kfree(device_reset_work);
@@ -613,6 +643,8 @@ again:
613 if ((hard_reset) && (!from_hard_reset_thread)) { 643 if ((hard_reset) && (!from_hard_reset_thread)) {
614 struct hl_device_reset_work *device_reset_work; 644 struct hl_device_reset_work *device_reset_work;
615 645
646 hdev->hard_reset_pending = true;
647
616 if (!hdev->pdev) { 648 if (!hdev->pdev) {
617 dev_err(hdev->dev, 649 dev_err(hdev->dev,
618 "Reset action is NOT supported in simulator\n"); 650 "Reset action is NOT supported in simulator\n");
@@ -620,8 +652,6 @@ again:
620 goto out_err; 652 goto out_err;
621 } 653 }
622 654
623 hdev->hard_reset_pending = true;
624
625 device_reset_work = kzalloc(sizeof(*device_reset_work), 655 device_reset_work = kzalloc(sizeof(*device_reset_work),
626 GFP_ATOMIC); 656 GFP_ATOMIC);
627 if (!device_reset_work) { 657 if (!device_reset_work) {
@@ -635,7 +665,7 @@ again:
635 * from a dedicated work 665 * from a dedicated work
636 */ 666 */
637 INIT_WORK(&device_reset_work->reset_work, 667 INIT_WORK(&device_reset_work->reset_work,
638 hl_device_hard_reset_pending); 668 device_hard_reset_pending);
639 device_reset_work->hdev = hdev; 669 device_reset_work->hdev = hdev;
640 schedule_work(&device_reset_work->reset_work); 670 schedule_work(&device_reset_work->reset_work);
641 671
@@ -663,17 +693,9 @@ again:
663 /* Go over all the queues, release all CS and their jobs */ 693 /* Go over all the queues, release all CS and their jobs */
664 hl_cs_rollback_all(hdev); 694 hl_cs_rollback_all(hdev);
665 695
666 if (hard_reset) { 696 /* Release kernel context */
667 /* Release kernel context */ 697 if ((hard_reset) && (hl_ctx_put(hdev->kernel_ctx) == 1))
668 if (hl_ctx_put(hdev->kernel_ctx) != 1) {
669 dev_err(hdev->dev,
670 "kernel ctx is alive during hard reset\n");
671 rc = -EBUSY;
672 goto out_err;
673 }
674
675 hdev->kernel_ctx = NULL; 698 hdev->kernel_ctx = NULL;
676 }
677 699
678 /* Reset the H/W. It will be in idle state after this returns */ 700 /* Reset the H/W. It will be in idle state after this returns */
679 hdev->asic_funcs->hw_fini(hdev, hard_reset); 701 hdev->asic_funcs->hw_fini(hdev, hard_reset);
@@ -688,16 +710,24 @@ again:
688 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) 710 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
689 hl_cq_reset(hdev, &hdev->completion_queue[i]); 711 hl_cq_reset(hdev, &hdev->completion_queue[i]);
690 712
691 /* Make sure the setup phase for the user context will run again */ 713 /* Make sure the context switch phase will run again */
692 if (hdev->user_ctx) { 714 if (hdev->user_ctx) {
693 atomic_set(&hdev->user_ctx->thread_restore_token, 1); 715 atomic_set(&hdev->user_ctx->thread_ctx_switch_token, 1);
694 hdev->user_ctx->thread_restore_wait_token = 0; 716 hdev->user_ctx->thread_ctx_switch_wait_token = 0;
695 } 717 }
696 718
697 /* Finished tear-down, starting to re-initialize */ 719 /* Finished tear-down, starting to re-initialize */
698 720
699 if (hard_reset) { 721 if (hard_reset) {
700 hdev->device_cpu_disabled = false; 722 hdev->device_cpu_disabled = false;
723 hdev->hard_reset_pending = false;
724
725 if (hdev->kernel_ctx) {
726 dev_crit(hdev->dev,
727 "kernel ctx was alive during hard reset, something is terribly wrong\n");
728 rc = -EBUSY;
729 goto out_err;
730 }
701 731
702 /* Allocate the kernel context */ 732 /* Allocate the kernel context */
703 hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), 733 hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
@@ -752,8 +782,6 @@ again:
752 } 782 }
753 783
754 hl_set_max_power(hdev, hdev->max_power); 784 hl_set_max_power(hdev, hdev->max_power);
755
756 hdev->hard_reset_pending = false;
757 } else { 785 } else {
758 rc = hdev->asic_funcs->soft_reset_late_init(hdev); 786 rc = hdev->asic_funcs->soft_reset_late_init(hdev);
759 if (rc) { 787 if (rc) {
@@ -1030,11 +1058,22 @@ void hl_device_fini(struct hl_device *hdev)
1030 WARN(1, "Failed to remove device because reset function did not finish\n"); 1058 WARN(1, "Failed to remove device because reset function did not finish\n");
1031 return; 1059 return;
1032 } 1060 }
1033 }; 1061 }
1034 1062
1035 /* Mark device as disabled */ 1063 /* Mark device as disabled */
1036 hdev->disabled = true; 1064 hdev->disabled = true;
1037 1065
1066 /*
1067 * Flush anyone that is inside the critical section of enqueue
1068 * jobs to the H/W
1069 */
1070 hdev->asic_funcs->hw_queues_lock(hdev);
1071 hdev->asic_funcs->hw_queues_unlock(hdev);
1072
1073 hdev->hard_reset_pending = true;
1074
1075 device_kill_open_processes(hdev);
1076
1038 hl_hwmon_fini(hdev); 1077 hl_hwmon_fini(hdev);
1039 1078
1040 device_late_fini(hdev); 1079 device_late_fini(hdev);
@@ -1108,7 +1147,13 @@ int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr,
1108 * either by the direct access of the device or by another core 1147 * either by the direct access of the device or by another core
1109 */ 1148 */
1110 u32 *paddr = (u32 *) (uintptr_t) addr; 1149 u32 *paddr = (u32 *) (uintptr_t) addr;
1111 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); 1150 ktime_t timeout;
1151
1152 /* timeout should be longer when working with simulator */
1153 if (!hdev->pdev)
1154 timeout_us *= 10;
1155
1156 timeout = ktime_add_us(ktime_get(), timeout_us);
1112 1157
1113 might_sleep(); 1158 might_sleep();
1114 1159
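
The last device.c hunk makes hl_poll_timeout_memory() scale its budget by ten when hdev->pdev is NULL, i.e. when running against the simulator, where everything is slower. The surrounding function is only partially visible here; it is the standard ktime deadline poll, whose core is roughly (a plausible reconstruction, not the verbatim driver code):

    /* Spin on a device-updated u32 until it becomes non-zero or the
     * deadline passes. */
    static int poll_u32(u32 *paddr, u32 *val, u64 timeout_us)
    {
        ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);

        might_sleep();
        for (;;) {
            *val = READ_ONCE(*paddr);
            if (*val)
                return 0;
            if (ktime_compare(ktime_get(), timeout) > 0)
                return -ETIMEDOUT;
            usleep_range(100, 200);   /* illustrative interval */
        }
    }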
diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
new file mode 100644
index 000000000000..eda5d7fcb79f
--- /dev/null
+++ b/drivers/misc/habanalabs/firmware_if.c
@@ -0,0 +1,322 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "habanalabs.h"
9
10#include <linux/firmware.h>
11#include <linux/genalloc.h>
12#include <linux/io-64-nonatomic-lo-hi.h>
13
14/**
15 * hl_fw_push_fw_to_device() - Push FW code to device.
16 * @hdev: pointer to hl_device structure.
17 *
18 * Copy fw code from firmware file to device memory.
19 *
20 * Return: 0 on success, non-zero for failure.
21 */
22int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
23 void __iomem *dst)
24{
25 const struct firmware *fw;
26 const u64 *fw_data;
27 size_t fw_size, i;
28 int rc;
29
30 rc = request_firmware(&fw, fw_name, hdev->dev);
31 if (rc) {
32 dev_err(hdev->dev, "Failed to request %s\n", fw_name);
33 goto out;
34 }
35
36 fw_size = fw->size;
37 if ((fw_size % 4) != 0) {
38 dev_err(hdev->dev, "illegal %s firmware size %zu\n",
39 fw_name, fw_size);
40 rc = -EINVAL;
41 goto out;
42 }
43
44 dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
45
46 fw_data = (const u64 *) fw->data;
47
48 if ((fw->size % 8) != 0)
49 fw_size -= 8;
50
51 for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
52 if (!(i & (0x80000 - 1))) {
53 dev_dbg(hdev->dev,
54 "copied so far %zu out of %zu for %s firmware",
55 i, fw_size, fw_name);
56 usleep_range(20, 100);
57 }
58
59 writeq(*fw_data, dst);
60 }
61
62 if ((fw->size % 8) != 0)
63 writel(*(const u32 *) fw_data, dst);
64
65out:
66 release_firmware(fw);
67 return rc;
68}
69
70int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
71{
72 struct armcp_packet pkt = {};
73
74 pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
75
76 return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
77 sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
78}
79
80int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
81 u16 len, u32 timeout, long *result)
82{
83 struct armcp_packet *pkt;
84 dma_addr_t pkt_dma_addr;
85 u32 tmp;
86 int rc = 0;
87
88 if (len > HL_CPU_CB_SIZE) {
89 dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
90 len);
91 return -ENOMEM;
92 }
93
94 pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
95 &pkt_dma_addr);
96 if (!pkt) {
97 dev_err(hdev->dev,
98 "Failed to allocate DMA memory for packet to CPU\n");
99 return -ENOMEM;
100 }
101
102 memcpy(pkt, msg, len);
103
104 mutex_lock(&hdev->send_cpu_message_lock);
105
106 if (hdev->disabled)
107 goto out;
108
109 if (hdev->device_cpu_disabled) {
110 rc = -EIO;
111 goto out;
112 }
113
114 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr);
115 if (rc) {
116 dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
117 goto out;
118 }
119
120 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
121 timeout, &tmp);
122
123 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
124
125 if (rc == -ETIMEDOUT) {
126 dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
127 hdev->device_cpu_disabled = true;
128 goto out;
129 }
130
131 if (tmp == ARMCP_PACKET_FENCE_VAL) {
132 u32 ctl = le32_to_cpu(pkt->ctl);
133
134 rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
135 if (rc) {
136 dev_err(hdev->dev,
137 "F/W ERROR %d for CPU packet %d\n",
138 rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK)
139 >> ARMCP_PKT_CTL_OPCODE_SHIFT);
140 rc = -EINVAL;
141 } else if (result) {
142 *result = (long) le64_to_cpu(pkt->result);
143 }
144 } else {
145 dev_err(hdev->dev, "CPU packet wrong fence value\n");
146 rc = -EINVAL;
147 }
148
149out:
150 mutex_unlock(&hdev->send_cpu_message_lock);
151
152 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
153
154 return rc;
155}
156
157int hl_fw_test_cpu_queue(struct hl_device *hdev)
158{
159 struct armcp_packet test_pkt = {};
160 long result;
161 int rc;
162
163 test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
164 ARMCP_PKT_CTL_OPCODE_SHIFT);
165 test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
166
167 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
168 sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
169
170 if (!rc) {
171 if (result == ARMCP_PACKET_FENCE_VAL)
172 dev_info(hdev->dev,
173 "queue test on CPU queue succeeded\n");
174 else
175 dev_err(hdev->dev,
176 "CPU queue test failed (0x%08lX)\n", result);
177 } else {
178 dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
179 }
180
181 return rc;
182}
183
184void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
185 dma_addr_t *dma_handle)
186{
187 u64 kernel_addr;
188
189 /* roundup to HL_CPU_PKT_SIZE */
190 size = (size + (HL_CPU_PKT_SIZE - 1)) & HL_CPU_PKT_MASK;
191
192 kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
193
194 *dma_handle = hdev->cpu_accessible_dma_address +
195 (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
196
197 return (void *) (uintptr_t) kernel_addr;
198}
199
200void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
201 void *vaddr)
202{
203 /* roundup to HL_CPU_PKT_SIZE */
204 size = (size + (HL_CPU_PKT_SIZE - 1)) & HL_CPU_PKT_MASK;
205
206 gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
207 size);
208}
209
210int hl_fw_send_heartbeat(struct hl_device *hdev)
211{
212 struct armcp_packet hb_pkt = {};
213 long result;
214 int rc;
215
216 hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
217 ARMCP_PKT_CTL_OPCODE_SHIFT);
218 hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
219
220 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
221 sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
222
223 if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
224 rc = -EIO;
225
226 return rc;
227}
228
229int hl_fw_armcp_info_get(struct hl_device *hdev)
230{
231 struct asic_fixed_properties *prop = &hdev->asic_prop;
232 struct armcp_packet pkt = {};
233 void *armcp_info_cpu_addr;
234 dma_addr_t armcp_info_dma_addr;
235 long result;
236 int rc;
237
238 armcp_info_cpu_addr =
239 hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
240 sizeof(struct armcp_info),
241 &armcp_info_dma_addr);
242 if (!armcp_info_cpu_addr) {
243 dev_err(hdev->dev,
244 "Failed to allocate DMA memory for ArmCP info packet\n");
245 return -ENOMEM;
246 }
247
248 memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
249
250 pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
251 ARMCP_PKT_CTL_OPCODE_SHIFT);
252 pkt.addr = cpu_to_le64(armcp_info_dma_addr);
253 pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));
254
255 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
256 HL_ARMCP_INFO_TIMEOUT_USEC, &result);
257 if (rc) {
258 dev_err(hdev->dev,
259 "Failed to send armcp info pkt, error %d\n", rc);
260 goto out;
261 }
262
263 memcpy(&prop->armcp_info, armcp_info_cpu_addr,
264 sizeof(prop->armcp_info));
265
266 rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
267 if (rc) {
268 dev_err(hdev->dev,
269 "Failed to build hwmon channel info, error %d\n", rc);
270 rc = -EFAULT;
271 goto out;
272 }
273
274out:
275 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
276 sizeof(struct armcp_info), armcp_info_cpu_addr);
277
278 return rc;
279}
280
281int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
282{
283 struct armcp_packet pkt = {};
284 void *eeprom_info_cpu_addr;
285 dma_addr_t eeprom_info_dma_addr;
286 long result;
287 int rc;
288
289 eeprom_info_cpu_addr =
290 hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
291 max_size, &eeprom_info_dma_addr);
292 if (!eeprom_info_cpu_addr) {
293 dev_err(hdev->dev,
294 "Failed to allocate DMA memory for EEPROM info packet\n");
295 return -ENOMEM;
296 }
297
298 memset(eeprom_info_cpu_addr, 0, max_size);
299
300 pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
301 ARMCP_PKT_CTL_OPCODE_SHIFT);
302 pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
303 pkt.data_max_size = cpu_to_le32(max_size);
304
305 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
306 HL_ARMCP_EEPROM_TIMEOUT_USEC, &result);
307
308 if (rc) {
309 dev_err(hdev->dev,
310 "Failed to send armcp EEPROM pkt, error %d\n", rc);
311 goto out;
312 }
313
314 /* result contains the actual size */
315 memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
316
317out:
318 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
319 eeprom_info_cpu_addr);
320
321 return rc;
322}
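
The new firmware_if.c collects the previously per-ASIC firmware plumbing behind common hl_fw_*() entry points. hl_fw_send_cpu_message() is the heart of it: the packet is staged in CPU-accessible DMA memory with a fence word, pushed onto the CPU queue without a completion, and the fence is then polled until the firmware stamps ARMCP_PACKET_FENCE_VAL into it, at which point ctl carries the firmware return code and result the payload. Callers all look alike; compare hl_fw_send_pci_access_msg() above, or this equivalent sketch:

    /* Fire an opcode-only ArmCP packet and wait on its fence. */
    static int send_simple_op(struct hl_device *hdev, u32 opcode)
    {
        struct armcp_packet pkt = {};
        long result;

        pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);

        /* blocks until the F/W writes the fence or the timeout hits */
        return hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt,
                sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, &result);
    }

One quirk worth noting: hl_fw_send_heartbeat() sends ARMCP_PACKET_TEST, the same opcode as the queue test, relying on the fence value echoed back in result to tell a live firmware from a dead one.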
diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/misc/habanalabs/goya/Makefile
index e458e5ba500b..131432f677e2 100644
--- a/drivers/misc/habanalabs/goya/Makefile
+++ b/drivers/misc/habanalabs/goya/Makefile
@@ -1,3 +1,4 @@
1subdir-ccflags-y += -I$(src) 1subdir-ccflags-y += -I$(src)
2 2
3HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o 3HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o \
4 goya/goya_coresight.o
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 3c509e19d69d..a582e29c1ee4 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -12,10 +12,8 @@
12 12
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/genalloc.h> 14#include <linux/genalloc.h>
15#include <linux/firmware.h>
16#include <linux/hwmon.h> 15#include <linux/hwmon.h>
17#include <linux/io-64-nonatomic-lo-hi.h> 16#include <linux/io-64-nonatomic-lo-hi.h>
18#include <linux/io-64-nonatomic-hi-lo.h>
19 17
20/* 18/*
21 * GOYA security scheme: 19 * GOYA security scheme:
@@ -71,7 +69,7 @@
71 * 69 *
72 */ 70 */
73 71
74#define GOYA_MMU_REGS_NUM 61 72#define GOYA_MMU_REGS_NUM 63
75 73
76#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */ 74#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
77 75
@@ -80,15 +78,12 @@
80#define GOYA_RESET_WAIT_MSEC 1 /* 1ms */ 78#define GOYA_RESET_WAIT_MSEC 1 /* 1ms */
81#define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */ 79#define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */
82#define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */ 80#define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
83#define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */
84#define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */ 81#define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
85#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100) 82#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
86#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) 83#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
87 84
88#define GOYA_QMAN0_FENCE_VAL 0xD169B243 85#define GOYA_QMAN0_FENCE_VAL 0xD169B243
89 86
90#define GOYA_MAX_INITIATORS 20
91
92#define GOYA_MAX_STRING_LEN 20 87#define GOYA_MAX_STRING_LEN 20
93 88
94#define GOYA_CB_POOL_CB_CNT 512 89#define GOYA_CB_POOL_CB_CNT 512
@@ -173,12 +168,12 @@ static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
173 mmMME_SBA_CONTROL_DATA, 168 mmMME_SBA_CONTROL_DATA,
174 mmMME_SBB_CONTROL_DATA, 169 mmMME_SBB_CONTROL_DATA,
175 mmMME_SBC_CONTROL_DATA, 170 mmMME_SBC_CONTROL_DATA,
176 mmMME_WBC_CONTROL_DATA 171 mmMME_WBC_CONTROL_DATA,
172 mmPCIE_WRAP_PSOC_ARUSER,
173 mmPCIE_WRAP_PSOC_AWUSER
177}; 174};
178 175
179#define GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE 121 176static u32 goya_all_events[] = {
180
181static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = {
182 GOYA_ASYNC_EVENT_ID_PCIE_IF, 177 GOYA_ASYNC_EVENT_ID_PCIE_IF,
183 GOYA_ASYNC_EVENT_ID_TPC0_ECC, 178 GOYA_ASYNC_EVENT_ID_TPC0_ECC,
184 GOYA_ASYNC_EVENT_ID_TPC1_ECC, 179 GOYA_ASYNC_EVENT_ID_TPC1_ECC,
@@ -302,14 +297,7 @@ static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = {
302 GOYA_ASYNC_EVENT_ID_DMA_BM_CH4 297 GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
303}; 298};
304 299
305static int goya_armcp_info_get(struct hl_device *hdev); 300void goya_get_fixed_properties(struct hl_device *hdev)
306static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
307static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
308static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
309static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
310 u64 phys_addr);
311
312static void goya_get_fixed_properties(struct hl_device *hdev)
313{ 301{
314 struct asic_fixed_properties *prop = &hdev->asic_prop; 302 struct asic_fixed_properties *prop = &hdev->asic_prop;
315 int i; 303 int i;
@@ -357,7 +345,6 @@ static void goya_get_fixed_properties(struct hl_device *hdev)
357 prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE; 345 prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
358 prop->dram_page_size = PAGE_SIZE_2MB; 346 prop->dram_page_size = PAGE_SIZE_2MB;
359 347
360 prop->host_phys_base_address = HOST_PHYS_BASE;
361 prop->va_space_host_start_address = VA_HOST_SPACE_START; 348 prop->va_space_host_start_address = VA_HOST_SPACE_START;
362 prop->va_space_host_end_address = VA_HOST_SPACE_END; 349 prop->va_space_host_end_address = VA_HOST_SPACE_END;
363 prop->va_space_dram_start_address = VA_DDR_SPACE_START; 350 prop->va_space_dram_start_address = VA_DDR_SPACE_START;
@@ -367,24 +354,13 @@ static void goya_get_fixed_properties(struct hl_device *hdev)
367 prop->cfg_size = CFG_SIZE; 354 prop->cfg_size = CFG_SIZE;
368 prop->max_asid = MAX_ASID; 355 prop->max_asid = MAX_ASID;
369 prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE; 356 prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
357 prop->high_pll = PLL_HIGH_DEFAULT;
370 prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT; 358 prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
371 prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE; 359 prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
372 prop->max_power_default = MAX_POWER_DEFAULT; 360 prop->max_power_default = MAX_POWER_DEFAULT;
373 prop->tpc_enabled_mask = TPC_ENABLED_MASK; 361 prop->tpc_enabled_mask = TPC_ENABLED_MASK;
374 362 prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
375 prop->high_pll = PLL_HIGH_DEFAULT; 363 prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
376}
377
378int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
379{
380 struct armcp_packet pkt;
381
382 memset(&pkt, 0, sizeof(pkt));
383
384 pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
385
386 return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
387 sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
388} 364}
389 365
390/* 366/*
@@ -398,199 +374,40 @@ int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
398 */ 374 */
399static int goya_pci_bars_map(struct hl_device *hdev) 375static int goya_pci_bars_map(struct hl_device *hdev)
400{ 376{
401 struct pci_dev *pdev = hdev->pdev; 377 static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
378 bool is_wc[3] = {false, false, true};
402 int rc; 379 int rc;
403 380
404 rc = pci_request_regions(pdev, HL_NAME); 381 rc = hl_pci_bars_map(hdev, name, is_wc);
405 if (rc) { 382 if (rc)
406 dev_err(hdev->dev, "Cannot obtain PCI resources\n");
407 return rc; 383 return rc;
408 }
409
410 hdev->pcie_bar[SRAM_CFG_BAR_ID] =
411 pci_ioremap_bar(pdev, SRAM_CFG_BAR_ID);
412 if (!hdev->pcie_bar[SRAM_CFG_BAR_ID]) {
413 dev_err(hdev->dev, "pci_ioremap_bar failed for CFG\n");
414 rc = -ENODEV;
415 goto err_release_regions;
416 }
417
418 hdev->pcie_bar[MSIX_BAR_ID] = pci_ioremap_bar(pdev, MSIX_BAR_ID);
419 if (!hdev->pcie_bar[MSIX_BAR_ID]) {
420 dev_err(hdev->dev, "pci_ioremap_bar failed for MSIX\n");
421 rc = -ENODEV;
422 goto err_unmap_sram_cfg;
423 }
424
425 hdev->pcie_bar[DDR_BAR_ID] = pci_ioremap_wc_bar(pdev, DDR_BAR_ID);
426 if (!hdev->pcie_bar[DDR_BAR_ID]) {
427 dev_err(hdev->dev, "pci_ioremap_bar failed for DDR\n");
428 rc = -ENODEV;
429 goto err_unmap_msix;
430 }
431 384
432 hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] + 385 hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
433 (CFG_BASE - SRAM_BASE_ADDR); 386 (CFG_BASE - SRAM_BASE_ADDR);
434
435 return 0;
436
437err_unmap_msix:
438 iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
439err_unmap_sram_cfg:
440 iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
441err_release_regions:
442 pci_release_regions(pdev);
443
444 return rc;
445}
446
447/*
448 * goya_pci_bars_unmap - Unmap PCI BARS of Goya device
449 *
450 * @hdev: pointer to hl_device structure
451 *
452 * Release all PCI BARS and unmap their virtual addresses
453 *
454 */
455static void goya_pci_bars_unmap(struct hl_device *hdev)
456{
457 struct pci_dev *pdev = hdev->pdev;
458
459 iounmap(hdev->pcie_bar[DDR_BAR_ID]);
460 iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
461 iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
462 pci_release_regions(pdev);
463}
464
465/*
466 * goya_elbi_write - Write through the ELBI interface
467 *
468 * @hdev: pointer to hl_device structure
469 *
470 * return 0 on success, -1 on failure
471 *
472 */
473static int goya_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
474{
475 struct pci_dev *pdev = hdev->pdev;
476 ktime_t timeout;
477 u32 val;
478
479 /* Clear previous status */
480 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
481
482 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
483 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
484 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
485 PCI_CONFIG_ELBI_CTRL_WRITE);
486
487 timeout = ktime_add_ms(ktime_get(), 10);
488 for (;;) {
489 pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
490 if (val & PCI_CONFIG_ELBI_STS_MASK)
491 break;
492 if (ktime_compare(ktime_get(), timeout) > 0) {
493 pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
494 &val);
495 break;
496 }
497 usleep_range(300, 500);
498 }
499
500 if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
501 return 0;
502
503 if (val & PCI_CONFIG_ELBI_STS_ERR) {
504 dev_err(hdev->dev, "Error writing to ELBI\n");
505 return -EIO;
506 }
507
508 if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
509 dev_err(hdev->dev, "ELBI write didn't finish in time\n");
510 return -EIO;
511 }
512
513 dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
514 return -EIO;
515}
516
517/*
518 * goya_iatu_write - iatu write routine
519 *
520 * @hdev: pointer to hl_device structure
521 *
522 */
523static int goya_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
524{
525 u32 dbi_offset;
526 int rc;
527
528 dbi_offset = addr & 0xFFF;
529
530 rc = goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000);
531 rc |= goya_elbi_write(hdev, mmPCIE_DBI_BASE + dbi_offset, data);
532
533 if (rc)
534 return -EIO;
535 387
536 return 0; 388 return 0;
537} 389}
538 390
539static void goya_reset_link_through_bridge(struct hl_device *hdev) 391static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
540{
541 struct pci_dev *pdev = hdev->pdev;
542 struct pci_dev *parent_port;
543 u16 val;
544
545 parent_port = pdev->bus->self;
546 pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
547 val |= PCI_BRIDGE_CTL_BUS_RESET;
548 pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
549 ssleep(1);
550
551 val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
552 pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
553 ssleep(3);
554}
555
556/*
557 * goya_set_ddr_bar_base - set DDR bar to map specific device address
558 *
559 * @hdev: pointer to hl_device structure
560 * @addr: address in DDR. Must be aligned to DDR bar size
561 *
562 * This function configures the iATU so that the DDR bar will start at the
563 * specified addr.
564 *
565 */
566static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
567{ 392{
568 struct goya_device *goya = hdev->asic_specific; 393 struct goya_device *goya = hdev->asic_specific;
394 u64 old_addr = addr;
569 int rc; 395 int rc;
570 396
571 if ((goya) && (goya->ddr_bar_cur_addr == addr)) 397 if ((goya) && (goya->ddr_bar_cur_addr == addr))
572 return 0; 398 return old_addr;
573 399
574 /* Inbound Region 1 - Bar 4 - Point to DDR */ 400 /* Inbound Region 1 - Bar 4 - Point to DDR */
575 rc = goya_iatu_write(hdev, 0x314, lower_32_bits(addr)); 401 rc = hl_pci_set_dram_bar_base(hdev, 1, 4, addr);
576 rc |= goya_iatu_write(hdev, 0x318, upper_32_bits(addr)); 402 if (rc)
577 rc |= goya_iatu_write(hdev, 0x300, 0); 403 return U64_MAX;
578 /* Enable + Bar match + match enable + Bar 4 */
579 rc |= goya_iatu_write(hdev, 0x304, 0xC0080400);
580
581 /* Return the DBI window to the default location */
582 rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
583 rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);
584
585 if (rc) {
586 dev_err(hdev->dev, "failed to map DDR bar to 0x%08llx\n", addr);
587 return -EIO;
588 }
589 404
590 if (goya) 405 if (goya) {
406 old_addr = goya->ddr_bar_cur_addr;
591 goya->ddr_bar_cur_addr = addr; 407 goya->ddr_bar_cur_addr = addr;
408 }
592 409
593 return 0; 410 return old_addr;
594} 411}
595 412
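
goya_set_ddr_bar_base() changes contract along with implementation: instead of 0/-EIO it now returns the previous BAR base, or U64_MAX on failure, so a caller can temporarily retarget the DDR window and put it back afterwards. Expected caller shape (hypothetical fragment):

    u64 old = goya_set_ddr_bar_base(hdev, new_base);  /* BAR-size aligned */
    if (old == U64_MAX)
        return -EIO;
    /* ... access DRAM through hdev->pcie_bar[DDR_BAR_ID] ... */
    if (goya_set_ddr_bar_base(hdev, old) == U64_MAX)
        return -EIO;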
596/* 413/*
@@ -603,40 +420,8 @@ static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
603 */ 420 */
604static int goya_init_iatu(struct hl_device *hdev) 421static int goya_init_iatu(struct hl_device *hdev)
605{ 422{
606 int rc; 423 return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
607 424 HOST_PHYS_BASE, HOST_PHYS_SIZE);
608 /* Inbound Region 0 - Bar 0 - Point to SRAM_BASE_ADDR */
609 rc = goya_iatu_write(hdev, 0x114, lower_32_bits(SRAM_BASE_ADDR));
610 rc |= goya_iatu_write(hdev, 0x118, upper_32_bits(SRAM_BASE_ADDR));
611 rc |= goya_iatu_write(hdev, 0x100, 0);
612 /* Enable + Bar match + match enable */
613 rc |= goya_iatu_write(hdev, 0x104, 0xC0080000);
614
615 /* Inbound Region 1 - Bar 4 - Point to DDR */
616 rc |= goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
617
618 /* Outbound Region 0 - Point to Host */
619 rc |= goya_iatu_write(hdev, 0x008, lower_32_bits(HOST_PHYS_BASE));
620 rc |= goya_iatu_write(hdev, 0x00C, upper_32_bits(HOST_PHYS_BASE));
621 rc |= goya_iatu_write(hdev, 0x010,
622 lower_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
623 rc |= goya_iatu_write(hdev, 0x014, 0);
624 rc |= goya_iatu_write(hdev, 0x018, 0);
625 rc |= goya_iatu_write(hdev, 0x020,
626 upper_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
627 /* Increase region size */
628 rc |= goya_iatu_write(hdev, 0x000, 0x00002000);
629 /* Enable */
630 rc |= goya_iatu_write(hdev, 0x004, 0x80000000);
631
632 /* Return the DBI window to the default location */
633 rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
634 rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);
635
636 if (rc)
637 return -EIO;
638
639 return 0;
640} 425}
641 426
642/* 427/*
@@ -682,52 +467,9 @@ static int goya_early_init(struct hl_device *hdev)
682 467
683 prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID); 468 prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
684 469
685 /* set DMA mask for GOYA */ 470 rc = hl_pci_init(hdev, 39);
686 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); 471 if (rc)
687 if (rc) {
688 dev_warn(hdev->dev, "Unable to set pci dma mask to 39 bits\n");
689 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
690 if (rc) {
691 dev_err(hdev->dev,
692 "Unable to set pci dma mask to 32 bits\n");
693 return rc;
694 }
695 }
696
697 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
698 if (rc) {
699 dev_warn(hdev->dev,
700 "Unable to set pci consistent dma mask to 39 bits\n");
701 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
702 if (rc) {
703 dev_err(hdev->dev,
704 "Unable to set pci consistent dma mask to 32 bits\n");
705 return rc;
706 }
707 }
708
709 if (hdev->reset_pcilink)
710 goya_reset_link_through_bridge(hdev);
711
712 rc = pci_enable_device_mem(pdev);
713 if (rc) {
714 dev_err(hdev->dev, "can't enable PCI device\n");
715 return rc; 472 return rc;
716 }
717
718 pci_set_master(pdev);
719
720 rc = goya_init_iatu(hdev);
721 if (rc) {
722 dev_err(hdev->dev, "Failed to initialize iATU\n");
723 goto disable_device;
724 }
725
726 rc = goya_pci_bars_map(hdev);
727 if (rc) {
728 dev_err(hdev->dev, "Failed to initialize PCI BARS\n");
729 goto disable_device;
730 }
731 473
732 if (!hdev->pldm) { 474 if (!hdev->pldm) {
733 val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS); 475 val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
@@ -737,12 +479,6 @@ static int goya_early_init(struct hl_device *hdev)
737 } 479 }
738 480
739 return 0; 481 return 0;
740
741disable_device:
742 pci_clear_master(pdev);
743 pci_disable_device(pdev);
744
745 return rc;
746} 482}
747 483
748/* 484/*
@@ -755,14 +491,33 @@ disable_device:
755 */ 491 */
756static int goya_early_fini(struct hl_device *hdev) 492static int goya_early_fini(struct hl_device *hdev)
757{ 493{
758 goya_pci_bars_unmap(hdev); 494 hl_pci_fini(hdev);
759
760 pci_clear_master(hdev->pdev);
761 pci_disable_device(hdev->pdev);
762 495
763 return 0; 496 return 0;
764} 497}
765 498
499static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
500{
501 /* mask to zero the MMBP and ASID bits */
502 WREG32_AND(reg, ~0x7FF);
503 WREG32_OR(reg, asid);
504}
505
506static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
507{
508 struct goya_device *goya = hdev->asic_specific;
509
510 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
511 return;
512
513 if (secure)
514 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
515 else
516 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
517
518 RREG32(mmDMA_QM_0_GLBL_PROT);
519}
520
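goya_qman0_set_security() factors out the QMAN0 trust toggling that goya_send_job_on_qman0() open-coded before this patch; the trailing RREG32() flushes the posted write before the caller proceeds. Typical pairing, with do_qman0_job() as a hypothetical placeholder for the submission step:

	goya_qman0_set_security(hdev, true);	/* QMAN_DMA_FULLY_TRUSTED */
	rc = do_qman0_job(hdev, job);		/* hypothetical submission */
	goya_qman0_set_security(hdev, false);	/* QMAN_DMA_PARTLY_TRUSTED */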
766/* 521/*
767 * goya_fetch_psoc_frequency - Fetch PSOC frequency values 522 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
768 * 523 *
@@ -779,20 +534,12 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
779 prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1); 534 prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
780} 535}
781 536
782/* 537int goya_late_init(struct hl_device *hdev)
783 * goya_late_init - GOYA late initialization code
784 *
785 * @hdev: pointer to hl_device structure
786 *
787 * Get ArmCP info and send message to CPU to enable PCI access
788 */
789static int goya_late_init(struct hl_device *hdev)
790{ 538{
791 struct asic_fixed_properties *prop = &hdev->asic_prop; 539 struct asic_fixed_properties *prop = &hdev->asic_prop;
792 struct goya_device *goya = hdev->asic_specific;
793 int rc; 540 int rc;
794 541
795 rc = goya->armcp_info_get(hdev); 542 rc = goya_armcp_info_get(hdev);
796 if (rc) { 543 if (rc) {
797 dev_err(hdev->dev, "Failed to get armcp info\n"); 544 dev_err(hdev->dev, "Failed to get armcp info\n");
798 return rc; 545 return rc;
@@ -804,7 +551,7 @@ static int goya_late_init(struct hl_device *hdev)
804 */ 551 */
805 WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size)); 552 WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
806 553
807 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS); 554 rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
808 if (rc) { 555 if (rc) {
809 dev_err(hdev->dev, "Failed to enable PCI access from CPU\n"); 556 dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
810 return rc; 557 return rc;
@@ -830,7 +577,7 @@ static int goya_late_init(struct hl_device *hdev)
830 return 0; 577 return 0;
831 578
832disable_pci_access: 579disable_pci_access:
833 goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); 580 hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
834 581
835 return rc; 582 return rc;
836} 583}
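goya_send_pci_access_msg() becomes the common hl_fw_send_pci_access_msg() here. Judging by the packet code elsewhere in this patch, the wrapper is assumed to build a one-field ArmCP packet, roughly:

	/* Assumed shape of the wrapper, not a verified implementation */
	struct armcp_packet pkt;

	memset(&pkt, 0, sizeof(pkt));
	pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);

	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
			sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);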
@@ -879,9 +626,6 @@ static int goya_sw_init(struct hl_device *hdev)
879 if (!goya) 626 if (!goya)
880 return -ENOMEM; 627 return -ENOMEM;
881 628
882 goya->test_cpu_queue = goya_test_cpu_queue;
883 goya->armcp_info_get = goya_armcp_info_get;
884
885 /* according to goya_init_iatu */ 629 /* according to goya_init_iatu */
886 goya->ddr_bar_cur_addr = DRAM_PHYS_BASE; 630 goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;
887 631
@@ -901,45 +645,43 @@ static int goya_sw_init(struct hl_device *hdev)
901 } 645 }
902 646
903 hdev->cpu_accessible_dma_mem = 647 hdev->cpu_accessible_dma_mem =
904 hdev->asic_funcs->dma_alloc_coherent(hdev, 648 hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
905 CPU_ACCESSIBLE_MEM_SIZE, 649 HL_CPU_ACCESSIBLE_MEM_SIZE,
906 &hdev->cpu_accessible_dma_address, 650 &hdev->cpu_accessible_dma_address,
907 GFP_KERNEL | __GFP_ZERO); 651 GFP_KERNEL | __GFP_ZERO);
908 652
909 if (!hdev->cpu_accessible_dma_mem) { 653 if (!hdev->cpu_accessible_dma_mem) {
910 dev_err(hdev->dev,
911 "failed to allocate %d of dma memory for CPU accessible memory space\n",
912 CPU_ACCESSIBLE_MEM_SIZE);
913 rc = -ENOMEM; 654 rc = -ENOMEM;
914 goto free_dma_pool; 655 goto free_dma_pool;
915 } 656 }
916 657
917 hdev->cpu_accessible_dma_pool = gen_pool_create(CPU_PKT_SHIFT, -1); 658 hdev->cpu_accessible_dma_pool = gen_pool_create(HL_CPU_PKT_SHIFT, -1);
918 if (!hdev->cpu_accessible_dma_pool) { 659 if (!hdev->cpu_accessible_dma_pool) {
919 dev_err(hdev->dev, 660 dev_err(hdev->dev,
920 "Failed to create CPU accessible DMA pool\n"); 661 "Failed to create CPU accessible DMA pool\n");
921 rc = -ENOMEM; 662 rc = -ENOMEM;
922 goto free_cpu_pq_dma_mem; 663 goto free_cpu_dma_mem;
923 } 664 }
924 665
925 rc = gen_pool_add(hdev->cpu_accessible_dma_pool, 666 rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
926 (uintptr_t) hdev->cpu_accessible_dma_mem, 667 (uintptr_t) hdev->cpu_accessible_dma_mem,
927 CPU_ACCESSIBLE_MEM_SIZE, -1); 668 HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
928 if (rc) { 669 if (rc) {
929 dev_err(hdev->dev, 670 dev_err(hdev->dev,
930 "Failed to add memory to CPU accessible DMA pool\n"); 671 "Failed to add memory to CPU accessible DMA pool\n");
931 rc = -EFAULT; 672 rc = -EFAULT;
932 goto free_cpu_pq_pool; 673 goto free_cpu_accessible_dma_pool;
933 } 674 }
934 675
935 spin_lock_init(&goya->hw_queues_lock); 676 spin_lock_init(&goya->hw_queues_lock);
936 677
937 return 0; 678 return 0;
938 679
939free_cpu_pq_pool: 680free_cpu_accessible_dma_pool:
940 gen_pool_destroy(hdev->cpu_accessible_dma_pool); 681 gen_pool_destroy(hdev->cpu_accessible_dma_pool);
941free_cpu_pq_dma_mem: 682free_cpu_dma_mem:
942 hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE, 683 hdev->asic_funcs->asic_dma_free_coherent(hdev,
684 HL_CPU_ACCESSIBLE_MEM_SIZE,
943 hdev->cpu_accessible_dma_mem, 685 hdev->cpu_accessible_dma_mem,
944 hdev->cpu_accessible_dma_address); 686 hdev->cpu_accessible_dma_address);
945free_dma_pool: 687free_dma_pool:
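The CPU-accessible pool set up above follows the standard genalloc lifecycle, and the renamed error labels unwind it in reverse order. Condensed, with pool/mem standing in for the hdev fields used above:

	pool = gen_pool_create(HL_CPU_PKT_SHIFT, -1);	/* min order, any node */
	gen_pool_add(pool, (uintptr_t) mem, HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
	addr = gen_pool_alloc(pool, size);		/* hand out packet buffers */
	gen_pool_free(pool, addr, size);
	gen_pool_destroy(pool);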
@@ -962,7 +704,8 @@ static int goya_sw_fini(struct hl_device *hdev)
962 704
963 gen_pool_destroy(hdev->cpu_accessible_dma_pool); 705 gen_pool_destroy(hdev->cpu_accessible_dma_pool);
964 706
965 hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE, 707 hdev->asic_funcs->asic_dma_free_coherent(hdev,
708 HL_CPU_ACCESSIBLE_MEM_SIZE,
966 hdev->cpu_accessible_dma_mem, 709 hdev->cpu_accessible_dma_mem,
967 hdev->cpu_accessible_dma_address); 710 hdev->cpu_accessible_dma_address);
968 711
@@ -1056,11 +799,10 @@ static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
1056 * Initialize the H/W registers of the QMAN DMA channels 799 * Initialize the H/W registers of the QMAN DMA channels
1057 * 800 *
1058 */ 801 */
1059static void goya_init_dma_qmans(struct hl_device *hdev) 802void goya_init_dma_qmans(struct hl_device *hdev)
1060{ 803{
1061 struct goya_device *goya = hdev->asic_specific; 804 struct goya_device *goya = hdev->asic_specific;
1062 struct hl_hw_queue *q; 805 struct hl_hw_queue *q;
1063 dma_addr_t bus_address;
1064 int i; 806 int i;
1065 807
1066 if (goya->hw_cap_initialized & HW_CAP_DMA) 808 if (goya->hw_cap_initialized & HW_CAP_DMA)
@@ -1069,10 +811,7 @@ static void goya_init_dma_qmans(struct hl_device *hdev)
1069 q = &hdev->kernel_queues[0]; 811 q = &hdev->kernel_queues[0];
1070 812
1071 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) { 813 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
1072 bus_address = q->bus_address + 814 goya_init_dma_qman(hdev, i, q->bus_address);
1073 hdev->asic_prop.host_phys_base_address;
1074
1075 goya_init_dma_qman(hdev, i, bus_address);
1076 goya_init_dma_ch(hdev, i); 815 goya_init_dma_ch(hdev, i);
1077 } 816 }
1078 817
@@ -1209,11 +948,10 @@ static int goya_stop_external_queues(struct hl_device *hdev)
1209 * Returns 0 on success 948 * Returns 0 on success
1210 * 949 *
1211 */ 950 */
1212static int goya_init_cpu_queues(struct hl_device *hdev) 951int goya_init_cpu_queues(struct hl_device *hdev)
1213{ 952{
1214 struct goya_device *goya = hdev->asic_specific; 953 struct goya_device *goya = hdev->asic_specific;
1215 struct hl_eq *eq; 954 struct hl_eq *eq;
1216 dma_addr_t bus_address;
1217 u32 status; 955 u32 status;
1218 struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ]; 956 struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
1219 int err; 957 int err;
@@ -1226,23 +964,22 @@ static int goya_init_cpu_queues(struct hl_device *hdev)
1226 964
1227 eq = &hdev->event_queue; 965 eq = &hdev->event_queue;
1228 966
1229 bus_address = cpu_pq->bus_address + 967 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0,
1230 hdev->asic_prop.host_phys_base_address; 968 lower_32_bits(cpu_pq->bus_address));
1231 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0, lower_32_bits(bus_address)); 969 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1,
1232 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1, upper_32_bits(bus_address)); 970 upper_32_bits(cpu_pq->bus_address));
1233 971
1234 bus_address = eq->bus_address + hdev->asic_prop.host_phys_base_address; 972 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(eq->bus_address));
1235 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(bus_address)); 973 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(eq->bus_address));
1236 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(bus_address));
1237 974
1238 bus_address = hdev->cpu_accessible_dma_address + 975 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8,
1239 hdev->asic_prop.host_phys_base_address; 976 lower_32_bits(hdev->cpu_accessible_dma_address));
1240 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8, lower_32_bits(bus_address)); 977 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9,
1241 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9, upper_32_bits(bus_address)); 978 upper_32_bits(hdev->cpu_accessible_dma_address));
1242 979
1243 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES); 980 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
1244 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES); 981 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
1245 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, CPU_ACCESSIBLE_MEM_SIZE); 982 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, HL_CPU_ACCESSIBLE_MEM_SIZE);
1246 983
1247 /* Used for EQ CI */ 984 /* Used for EQ CI */
1248 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0); 985 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);
@@ -1695,6 +1432,8 @@ static void goya_init_golden_registers(struct hl_device *hdev)
1695 */ 1432 */
1696 WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0); 1433 WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
1697 1434
1435 WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
1436
1698 goya->hw_cap_initialized |= HW_CAP_GOLDEN; 1437 goya->hw_cap_initialized |= HW_CAP_GOLDEN;
1699} 1438}
1700 1439
@@ -1788,7 +1527,7 @@ static void goya_init_mme_cmdq(struct hl_device *hdev)
1788 WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE); 1527 WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
1789} 1528}
1790 1529
1791static void goya_init_mme_qmans(struct hl_device *hdev) 1530void goya_init_mme_qmans(struct hl_device *hdev)
1792{ 1531{
1793 struct goya_device *goya = hdev->asic_specific; 1532 struct goya_device *goya = hdev->asic_specific;
1794 u32 so_base_lo, so_base_hi; 1533 u32 so_base_lo, so_base_hi;
@@ -1895,7 +1634,7 @@ static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
1895 WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE); 1634 WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
1896} 1635}
1897 1636
1898static void goya_init_tpc_qmans(struct hl_device *hdev) 1637void goya_init_tpc_qmans(struct hl_device *hdev)
1899{ 1638{
1900 struct goya_device *goya = hdev->asic_specific; 1639 struct goya_device *goya = hdev->asic_specific;
1901 u32 so_base_lo, so_base_hi; 1640 u32 so_base_lo, so_base_hi;
@@ -2222,10 +1961,10 @@ static int goya_enable_msix(struct hl_device *hdev)
2222 } 1961 }
2223 } 1962 }
2224 1963
2225 irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX); 1964 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2226 1965
2227 rc = request_irq(irq, hl_irq_handler_eq, 0, 1966 rc = request_irq(irq, hl_irq_handler_eq, 0,
2228 goya_irq_name[EVENT_QUEUE_MSIX_IDX], 1967 goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX],
2229 &hdev->event_queue); 1968 &hdev->event_queue);
2230 if (rc) { 1969 if (rc) {
2231 dev_err(hdev->dev, "Failed to request IRQ %d", irq); 1970 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
@@ -2256,7 +1995,7 @@ static void goya_sync_irqs(struct hl_device *hdev)
2256 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) 1995 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2257 synchronize_irq(pci_irq_vector(hdev->pdev, i)); 1996 synchronize_irq(pci_irq_vector(hdev->pdev, i));
2258 1997
2259 synchronize_irq(pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX)); 1998 synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX));
2260} 1999}
2261 2000
2262static void goya_disable_msix(struct hl_device *hdev) 2001static void goya_disable_msix(struct hl_device *hdev)
@@ -2269,7 +2008,7 @@ static void goya_disable_msix(struct hl_device *hdev)
2269 2008
2270 goya_sync_irqs(hdev); 2009 goya_sync_irqs(hdev);
2271 2010
2272 irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX); 2011 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2273 free_irq(irq, &hdev->event_queue); 2012 free_irq(irq, &hdev->event_queue);
2274 2013
2275 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) { 2014 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
@@ -2329,67 +2068,45 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
2329} 2068}
2330 2069
2331/* 2070/*
2332 * goya_push_fw_to_device - Push FW code to device 2071 * goya_push_uboot_to_device() - Push u-boot FW code to device.
2333 * 2072 * @hdev: Pointer to hl_device structure.
2334 * @hdev: pointer to hl_device structure
2335 * 2073 *
2336 * Copy fw code from firmware file to device memory. 2074 * Copy u-boot fw code from firmware file to SRAM BAR.
2337 * Returns 0 on success
2338 * 2075 *
2076 * Return: 0 on success, non-zero for failure.
2339 */ 2077 */
2340static int goya_push_fw_to_device(struct hl_device *hdev, const char *fw_name, 2078static int goya_push_uboot_to_device(struct hl_device *hdev)
2341 void __iomem *dst)
2342{ 2079{
2343 const struct firmware *fw; 2080 char fw_name[200];
2344 const u64 *fw_data; 2081 void __iomem *dst;
2345 size_t fw_size, i;
2346 int rc;
2347
2348 rc = request_firmware(&fw, fw_name, hdev->dev);
2349
2350 if (rc) {
2351 dev_err(hdev->dev, "Failed to request %s\n", fw_name);
2352 goto out;
2353 }
2354
2355 fw_size = fw->size;
2356 if ((fw_size % 4) != 0) {
2357 dev_err(hdev->dev, "illegal %s firmware size %zu\n",
2358 fw_name, fw_size);
2359 rc = -EINVAL;
2360 goto out;
2361 }
2362
2363 dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
2364
2365 fw_data = (const u64 *) fw->data;
2366 2082
2367 if ((fw->size % 8) != 0) 2083 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
2368 fw_size -= 8; 2084 dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
2369 2085
2370 for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) { 2086 return hl_fw_push_fw_to_device(hdev, fw_name, dst);
2371 if (!(i & (0x80000 - 1))) { 2087}
2372 dev_dbg(hdev->dev,
2373 "copied so far %zu out of %zu for %s firmware",
2374 i, fw_size, fw_name);
2375 usleep_range(20, 100);
2376 }
2377 2088
2378 writeq(*fw_data, dst); 2089/*
2379 } 2090 * goya_push_linux_to_device() - Push LINUX FW code to device.
2091 * @hdev: Pointer to hl_device structure.
2092 *
2093 * Copy LINUX fw code from firmware file to HBM BAR.
2094 *
2095 * Return: 0 on success, non-zero for failure.
2096 */
2097static int goya_push_linux_to_device(struct hl_device *hdev)
2098{
2099 char fw_name[200];
2100 void __iomem *dst;
2380 2101
2381 if ((fw->size % 8) != 0) 2102 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
2382 writel(*(const u32 *) fw_data, dst); 2103 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2383 2104
2384out: 2105 return hl_fw_push_fw_to_device(hdev, fw_name, dst);
2385 release_firmware(fw);
2386 return rc;
2387} 2106}
2388 2107
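Both helpers above end in hl_fw_push_fw_to_device(), which absorbs the request_firmware()/writeq() loop deleted from goya_push_fw_to_device(). The core of that deleted loop, with the size checks, progress prints and non-8-byte tail handling trimmed:

	const struct firmware *fw;
	const u64 *fw_data;
	size_t i;

	rc = request_firmware(&fw, fw_name, hdev->dev);
	if (rc)
		return rc;

	fw_data = (const u64 *) fw->data;
	for (i = 0 ; i < fw->size ; i += 8, fw_data++, dst += 8)
		writeq(*fw_data, dst);	/* 64-bit copies into the mapped BAR */

	release_firmware(fw);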
2389static int goya_pldm_init_cpu(struct hl_device *hdev) 2108static int goya_pldm_init_cpu(struct hl_device *hdev)
2390{ 2109{
2391 char fw_name[200];
2392 void __iomem *dst;
2393 u32 val, unit_rst_val; 2110 u32 val, unit_rst_val;
2394 int rc; 2111 int rc;
2395 2112
@@ -2407,15 +2124,11 @@ static int goya_pldm_init_cpu(struct hl_device *hdev)
2407 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val); 2124 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
2408 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); 2125 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2409 2126
2410 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin"); 2127 rc = goya_push_uboot_to_device(hdev);
2411 dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
2412 rc = goya_push_fw_to_device(hdev, fw_name, dst);
2413 if (rc) 2128 if (rc)
2414 return rc; 2129 return rc;
2415 2130
2416 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb"); 2131 rc = goya_push_linux_to_device(hdev);
2417 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2418 rc = goya_push_fw_to_device(hdev, fw_name, dst);
2419 if (rc) 2132 if (rc)
2420 return rc; 2133 return rc;
2421 2134
@@ -2477,8 +2190,6 @@ static void goya_read_device_fw_version(struct hl_device *hdev,
2477static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout) 2190static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
2478{ 2191{
2479 struct goya_device *goya = hdev->asic_specific; 2192 struct goya_device *goya = hdev->asic_specific;
2480 char fw_name[200];
2481 void __iomem *dst;
2482 u32 status; 2193 u32 status;
2483 int rc; 2194 int rc;
2484 2195
@@ -2492,11 +2203,10 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
2492 * Before pushing u-boot/linux to device, need to set the ddr bar to 2203 * Before pushing u-boot/linux to device, need to set the ddr bar to
2493 * base address of dram 2204 * base address of dram
2494 */ 2205 */
2495 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE); 2206 if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
2496 if (rc) {
2497 dev_err(hdev->dev, 2207 dev_err(hdev->dev,
2498 "failed to map DDR bar to DRAM base address\n"); 2208 "failed to map DDR bar to DRAM base address\n");
2499 return rc; 2209 return -EIO;
2500 } 2210 }
2501 2211
2502 if (hdev->pldm) { 2212 if (hdev->pldm) {
@@ -2549,6 +2259,11 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
2549 "ARM status %d - DDR initialization failed\n", 2259 "ARM status %d - DDR initialization failed\n",
2550 status); 2260 status);
2551 break; 2261 break;
2262 case CPU_BOOT_STATUS_UBOOT_NOT_READY:
2263 dev_err(hdev->dev,
2264 "ARM status %d - u-boot stopped by user\n",
2265 status);
2266 break;
2552 default: 2267 default:
2553 dev_err(hdev->dev, 2268 dev_err(hdev->dev,
2554 "ARM status %d - Invalid status code\n", 2269 "ARM status %d - Invalid status code\n",
@@ -2570,9 +2285,7 @@ static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
2570 goto out; 2285 goto out;
2571 } 2286 }
2572 2287
2573 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb"); 2288 rc = goya_push_linux_to_device(hdev);
2574 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2575 rc = goya_push_fw_to_device(hdev, fw_name, dst);
2576 if (rc) 2289 if (rc)
2577 return rc; 2290 return rc;
2578 2291
@@ -2605,7 +2318,39 @@ out:
2605 return 0; 2318 return 0;
2606} 2319}
2607 2320
2608static int goya_mmu_init(struct hl_device *hdev) 2321static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
2322 u64 phys_addr)
2323{
2324 u32 status, timeout_usec;
2325 int rc;
2326
2327 if (hdev->pldm)
2328 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
2329 else
2330 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
2331
2332 WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
2333 WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
2334 WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
2335
2336 rc = hl_poll_timeout(
2337 hdev,
2338 MMU_ASID_BUSY,
2339 status,
2340 !(status & 0x80000000),
2341 1000,
2342 timeout_usec);
2343
2344 if (rc) {
2345 dev_err(hdev->dev,
2346 "Timeout during MMU hop0 config of asid %d\n", asid);
2347 return rc;
2348 }
2349
2350 return 0;
2351}
2352
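The hop0 update above is a busy-bit handshake: write the hop0 physical address, set bit 31 of MMU_ASID_BUSY together with the ASID, then poll until the hardware clears the bit. hl_poll_timeout() is assumed to behave like the kernel's readx_poll_timeout() helpers, roughly:

	/* Assumed semantics of the poll above, 1000 us interval (sketch) */
	ktime_t t = ktime_add_us(ktime_get(), timeout_usec);

	for (;;) {
		status = RREG32(MMU_ASID_BUSY);
		if (!(status & 0x80000000))	/* busy bit cleared */
			return 0;
		if (ktime_compare(ktime_get(), t) > 0)
			return -ETIMEDOUT;
		usleep_range(1000, 1100);
	}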
2353int goya_mmu_init(struct hl_device *hdev)
2609{ 2354{
2610 struct asic_fixed_properties *prop = &hdev->asic_prop; 2355 struct asic_fixed_properties *prop = &hdev->asic_prop;
2611 struct goya_device *goya = hdev->asic_specific; 2356 struct goya_device *goya = hdev->asic_specific;
@@ -2696,12 +2441,12 @@ static int goya_hw_init(struct hl_device *hdev)
2696 * After CPU initialization is finished, change DDR bar mapping inside 2441 * After CPU initialization is finished, change DDR bar mapping inside
2697 * iATU to point to the start address of the MMU page tables 2442 * iATU to point to the start address of the MMU page tables
2698 */ 2443 */
2699 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE + 2444 if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
2700 (MMU_PAGE_TABLES_ADDR & ~(prop->dram_pci_bar_size - 0x1ull))); 2445 (MMU_PAGE_TABLES_ADDR &
2701 if (rc) { 2446 ~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
2702 dev_err(hdev->dev, 2447 dev_err(hdev->dev,
2703 "failed to map DDR bar to MMU page tables\n"); 2448 "failed to map DDR bar to MMU page tables\n");
2704 return rc; 2449 return -EIO;
2705 } 2450 }
2706 2451
2707 rc = goya_mmu_init(hdev); 2452 rc = goya_mmu_init(hdev);
@@ -2728,28 +2473,16 @@ static int goya_hw_init(struct hl_device *hdev)
2728 goto disable_msix; 2473 goto disable_msix;
2729 } 2474 }
2730 2475
2731 /* CPU initialization is finished, we can now move to 48 bit DMA mask */ 2476 /*
 2732 rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(48)); 2477 * Check if we managed to set the DMA mask to more than 32 bits. If so,
 2733 if (rc) { 2478 * let's try to increase it again because in Goya we set the initial
 2734 dev_warn(hdev->dev, "Unable to set pci dma mask to 48 bits\n"); 2479 * dma mask to less than 39 bits so that the allocation of the memory
2735 rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(32)); 2480 * area for the device's cpu will be under 39 bits
2736 if (rc) { 2481 */
2737 dev_err(hdev->dev, 2482 if (hdev->dma_mask > 32) {
2738 "Unable to set pci dma mask to 32 bits\n"); 2483 rc = hl_pci_set_dma_mask(hdev, 48);
2739 goto disable_pci_access; 2484 if (rc)
2740 }
2741 }
2742
2743 rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
2744 if (rc) {
2745 dev_warn(hdev->dev,
2746 "Unable to set pci consistent dma mask to 48 bits\n");
2747 rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
2748 if (rc) {
2749 dev_err(hdev->dev,
2750 "Unable to set pci consistent dma mask to 32 bits\n");
2751 goto disable_pci_access; 2485 goto disable_pci_access;
2752 }
2753 } 2486 }
2754 2487
2755 /* Perform read from the device to flush all MSI-X configuration */ 2488 /* Perform read from the device to flush all MSI-X configuration */
@@ -2758,7 +2491,7 @@ static int goya_hw_init(struct hl_device *hdev)
2758 return 0; 2491 return 0;
2759 2492
2760disable_pci_access: 2493disable_pci_access:
2761 goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); 2494 hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2762disable_msix: 2495disable_msix:
2763 goya_disable_msix(hdev); 2496 goya_disable_msix(hdev);
2764disable_queues: 2497disable_queues:
@@ -2865,7 +2598,7 @@ int goya_suspend(struct hl_device *hdev)
2865{ 2598{
2866 int rc; 2599 int rc;
2867 2600
2868 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); 2601 rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2869 if (rc) 2602 if (rc)
2870 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); 2603 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
2871 2604
@@ -2893,7 +2626,7 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
2893 return rc; 2626 return rc;
2894} 2627}
2895 2628
2896static void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) 2629void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2897{ 2630{
2898 u32 db_reg_offset, db_value; 2631 u32 db_reg_offset, db_value;
2899 bool invalid_queue = false; 2632 bool invalid_queue = false;
@@ -2991,13 +2724,23 @@ void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
2991static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size, 2724static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
2992 dma_addr_t *dma_handle, gfp_t flags) 2725 dma_addr_t *dma_handle, gfp_t flags)
2993{ 2726{
2994 return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags); 2727 void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
2728 dma_handle, flags);
2729
2730 /* Shift to the device's base physical address of host memory */
2731 if (kernel_addr)
2732 *dma_handle += HOST_PHYS_BASE;
2733
2734 return kernel_addr;
2995} 2735}
2996 2736
2997static void goya_dma_free_coherent(struct hl_device *hdev, size_t size, 2737static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
2998 void *cpu_addr, dma_addr_t dma_handle) 2738 void *cpu_addr, dma_addr_t dma_handle)
2999{ 2739{
3000 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle); 2740 /* Cancel the device's base physical address of host memory */
2741 dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
2742
2743 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
3001} 2744}
3002 2745
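The alloc/free pair above maintains one invariant: handles handed to callers are device-visible (raw DMA handle plus HOST_PHYS_BASE), and the shift is undone before a handle goes back to the DMA API. Round trip, schematically:

	dma_addr_t dev_addr;	/* device-visible address */
	void *buf;

	buf = goya_dma_alloc_coherent(hdev, size, &dev_addr, GFP_KERNEL);
	/* here dev_addr == raw dma_handle + HOST_PHYS_BASE */
	goya_dma_free_coherent(hdev, size, buf, dev_addr); /* subtracts it back */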
3003void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, 2746void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
@@ -3060,12 +2803,12 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
3060 2803
3061static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job) 2804static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
3062{ 2805{
3063 struct goya_device *goya = hdev->asic_specific;
3064 struct packet_msg_prot *fence_pkt; 2806 struct packet_msg_prot *fence_pkt;
3065 u32 *fence_ptr; 2807 u32 *fence_ptr;
3066 dma_addr_t fence_dma_addr; 2808 dma_addr_t fence_dma_addr;
3067 struct hl_cb *cb; 2809 struct hl_cb *cb;
3068 u32 tmp, timeout; 2810 u32 tmp, timeout;
2811 char buf[16] = {};
3069 int rc; 2812 int rc;
3070 2813
3071 if (hdev->pldm) 2814 if (hdev->pldm)
@@ -3073,13 +2816,14 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
3073 else 2816 else
3074 timeout = HL_DEVICE_TIMEOUT_USEC; 2817 timeout = HL_DEVICE_TIMEOUT_USEC;
3075 2818
3076 if (!hdev->asic_funcs->is_device_idle(hdev)) { 2819 if (!hdev->asic_funcs->is_device_idle(hdev, buf, sizeof(buf))) {
3077 dev_err_ratelimited(hdev->dev, 2820 dev_err_ratelimited(hdev->dev,
3078 "Can't send KMD job on QMAN0 if device is not idle\n"); 2821 "Can't send KMD job on QMAN0 because %s is busy\n",
2822 buf);
3079 return -EBUSY; 2823 return -EBUSY;
3080 } 2824 }
3081 2825
3082 fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL, 2826 fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
3083 &fence_dma_addr); 2827 &fence_dma_addr);
3084 if (!fence_ptr) { 2828 if (!fence_ptr) {
3085 dev_err(hdev->dev, 2829 dev_err(hdev->dev,
@@ -3089,10 +2833,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
3089 2833
3090 *fence_ptr = 0; 2834 *fence_ptr = 0;
3091 2835
3092 if (goya->hw_cap_initialized & HW_CAP_MMU) { 2836 goya_qman0_set_security(hdev, true);
3093 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
3094 RREG32(mmDMA_QM_0_GLBL_PROT);
3095 }
3096 2837
3097 /* 2838 /*
3098 * goya cs parser saves space for 2xpacket_msg_prot at end of CB. For 2839 * goya cs parser saves space for 2xpacket_msg_prot at end of CB. For
@@ -3110,8 +2851,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
3110 (1 << GOYA_PKT_CTL_MB_SHIFT); 2851 (1 << GOYA_PKT_CTL_MB_SHIFT);
3111 fence_pkt->ctl = cpu_to_le32(tmp); 2852 fence_pkt->ctl = cpu_to_le32(tmp);
3112 fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL); 2853 fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
3113 fence_pkt->addr = cpu_to_le64(fence_dma_addr + 2854 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
3114 hdev->asic_prop.host_phys_base_address);
3115 2855
3116 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0, 2856 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
3117 job->job_cb_size, cb->bus_address); 2857 job->job_cb_size, cb->bus_address);
@@ -3131,13 +2871,10 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
3131 } 2871 }
3132 2872
3133free_fence_ptr: 2873free_fence_ptr:
3134 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr, 2874 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
3135 fence_dma_addr); 2875 fence_dma_addr);
3136 2876
3137 if (goya->hw_cap_initialized & HW_CAP_MMU) { 2877 goya_qman0_set_security(hdev, false);
3138 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
3139 RREG32(mmDMA_QM_0_GLBL_PROT);
3140 }
3141 2878
3142 return rc; 2879 return rc;
3143} 2880}
@@ -3146,10 +2883,6 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
3146 u32 timeout, long *result) 2883 u32 timeout, long *result)
3147{ 2884{
3148 struct goya_device *goya = hdev->asic_specific; 2885 struct goya_device *goya = hdev->asic_specific;
3149 struct armcp_packet *pkt;
3150 dma_addr_t pkt_dma_addr;
3151 u32 tmp;
3152 int rc = 0;
3153 2886
3154 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) { 2887 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
3155 if (result) 2888 if (result)
@@ -3157,74 +2890,8 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
3157 return 0; 2890 return 0;
3158 } 2891 }
3159 2892
3160 if (len > CPU_CB_SIZE) { 2893 return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
3161 dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n", 2894 timeout, result);
3162 len);
3163 return -ENOMEM;
3164 }
3165
3166 pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
3167 &pkt_dma_addr);
3168 if (!pkt) {
3169 dev_err(hdev->dev,
3170 "Failed to allocate DMA memory for packet to CPU\n");
3171 return -ENOMEM;
3172 }
3173
3174 memcpy(pkt, msg, len);
3175
3176 mutex_lock(&hdev->send_cpu_message_lock);
3177
3178 if (hdev->disabled)
3179 goto out;
3180
3181 if (hdev->device_cpu_disabled) {
3182 rc = -EIO;
3183 goto out;
3184 }
3185
3186 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_CPU_PQ, len,
3187 pkt_dma_addr);
3188 if (rc) {
3189 dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
3190 goto out;
3191 }
3192
3193 rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
3194 timeout, &tmp);
3195
3196 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_CPU_PQ);
3197
3198 if (rc == -ETIMEDOUT) {
3199 dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
3200 hdev->device_cpu_disabled = true;
3201 goto out;
3202 }
3203
3204 if (tmp == ARMCP_PACKET_FENCE_VAL) {
3205 u32 ctl = le32_to_cpu(pkt->ctl);
3206
3207 rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
3208 if (rc) {
3209 dev_err(hdev->dev,
3210 "F/W ERROR %d for CPU packet %d\n",
3211 rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK)
3212 >> ARMCP_PKT_CTL_OPCODE_SHIFT);
3213 rc = -EINVAL;
3214 } else if (result) {
3215 *result = (long) le64_to_cpu(pkt->result);
3216 }
3217 } else {
3218 dev_err(hdev->dev, "CPU packet wrong fence value\n");
3219 rc = -EINVAL;
3220 }
3221
3222out:
3223 mutex_unlock(&hdev->send_cpu_message_lock);
3224
3225 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
3226
3227 return rc;
3228} 2895}
3229 2896
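hl_fw_send_cpu_message() inherits the fence protocol deleted above. Condensed from those lines, with locking and error paths trimmed:

	pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
			&pkt_dma_addr);
	memcpy(pkt, msg, len);

	hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_CPU_PQ, len,
			pkt_dma_addr);
	hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence, timeout,
			&tmp);

	if (tmp == ARMCP_PACKET_FENCE_VAL)	/* F/W rc is carried in pkt->ctl */
		*result = (long) le64_to_cpu(pkt->result);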
3230int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id) 2897int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
@@ -3238,7 +2905,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
3238 2905
3239 fence_val = GOYA_QMAN0_FENCE_VAL; 2906 fence_val = GOYA_QMAN0_FENCE_VAL;
3240 2907
3241 fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL, 2908 fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
3242 &fence_dma_addr); 2909 &fence_dma_addr);
3243 if (!fence_ptr) { 2910 if (!fence_ptr) {
3244 dev_err(hdev->dev, 2911 dev_err(hdev->dev,
@@ -3248,7 +2915,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
3248 2915
3249 *fence_ptr = 0; 2916 *fence_ptr = 0;
3250 2917
3251 fence_pkt = hdev->asic_funcs->dma_pool_zalloc(hdev, 2918 fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
3252 sizeof(struct packet_msg_prot), 2919 sizeof(struct packet_msg_prot),
3253 GFP_KERNEL, &pkt_dma_addr); 2920 GFP_KERNEL, &pkt_dma_addr);
3254 if (!fence_pkt) { 2921 if (!fence_pkt) {
@@ -3263,8 +2930,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
3263 (1 << GOYA_PKT_CTL_MB_SHIFT); 2930 (1 << GOYA_PKT_CTL_MB_SHIFT);
3264 fence_pkt->ctl = cpu_to_le32(tmp); 2931 fence_pkt->ctl = cpu_to_le32(tmp);
3265 fence_pkt->value = cpu_to_le32(fence_val); 2932 fence_pkt->value = cpu_to_le32(fence_val);
3266 fence_pkt->addr = cpu_to_le64(fence_dma_addr + 2933 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
3267 hdev->asic_prop.host_phys_base_address);
3268 2934
3269 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, 2935 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
3270 sizeof(struct packet_msg_prot), 2936 sizeof(struct packet_msg_prot),
@@ -3292,48 +2958,30 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
3292 } 2958 }
3293 2959
3294free_pkt: 2960free_pkt:
3295 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_pkt, 2961 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
3296 pkt_dma_addr); 2962 pkt_dma_addr);
3297free_fence_ptr: 2963free_fence_ptr:
3298 hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr, 2964 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
3299 fence_dma_addr); 2965 fence_dma_addr);
3300 return rc; 2966 return rc;
3301} 2967}
3302 2968
3303int goya_test_cpu_queue(struct hl_device *hdev) 2969int goya_test_cpu_queue(struct hl_device *hdev)
3304{ 2970{
3305 struct armcp_packet test_pkt; 2971 struct goya_device *goya = hdev->asic_specific;
3306 long result;
3307 int rc;
3308
3309 /* cpu_queues_enable flag is always checked in send cpu message */
3310
3311 memset(&test_pkt, 0, sizeof(test_pkt));
3312
3313 test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
3314 ARMCP_PKT_CTL_OPCODE_SHIFT);
3315 test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
3316
3317 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
3318 sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
3319 2972
3320 if (!rc) { 2973 /*
 3321 if (result == ARMCP_PACKET_FENCE_VAL) 2974 * check capability here as send_cpu_message() won't update the result
 3322 dev_info(hdev->dev, 2975 * value if the capability is not set
3323 "queue test on CPU queue succeeded\n"); 2976 */
3324 else 2977 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
3325 dev_err(hdev->dev, 2978 return 0;
3326 "CPU queue test failed (0x%08lX)\n", result);
3327 } else {
3328 dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
3329 }
3330 2979
3331 return rc; 2980 return hl_fw_test_cpu_queue(hdev);
3332} 2981}
3333 2982
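hl_fw_test_cpu_queue() is assumed to keep the TEST-packet exchange deleted above: send ARMCP_PACKET_TEST carrying the fence value and expect it echoed back in the result:

	memset(&test_pkt, 0, sizeof(test_pkt));
	test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
			ARMCP_PKT_CTL_OPCODE_SHIFT);
	test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
			sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
	if (!rc && result != ARMCP_PACKET_FENCE_VAL)
		rc = -EINVAL;	/* fence value mismatch */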
3334static int goya_test_queues(struct hl_device *hdev) 2983int goya_test_queues(struct hl_device *hdev)
3335{ 2984{
3336 struct goya_device *goya = hdev->asic_specific;
3337 int i, rc, ret_val = 0; 2985 int i, rc, ret_val = 0;
3338 2986
3339 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) { 2987 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
@@ -3343,7 +2991,7 @@ static int goya_test_queues(struct hl_device *hdev)
3343 } 2991 }
3344 2992
3345 if (hdev->cpu_queues_enable) { 2993 if (hdev->cpu_queues_enable) {
3346 rc = goya->test_cpu_queue(hdev); 2994 rc = goya_test_cpu_queue(hdev);
3347 if (rc) 2995 if (rc)
3348 ret_val = -EINVAL; 2996 ret_val = -EINVAL;
3349 } 2997 }
@@ -3354,57 +3002,68 @@ static int goya_test_queues(struct hl_device *hdev)
3354static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size, 3002static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
3355 gfp_t mem_flags, dma_addr_t *dma_handle) 3003 gfp_t mem_flags, dma_addr_t *dma_handle)
3356{ 3004{
3005 void *kernel_addr;
3006
3357 if (size > GOYA_DMA_POOL_BLK_SIZE) 3007 if (size > GOYA_DMA_POOL_BLK_SIZE)
3358 return NULL; 3008 return NULL;
3359 3009
3360 return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle); 3010 kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
3011
3012 /* Shift to the device's base physical address of host memory */
3013 if (kernel_addr)
3014 *dma_handle += HOST_PHYS_BASE;
3015
3016 return kernel_addr;
3361} 3017}
3362 3018
3363static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr, 3019static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
3364 dma_addr_t dma_addr) 3020 dma_addr_t dma_addr)
3365{ 3021{
3366 dma_pool_free(hdev->dma_pool, vaddr, dma_addr); 3022 /* Cancel the device's base physical address of host memory */
3023 dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
3024
3025 dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
3367} 3026}
3368 3027
3369static void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, 3028void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
3370 size_t size, dma_addr_t *dma_handle) 3029 dma_addr_t *dma_handle)
3371{ 3030{
3372 u64 kernel_addr; 3031 return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
3373
3374 /* roundup to CPU_PKT_SIZE */
3375 size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
3376
3377 kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
3378
3379 *dma_handle = hdev->cpu_accessible_dma_address +
3380 (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
3381
3382 return (void *) (uintptr_t) kernel_addr;
3383} 3032}
3384 3033
3385static void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, 3034void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
3386 size_t size, void *vaddr) 3035 void *vaddr)
3387{ 3036{
3388 /* roundup to CPU_PKT_SIZE */ 3037 hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
3389 size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;
3390
3391 gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
3392 size);
3393} 3038}
3394 3039
3395static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg, 3040static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
3396 int nents, enum dma_data_direction dir) 3041 int nents, enum dma_data_direction dir)
3397{ 3042{
3398 if (!dma_map_sg(&hdev->pdev->dev, sg, nents, dir)) 3043 struct scatterlist *sg;
3044 int i;
3045
3046 if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
3399 return -ENOMEM; 3047 return -ENOMEM;
3400 3048
3049 /* Shift to the device's base physical address of host memory */
3050 for_each_sg(sgl, sg, nents, i)
3051 sg->dma_address += HOST_PHYS_BASE;
3052
3401 return 0; 3053 return 0;
3402} 3054}
3403 3055
3404static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sg, 3056static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
3405 int nents, enum dma_data_direction dir) 3057 int nents, enum dma_data_direction dir)
3406{ 3058{
3407 dma_unmap_sg(&hdev->pdev->dev, sg, nents, dir); 3059 struct scatterlist *sg;
3060 int i;
3061
3062 /* Cancel the device's base physical address of host memory */
3063 for_each_sg(sgl, sg, nents, i)
3064 sg->dma_address -= HOST_PHYS_BASE;
3065
3066 dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
3408} 3067}
3409 3068
3410u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt) 3069u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
@@ -3554,31 +3213,29 @@ static int goya_validate_dma_pkt_host(struct hl_device *hdev,
3554 return -EFAULT; 3213 return -EFAULT;
3555 } 3214 }
3556 3215
3557 if (parser->ctx_id != HL_KERNEL_ASID_ID) { 3216 if (sram_addr) {
3558 if (sram_addr) { 3217 if (!hl_mem_area_inside_range(device_memory_addr,
3559 if (!hl_mem_area_inside_range(device_memory_addr, 3218 le32_to_cpu(user_dma_pkt->tsize),
3560 le32_to_cpu(user_dma_pkt->tsize), 3219 hdev->asic_prop.sram_user_base_address,
3561 hdev->asic_prop.sram_user_base_address, 3220 hdev->asic_prop.sram_end_address)) {
3562 hdev->asic_prop.sram_end_address)) { 3221
3222 dev_err(hdev->dev,
3223 "SRAM address 0x%llx + 0x%x is invalid\n",
3224 device_memory_addr,
3225 user_dma_pkt->tsize);
3226 return -EFAULT;
3227 }
3228 } else {
3229 if (!hl_mem_area_inside_range(device_memory_addr,
3230 le32_to_cpu(user_dma_pkt->tsize),
3231 hdev->asic_prop.dram_user_base_address,
3232 hdev->asic_prop.dram_end_address)) {
3563 3233
3564 dev_err(hdev->dev, 3234 dev_err(hdev->dev,
3565 "SRAM address 0x%llx + 0x%x is invalid\n", 3235 "DRAM address 0x%llx + 0x%x is invalid\n",
3566 device_memory_addr, 3236 device_memory_addr,
3567 user_dma_pkt->tsize); 3237 user_dma_pkt->tsize);
3568 return -EFAULT; 3238 return -EFAULT;
3569 }
3570 } else {
3571 if (!hl_mem_area_inside_range(device_memory_addr,
3572 le32_to_cpu(user_dma_pkt->tsize),
3573 hdev->asic_prop.dram_user_base_address,
3574 hdev->asic_prop.dram_end_address)) {
3575
3576 dev_err(hdev->dev,
3577 "DRAM address 0x%llx + 0x%x is invalid\n",
3578 device_memory_addr,
3579 user_dma_pkt->tsize);
3580 return -EFAULT;
3581 }
3582 } 3239 }
3583 } 3240 }
3584 3241
@@ -3956,8 +3613,6 @@ static int goya_patch_dma_packet(struct hl_device *hdev,
3956 new_dma_pkt->ctl = cpu_to_le32(ctl); 3613 new_dma_pkt->ctl = cpu_to_le32(ctl);
3957 new_dma_pkt->tsize = cpu_to_le32((u32) len); 3614 new_dma_pkt->tsize = cpu_to_le32((u32) len);
3958 3615
3959 dma_addr += hdev->asic_prop.host_phys_base_address;
3960
3961 if (dir == DMA_TO_DEVICE) { 3616 if (dir == DMA_TO_DEVICE) {
3962 new_dma_pkt->src_addr = cpu_to_le64(dma_addr); 3617 new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
3963 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr); 3618 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
@@ -4208,36 +3863,35 @@ free_userptr:
4208 return rc; 3863 return rc;
4209} 3864}
4210 3865
4211static int goya_parse_cb_no_ext_quque(struct hl_device *hdev, 3866static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
4212 struct hl_cs_parser *parser) 3867 struct hl_cs_parser *parser)
4213{ 3868{
4214 struct asic_fixed_properties *asic_prop = &hdev->asic_prop; 3869 struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
4215 struct goya_device *goya = hdev->asic_specific; 3870 struct goya_device *goya = hdev->asic_specific;
4216 3871
4217 if (!(goya->hw_cap_initialized & HW_CAP_MMU)) { 3872 if (goya->hw_cap_initialized & HW_CAP_MMU)
4218 /* For internal queue jobs, just check if cb address is valid */ 3873 return 0;
4219 if (hl_mem_area_inside_range(
4220 (u64) (uintptr_t) parser->user_cb,
4221 parser->user_cb_size,
4222 asic_prop->sram_user_base_address,
4223 asic_prop->sram_end_address))
4224 return 0;
4225 3874
4226 if (hl_mem_area_inside_range( 3875 /* For internal queue jobs, just check if CB address is valid */
4227 (u64) (uintptr_t) parser->user_cb, 3876 if (hl_mem_area_inside_range(
4228 parser->user_cb_size, 3877 (u64) (uintptr_t) parser->user_cb,
4229 asic_prop->dram_user_base_address, 3878 parser->user_cb_size,
4230 asic_prop->dram_end_address)) 3879 asic_prop->sram_user_base_address,
4231 return 0; 3880 asic_prop->sram_end_address))
3881 return 0;
4232 3882
4233 dev_err(hdev->dev, 3883 if (hl_mem_area_inside_range(
4234 "Internal CB address %px + 0x%x is not in SRAM nor in DRAM\n", 3884 (u64) (uintptr_t) parser->user_cb,
4235 parser->user_cb, parser->user_cb_size); 3885 parser->user_cb_size,
3886 asic_prop->dram_user_base_address,
3887 asic_prop->dram_end_address))
3888 return 0;
4236 3889
4237 return -EFAULT; 3890 dev_err(hdev->dev,
4238 } 3891 "Internal CB address %px + 0x%x is not in SRAM nor in DRAM\n",
3892 parser->user_cb, parser->user_cb_size);
4239 3893
4240 return 0; 3894 return -EFAULT;
4241} 3895}
4242 3896
4243int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser) 3897int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
@@ -4245,9 +3899,9 @@ int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
4245 struct goya_device *goya = hdev->asic_specific; 3899 struct goya_device *goya = hdev->asic_specific;
4246 3900
4247 if (!parser->ext_queue) 3901 if (!parser->ext_queue)
4248 return goya_parse_cb_no_ext_quque(hdev, parser); 3902 return goya_parse_cb_no_ext_queue(hdev, parser);
4249 3903
4250 if ((goya->hw_cap_initialized & HW_CAP_MMU) && parser->use_virt_addr) 3904 if (goya->hw_cap_initialized & HW_CAP_MMU)
4251 return goya_parse_cb_mmu(hdev, parser); 3905 return goya_parse_cb_mmu(hdev, parser);
4252 else 3906 else
4253 return goya_parse_cb_no_mmu(hdev, parser); 3907 return goya_parse_cb_no_mmu(hdev, parser);
@@ -4278,12 +3932,12 @@ void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
4278 cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF); 3932 cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
4279} 3933}
4280 3934
4281static void goya_update_eq_ci(struct hl_device *hdev, u32 val) 3935void goya_update_eq_ci(struct hl_device *hdev, u32 val)
4282{ 3936{
4283 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val); 3937 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
4284} 3938}
4285 3939
4286static void goya_restore_phase_topology(struct hl_device *hdev) 3940void goya_restore_phase_topology(struct hl_device *hdev)
4287{ 3941{
4288 int i, num_of_sob_in_longs, num_of_mon_in_longs; 3942 int i, num_of_sob_in_longs, num_of_mon_in_longs;
4289 3943
@@ -4320,6 +3974,7 @@ static void goya_restore_phase_topology(struct hl_device *hdev)
4320static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val) 3974static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
4321{ 3975{
4322 struct asic_fixed_properties *prop = &hdev->asic_prop; 3976 struct asic_fixed_properties *prop = &hdev->asic_prop;
3977 u64 ddr_bar_addr;
4323 int rc = 0; 3978 int rc = 0;
4324 3979
4325 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { 3980 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
@@ -4337,15 +3992,16 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
4337 u64 bar_base_addr = DRAM_PHYS_BASE + 3992 u64 bar_base_addr = DRAM_PHYS_BASE +
4338 (addr & ~(prop->dram_pci_bar_size - 0x1ull)); 3993 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4339 3994
4340 rc = goya_set_ddr_bar_base(hdev, bar_base_addr); 3995 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4341 if (!rc) { 3996 if (ddr_bar_addr != U64_MAX) {
4342 *val = readl(hdev->pcie_bar[DDR_BAR_ID] + 3997 *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
4343 (addr - bar_base_addr)); 3998 (addr - bar_base_addr));
4344 3999
4345 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE + 4000 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4346 (MMU_PAGE_TABLES_ADDR & 4001 ddr_bar_addr);
4347 ~(prop->dram_pci_bar_size - 0x1ull)));
4348 } 4002 }
4003 if (ddr_bar_addr == U64_MAX)
4004 rc = -EIO;
4349 } else { 4005 } else {
4350 rc = -EFAULT; 4006 rc = -EFAULT;
4351 } 4007 }
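goya_set_ddr_bar_base() now returns the previous window base (U64_MAX on failure) instead of an error code, so debugfs restores whatever window was active rather than re-deriving the MMU page-tables base. The resulting pattern, repeated by the write path below:

	old_base = goya_set_ddr_bar_base(hdev, bar_base_addr);
	if (old_base != U64_MAX) {
		*val = readl(hdev->pcie_bar[DDR_BAR_ID] +
				(addr - bar_base_addr));
		old_base = goya_set_ddr_bar_base(hdev, old_base); /* restore */
	}
	if (old_base == U64_MAX)
		rc = -EIO;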
@@ -4370,6 +4026,7 @@ static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
4370static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val) 4026static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
4371{ 4027{
4372 struct asic_fixed_properties *prop = &hdev->asic_prop; 4028 struct asic_fixed_properties *prop = &hdev->asic_prop;
4029 u64 ddr_bar_addr;
4373 int rc = 0; 4030 int rc = 0;
4374 4031
4375 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { 4032 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
@@ -4387,15 +4044,16 @@ static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
4387 u64 bar_base_addr = DRAM_PHYS_BASE + 4044 u64 bar_base_addr = DRAM_PHYS_BASE +
4388 (addr & ~(prop->dram_pci_bar_size - 0x1ull)); 4045 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4389 4046
4390 rc = goya_set_ddr_bar_base(hdev, bar_base_addr); 4047 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4391 if (!rc) { 4048 if (ddr_bar_addr != U64_MAX) {
4392 writel(val, hdev->pcie_bar[DDR_BAR_ID] + 4049 writel(val, hdev->pcie_bar[DDR_BAR_ID] +
4393 (addr - bar_base_addr)); 4050 (addr - bar_base_addr));
4394 4051
4395 rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE + 4052 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4396 (MMU_PAGE_TABLES_ADDR & 4053 ddr_bar_addr);
4397 ~(prop->dram_pci_bar_size - 0x1ull)));
4398 } 4054 }
4055 if (ddr_bar_addr == U64_MAX)
4056 rc = -EIO;
4399 } else { 4057 } else {
4400 rc = -EFAULT; 4058 rc = -EFAULT;
4401 } 4059 }
@@ -4407,6 +4065,9 @@ static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
4407{ 4065{
4408 struct goya_device *goya = hdev->asic_specific; 4066 struct goya_device *goya = hdev->asic_specific;
4409 4067
4068 if (hdev->hard_reset_pending)
4069 return U64_MAX;
4070
4410 return readq(hdev->pcie_bar[DDR_BAR_ID] + 4071 return readq(hdev->pcie_bar[DDR_BAR_ID] +
4411 (addr - goya->ddr_bar_cur_addr)); 4072 (addr - goya->ddr_bar_cur_addr));
4412} 4073}
@@ -4415,6 +4076,9 @@ static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4415{ 4076{
4416 struct goya_device *goya = hdev->asic_specific; 4077 struct goya_device *goya = hdev->asic_specific;
4417 4078
4079 if (hdev->hard_reset_pending)
4080 return;
4081
4418 writeq(val, hdev->pcie_bar[DDR_BAR_ID] + 4082 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4419 (addr - goya->ddr_bar_cur_addr)); 4083 (addr - goya->ddr_bar_cur_addr));
4420} 4084}
@@ -4604,8 +4268,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4604 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << 4268 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4605 ARMCP_PKT_CTL_OPCODE_SHIFT); 4269 ARMCP_PKT_CTL_OPCODE_SHIFT);
4606 4270
4607 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt, 4271 rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size,
4608 total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result); 4272 HL_DEVICE_TIMEOUT_USEC, &result);
4609 4273
4610 if (rc) 4274 if (rc)
4611 dev_err(hdev->dev, "failed to unmask IRQ array\n"); 4275 dev_err(hdev->dev, "failed to unmask IRQ array\n");
@@ -4621,8 +4285,8 @@ static int goya_soft_reset_late_init(struct hl_device *hdev)
4621 * Unmask all IRQs since some could have been received 4285 * Unmask all IRQs since some could have been received
4622 * during the soft reset 4286 * during the soft reset
4623 */ 4287 */
4624 return goya_unmask_irq_arr(hdev, goya_non_fatal_events, 4288 return goya_unmask_irq_arr(hdev, goya_all_events,
4625 sizeof(goya_non_fatal_events)); 4289 sizeof(goya_all_events));
4626} 4290}
4627 4291
4628static int goya_unmask_irq(struct hl_device *hdev, u16 event_type) 4292static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
@@ -4637,7 +4301,7 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
4637 ARMCP_PKT_CTL_OPCODE_SHIFT); 4301 ARMCP_PKT_CTL_OPCODE_SHIFT);
4638 pkt.value = cpu_to_le64(event_type); 4302 pkt.value = cpu_to_le64(event_type);
4639 4303
4640 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 4304 rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
4641 HL_DEVICE_TIMEOUT_USEC, &result); 4305 HL_DEVICE_TIMEOUT_USEC, &result);
4642 4306
4643 if (rc) 4307 if (rc)
@@ -4758,7 +4422,6 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
4758 u64 val, bool is_dram) 4422 u64 val, bool is_dram)
4759{ 4423{
4760 struct packet_lin_dma *lin_dma_pkt; 4424 struct packet_lin_dma *lin_dma_pkt;
4761 struct hl_cs_parser parser;
4762 struct hl_cs_job *job; 4425 struct hl_cs_job *job;
4763 u32 cb_size, ctl; 4426 u32 cb_size, ctl;
4764 struct hl_cb *cb; 4427 struct hl_cb *cb;
@@ -4798,36 +4461,16 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
4798 job->user_cb->cs_cnt++; 4461 job->user_cb->cs_cnt++;
4799 job->user_cb_size = cb_size; 4462 job->user_cb_size = cb_size;
4800 job->hw_queue_id = GOYA_QUEUE_ID_DMA_0; 4463 job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
4464 job->patched_cb = job->user_cb;
4465 job->job_cb_size = job->user_cb_size +
4466 sizeof(struct packet_msg_prot) * 2;
4801 4467
4802 hl_debugfs_add_job(hdev, job); 4468 hl_debugfs_add_job(hdev, job);
4803 4469
4804 parser.ctx_id = HL_KERNEL_ASID_ID;
4805 parser.cs_sequence = 0;
4806 parser.job_id = job->id;
4807 parser.hw_queue_id = job->hw_queue_id;
4808 parser.job_userptr_list = &job->userptr_list;
4809 parser.user_cb = job->user_cb;
4810 parser.user_cb_size = job->user_cb_size;
4811 parser.ext_queue = job->ext_queue;
4812 parser.use_virt_addr = hdev->mmu_enable;
4813
4814 rc = hdev->asic_funcs->cs_parser(hdev, &parser);
4815 if (rc) {
4816 dev_err(hdev->dev, "Failed to parse kernel CB\n");
4817 goto free_job;
4818 }
4819
4820 job->patched_cb = parser.patched_cb;
4821 job->job_cb_size = parser.patched_cb_size;
4822 job->patched_cb->cs_cnt++;
4823
4824 rc = goya_send_job_on_qman0(hdev, job); 4470 rc = goya_send_job_on_qman0(hdev, job);
4825 4471
4826 job->patched_cb->cs_cnt--;
4827 hl_cb_put(job->patched_cb); 4472 hl_cb_put(job->patched_cb);
4828 4473
4829free_job:
4830 hl_userptr_delete_list(hdev, &job->userptr_list);
4831 hl_debugfs_remove_job(hdev, job); 4474 hl_debugfs_remove_job(hdev, job);
4832 kfree(job); 4475 kfree(job);
4833 cb->cs_cnt--; 4476 cb->cs_cnt--;
@@ -4839,7 +4482,7 @@ release_cb:
4839 return rc; 4482 return rc;
4840} 4483}
4841 4484
4842static int goya_context_switch(struct hl_device *hdev, u32 asid) 4485int goya_context_switch(struct hl_device *hdev, u32 asid)
4843{ 4486{
4844 struct asic_fixed_properties *prop = &hdev->asic_prop; 4487 struct asic_fixed_properties *prop = &hdev->asic_prop;
4845 u64 addr = prop->sram_base_address; 4488 u64 addr = prop->sram_base_address;
@@ -4853,12 +4496,13 @@ static int goya_context_switch(struct hl_device *hdev, u32 asid)
4853 return rc; 4496 return rc;
4854 } 4497 }
4855 4498
4499 WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
4856 goya_mmu_prepare(hdev, asid); 4500 goya_mmu_prepare(hdev, asid);
4857 4501
4858 return 0; 4502 return 0;
4859} 4503}
4860 4504
4861static int goya_mmu_clear_pgt_range(struct hl_device *hdev) 4505int goya_mmu_clear_pgt_range(struct hl_device *hdev)
4862{ 4506{
4863 struct asic_fixed_properties *prop = &hdev->asic_prop; 4507 struct asic_fixed_properties *prop = &hdev->asic_prop;
4864 struct goya_device *goya = hdev->asic_specific; 4508 struct goya_device *goya = hdev->asic_specific;
@@ -4872,7 +4516,7 @@ static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
4872 return goya_memset_device_memory(hdev, addr, size, 0, true); 4516 return goya_memset_device_memory(hdev, addr, size, 0, true);
4873} 4517}
4874 4518
4875static int goya_mmu_set_dram_default_page(struct hl_device *hdev) 4519int goya_mmu_set_dram_default_page(struct hl_device *hdev)
4876{ 4520{
4877 struct goya_device *goya = hdev->asic_specific; 4521 struct goya_device *goya = hdev->asic_specific;
4878 u64 addr = hdev->asic_prop.mmu_dram_default_page_addr; 4522 u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
@@ -4885,7 +4529,7 @@ static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
4885 return goya_memset_device_memory(hdev, addr, size, val, true); 4529 return goya_memset_device_memory(hdev, addr, size, val, true);
4886} 4530}
4887 4531
4888static void goya_mmu_prepare(struct hl_device *hdev, u32 asid) 4532void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
4889{ 4533{
4890 struct goya_device *goya = hdev->asic_specific; 4534 struct goya_device *goya = hdev->asic_specific;
4891 int i; 4535 int i;
@@ -4899,10 +4543,8 @@ static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
4899 } 4543 }
4900 4544
4901 /* zero the MMBP and ASID bits and then set the ASID */ 4545 /* zero the MMBP and ASID bits and then set the ASID */
4902 for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) { 4546 for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++)
4903 WREG32_AND(goya_mmu_regs[i], ~0x7FF); 4547 goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
4904 WREG32_OR(goya_mmu_regs[i], asid);
4905 }
4906} 4548}
4907 4549
4908static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard) 4550static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
@@ -4993,107 +4635,29 @@ static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
4993 "Timeout when waiting for MMU cache invalidation\n"); 4635 "Timeout when waiting for MMU cache invalidation\n");
4994} 4636}
4995 4637
4996static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
4997 u64 phys_addr)
4998{
4999 u32 status, timeout_usec;
5000 int rc;
5001
5002 if (hdev->pldm)
5003 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
5004 else
5005 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
5006
5007 WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
5008 WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
5009 WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
5010
5011 rc = hl_poll_timeout(
5012 hdev,
5013 MMU_ASID_BUSY,
5014 status,
5015 !(status & 0x80000000),
5016 1000,
5017 timeout_usec);
5018
5019 if (rc) {
5020 dev_err(hdev->dev,
5021 "Timeout during MMU hop0 config of asid %d\n", asid);
5022 return rc;
5023 }
5024
5025 return 0;
5026}
5027
5028int goya_send_heartbeat(struct hl_device *hdev) 4638int goya_send_heartbeat(struct hl_device *hdev)
5029{ 4639{
5030 struct goya_device *goya = hdev->asic_specific; 4640 struct goya_device *goya = hdev->asic_specific;
5031 struct armcp_packet hb_pkt;
5032 long result;
5033 int rc;
5034 4641
5035 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) 4642 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5036 return 0; 4643 return 0;
5037 4644
5038 memset(&hb_pkt, 0, sizeof(hb_pkt)); 4645 return hl_fw_send_heartbeat(hdev);
5039
5040 hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
5041 ARMCP_PKT_CTL_OPCODE_SHIFT);
5042 hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
5043
5044 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
5045 sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
5046
5047 if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
5048 rc = -EIO;
5049
5050 return rc;
5051} 4646}
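The heartbeat packet construction removed here moves into common firmware code as hl_fw_send_heartbeat(). That common file is not part of this diff; assuming it simply absorbed the removed body, it would look roughly like:

        /* Hedged sketch of hl_fw_send_heartbeat(), reconstructed from the
         * driver-local body deleted above; the common-code file itself is
         * not shown in this diff. */
        int hl_fw_send_heartbeat(struct hl_device *hdev)
        {
                struct armcp_packet hb_pkt;
                long result;
                int rc;

                memset(&hb_pkt, 0, sizeof(hb_pkt));
                hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
                                        ARMCP_PKT_CTL_OPCODE_SHIFT);
                hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);

                rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
                                sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);

                if (rc || (result != ARMCP_PACKET_FENCE_VAL))
                        rc = -EIO;

                return rc;
        }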
5052 4647
5053static int goya_armcp_info_get(struct hl_device *hdev) 4648int goya_armcp_info_get(struct hl_device *hdev)
5054{ 4649{
5055 struct goya_device *goya = hdev->asic_specific; 4650 struct goya_device *goya = hdev->asic_specific;
5056 struct asic_fixed_properties *prop = &hdev->asic_prop; 4651 struct asic_fixed_properties *prop = &hdev->asic_prop;
5057 struct armcp_packet pkt;
5058 void *armcp_info_cpu_addr;
5059 dma_addr_t armcp_info_dma_addr;
5060 u64 dram_size; 4652 u64 dram_size;
5061 long result;
5062 int rc; 4653 int rc;
5063 4654
5064 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) 4655 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5065 return 0; 4656 return 0;
5066 4657
5067 armcp_info_cpu_addr = 4658 rc = hl_fw_armcp_info_get(hdev);
5068 hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, 4659 if (rc)
5069 sizeof(struct armcp_info), &armcp_info_dma_addr); 4660 return rc;
5070 if (!armcp_info_cpu_addr) {
5071 dev_err(hdev->dev,
5072 "Failed to allocate DMA memory for ArmCP info packet\n");
5073 return -ENOMEM;
5074 }
5075
5076 memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));
5077
5078 memset(&pkt, 0, sizeof(pkt));
5079
5080 pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
5081 ARMCP_PKT_CTL_OPCODE_SHIFT);
5082 pkt.addr = cpu_to_le64(armcp_info_dma_addr +
5083 prop->host_phys_base_address);
5084 pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));
5085
5086 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
5087 GOYA_ARMCP_INFO_TIMEOUT, &result);
5088
5089 if (rc) {
5090 dev_err(hdev->dev,
5091 "Failed to send armcp info pkt, error %d\n", rc);
5092 goto out;
5093 }
5094
5095 memcpy(&prop->armcp_info, armcp_info_cpu_addr,
5096 sizeof(prop->armcp_info));
5097 4661
5098 dram_size = le64_to_cpu(prop->armcp_info.dram_size); 4662 dram_size = le64_to_cpu(prop->armcp_info.dram_size);
5099 if (dram_size) { 4663 if (dram_size) {
@@ -5109,32 +4673,10 @@ static int goya_armcp_info_get(struct hl_device *hdev)
5109 prop->dram_end_address = prop->dram_base_address + dram_size; 4673 prop->dram_end_address = prop->dram_base_address + dram_size;
5110 } 4674 }
5111 4675
5112 rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors); 4676 return 0;
5113 if (rc) {
5114 dev_err(hdev->dev,
5115 "Failed to build hwmon channel info, error %d\n", rc);
5116 rc = -EFAULT;
5117 goto out;
5118 }
5119
5120out:
5121 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
5122 sizeof(struct armcp_info), armcp_info_cpu_addr);
5123
5124 return rc;
5125}
5126
5127static void goya_init_clock_gating(struct hl_device *hdev)
5128{
5129
5130}
5131
5132static void goya_disable_clock_gating(struct hl_device *hdev)
5133{
5134
5135} 4677}
5136 4678
5137static bool goya_is_device_idle(struct hl_device *hdev) 4679static bool goya_is_device_idle(struct hl_device *hdev, char *buf, size_t size)
5138{ 4680{
5139 u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg; 4681 u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
5140 int i; 4682 int i;
@@ -5146,7 +4688,7 @@ static bool goya_is_device_idle(struct hl_device *hdev)
5146 4688
5147 if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) != 4689 if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
5148 DMA_QM_IDLE_MASK) 4690 DMA_QM_IDLE_MASK)
5149 return false; 4691 return HL_ENG_BUSY(buf, size, "DMA%d_QM", i);
5150 } 4692 }
5151 4693
5152 offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0; 4694 offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
@@ -5158,31 +4700,31 @@ static bool goya_is_device_idle(struct hl_device *hdev)
5158 4700
5159 if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) != 4701 if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
5160 TPC_QM_IDLE_MASK) 4702 TPC_QM_IDLE_MASK)
5161 return false; 4703 return HL_ENG_BUSY(buf, size, "TPC%d_QM", i);
5162 4704
5163 if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) != 4705 if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
5164 TPC_CMDQ_IDLE_MASK) 4706 TPC_CMDQ_IDLE_MASK)
5165 return false; 4707 return HL_ENG_BUSY(buf, size, "TPC%d_CMDQ", i);
5166 4708
5167 if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) != 4709 if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
5168 TPC_CFG_IDLE_MASK) 4710 TPC_CFG_IDLE_MASK)
5169 return false; 4711 return HL_ENG_BUSY(buf, size, "TPC%d_CFG", i);
5170 } 4712 }
5171 4713
5172 if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) != 4714 if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
5173 MME_QM_IDLE_MASK) 4715 MME_QM_IDLE_MASK)
5174 return false; 4716 return HL_ENG_BUSY(buf, size, "MME_QM");
5175 4717
5176 if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) != 4718 if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
5177 MME_CMDQ_IDLE_MASK) 4719 MME_CMDQ_IDLE_MASK)
5178 return false; 4720 return HL_ENG_BUSY(buf, size, "MME_CMDQ");
5179 4721
5180 if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) != 4722 if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
5181 MME_ARCH_IDLE_MASK) 4723 MME_ARCH_IDLE_MASK)
5182 return false; 4724 return HL_ENG_BUSY(buf, size, "MME_ARCH");
5183 4725
5184 if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK) 4726 if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
5185 return false; 4727 return HL_ENG_BUSY(buf, size, "MME");
5186 4728
5187 return true; 4729 return true;
5188} 4730}
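Each bare "return false" above becomes HL_ENG_BUSY(), so the idle check now also reports which engine is busy. The macro is not defined in this diff; to satisfy these call sites it has to format the engine name into buf and still evaluate to false, along the lines of this hypothetical statement-expression sketch:

        /* Hypothetical shape only: prints the busy engine into 'buf' (when
         * provided) and evaluates to false so goya_is_device_idle() keeps
         * returning "not idle". */
        #define HL_ENG_BUSY(buf, size, fmt, ...)                         \
                ({                                                       \
                        if (buf)                                         \
                                snprintf(buf, size, fmt, ##__VA_ARGS__); \
                        false;                                           \
                })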
@@ -5210,52 +4752,11 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
5210 size_t max_size) 4752 size_t max_size)
5211{ 4753{
5212 struct goya_device *goya = hdev->asic_specific; 4754 struct goya_device *goya = hdev->asic_specific;
5213 struct asic_fixed_properties *prop = &hdev->asic_prop;
5214 struct armcp_packet pkt;
5215 void *eeprom_info_cpu_addr;
5216 dma_addr_t eeprom_info_dma_addr;
5217 long result;
5218 int rc;
5219 4755
5220 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) 4756 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5221 return 0; 4757 return 0;
5222 4758
5223 eeprom_info_cpu_addr = 4759 return hl_fw_get_eeprom_data(hdev, data, max_size);
5224 hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
5225 max_size, &eeprom_info_dma_addr);
5226 if (!eeprom_info_cpu_addr) {
5227 dev_err(hdev->dev,
5228 "Failed to allocate DMA memory for EEPROM info packet\n");
5229 return -ENOMEM;
5230 }
5231
5232 memset(eeprom_info_cpu_addr, 0, max_size);
5233
5234 memset(&pkt, 0, sizeof(pkt));
5235
5236 pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
5237 ARMCP_PKT_CTL_OPCODE_SHIFT);
5238 pkt.addr = cpu_to_le64(eeprom_info_dma_addr +
5239 prop->host_phys_base_address);
5240 pkt.data_max_size = cpu_to_le32(max_size);
5241
5242 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
5243 GOYA_ARMCP_EEPROM_TIMEOUT, &result);
5244
5245 if (rc) {
5246 dev_err(hdev->dev,
5247 "Failed to send armcp EEPROM pkt, error %d\n", rc);
5248 goto out;
5249 }
5250
5251 /* result contains the actual size */
5252 memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
5253
5254out:
5255 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
5256 eeprom_info_cpu_addr);
5257
5258 return rc;
5259} 4760}
5260 4761
5261static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev) 4762static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
@@ -5278,12 +4779,12 @@ static const struct hl_asic_funcs goya_funcs = {
5278 .cb_mmap = goya_cb_mmap, 4779 .cb_mmap = goya_cb_mmap,
5279 .ring_doorbell = goya_ring_doorbell, 4780 .ring_doorbell = goya_ring_doorbell,
5280 .flush_pq_write = goya_flush_pq_write, 4781 .flush_pq_write = goya_flush_pq_write,
5281 .dma_alloc_coherent = goya_dma_alloc_coherent, 4782 .asic_dma_alloc_coherent = goya_dma_alloc_coherent,
5282 .dma_free_coherent = goya_dma_free_coherent, 4783 .asic_dma_free_coherent = goya_dma_free_coherent,
5283 .get_int_queue_base = goya_get_int_queue_base, 4784 .get_int_queue_base = goya_get_int_queue_base,
5284 .test_queues = goya_test_queues, 4785 .test_queues = goya_test_queues,
5285 .dma_pool_zalloc = goya_dma_pool_zalloc, 4786 .asic_dma_pool_zalloc = goya_dma_pool_zalloc,
5286 .dma_pool_free = goya_dma_pool_free, 4787 .asic_dma_pool_free = goya_dma_pool_free,
5287 .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc, 4788 .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
5288 .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free, 4789 .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
5289 .hl_dma_unmap_sg = goya_dma_unmap_sg, 4790 .hl_dma_unmap_sg = goya_dma_unmap_sg,
@@ -5305,8 +4806,7 @@ static const struct hl_asic_funcs goya_funcs = {
5305 .mmu_invalidate_cache = goya_mmu_invalidate_cache, 4806 .mmu_invalidate_cache = goya_mmu_invalidate_cache,
5306 .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range, 4807 .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
5307 .send_heartbeat = goya_send_heartbeat, 4808 .send_heartbeat = goya_send_heartbeat,
5308 .enable_clock_gating = goya_init_clock_gating, 4809 .debug_coresight = goya_debug_coresight,
5309 .disable_clock_gating = goya_disable_clock_gating,
5310 .is_device_idle = goya_is_device_idle, 4810 .is_device_idle = goya_is_device_idle,
5311 .soft_reset_late_init = goya_soft_reset_late_init, 4811 .soft_reset_late_init = goya_soft_reset_late_init,
5312 .hw_queues_lock = goya_hw_queues_lock, 4812 .hw_queues_lock = goya_hw_queues_lock,
@@ -5314,7 +4814,12 @@ static const struct hl_asic_funcs goya_funcs = {
5314 .get_pci_id = goya_get_pci_id, 4814 .get_pci_id = goya_get_pci_id,
5315 .get_eeprom_data = goya_get_eeprom_data, 4815 .get_eeprom_data = goya_get_eeprom_data,
5316 .send_cpu_message = goya_send_cpu_message, 4816 .send_cpu_message = goya_send_cpu_message,
5317 .get_hw_state = goya_get_hw_state 4817 .get_hw_state = goya_get_hw_state,
4818 .pci_bars_map = goya_pci_bars_map,
4819 .set_dram_bar_base = goya_set_ddr_bar_base,
4820 .init_iatu = goya_init_iatu,
4821 .rreg = hl_rreg,
4822 .wreg = hl_wreg
5318}; 4823};
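The new rreg/wreg entries complete the indirection that lets RREG32()/WREG32() be routed through the ASIC function table, so a simulator build can substitute its own accessors. hl_rreg()/hl_wreg() live in common code; for real hardware they are presumably thin MMIO wrappers, where the rmmio field name below is an assumption:

        /* Assumed shape of the common accessors; 'rmmio' (the mapped
         * register BAR) is a guess, as its declaration is outside this
         * diff. */
        u32 hl_rreg(struct hl_device *hdev, u32 reg)
        {
                return readl(hdev->rmmio + reg);
        }

        void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
        {
                writel(val, hdev->rmmio + reg);
        }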
5319 4824
5320/* 4825/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 830551b6b062..14e216cb3668 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -39,9 +39,13 @@
39#error "Number of MSIX interrupts must be smaller or equal to GOYA_MSIX_ENTRIES" 39#error "Number of MSIX interrupts must be smaller or equal to GOYA_MSIX_ENTRIES"
40#endif 40#endif
41 41
42#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */ 42#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */
43 43
44#define QMAN_STOP_TIMEOUT_USEC 100000 /* 100 ms */ 44#define QMAN_STOP_TIMEOUT_USEC 100000 /* 100 ms */
45
46#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */
47
48#define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */
45 49
46#define TPC_ENABLED_MASK 0xFF 50#define TPC_ENABLED_MASK 0xFF
47 51
@@ -49,19 +53,14 @@
49 53
50#define MAX_POWER_DEFAULT 200000 /* 200W */ 54#define MAX_POWER_DEFAULT 200000 /* 200W */
51 55
52#define GOYA_ARMCP_INFO_TIMEOUT 10000000 /* 10s */
53#define GOYA_ARMCP_EEPROM_TIMEOUT 10000000 /* 10s */
54
55#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */ 56#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */
56 57
57/* DRAM Memory Map */ 58/* DRAM Memory Map */
58 59
59#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */ 60#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
60#define MMU_PAGE_TABLES_SIZE 0x0DE00000 /* 222MB */ 61#define MMU_PAGE_TABLES_SIZE 0x0FC00000 /* 252MB */
61#define MMU_DRAM_DEFAULT_PAGE_SIZE 0x00200000 /* 2MB */ 62#define MMU_DRAM_DEFAULT_PAGE_SIZE 0x00200000 /* 2MB */
62#define MMU_CACHE_MNG_SIZE 0x00001000 /* 4KB */ 63#define MMU_CACHE_MNG_SIZE 0x00001000 /* 4KB */
63#define CPU_PQ_PKT_SIZE 0x00001000 /* 4KB */
64#define CPU_PQ_DATA_SIZE 0x01FFE000 /* 32MB - 8KB */
65 64
66#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE 65#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
67#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE) 66#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE)
@@ -69,13 +68,13 @@
69 MMU_PAGE_TABLES_SIZE) 68 MMU_PAGE_TABLES_SIZE)
70#define MMU_CACHE_MNG_ADDR (MMU_DRAM_DEFAULT_PAGE_ADDR + \ 69#define MMU_CACHE_MNG_ADDR (MMU_DRAM_DEFAULT_PAGE_ADDR + \
71 MMU_DRAM_DEFAULT_PAGE_SIZE) 70 MMU_DRAM_DEFAULT_PAGE_SIZE)
72#define CPU_PQ_PKT_ADDR (MMU_CACHE_MNG_ADDR + \ 71#define DRAM_KMD_END_ADDR (MMU_CACHE_MNG_ADDR + \
73 MMU_CACHE_MNG_SIZE) 72 MMU_CACHE_MNG_SIZE)
74#define CPU_PQ_DATA_ADDR (CPU_PQ_PKT_ADDR + CPU_PQ_PKT_SIZE)
75#define DRAM_BASE_ADDR_USER (CPU_PQ_DATA_ADDR + CPU_PQ_DATA_SIZE)
76 73
77#if (DRAM_BASE_ADDR_USER != 0x20000000) 74#define DRAM_BASE_ADDR_USER 0x20000000
78#error "KMD must reserve 512MB" 75
76#if (DRAM_KMD_END_ADDR > DRAM_BASE_ADDR_USER)
77#error "KMD must reserve no more than 512MB"
79#endif 78#endif
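The old guard required the KMD reserved area to end exactly at the 512MB mark; the new DRAM_KMD_END_ADDR guard merely caps it. Assuming DRAM_PHYS_BASE is 0 (its definition is outside this hunk), the numbers above check out:

        /*
         * DRAM_KMD_END_ADDR = 0x10000000 (CPU_FW_IMAGE_SIZE,          256MB)
         *                   + 0x0FC00000 (MMU_PAGE_TABLES_SIZE,       252MB)
         *                   + 0x00200000 (MMU_DRAM_DEFAULT_PAGE_SIZE,   2MB)
         *                   + 0x00001000 (MMU_CACHE_MNG_SIZE,           4KB)
         *                   = 0x1FE01000
         * 0x1FE01000 <= 0x20000000 (DRAM_BASE_ADDR_USER), so the #error
         * stays silent, with just under 2MB of headroom left inside the
         * reserved 512MB.
         */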
80 79
81/* 80/*
@@ -142,22 +141,12 @@
142#define HW_CAP_GOLDEN 0x00000400 141#define HW_CAP_GOLDEN 0x00000400
143#define HW_CAP_TPC 0x00000800 142#define HW_CAP_TPC 0x00000800
144 143
145#define CPU_PKT_SHIFT 5
146#define CPU_PKT_SIZE (1 << CPU_PKT_SHIFT)
147#define CPU_PKT_MASK (~((1 << CPU_PKT_SHIFT) - 1))
148#define CPU_MAX_PKTS_IN_CB 32
149#define CPU_CB_SIZE (CPU_PKT_SIZE * CPU_MAX_PKTS_IN_CB)
150#define CPU_ACCESSIBLE_MEM_SIZE (HL_QUEUE_LENGTH * CPU_CB_SIZE)
151
152enum goya_fw_component { 144enum goya_fw_component {
153 FW_COMP_UBOOT, 145 FW_COMP_UBOOT,
154 FW_COMP_PREBOOT 146 FW_COMP_PREBOOT
155}; 147};
156 148
157struct goya_device { 149struct goya_device {
158 int (*test_cpu_queue)(struct hl_device *hdev);
159 int (*armcp_info_get)(struct hl_device *hdev);
160
161 /* TODO: remove hw_queues_lock after moving to scheduler code */ 150 /* TODO: remove hw_queues_lock after moving to scheduler code */
162 spinlock_t hw_queues_lock; 151 spinlock_t hw_queues_lock;
163 152
@@ -170,13 +159,34 @@ struct goya_device {
170 u32 hw_cap_initialized; 159 u32 hw_cap_initialized;
171}; 160};
172 161
162void goya_get_fixed_properties(struct hl_device *hdev);
163int goya_mmu_init(struct hl_device *hdev);
164void goya_init_dma_qmans(struct hl_device *hdev);
165void goya_init_mme_qmans(struct hl_device *hdev);
166void goya_init_tpc_qmans(struct hl_device *hdev);
167int goya_init_cpu_queues(struct hl_device *hdev);
168void goya_init_security(struct hl_device *hdev);
169int goya_late_init(struct hl_device *hdev);
170void goya_late_fini(struct hl_device *hdev);
171
172void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
173void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val);
174void goya_update_eq_ci(struct hl_device *hdev, u32 val);
175void goya_restore_phase_topology(struct hl_device *hdev);
176int goya_context_switch(struct hl_device *hdev, u32 asid);
177
173int goya_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, 178int goya_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus,
174 u8 i2c_addr, u8 i2c_reg, u32 *val); 179 u8 i2c_addr, u8 i2c_reg, u32 *val);
175int goya_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, 180int goya_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus,
176 u8 i2c_addr, u8 i2c_reg, u32 val); 181 u8 i2c_addr, u8 i2c_reg, u32 val);
182void goya_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state);
183
184int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id);
185int goya_test_queues(struct hl_device *hdev);
177int goya_test_cpu_queue(struct hl_device *hdev); 186int goya_test_cpu_queue(struct hl_device *hdev);
178int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, 187int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
179 u32 timeout, long *result); 188 u32 timeout, long *result);
189
180long goya_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr); 190long goya_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
181long goya_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr); 191long goya_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
182long goya_get_current(struct hl_device *hdev, int sensor_index, u32 attr); 192long goya_get_current(struct hl_device *hdev, int sensor_index, u32 attr);
@@ -184,28 +194,35 @@ long goya_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr);
184long goya_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr); 194long goya_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr);
185void goya_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, 195void goya_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
186 long value); 196 long value);
187void goya_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state); 197u64 goya_get_max_power(struct hl_device *hdev);
198void goya_set_max_power(struct hl_device *hdev, u64 value);
199
188void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq); 200void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
189void goya_add_device_attr(struct hl_device *hdev, 201void goya_add_device_attr(struct hl_device *hdev,
190 struct attribute_group *dev_attr_grp); 202 struct attribute_group *dev_attr_grp);
191void goya_init_security(struct hl_device *hdev); 203int goya_armcp_info_get(struct hl_device *hdev);
192u64 goya_get_max_power(struct hl_device *hdev); 204int goya_debug_coresight(struct hl_device *hdev, void *data);
193void goya_set_max_power(struct hl_device *hdev, u64 value); 205
206void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
207int goya_mmu_clear_pgt_range(struct hl_device *hdev);
208int goya_mmu_set_dram_default_page(struct hl_device *hdev);
194 209
195int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
196void goya_late_fini(struct hl_device *hdev);
197int goya_suspend(struct hl_device *hdev); 210int goya_suspend(struct hl_device *hdev);
198int goya_resume(struct hl_device *hdev); 211int goya_resume(struct hl_device *hdev);
199void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val); 212
200void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry); 213void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry);
201void *goya_get_events_stat(struct hl_device *hdev, u32 *size); 214void *goya_get_events_stat(struct hl_device *hdev, u32 *size);
215
202void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr, 216void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
203 u32 cq_val, u32 msix_vec); 217 u32 cq_val, u32 msix_vec);
204int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser); 218int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser);
205void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, 219void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
206 dma_addr_t *dma_handle, u16 *queue_len); 220 dma_addr_t *dma_handle, u16 *queue_len);
207u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt); 221u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt);
208int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id);
209int goya_send_heartbeat(struct hl_device *hdev); 222int goya_send_heartbeat(struct hl_device *hdev);
223void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
224 dma_addr_t *dma_handle);
225void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
226 void *vaddr);
210 227
211#endif /* GOYAP_H_ */ 228#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
new file mode 100644
index 000000000000..1ac951f52d1e
--- /dev/null
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -0,0 +1,628 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "goyaP.h"
9#include "include/goya/goya_coresight.h"
10#include "include/goya/asic_reg/goya_regs.h"
11
12#include <uapi/misc/habanalabs.h>
13
14#include <linux/coresight.h>
15
16#define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 100)
17
18static u64 debug_stm_regs[GOYA_STM_LAST + 1] = {
19 [GOYA_STM_CPU] = mmCPU_STM_BASE,
20 [GOYA_STM_DMA_CH_0_CS] = mmDMA_CH_0_CS_STM_BASE,
21 [GOYA_STM_DMA_CH_1_CS] = mmDMA_CH_1_CS_STM_BASE,
22 [GOYA_STM_DMA_CH_2_CS] = mmDMA_CH_2_CS_STM_BASE,
23 [GOYA_STM_DMA_CH_3_CS] = mmDMA_CH_3_CS_STM_BASE,
24 [GOYA_STM_DMA_CH_4_CS] = mmDMA_CH_4_CS_STM_BASE,
25 [GOYA_STM_DMA_MACRO_CS] = mmDMA_MACRO_CS_STM_BASE,
26 [GOYA_STM_MME1_SBA] = mmMME1_SBA_STM_BASE,
27 [GOYA_STM_MME3_SBB] = mmMME3_SBB_STM_BASE,
28 [GOYA_STM_MME4_WACS2] = mmMME4_WACS2_STM_BASE,
29 [GOYA_STM_MME4_WACS] = mmMME4_WACS_STM_BASE,
30 [GOYA_STM_MMU_CS] = mmMMU_CS_STM_BASE,
31 [GOYA_STM_PCIE] = mmPCIE_STM_BASE,
32 [GOYA_STM_PSOC] = mmPSOC_STM_BASE,
33 [GOYA_STM_TPC0_EML] = mmTPC0_EML_STM_BASE,
34 [GOYA_STM_TPC1_EML] = mmTPC1_EML_STM_BASE,
35 [GOYA_STM_TPC2_EML] = mmTPC2_EML_STM_BASE,
36 [GOYA_STM_TPC3_EML] = mmTPC3_EML_STM_BASE,
37 [GOYA_STM_TPC4_EML] = mmTPC4_EML_STM_BASE,
38 [GOYA_STM_TPC5_EML] = mmTPC5_EML_STM_BASE,
39 [GOYA_STM_TPC6_EML] = mmTPC6_EML_STM_BASE,
40 [GOYA_STM_TPC7_EML] = mmTPC7_EML_STM_BASE
41};
42
43static u64 debug_etf_regs[GOYA_ETF_LAST + 1] = {
44 [GOYA_ETF_CPU_0] = mmCPU_ETF_0_BASE,
45 [GOYA_ETF_CPU_1] = mmCPU_ETF_1_BASE,
46 [GOYA_ETF_CPU_TRACE] = mmCPU_ETF_TRACE_BASE,
47 [GOYA_ETF_DMA_CH_0_CS] = mmDMA_CH_0_CS_ETF_BASE,
48 [GOYA_ETF_DMA_CH_1_CS] = mmDMA_CH_1_CS_ETF_BASE,
49 [GOYA_ETF_DMA_CH_2_CS] = mmDMA_CH_2_CS_ETF_BASE,
50 [GOYA_ETF_DMA_CH_3_CS] = mmDMA_CH_3_CS_ETF_BASE,
51 [GOYA_ETF_DMA_CH_4_CS] = mmDMA_CH_4_CS_ETF_BASE,
52 [GOYA_ETF_DMA_MACRO_CS] = mmDMA_MACRO_CS_ETF_BASE,
53 [GOYA_ETF_MME1_SBA] = mmMME1_SBA_ETF_BASE,
54 [GOYA_ETF_MME3_SBB] = mmMME3_SBB_ETF_BASE,
55 [GOYA_ETF_MME4_WACS2] = mmMME4_WACS2_ETF_BASE,
56 [GOYA_ETF_MME4_WACS] = mmMME4_WACS_ETF_BASE,
57 [GOYA_ETF_MMU_CS] = mmMMU_CS_ETF_BASE,
58 [GOYA_ETF_PCIE] = mmPCIE_ETF_BASE,
59 [GOYA_ETF_PSOC] = mmPSOC_ETF_BASE,
60 [GOYA_ETF_TPC0_EML] = mmTPC0_EML_ETF_BASE,
61 [GOYA_ETF_TPC1_EML] = mmTPC1_EML_ETF_BASE,
62 [GOYA_ETF_TPC2_EML] = mmTPC2_EML_ETF_BASE,
63 [GOYA_ETF_TPC3_EML] = mmTPC3_EML_ETF_BASE,
64 [GOYA_ETF_TPC4_EML] = mmTPC4_EML_ETF_BASE,
65 [GOYA_ETF_TPC5_EML] = mmTPC5_EML_ETF_BASE,
66 [GOYA_ETF_TPC6_EML] = mmTPC6_EML_ETF_BASE,
67 [GOYA_ETF_TPC7_EML] = mmTPC7_EML_ETF_BASE
68};
69
70static u64 debug_funnel_regs[GOYA_FUNNEL_LAST + 1] = {
71 [GOYA_FUNNEL_CPU] = mmCPU_FUNNEL_BASE,
72 [GOYA_FUNNEL_DMA_CH_6_1] = mmDMA_CH_FUNNEL_6_1_BASE,
73 [GOYA_FUNNEL_DMA_MACRO_3_1] = mmDMA_MACRO_FUNNEL_3_1_BASE,
74 [GOYA_FUNNEL_MME0_RTR] = mmMME0_RTR_FUNNEL_BASE,
75 [GOYA_FUNNEL_MME1_RTR] = mmMME1_RTR_FUNNEL_BASE,
76 [GOYA_FUNNEL_MME2_RTR] = mmMME2_RTR_FUNNEL_BASE,
77 [GOYA_FUNNEL_MME3_RTR] = mmMME3_RTR_FUNNEL_BASE,
78 [GOYA_FUNNEL_MME4_RTR] = mmMME4_RTR_FUNNEL_BASE,
79 [GOYA_FUNNEL_MME5_RTR] = mmMME5_RTR_FUNNEL_BASE,
80 [GOYA_FUNNEL_PCIE] = mmPCIE_FUNNEL_BASE,
81 [GOYA_FUNNEL_PSOC] = mmPSOC_FUNNEL_BASE,
82 [GOYA_FUNNEL_TPC0_EML] = mmTPC0_EML_FUNNEL_BASE,
83 [GOYA_FUNNEL_TPC1_EML] = mmTPC1_EML_FUNNEL_BASE,
84 [GOYA_FUNNEL_TPC1_RTR] = mmTPC1_RTR_FUNNEL_BASE,
85 [GOYA_FUNNEL_TPC2_EML] = mmTPC2_EML_FUNNEL_BASE,
86 [GOYA_FUNNEL_TPC2_RTR] = mmTPC2_RTR_FUNNEL_BASE,
87 [GOYA_FUNNEL_TPC3_EML] = mmTPC3_EML_FUNNEL_BASE,
88 [GOYA_FUNNEL_TPC3_RTR] = mmTPC3_RTR_FUNNEL_BASE,
89 [GOYA_FUNNEL_TPC4_EML] = mmTPC4_EML_FUNNEL_BASE,
90 [GOYA_FUNNEL_TPC4_RTR] = mmTPC4_RTR_FUNNEL_BASE,
91 [GOYA_FUNNEL_TPC5_EML] = mmTPC5_EML_FUNNEL_BASE,
92 [GOYA_FUNNEL_TPC5_RTR] = mmTPC5_RTR_FUNNEL_BASE,
93 [GOYA_FUNNEL_TPC6_EML] = mmTPC6_EML_FUNNEL_BASE,
94 [GOYA_FUNNEL_TPC6_RTR] = mmTPC6_RTR_FUNNEL_BASE,
95 [GOYA_FUNNEL_TPC7_EML] = mmTPC7_EML_FUNNEL_BASE
96};
97
98static u64 debug_bmon_regs[GOYA_BMON_LAST + 1] = {
99 [GOYA_BMON_CPU_RD] = mmCPU_RD_BMON_BASE,
100 [GOYA_BMON_CPU_WR] = mmCPU_WR_BMON_BASE,
101 [GOYA_BMON_DMA_CH_0_0] = mmDMA_CH_0_BMON_0_BASE,
102 [GOYA_BMON_DMA_CH_0_1] = mmDMA_CH_0_BMON_1_BASE,
103 [GOYA_BMON_DMA_CH_1_0] = mmDMA_CH_1_BMON_0_BASE,
104 [GOYA_BMON_DMA_CH_1_1] = mmDMA_CH_1_BMON_1_BASE,
105 [GOYA_BMON_DMA_CH_2_0] = mmDMA_CH_2_BMON_0_BASE,
106 [GOYA_BMON_DMA_CH_2_1] = mmDMA_CH_2_BMON_1_BASE,
107 [GOYA_BMON_DMA_CH_3_0] = mmDMA_CH_3_BMON_0_BASE,
108 [GOYA_BMON_DMA_CH_3_1] = mmDMA_CH_3_BMON_1_BASE,
109 [GOYA_BMON_DMA_CH_4_0] = mmDMA_CH_4_BMON_0_BASE,
110 [GOYA_BMON_DMA_CH_4_1] = mmDMA_CH_4_BMON_1_BASE,
111 [GOYA_BMON_DMA_MACRO_0] = mmDMA_MACRO_BMON_0_BASE,
112 [GOYA_BMON_DMA_MACRO_1] = mmDMA_MACRO_BMON_1_BASE,
113 [GOYA_BMON_DMA_MACRO_2] = mmDMA_MACRO_BMON_2_BASE,
114 [GOYA_BMON_DMA_MACRO_3] = mmDMA_MACRO_BMON_3_BASE,
115 [GOYA_BMON_DMA_MACRO_4] = mmDMA_MACRO_BMON_4_BASE,
116 [GOYA_BMON_DMA_MACRO_5] = mmDMA_MACRO_BMON_5_BASE,
117 [GOYA_BMON_DMA_MACRO_6] = mmDMA_MACRO_BMON_6_BASE,
118 [GOYA_BMON_DMA_MACRO_7] = mmDMA_MACRO_BMON_7_BASE,
119 [GOYA_BMON_MME1_SBA_0] = mmMME1_SBA_BMON0_BASE,
120 [GOYA_BMON_MME1_SBA_1] = mmMME1_SBA_BMON1_BASE,
121 [GOYA_BMON_MME3_SBB_0] = mmMME3_SBB_BMON0_BASE,
122 [GOYA_BMON_MME3_SBB_1] = mmMME3_SBB_BMON1_BASE,
123 [GOYA_BMON_MME4_WACS2_0] = mmMME4_WACS2_BMON0_BASE,
124 [GOYA_BMON_MME4_WACS2_1] = mmMME4_WACS2_BMON1_BASE,
125 [GOYA_BMON_MME4_WACS2_2] = mmMME4_WACS2_BMON2_BASE,
126 [GOYA_BMON_MME4_WACS_0] = mmMME4_WACS_BMON0_BASE,
127 [GOYA_BMON_MME4_WACS_1] = mmMME4_WACS_BMON1_BASE,
128 [GOYA_BMON_MME4_WACS_2] = mmMME4_WACS_BMON2_BASE,
129 [GOYA_BMON_MME4_WACS_3] = mmMME4_WACS_BMON3_BASE,
130 [GOYA_BMON_MME4_WACS_4] = mmMME4_WACS_BMON4_BASE,
131 [GOYA_BMON_MME4_WACS_5] = mmMME4_WACS_BMON5_BASE,
132 [GOYA_BMON_MME4_WACS_6] = mmMME4_WACS_BMON6_BASE,
133 [GOYA_BMON_MMU_0] = mmMMU_BMON_0_BASE,
134 [GOYA_BMON_MMU_1] = mmMMU_BMON_1_BASE,
135 [GOYA_BMON_PCIE_MSTR_RD] = mmPCIE_BMON_MSTR_RD_BASE,
136 [GOYA_BMON_PCIE_MSTR_WR] = mmPCIE_BMON_MSTR_WR_BASE,
137 [GOYA_BMON_PCIE_SLV_RD] = mmPCIE_BMON_SLV_RD_BASE,
138 [GOYA_BMON_PCIE_SLV_WR] = mmPCIE_BMON_SLV_WR_BASE,
139 [GOYA_BMON_TPC0_EML_0] = mmTPC0_EML_BUSMON_0_BASE,
140 [GOYA_BMON_TPC0_EML_1] = mmTPC0_EML_BUSMON_1_BASE,
141 [GOYA_BMON_TPC0_EML_2] = mmTPC0_EML_BUSMON_2_BASE,
142 [GOYA_BMON_TPC0_EML_3] = mmTPC0_EML_BUSMON_3_BASE,
143 [GOYA_BMON_TPC1_EML_0] = mmTPC1_EML_BUSMON_0_BASE,
144 [GOYA_BMON_TPC1_EML_1] = mmTPC1_EML_BUSMON_1_BASE,
145 [GOYA_BMON_TPC1_EML_2] = mmTPC1_EML_BUSMON_2_BASE,
146 [GOYA_BMON_TPC1_EML_3] = mmTPC1_EML_BUSMON_3_BASE,
147 [GOYA_BMON_TPC2_EML_0] = mmTPC2_EML_BUSMON_0_BASE,
148 [GOYA_BMON_TPC2_EML_1] = mmTPC2_EML_BUSMON_1_BASE,
149 [GOYA_BMON_TPC2_EML_2] = mmTPC2_EML_BUSMON_2_BASE,
150 [GOYA_BMON_TPC2_EML_3] = mmTPC2_EML_BUSMON_3_BASE,
151 [GOYA_BMON_TPC3_EML_0] = mmTPC3_EML_BUSMON_0_BASE,
152 [GOYA_BMON_TPC3_EML_1] = mmTPC3_EML_BUSMON_1_BASE,
153 [GOYA_BMON_TPC3_EML_2] = mmTPC3_EML_BUSMON_2_BASE,
154 [GOYA_BMON_TPC3_EML_3] = mmTPC3_EML_BUSMON_3_BASE,
155 [GOYA_BMON_TPC4_EML_0] = mmTPC4_EML_BUSMON_0_BASE,
156 [GOYA_BMON_TPC4_EML_1] = mmTPC4_EML_BUSMON_1_BASE,
157 [GOYA_BMON_TPC4_EML_2] = mmTPC4_EML_BUSMON_2_BASE,
158 [GOYA_BMON_TPC4_EML_3] = mmTPC4_EML_BUSMON_3_BASE,
159 [GOYA_BMON_TPC5_EML_0] = mmTPC5_EML_BUSMON_0_BASE,
160 [GOYA_BMON_TPC5_EML_1] = mmTPC5_EML_BUSMON_1_BASE,
161 [GOYA_BMON_TPC5_EML_2] = mmTPC5_EML_BUSMON_2_BASE,
162 [GOYA_BMON_TPC5_EML_3] = mmTPC5_EML_BUSMON_3_BASE,
163 [GOYA_BMON_TPC6_EML_0] = mmTPC6_EML_BUSMON_0_BASE,
164 [GOYA_BMON_TPC6_EML_1] = mmTPC6_EML_BUSMON_1_BASE,
165 [GOYA_BMON_TPC6_EML_2] = mmTPC6_EML_BUSMON_2_BASE,
166 [GOYA_BMON_TPC6_EML_3] = mmTPC6_EML_BUSMON_3_BASE,
167 [GOYA_BMON_TPC7_EML_0] = mmTPC7_EML_BUSMON_0_BASE,
168 [GOYA_BMON_TPC7_EML_1] = mmTPC7_EML_BUSMON_1_BASE,
169 [GOYA_BMON_TPC7_EML_2] = mmTPC7_EML_BUSMON_2_BASE,
170 [GOYA_BMON_TPC7_EML_3] = mmTPC7_EML_BUSMON_3_BASE
171};
172
173static u64 debug_spmu_regs[GOYA_SPMU_LAST + 1] = {
174 [GOYA_SPMU_DMA_CH_0_CS] = mmDMA_CH_0_CS_SPMU_BASE,
175 [GOYA_SPMU_DMA_CH_1_CS] = mmDMA_CH_1_CS_SPMU_BASE,
176 [GOYA_SPMU_DMA_CH_2_CS] = mmDMA_CH_2_CS_SPMU_BASE,
177 [GOYA_SPMU_DMA_CH_3_CS] = mmDMA_CH_3_CS_SPMU_BASE,
178 [GOYA_SPMU_DMA_CH_4_CS] = mmDMA_CH_4_CS_SPMU_BASE,
179 [GOYA_SPMU_DMA_MACRO_CS] = mmDMA_MACRO_CS_SPMU_BASE,
180 [GOYA_SPMU_MME1_SBA] = mmMME1_SBA_SPMU_BASE,
181 [GOYA_SPMU_MME3_SBB] = mmMME3_SBB_SPMU_BASE,
182 [GOYA_SPMU_MME4_WACS2] = mmMME4_WACS2_SPMU_BASE,
183 [GOYA_SPMU_MME4_WACS] = mmMME4_WACS_SPMU_BASE,
184 [GOYA_SPMU_MMU_CS] = mmMMU_CS_SPMU_BASE,
185 [GOYA_SPMU_PCIE] = mmPCIE_SPMU_BASE,
186 [GOYA_SPMU_TPC0_EML] = mmTPC0_EML_SPMU_BASE,
187 [GOYA_SPMU_TPC1_EML] = mmTPC1_EML_SPMU_BASE,
188 [GOYA_SPMU_TPC2_EML] = mmTPC2_EML_SPMU_BASE,
189 [GOYA_SPMU_TPC3_EML] = mmTPC3_EML_SPMU_BASE,
190 [GOYA_SPMU_TPC4_EML] = mmTPC4_EML_SPMU_BASE,
191 [GOYA_SPMU_TPC5_EML] = mmTPC5_EML_SPMU_BASE,
192 [GOYA_SPMU_TPC6_EML] = mmTPC6_EML_SPMU_BASE,
193 [GOYA_SPMU_TPC7_EML] = mmTPC7_EML_SPMU_BASE
194};
195
196static int goya_coresight_timeout(struct hl_device *hdev, u64 addr,
197 int position, bool up)
198{
199 int rc;
200 u32 val, timeout_usec;
201
202 if (hdev->pldm)
203 timeout_usec = GOYA_PLDM_CORESIGHT_TIMEOUT_USEC;
204 else
205 timeout_usec = CORESIGHT_TIMEOUT_USEC;
206
207 rc = hl_poll_timeout(
208 hdev,
209 addr,
210 val,
211 up ? val & BIT(position) : !(val & BIT(position)),
212 1000,
213 timeout_usec);
214
215 if (rc) {
216 dev_err(hdev->dev,
217 "Timeout while waiting for coresight, addr: 0x%llx, position: %d, up: %d\n",
218 addr, position, up);
219 return -EFAULT;
220 }
221
222 return 0;
223}
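hl_poll_timeout() is a common-code macro that does not appear in this diff. From its call sites here (and in the removed hop0 configuration code earlier) it reads addr into val until cond holds, sleeping on the order of 1000us between reads, and returns nonzero on expiry. A loose, assumed sketch only; the real macro is defined in habanalabs.h and may differ:

        #define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
        ({                                                                   \
                ktime_t __t = ktime_add_us(ktime_get(), timeout_us);        \
                int __rc = 0;                                                \
                for (;;) {                                                   \
                        (val) = RREG32(addr);                                \
                        if (cond)                                            \
                                break;                                       \
                        if (ktime_compare(ktime_get(), __t) > 0) {           \
                                __rc = -ETIMEDOUT;                           \
                                break;                                       \
                        }                                                    \
                        usleep_range((sleep_us) >> 2, sleep_us);             \
                }                                                            \
                __rc;                                                        \
        })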
224
225static int goya_config_stm(struct hl_device *hdev,
226 struct hl_debug_params *params)
227{
228 struct hl_debug_params_stm *input;
229 u64 base_reg = debug_stm_regs[params->reg_idx] - CFG_BASE;
230 int rc;
231
232 WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
233
234 if (params->enable) {
235 input = params->input;
236
237 if (!input)
238 return -EINVAL;
239
240 WREG32(base_reg + 0xE80, 0x80004);
241 WREG32(base_reg + 0xD64, 7);
242 WREG32(base_reg + 0xD60, 0);
243 WREG32(base_reg + 0xD00, lower_32_bits(input->he_mask));
244 WREG32(base_reg + 0xD20, lower_32_bits(input->sp_mask));
245 WREG32(base_reg + 0xD60, 1);
246 WREG32(base_reg + 0xD00, upper_32_bits(input->he_mask));
247 WREG32(base_reg + 0xD20, upper_32_bits(input->sp_mask));
248 WREG32(base_reg + 0xE70, 0x10);
249 WREG32(base_reg + 0xE60, 0);
250 WREG32(base_reg + 0xE64, 0x420000);
251 WREG32(base_reg + 0xE00, 0xFFFFFFFF);
252 WREG32(base_reg + 0xE20, 0xFFFFFFFF);
253 WREG32(base_reg + 0xEF4, input->id);
254 WREG32(base_reg + 0xDF4, 0x80);
255 WREG32(base_reg + 0xE8C, input->frequency);
256 WREG32(base_reg + 0xE90, 0x7FF);
257 WREG32(base_reg + 0xE80, 0x7 | (input->id << 16));
258 } else {
259 WREG32(base_reg + 0xE80, 4);
260 WREG32(base_reg + 0xD64, 0);
261 WREG32(base_reg + 0xD60, 1);
262 WREG32(base_reg + 0xD00, 0);
263 WREG32(base_reg + 0xD20, 0);
264 WREG32(base_reg + 0xD60, 0);
265 WREG32(base_reg + 0xE20, 0);
266 WREG32(base_reg + 0xE00, 0);
267 WREG32(base_reg + 0xDF4, 0x80);
268 WREG32(base_reg + 0xE70, 0);
269 WREG32(base_reg + 0xE60, 0);
270 WREG32(base_reg + 0xE64, 0);
271 WREG32(base_reg + 0xE8C, 0);
272
273 rc = goya_coresight_timeout(hdev, base_reg + 0xE80, 23, false);
274 if (rc) {
275 dev_err(hdev->dev,
276 "Failed to disable STM on timeout, error %d\n",
277 rc);
278 return rc;
279 }
280
281 WREG32(base_reg + 0xE80, 4);
282 }
283
284 return 0;
285}
286
287static int goya_config_etf(struct hl_device *hdev,
288 struct hl_debug_params *params)
289{
290 struct hl_debug_params_etf *input;
291 u64 base_reg = debug_etf_regs[params->reg_idx] - CFG_BASE;
292 u32 val;
293 int rc;
294
295 WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
296
297 val = RREG32(base_reg + 0x304);
298 val |= 0x1000;
299 WREG32(base_reg + 0x304, val);
300 val |= 0x40;
301 WREG32(base_reg + 0x304, val);
302
303 rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
304 if (rc) {
305 dev_err(hdev->dev,
306 "Failed to %s ETF on timeout, error %d\n",
307 params->enable ? "enable" : "disable", rc);
308 return rc;
309 }
310
311 rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
312 if (rc) {
313 dev_err(hdev->dev,
314 "Failed to %s ETF on timeout, error %d\n",
315 params->enable ? "enable" : "disable", rc);
316 return rc;
317 }
318
319 WREG32(base_reg + 0x20, 0);
320
321 if (params->enable) {
322 input = params->input;
323
324 if (!input)
325 return -EINVAL;
326
327 WREG32(base_reg + 0x34, 0x3FFC);
328 WREG32(base_reg + 0x28, input->sink_mode);
329 WREG32(base_reg + 0x304, 0x4001);
330 WREG32(base_reg + 0x308, 0xA);
331 WREG32(base_reg + 0x20, 1);
332 } else {
333 WREG32(base_reg + 0x34, 0);
334 WREG32(base_reg + 0x28, 0);
335 WREG32(base_reg + 0x304, 0);
336 }
337
338 return 0;
339}
340
341static int goya_etr_validate_address(struct hl_device *hdev, u64 addr,
342 u32 size)
343{
344 struct asic_fixed_properties *prop = &hdev->asic_prop;
345 u64 range_start, range_end;
346
347 if (hdev->mmu_enable) {
348 range_start = prop->va_space_dram_start_address;
349 range_end = prop->va_space_dram_end_address;
350 } else {
351 range_start = prop->dram_user_base_address;
352 range_end = prop->dram_end_address;
353 }
354
355 return hl_mem_area_inside_range(addr, size, range_start, range_end);
356}
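hl_mem_area_inside_range() comes from common driver code and is not shown in this diff. To back the validation above it presumably checks that [addr, addr + size) sits entirely inside the chosen range while guarding against 64-bit wrap-around; a sketch under that assumption:

        /* Assumed helper shape; the real definition lives in the common
         * habanalabs headers, outside this diff. */
        static bool hl_mem_area_inside_range(u64 address, u32 size,
                        u64 range_start_address, u64 range_end_address)
        {
                u64 end_address = address + size;

                /* 'end_address > address' rejects u64 overflow */
                return (address >= range_start_address) &&
                        (end_address <= range_end_address) &&
                        (end_address > address);
        }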
357
358static int goya_config_etr(struct hl_device *hdev,
359 struct hl_debug_params *params)
360{
361 struct hl_debug_params_etr *input;
362 u64 base_reg = mmPSOC_ETR_BASE - CFG_BASE;
363 u32 val;
364 int rc;
365
366 WREG32(base_reg + 0xFB0, CORESIGHT_UNLOCK);
367
368 val = RREG32(base_reg + 0x304);
369 val |= 0x1000;
370 WREG32(base_reg + 0x304, val);
371 val |= 0x40;
372 WREG32(base_reg + 0x304, val);
373
374 rc = goya_coresight_timeout(hdev, base_reg + 0x304, 6, false);
375 if (rc) {
376 dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
377 params->enable ? "enable" : "disable", rc);
378 return rc;
379 }
380
381 rc = goya_coresight_timeout(hdev, base_reg + 0xC, 2, true);
382 if (rc) {
383 dev_err(hdev->dev, "Failed to %s ETR on timeout, error %d\n",
384 params->enable ? "enable" : "disable", rc);
385 return rc;
386 }
387
388 WREG32(base_reg + 0x20, 0);
389
390 if (params->enable) {
391 input = params->input;
392
393 if (!input)
394 return -EINVAL;
395
396 if (input->buffer_size == 0) {
397 dev_err(hdev->dev,
398 "ETR buffer size should be bigger than 0\n");
399 return -EINVAL;
400 }
401
402 if (!goya_etr_validate_address(hdev,
403 input->buffer_address, input->buffer_size)) {
404 dev_err(hdev->dev, "buffer address is not valid\n");
405 return -EINVAL;
406 }
407
408 WREG32(base_reg + 0x34, 0x3FFC);
409 WREG32(base_reg + 0x4, input->buffer_size);
410 WREG32(base_reg + 0x28, input->sink_mode);
411 WREG32(base_reg + 0x110, 0x700);
412 WREG32(base_reg + 0x118,
413 lower_32_bits(input->buffer_address));
414 WREG32(base_reg + 0x11C,
415 upper_32_bits(input->buffer_address));
416 WREG32(base_reg + 0x304, 3);
417 WREG32(base_reg + 0x308, 0xA);
418 WREG32(base_reg + 0x20, 1);
419 } else {
420 WREG32(base_reg + 0x34, 0);
421 WREG32(base_reg + 0x4, 0x400);
422 WREG32(base_reg + 0x118, 0);
423 WREG32(base_reg + 0x11C, 0);
424 WREG32(base_reg + 0x308, 0);
425 WREG32(base_reg + 0x28, 0);
426 WREG32(base_reg + 0x304, 0);
427
428 if (params->output_size >= sizeof(u32))
429 *(u32 *) params->output = RREG32(base_reg + 0x18);
430 }
431
432 return 0;
433}
434
435static int goya_config_funnel(struct hl_device *hdev,
436 struct hl_debug_params *params)
437{
438 WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE + 0xFB0,
439 CORESIGHT_UNLOCK);
440
441 WREG32(debug_funnel_regs[params->reg_idx] - CFG_BASE,
442 params->enable ? 0x33F : 0);
443
444 return 0;
445}
446
447static int goya_config_bmon(struct hl_device *hdev,
448 struct hl_debug_params *params)
449{
450 struct hl_debug_params_bmon *input;
451 u64 base_reg = debug_bmon_regs[params->reg_idx] - CFG_BASE;
452 u32 pcie_base = 0;
453
454 WREG32(base_reg + 0x104, 1);
455
456 if (params->enable) {
457 input = params->input;
458
459 if (!input)
460 return -EINVAL;
461
462 WREG32(base_reg + 0x200, lower_32_bits(input->start_addr0));
463 WREG32(base_reg + 0x204, upper_32_bits(input->start_addr0));
464 WREG32(base_reg + 0x208, lower_32_bits(input->addr_mask0));
465 WREG32(base_reg + 0x20C, upper_32_bits(input->addr_mask0));
466 WREG32(base_reg + 0x240, lower_32_bits(input->start_addr1));
467 WREG32(base_reg + 0x244, upper_32_bits(input->start_addr1));
468 WREG32(base_reg + 0x248, lower_32_bits(input->addr_mask1));
469 WREG32(base_reg + 0x24C, upper_32_bits(input->addr_mask1));
470 WREG32(base_reg + 0x224, 0);
471 WREG32(base_reg + 0x234, 0);
472 WREG32(base_reg + 0x30C, input->bw_win);
473 WREG32(base_reg + 0x308, input->win_capture);
474
475 /* PCIE IF BMON bug WA */
476 if (params->reg_idx != GOYA_BMON_PCIE_MSTR_RD &&
477 params->reg_idx != GOYA_BMON_PCIE_MSTR_WR &&
478 params->reg_idx != GOYA_BMON_PCIE_SLV_RD &&
479 params->reg_idx != GOYA_BMON_PCIE_SLV_WR)
480 pcie_base = 0xA000000;
481
482 WREG32(base_reg + 0x700, pcie_base | 0xB00 | (input->id << 12));
483 WREG32(base_reg + 0x708, pcie_base | 0xA00 | (input->id << 12));
484 WREG32(base_reg + 0x70C, pcie_base | 0xC00 | (input->id << 12));
485
486 WREG32(base_reg + 0x100, 0x11);
487 WREG32(base_reg + 0x304, 0x1);
488 } else {
489 WREG32(base_reg + 0x200, 0);
490 WREG32(base_reg + 0x204, 0);
491 WREG32(base_reg + 0x208, 0xFFFFFFFF);
492 WREG32(base_reg + 0x20C, 0xFFFFFFFF);
493 WREG32(base_reg + 0x240, 0);
494 WREG32(base_reg + 0x244, 0);
495 WREG32(base_reg + 0x248, 0xFFFFFFFF);
496 WREG32(base_reg + 0x24C, 0xFFFFFFFF);
497 WREG32(base_reg + 0x224, 0xFFFFFFFF);
498 WREG32(base_reg + 0x234, 0x1070F);
499 WREG32(base_reg + 0x30C, 0);
500 WREG32(base_reg + 0x308, 0xFFFF);
501 WREG32(base_reg + 0x700, 0xA000B00);
502 WREG32(base_reg + 0x708, 0xA000A00);
503 WREG32(base_reg + 0x70C, 0xA000C00);
504 WREG32(base_reg + 0x100, 1);
505 WREG32(base_reg + 0x304, 0);
506 WREG32(base_reg + 0x104, 0);
507 }
508
509 return 0;
510}
511
512static int goya_config_spmu(struct hl_device *hdev,
513 struct hl_debug_params *params)
514{
515 u64 base_reg = debug_spmu_regs[params->reg_idx] - CFG_BASE;
516 struct hl_debug_params_spmu *input = params->input;
517 u64 *output;
518 u32 output_arr_len;
519 u32 events_num;
520 u32 overflow_idx;
521 u32 cycle_cnt_idx;
522 int i;
523
524 if (params->enable) {
525 input = params->input;
526
527 if (!input)
528 return -EINVAL;
529
530 if (input->event_types_num < 3) {
531 dev_err(hdev->dev,
532 "not enough values for SPMU enable\n");
533 return -EINVAL;
534 }
535
536 WREG32(base_reg + 0xE04, 0x41013046);
537 WREG32(base_reg + 0xE04, 0x41013040);
538
539 for (i = 0 ; i < input->event_types_num ; i++)
540 WREG32(base_reg + 0x400 + i * 4, input->event_types[i]);
541
542 WREG32(base_reg + 0xE04, 0x41013041);
543 WREG32(base_reg + 0xC00, 0x8000003F);
544 } else {
545 output = params->output;
546 output_arr_len = params->output_size / 8;
547 events_num = output_arr_len - 2;
548 overflow_idx = output_arr_len - 2;
549 cycle_cnt_idx = output_arr_len - 1;
550
551 if (!output)
552 return -EINVAL;
553
554 if (output_arr_len < 3) {
555 dev_err(hdev->dev,
556 "not enough values for SPMU disable\n");
557 return -EINVAL;
558 }
559
560 WREG32(base_reg + 0xE04, 0x41013040);
561
562 for (i = 0 ; i < events_num ; i++)
563 output[i] = RREG32(base_reg + i * 8);
564
565 output[overflow_idx] = RREG32(base_reg + 0xCC0);
566
567 output[cycle_cnt_idx] = RREG32(base_reg + 0xFC);
568 output[cycle_cnt_idx] <<= 32;
569 output[cycle_cnt_idx] |= RREG32(base_reg + 0xF8);
570
571 WREG32(base_reg + 0xCC0, 0);
572 }
573
574 return 0;
575}
576
577static int goya_config_timestamp(struct hl_device *hdev,
578 struct hl_debug_params *params)
579{
580 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
581 if (params->enable) {
582 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
583 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
584 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
585 }
586
587 return 0;
588}
589
590int goya_debug_coresight(struct hl_device *hdev, void *data)
591{
592 struct hl_debug_params *params = data;
593 u32 val;
594 int rc;
595
596 switch (params->op) {
597 case HL_DEBUG_OP_STM:
598 rc = goya_config_stm(hdev, params);
599 break;
600 case HL_DEBUG_OP_ETF:
601 rc = goya_config_etf(hdev, params);
602 break;
603 case HL_DEBUG_OP_ETR:
604 rc = goya_config_etr(hdev, params);
605 break;
606 case HL_DEBUG_OP_FUNNEL:
607 rc = goya_config_funnel(hdev, params);
608 break;
609 case HL_DEBUG_OP_BMON:
610 rc = goya_config_bmon(hdev, params);
611 break;
612 case HL_DEBUG_OP_SPMU:
613 rc = goya_config_spmu(hdev, params);
614 break;
615 case HL_DEBUG_OP_TIMESTAMP:
616 rc = goya_config_timestamp(hdev, params);
617 break;
618
619 default:
620 dev_err(hdev->dev, "Unknown coresight id %d\n", params->op);
621 return -EINVAL;
622 }
623
624 /* Perform read from the device to flush all configuration */
625 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
626
627 return rc;
628}
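In-tree, this entry point is reached from the driver's debug ioctl rather than called directly, but a direct call illustrates the contract. Only the hl_debug_params fields this file actually reads are set below, and the STM input values are made-up placeholders:

        /* Hedged usage sketch: enable the TPC0 STM trace source. The
         * specific he_mask/sp_mask/id/frequency values are illustrative
         * placeholders, not recommended settings. */
        static int example_enable_tpc0_stm(struct hl_device *hdev)
        {
                struct hl_debug_params_stm stm_input = {
                        .he_mask = 0xFFFFFFFFFFFFFFFFull, /* all hw events */
                        .sp_mask = 0x0,
                        .id = 1,                          /* trace source id */
                        .frequency = 1000000,
                };
                struct hl_debug_params params = {
                        .op = HL_DEBUG_OP_STM,
                        .reg_idx = GOYA_STM_TPC0_EML,
                        .enable = true,
                        .input = &stm_input,
                };

                return goya_debug_coresight(hdev, &params);
        }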
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c
index 575003238401..d95d1b2f860d 100644
--- a/drivers/misc/habanalabs/goya/goya_security.c
+++ b/drivers/misc/habanalabs/goya/goya_security.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include "goyaP.h" 8#include "goyaP.h"
9#include "include/goya/asic_reg/goya_regs.h"
9 10
10/* 11/*
11 * goya_set_block_as_protected - set the given block as protected 12 * goya_set_block_as_protected - set the given block as protected
@@ -2159,6 +2160,8 @@ static void goya_init_protection_bits(struct hl_device *hdev)
2159 * Bits 7-11 represents the word offset inside the 128 bytes. 2160 * Bits 7-11 represents the word offset inside the 128 bytes.
2160 * Bits 2-6 represents the bit location inside the word. 2161 * Bits 2-6 represents the bit location inside the word.
2161 */ 2162 */
2163 u32 pb_addr, mask;
2164 u8 word_offset;
2162 2165
2163 goya_pb_set_block(hdev, mmPCI_NRTR_BASE); 2166 goya_pb_set_block(hdev, mmPCI_NRTR_BASE);
2164 goya_pb_set_block(hdev, mmPCI_RD_REGULATOR_BASE); 2167 goya_pb_set_block(hdev, mmPCI_RD_REGULATOR_BASE);
@@ -2237,6 +2240,14 @@ static void goya_init_protection_bits(struct hl_device *hdev)
2237 goya_pb_set_block(hdev, mmPCIE_AUX_BASE); 2240 goya_pb_set_block(hdev, mmPCIE_AUX_BASE);
2238 goya_pb_set_block(hdev, mmPCIE_DB_RSV_BASE); 2241 goya_pb_set_block(hdev, mmPCIE_DB_RSV_BASE);
2239 goya_pb_set_block(hdev, mmPCIE_PHY_BASE); 2242 goya_pb_set_block(hdev, mmPCIE_PHY_BASE);
2243 goya_pb_set_block(hdev, mmTPC0_NRTR_BASE);
2244 goya_pb_set_block(hdev, mmTPC_PLL_BASE);
2245
2246 pb_addr = (mmTPC_PLL_CLK_RLX_0 & ~0xFFF) + PROT_BITS_OFFS;
2247 word_offset = ((mmTPC_PLL_CLK_RLX_0 & PROT_BITS_OFFS) >> 7) << 2;
2248 mask = 1 << ((mmTPC_PLL_CLK_RLX_0 & 0x7C) >> 2);
2249
2250 WREG32(pb_addr + word_offset, mask);
2240 2251
2241 goya_init_mme_protection_bits(hdev); 2252 goya_init_mme_protection_bits(hdev);
2242 2253
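The explicit pb_addr/word_offset/mask triple carves a single register, mmTPC_PLL_CLK_RLX_0, out of the block-level protection set just above; this is the same register the new WREG32() in goya_context_switch() (earlier goya.c hunk) writes on every context switch. A worked decomposition, using a hypothetical block offset of 0xE64 since the register's real address and PROT_BITS_OFFS are defined outside this diff:

        /*
         * word_offset = ((0xE64 & PROT_BITS_OFFS) >> 7) << 2
         *             = 28 << 2 = 0x70   -> 29th 32-bit word of the bitmap
         *                                   (bits 7-11 of the offset)
         * mask        = 1 << ((0xE64 & 0x7C) >> 2)
         *             = 1 << 25          -> bit 25 within that word
         *                                   (bits 2-6 of the offset)
         * so the single WREG32() touches exactly one register's slot in
         * the per-block protection bitmap.
         */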
@@ -2294,8 +2305,8 @@ void goya_init_security(struct hl_device *hdev)
2294 u32 lbw_rng10_base = 0xFCC60000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; 2305 u32 lbw_rng10_base = 0xFCC60000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2295 u32 lbw_rng10_mask = 0xFFFE0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; 2306 u32 lbw_rng10_mask = 0xFFFE0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2296 2307
2297 u32 lbw_rng11_base = 0xFCE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; 2308 u32 lbw_rng11_base = 0xFCE02000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2298 u32 lbw_rng11_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; 2309 u32 lbw_rng11_mask = 0xFFFFE000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2299 2310
2300 u32 lbw_rng12_base = 0xFE484000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; 2311 u32 lbw_rng12_base = 0xFE484000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
2301 u32 lbw_rng12_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; 2312 u32 lbw_rng12_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index a8ee52c880cd..71243b319920 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -11,8 +11,6 @@
11#include "include/armcp_if.h" 11#include "include/armcp_if.h"
12#include "include/qman_if.h" 12#include "include/qman_if.h"
13 13
14#define pr_fmt(fmt) "habanalabs: " fmt
15
16#include <linux/cdev.h> 14#include <linux/cdev.h>
17#include <linux/iopoll.h> 15#include <linux/iopoll.h>
18#include <linux/irqreturn.h> 16#include <linux/irqreturn.h>
@@ -33,6 +31,9 @@
33 31
34#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */ 32#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */
35 33
34#define HL_ARMCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
35#define HL_ARMCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
36
36#define HL_MAX_QUEUES 128 37#define HL_MAX_QUEUES 128
37 38
38#define HL_MAX_JOBS_PER_CS 64 39#define HL_MAX_JOBS_PER_CS 64
@@ -48,8 +49,9 @@
48 49
49/** 50/**
50 * struct pgt_info - MMU hop page info. 51 * struct pgt_info - MMU hop page info.
51 * @node: hash linked-list node for the pgts hash of pgts. 52 * @node: hash linked-list node for the pgts shadow hash of pgts.
52 * @addr: physical address of the pgt. 53 * @phys_addr: physical address of the pgt.
54 * @shadow_addr: shadow hop in the host.
53 * @ctx: pointer to the owner ctx. 55 * @ctx: pointer to the owner ctx.
54 * @num_of_ptes: indicates how many ptes are used in the pgt. 56 * @num_of_ptes: indicates how many ptes are used in the pgt.
55 * 57 *
@@ -59,10 +61,11 @@
59 * page, it is freed with its pgt_info structure. 61 * page, it is freed with its pgt_info structure.
60 */ 62 */
61struct pgt_info { 63struct pgt_info {
62 struct hlist_node node; 64 struct hlist_node node;
63 u64 addr; 65 u64 phys_addr;
64 struct hl_ctx *ctx; 66 u64 shadow_addr;
65 int num_of_ptes; 67 struct hl_ctx *ctx;
68 int num_of_ptes;
66}; 69};
67 70
68struct hl_device; 71struct hl_device;
@@ -132,8 +135,6 @@ enum hl_device_hw_state {
132 * @dram_user_base_address: DRAM physical start address for user access. 135 * @dram_user_base_address: DRAM physical start address for user access.
133 * @dram_size: DRAM total size. 136 * @dram_size: DRAM total size.
134 * @dram_pci_bar_size: size of PCI bar towards DRAM. 137 * @dram_pci_bar_size: size of PCI bar towards DRAM.
135 * @host_phys_base_address: base physical address of host memory for
136 * transactions that the device generates.
137 * @max_power_default: max power of the device after reset 138 * @max_power_default: max power of the device after reset
138 * @va_space_host_start_address: base address of virtual memory range for 139 * @va_space_host_start_address: base address of virtual memory range for
139 * mapping host memory. 140 * mapping host memory.
@@ -145,6 +146,8 @@ enum hl_device_hw_state {
145 * mapping DRAM memory. 146 * mapping DRAM memory.
146 * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page 147 * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
147 * fault. 148 * fault.
149 * @pcie_dbi_base_address: Base address of the PCIE_DBI block.
150 * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
148 * @mmu_pgt_addr: base physical address in DRAM of MMU page tables. 151 * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
149 * @mmu_dram_default_page_addr: DRAM default page physical address. 152 * @mmu_dram_default_page_addr: DRAM default page physical address.
150 * @mmu_pgt_size: MMU page tables total size. 153 * @mmu_pgt_size: MMU page tables total size.
@@ -179,13 +182,14 @@ struct asic_fixed_properties {
179 u64 dram_user_base_address; 182 u64 dram_user_base_address;
180 u64 dram_size; 183 u64 dram_size;
181 u64 dram_pci_bar_size; 184 u64 dram_pci_bar_size;
182 u64 host_phys_base_address;
183 u64 max_power_default; 185 u64 max_power_default;
184 u64 va_space_host_start_address; 186 u64 va_space_host_start_address;
185 u64 va_space_host_end_address; 187 u64 va_space_host_end_address;
186 u64 va_space_dram_start_address; 188 u64 va_space_dram_start_address;
187 u64 va_space_dram_end_address; 189 u64 va_space_dram_end_address;
188 u64 dram_size_for_default_page_mapping; 190 u64 dram_size_for_default_page_mapping;
191 u64 pcie_dbi_base_address;
192 u64 pcie_aux_dbi_reg_addr;
189 u64 mmu_pgt_addr; 193 u64 mmu_pgt_addr;
190 u64 mmu_dram_default_page_addr; 194 u64 mmu_dram_default_page_addr;
191 u32 mmu_pgt_size; 195 u32 mmu_pgt_size;
@@ -314,6 +318,18 @@ struct hl_cs_job;
314#define HL_EQ_LENGTH 64 318#define HL_EQ_LENGTH 64
315#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE) 319#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
316 320
321#define HL_CPU_PKT_SHIFT 5
322#define HL_CPU_PKT_SIZE (1 << HL_CPU_PKT_SHIFT)
323#define HL_CPU_PKT_MASK (~((1 << HL_CPU_PKT_SHIFT) - 1))
324#define HL_CPU_MAX_PKTS_IN_CB 32
325#define HL_CPU_CB_SIZE (HL_CPU_PKT_SIZE * \
326 HL_CPU_MAX_PKTS_IN_CB)
327#define HL_CPU_CB_QUEUE_SIZE (HL_QUEUE_LENGTH * HL_CPU_CB_SIZE)
328
329/* KMD <-> ArmCP shared memory size (EQ + PQ + CPU CB queue) */
330#define HL_CPU_ACCESSIBLE_MEM_SIZE (HL_EQ_SIZE_IN_BYTES + \
331 HL_QUEUE_SIZE_IN_BYTES + \
332 HL_CPU_CB_QUEUE_SIZE)
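These definitions migrate the CPU packet/CB sizing out of goyaP.h (the CPU_PKT_*/CPU_CB_* macros removed there) into the common header, and derive the KMD to ArmCP shared region from its parts. A quick expansion of what this hunk pins down:

        /*
         * HL_CPU_PKT_SIZE      = 1 << 5          = 32 bytes
         * HL_CPU_CB_SIZE       = 32 * 32         = 1KB per CPU command buffer
         * HL_CPU_CB_QUEUE_SIZE = HL_QUEUE_LENGTH * 1KB
         * HL_QUEUE_LENGTH, HL_QUEUE_SIZE_IN_BYTES and HL_EQ_ENTRY_SIZE are
         * defined elsewhere, so the absolute HL_CPU_ACCESSIBLE_MEM_SIZE is
         * not derivable from this hunk alone; the gain is that the shared
         * region is now sized from its three components instead of the
         * hand-written CPU_ACCESSIBLE_MEM_SIZE constant removed from
         * goyaP.h.
         */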
317 333
318/** 334/**
319 * struct hl_hw_queue - describes a H/W transport queue. 335 * struct hl_hw_queue - describes a H/W transport queue.
@@ -381,14 +397,12 @@ struct hl_eq {
381 397
382/** 398/**
383 * enum hl_asic_type - supported ASIC types. 399 * enum hl_asic_type - supported ASIC types.
384 * @ASIC_AUTO_DETECT: ASIC type will be automatically set.
385 * @ASIC_GOYA: Goya device.
386 * @ASIC_INVALID: Invalid ASIC type. 400 * @ASIC_INVALID: Invalid ASIC type.
401 * @ASIC_GOYA: Goya device.
387 */ 402 */
388enum hl_asic_type { 403enum hl_asic_type {
389 ASIC_AUTO_DETECT, 404 ASIC_INVALID,
390 ASIC_GOYA, 405 ASIC_GOYA
391 ASIC_INVALID
392}; 406};
393 407
394struct hl_cs_parser; 408struct hl_cs_parser;
@@ -436,19 +450,19 @@ enum hl_pll_frequency {
436 * @cb_mmap: maps a CB. 450 * @cb_mmap: maps a CB.
437 * @ring_doorbell: increment PI on a given QMAN. 451 * @ring_doorbell: increment PI on a given QMAN.
438 * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed. 452 * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed.
439 * @dma_alloc_coherent: Allocate coherent DMA memory by calling 453 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
440 * dma_alloc_coherent(). This is ASIC function because its 454 * dma_alloc_coherent(). This is ASIC function because
441 * implementation is not trivial when the driver is loaded 455 * its implementation is not trivial when the driver
442 * in simulation mode (not upstreamed). 456 * is loaded in simulation mode (not upstreamed).
443 * @dma_free_coherent: Free coherent DMA memory by calling dma_free_coherent(). 457 * @asic_dma_free_coherent: Free coherent DMA memory by calling
444 * This is ASIC function because its implementation is not 458 * dma_free_coherent(). This is ASIC function because
445 * trivial when the driver is loaded in simulation mode 459 * its implementation is not trivial when the driver
446 * (not upstreamed). 460 * is loaded in simulation mode (not upstreamed).
447 * @get_int_queue_base: get the internal queue base address. 461 * @get_int_queue_base: get the internal queue base address.
448 * @test_queues: run simple test on all queues for sanity check. 462 * @test_queues: run simple test on all queues for sanity check.
449 * @dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool. 463 * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
450 * size of allocation is HL_DMA_POOL_BLK_SIZE. 464 * size of allocation is HL_DMA_POOL_BLK_SIZE.
451 * @dma_pool_free: free small DMA allocation from pool. 465 * @asic_dma_pool_free: free small DMA allocation from pool.
452 * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool. 466 * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
453 * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool. 467 * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
454 * @hl_dma_unmap_sg: DMA unmap scatter-gather list. 468 * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
@@ -472,8 +486,7 @@ enum hl_pll_frequency {
472 * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with 486 * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
473 * ASID-VA-size mask. 487 * ASID-VA-size mask.
474 * @send_heartbeat: send is-alive packet to ArmCP and verify response. 488 * @send_heartbeat: send is-alive packet to ArmCP and verify response.
475 * @enable_clock_gating: enable clock gating for reducing power consumption. 489 * @debug_coresight: perform certain actions on Coresight for debugging.
476 * @disable_clock_gating: disable clock for accessing registers on HBW.
477 * @is_device_idle: return true if device is idle, false otherwise. 490 * @is_device_idle: return true if device is idle, false otherwise.
478 * @soft_reset_late_init: perform certain actions needed after soft reset. 491 * @soft_reset_late_init: perform certain actions needed after soft reset.
479 * @hw_queues_lock: acquire H/W queues lock. 492 * @hw_queues_lock: acquire H/W queues lock.
@@ -482,6 +495,12 @@ enum hl_pll_frequency {
482 * @get_eeprom_data: retrieve EEPROM data from F/W. 495 * @get_eeprom_data: retrieve EEPROM data from F/W.
483 * @send_cpu_message: send buffer to ArmCP. 496 * @send_cpu_message: send buffer to ArmCP.
484 * @get_hw_state: retrieve the H/W state 497 * @get_hw_state: retrieve the H/W state
498 * @pci_bars_map: Map PCI BARs.
499 * @set_dram_bar_base: Set DRAM BAR to map specific device address. Returns
500 * old address the bar pointed to or U64_MAX for failure
501 * @init_iatu: Initialize the iATU unit inside the PCI controller.
502 * @rreg: Read a register. Needed for simulator support.
503 * @wreg: Write a register. Needed for simulator support.
485 */ 504 */
486struct hl_asic_funcs { 505struct hl_asic_funcs {
487 int (*early_init)(struct hl_device *hdev); 506 int (*early_init)(struct hl_device *hdev);
@@ -499,27 +518,27 @@ struct hl_asic_funcs {
499 u64 kaddress, phys_addr_t paddress, u32 size); 518 u64 kaddress, phys_addr_t paddress, u32 size);
500 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi); 519 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
501 void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val); 520 void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val);
502 void* (*dma_alloc_coherent)(struct hl_device *hdev, size_t size, 521 void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
503 dma_addr_t *dma_handle, gfp_t flag); 522 dma_addr_t *dma_handle, gfp_t flag);
504 void (*dma_free_coherent)(struct hl_device *hdev, size_t size, 523 void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
505 void *cpu_addr, dma_addr_t dma_handle); 524 void *cpu_addr, dma_addr_t dma_handle);
506 void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id, 525 void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
507 dma_addr_t *dma_handle, u16 *queue_len); 526 dma_addr_t *dma_handle, u16 *queue_len);
508 int (*test_queues)(struct hl_device *hdev); 527 int (*test_queues)(struct hl_device *hdev);
509 void* (*dma_pool_zalloc)(struct hl_device *hdev, size_t size, 528 void* (*asic_dma_pool_zalloc)(struct hl_device *hdev, size_t size,
510 gfp_t mem_flags, dma_addr_t *dma_handle); 529 gfp_t mem_flags, dma_addr_t *dma_handle);
511 void (*dma_pool_free)(struct hl_device *hdev, void *vaddr, 530 void (*asic_dma_pool_free)(struct hl_device *hdev, void *vaddr,
512 dma_addr_t dma_addr); 531 dma_addr_t dma_addr);
513 void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev, 532 void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
514 size_t size, dma_addr_t *dma_handle); 533 size_t size, dma_addr_t *dma_handle);
515 void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev, 534 void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
516 size_t size, void *vaddr); 535 size_t size, void *vaddr);
517 void (*hl_dma_unmap_sg)(struct hl_device *hdev, 536 void (*hl_dma_unmap_sg)(struct hl_device *hdev,
518 struct scatterlist *sg, int nents, 537 struct scatterlist *sgl, int nents,
519 enum dma_data_direction dir); 538 enum dma_data_direction dir);
520 int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser); 539 int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
521 int (*asic_dma_map_sg)(struct hl_device *hdev, 540 int (*asic_dma_map_sg)(struct hl_device *hdev,
522 struct scatterlist *sg, int nents, 541 struct scatterlist *sgl, int nents,
523 enum dma_data_direction dir); 542 enum dma_data_direction dir);
524 u32 (*get_dma_desc_list_size)(struct hl_device *hdev, 543 u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
525 struct sg_table *sgt); 544 struct sg_table *sgt);
@@ -543,9 +562,8 @@ struct hl_asic_funcs {
543 void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard, 562 void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
544 u32 asid, u64 va, u64 size); 563 u32 asid, u64 va, u64 size);
545 int (*send_heartbeat)(struct hl_device *hdev); 564 int (*send_heartbeat)(struct hl_device *hdev);
546 void (*enable_clock_gating)(struct hl_device *hdev); 565 int (*debug_coresight)(struct hl_device *hdev, void *data);
547 void (*disable_clock_gating)(struct hl_device *hdev); 566 bool (*is_device_idle)(struct hl_device *hdev, char *buf, size_t size);
548 bool (*is_device_idle)(struct hl_device *hdev);
549 int (*soft_reset_late_init)(struct hl_device *hdev); 567 int (*soft_reset_late_init)(struct hl_device *hdev);
550 void (*hw_queues_lock)(struct hl_device *hdev); 568 void (*hw_queues_lock)(struct hl_device *hdev);
551 void (*hw_queues_unlock)(struct hl_device *hdev); 569 void (*hw_queues_unlock)(struct hl_device *hdev);
@@ -555,6 +573,11 @@ struct hl_asic_funcs {
555 int (*send_cpu_message)(struct hl_device *hdev, u32 *msg, 573 int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
556 u16 len, u32 timeout, long *result); 574 u16 len, u32 timeout, long *result);
557 enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev); 575 enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
576 int (*pci_bars_map)(struct hl_device *hdev);
577 u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
578 int (*init_iatu)(struct hl_device *hdev);
579 u32 (*rreg)(struct hl_device *hdev, u32 reg);
580 void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
558}; 581};
559 582
560 583
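
The new rreg/wreg hooks let RREG32/WREG32 (redefined further down) route register access through the ASIC, so the same core code serves both real PCI devices and the simulator. A minimal sketch of what an ASIC might install, assuming MMIO-backed access through hdev->rmmio; the function names are illustrative, not this patch's actual implementations:

#include <linux/io.h>

/* Hypothetical hooks for hl_asic_funcs.rreg/.wreg: plain MMIO access.
 * A simulator build would install variants that forward the access to
 * the simulator transport instead of touching hardware.
 */
static u32 example_rreg(struct hl_device *hdev, u32 reg)
{
	return readl(hdev->rmmio + reg);
}

static void example_wreg(struct hl_device *hdev, u32 reg, u32 val)
{
	writel(val, hdev->rmmio + reg);
}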
@@ -582,7 +605,8 @@ struct hl_va_range {
582 * struct hl_ctx - user/kernel context. 605 * struct hl_ctx - user/kernel context.
583 * @mem_hash: holds mapping from virtual address to virtual memory area 606 * @mem_hash: holds mapping from virtual address to virtual memory area
584 * descriptor (hl_vm_phys_pg_list or hl_userptr). 607 * descriptor (hl_vm_phys_pg_list or hl_userptr).
585 * @mmu_hash: holds a mapping from virtual address to pgt_info structure. 608 * @mmu_phys_hash: holds a mapping from physical address to pgt_info structure.
609 * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
586 * @hpriv: pointer to the private (KMD) data of the process (fd). 610 * @hpriv: pointer to the private (KMD) data of the process (fd).
587 * @hdev: pointer to the device structure. 611 * @hdev: pointer to the device structure.
588 * @refcount: reference counter for the context. Context is released only when 612 * @refcount: reference counter for the context. Context is released only when
@@ -601,17 +625,19 @@ struct hl_va_range {
601 * DRAM mapping. 625 * DRAM mapping.
602 * @cs_lock: spinlock to protect cs_sequence. 626 * @cs_lock: spinlock to protect cs_sequence.
603 * @dram_phys_mem: amount of used physical DRAM memory by this context. 627 * @dram_phys_mem: amount of used physical DRAM memory by this context.
604 * @thread_restore_token: token to prevent multiple threads of the same context 628 * @thread_ctx_switch_token: token to prevent multiple threads of the same
605 * from running the restore phase. Only one thread 629 * context from running the context switch phase.
606 * should run it. 630 * Only a single thread should run it.
607 * @thread_restore_wait_token: token to prevent the threads that didn't run 631 * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
608 * the restore phase from moving to their execution 632 * the context switch phase from moving to their
609 * phase before the restore phase has finished. 633 * execution phase before the context switch phase
634 * has finished.
610 * @asid: context's unique address space ID in the device's MMU. 635 * @asid: context's unique address space ID in the device's MMU.
611 */ 636 */
612struct hl_ctx { 637struct hl_ctx {
613 DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS); 638 DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
614 DECLARE_HASHTABLE(mmu_hash, MMU_HASH_TABLE_BITS); 639 DECLARE_HASHTABLE(mmu_phys_hash, MMU_HASH_TABLE_BITS);
640 DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
615 struct hl_fpriv *hpriv; 641 struct hl_fpriv *hpriv;
616 struct hl_device *hdev; 642 struct hl_device *hdev;
617 struct kref refcount; 643 struct kref refcount;
@@ -625,8 +651,8 @@ struct hl_ctx {
625 u64 *dram_default_hops; 651 u64 *dram_default_hops;
626 spinlock_t cs_lock; 652 spinlock_t cs_lock;
627 atomic64_t dram_phys_mem; 653 atomic64_t dram_phys_mem;
628 atomic_t thread_restore_token; 654 atomic_t thread_ctx_switch_token;
629 u32 thread_restore_wait_token; 655 u32 thread_ctx_switch_wait_token;
630 u32 asid; 656 u32 asid;
631}; 657};
632 658
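
The renamed tokens implement a one-shot gate: the first thread of a context to take thread_ctx_switch_token runs the context switch phase, and the rest spin on thread_ctx_switch_wait_token until it completes. A sketch of the intended pattern, hedged — the actual consumer sits in the command-submission path, outside this hunk:

#include <linux/delay.h>

/* Illustrative gate over the two tokens; not code from this patch. */
static void example_ctx_switch_gate(struct hl_ctx *ctx)
{
	if (atomic_cmpxchg(&ctx->thread_ctx_switch_token, 0, 1) == 0) {
		/* Winner: run the context switch phase exactly once. */
		/* ... per-context switch work goes here ... */
		ctx->thread_ctx_switch_wait_token = 1;
	} else {
		/* Losers: hold execution until the winner finishes. */
		while (!READ_ONCE(ctx->thread_ctx_switch_wait_token))
			usleep_range(100, 200);
	}
}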
@@ -753,8 +779,6 @@ struct hl_cs_job {
753 * @patched_cb_size: the size of the CB after parsing. 779 * @patched_cb_size: the size of the CB after parsing.
754 * @ext_queue: whether the job is for external queue or internal queue. 780 * @ext_queue: whether the job is for external queue or internal queue.
755 * @job_id: the id of the related job inside the related CS. 781 * @job_id: the id of the related job inside the related CS.
756 * @use_virt_addr: whether to treat the addresses in the CB as virtual during
757 * parsing.
758 */ 782 */
759struct hl_cs_parser { 783struct hl_cs_parser {
760 struct hl_cb *user_cb; 784 struct hl_cb *user_cb;
@@ -767,7 +791,6 @@ struct hl_cs_parser {
767 u32 patched_cb_size; 791 u32 patched_cb_size;
768 u8 ext_queue; 792 u8 ext_queue;
769 u8 job_id; 793 u8 job_id;
770 u8 use_virt_addr;
771}; 794};
772 795
773 796
@@ -850,6 +873,29 @@ struct hl_vm {
850 u8 init_done; 873 u8 init_done;
851}; 874};
852 875
876
877/*
878 * DEBUG, PROFILING STRUCTURE
879 */
880
881/**
882 * struct hl_debug_params - Coresight debug parameters.
883 * @input: pointer to component specific input parameters.
884 * @output: pointer to component specific output parameters.
885 * @output_size: size of output buffer.
886 * @reg_idx: relevant register ID.
887 * @op: component operation to execute.
888 * @enable: true to enable component debugging, false otherwise.
889 */
890struct hl_debug_params {
891 void *input;
892 void *output;
893 u32 output_size;
894 u32 reg_idx;
895 u32 op;
896 bool enable;
897};
898
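
An ASIC's debug_coresight hook receives one hl_debug_params per ioctl and typically dispatches on @op, consuming @input and filling @output. A hedged outline; the per-component helpers are hypothetical placeholders:

/* Sketch of an ASIC-side dispatcher; example_config_*() do not exist
 * in this patch and stand in for component-specific configuration.
 */
static int example_debug_coresight(struct hl_device *hdev, void *data)
{
	struct hl_debug_params *params = data;

	switch (params->op) {
	case HL_DEBUG_OP_STM:
		return example_config_stm(hdev, params);
	case HL_DEBUG_OP_ETF:
		return example_config_etf(hdev, params);
	default:
		dev_err(hdev->dev, "unknown coresight op %u\n", params->op);
		return -EINVAL;
	}
}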
853/* 899/*
854 * FILE PRIVATE STRUCTURE 900 * FILE PRIVATE STRUCTURE
855 */ 901 */
@@ -973,13 +1019,10 @@ struct hl_dbg_device_entry {
973u32 hl_rreg(struct hl_device *hdev, u32 reg); 1019u32 hl_rreg(struct hl_device *hdev, u32 reg);
974void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); 1020void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
975 1021
976#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \ 1022#define RREG32(reg) hdev->asic_funcs->rreg(hdev, (reg))
977 readl_poll_timeout(hdev->rmmio + addr, val, cond, sleep_us, timeout_us) 1023#define WREG32(reg, v) hdev->asic_funcs->wreg(hdev, (reg), (v))
978
979#define RREG32(reg) hl_rreg(hdev, (reg))
980#define WREG32(reg, v) hl_wreg(hdev, (reg), (v))
981#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \ 1024#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \
982 hl_rreg(hdev, (reg))) 1025 hdev->asic_funcs->rreg(hdev, (reg)))
983 1026
984#define WREG32_P(reg, val, mask) \ 1027#define WREG32_P(reg, val, mask) \
985 do { \ 1028 do { \
@@ -997,6 +1040,36 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
997 WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \ 1040 WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
998 (val) << REG_FIELD_SHIFT(reg, field)) 1041 (val) << REG_FIELD_SHIFT(reg, field))
999 1042
1043#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
1044({ \
1045 ktime_t __timeout; \
1046 /* timeout should be longer when working with simulator */ \
1047 if (hdev->pdev) \
1048 __timeout = ktime_add_us(ktime_get(), timeout_us); \
1049 else \
1050 __timeout = ktime_add_us(ktime_get(), (timeout_us * 10)); \
1051 might_sleep_if(sleep_us); \
1052 for (;;) { \
1053 (val) = RREG32(addr); \
1054 if (cond) \
1055 break; \
1056 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1057 (val) = RREG32(addr); \
1058 break; \
1059 } \
1060 if (sleep_us) \
1061 usleep_range((sleep_us >> 2) + 1, sleep_us); \
1062 } \
1063 (cond) ? 0 : -ETIMEDOUT; \
1064})
1065
1066
1067#define HL_ENG_BUSY(buf, size, fmt, ...) ({ \
1068 if (buf) \
1069 snprintf(buf, size, fmt, ##__VA_ARGS__); \
1070 false; \
1071 })
1072
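
The relocated hl_poll_timeout now polls through RREG32 (and therefore the rreg hook) and stretches the deadline tenfold when there is no PCI device, i.e. on the simulator. HL_ENG_BUSY lets an is_device_idle implementation report which engine is busy while the expression itself evaluates to false. Hedged usage, with an invented register and engine flag:

/* Illustration only: mmEXAMPLE_STATUS and engine_busy below are
 * hypothetical, not part of this patch.
 */
u32 status;
int rc;

rc = hl_poll_timeout(hdev, mmEXAMPLE_STATUS, status,
			(status & 0x1),
			1000,		/* sleep between polls, us */
			1000000);	/* total timeout, us */
if (rc)
	dev_err(hdev->dev, "timeout waiting for engine ready\n");

/* Inside a bool is_device_idle(hdev, buf, size) implementation: */
if (engine_busy)
	return HL_ENG_BUSY(buf, size, "ENGINE0 is busy");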
1000struct hwmon_chip_info; 1073struct hwmon_chip_info;
1001 1074
1002/** 1075/**
@@ -1047,7 +1120,8 @@ struct hl_device_reset_work {
1047 * @asic_specific: ASIC specific information to use only from ASIC files. 1120 * @asic_specific: ASIC specific information to use only from ASIC files.
1048 * @mmu_pgt_pool: pool of available MMU hops. 1121 * @mmu_pgt_pool: pool of available MMU hops.
1049 * @vm: virtual memory manager for MMU. 1122 * @vm: virtual memory manager for MMU.
1050 * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context 1123 * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context.
1124 * @mmu_shadow_hop0: shadow mapping of the MMU hop 0 zone.
1051 * @hwmon_dev: H/W monitor device. 1125 * @hwmon_dev: H/W monitor device.
1052 * @pm_mng_profile: current power management profile. 1126 * @pm_mng_profile: current power management profile.
1053 * @hl_chip_info: ASIC's sensors information. 1127 * @hl_chip_info: ASIC's sensors information.
@@ -1082,6 +1156,7 @@ struct hl_device_reset_work {
1082 * @init_done: is the initialization of the device done. 1156 * @init_done: is the initialization of the device done.
1083 * @mmu_enable: is MMU enabled. 1157 * @mmu_enable: is MMU enabled.
1084 * @device_cpu_disabled: is the device CPU disabled (due to timeouts) 1158 * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
1159 * @dma_mask: the DMA mask that was set for this device
1085 */ 1160 */
1086struct hl_device { 1161struct hl_device {
1087 struct pci_dev *pdev; 1162 struct pci_dev *pdev;
@@ -1117,6 +1192,7 @@ struct hl_device {
1117 struct gen_pool *mmu_pgt_pool; 1192 struct gen_pool *mmu_pgt_pool;
1118 struct hl_vm vm; 1193 struct hl_vm vm;
1119 struct mutex mmu_cache_lock; 1194 struct mutex mmu_cache_lock;
1195 void *mmu_shadow_hop0;
1120 struct device *hwmon_dev; 1196 struct device *hwmon_dev;
1121 enum hl_pm_mng_profile pm_mng_profile; 1197 enum hl_pm_mng_profile pm_mng_profile;
1122 struct hwmon_chip_info *hl_chip_info; 1198 struct hwmon_chip_info *hl_chip_info;
@@ -1151,6 +1227,7 @@ struct hl_device {
1151 u8 dram_default_page_mapping; 1227 u8 dram_default_page_mapping;
1152 u8 init_done; 1228 u8 init_done;
1153 u8 device_cpu_disabled; 1229 u8 device_cpu_disabled;
1230 u8 dma_mask;
1154 1231
1155 /* Parameters for bring-up */ 1232 /* Parameters for bring-up */
1156 u8 mmu_enable; 1233 u8 mmu_enable;
@@ -1245,6 +1322,7 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
1245 1322
1246int hl_device_open(struct inode *inode, struct file *filp); 1323int hl_device_open(struct inode *inode, struct file *filp);
1247bool hl_device_disabled_or_in_reset(struct hl_device *hdev); 1324bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
1325enum hl_device_status hl_device_status(struct hl_device *hdev);
1248int create_hdev(struct hl_device **dev, struct pci_dev *pdev, 1326int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
1249 enum hl_asic_type asic_type, int minor); 1327 enum hl_asic_type asic_type, int minor);
1250void destroy_hdev(struct hl_device *hdev); 1328void destroy_hdev(struct hl_device *hdev);
@@ -1351,6 +1429,32 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size);
1351void hl_mmu_swap_out(struct hl_ctx *ctx); 1429void hl_mmu_swap_out(struct hl_ctx *ctx);
1352void hl_mmu_swap_in(struct hl_ctx *ctx); 1430void hl_mmu_swap_in(struct hl_ctx *ctx);
1353 1431
1432int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
1433 void __iomem *dst);
1434int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
1435int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
1436 u16 len, u32 timeout, long *result);
1437int hl_fw_test_cpu_queue(struct hl_device *hdev);
1438void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
1439 dma_addr_t *dma_handle);
1440void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
1441 void *vaddr);
1442int hl_fw_send_heartbeat(struct hl_device *hdev);
1443int hl_fw_armcp_info_get(struct hl_device *hdev);
1444int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
1445
1446int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
1447 bool is_wc[3]);
1448int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
1449int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
1450 u64 addr);
1451int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
1452 u64 dram_base_address, u64 host_phys_base_address,
1453 u64 host_phys_size);
1454int hl_pci_init(struct hl_device *hdev, u8 dma_mask);
1455void hl_pci_fini(struct hl_device *hdev);
1456int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask);
1457
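
These declarations expose the firmware and PCI bring-up code that this series factors out of the ASIC files into common objects. A plausible piece of ASIC glue, sketched under invented BAR names and write-combine flags:

/* Hypothetical pci_bars_map hook built on the shared helper; the BAR
 * names and is_wc values are made up for illustration.
 */
static int example_pci_bars_map(struct hl_device *hdev)
{
	static const char * const name[3] = { "CFG", "MSIX", "DDR" };
	bool is_wc[3] = { false, false, true };

	return hl_pci_bars_map(hdev, name, is_wc);
}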
1354long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr); 1458long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
1355void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq); 1459void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
1356long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr); 1460long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
index 748601463f11..5f4d155be767 100644
--- a/drivers/misc/habanalabs/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/habanalabs_drv.c
@@ -6,6 +6,8 @@
6 * 6 *
7 */ 7 */
8 8
9#define pr_fmt(fmt) "habanalabs: " fmt
10
9#include "habanalabs.h" 11#include "habanalabs.h"
10 12
11#include <linux/pci.h> 13#include <linux/pci.h>
@@ -218,7 +220,7 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
218 hdev->disabled = true; 220 hdev->disabled = true;
219 hdev->pdev = pdev; /* can be NULL in case of simulator device */ 221 hdev->pdev = pdev; /* can be NULL in case of simulator device */
220 222
221 if (asic_type == ASIC_AUTO_DETECT) { 223 if (pdev) {
222 hdev->asic_type = get_asic_type(pdev->device); 224 hdev->asic_type = get_asic_type(pdev->device);
223 if (hdev->asic_type == ASIC_INVALID) { 225 if (hdev->asic_type == ASIC_INVALID) {
224 dev_err(&pdev->dev, "Unsupported ASIC\n"); 226 dev_err(&pdev->dev, "Unsupported ASIC\n");
@@ -229,6 +231,9 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
229 hdev->asic_type = asic_type; 231 hdev->asic_type = asic_type;
230 } 232 }
231 233
234 /* Set default DMA mask to 32 bits */
235 hdev->dma_mask = 32;
236
232 mutex_lock(&hl_devs_idr_lock); 237 mutex_lock(&hl_devs_idr_lock);
233 238
234 if (minor == -1) { 239 if (minor == -1) {
@@ -334,7 +339,7 @@ static int hl_pci_probe(struct pci_dev *pdev,
334 " device found [%04x:%04x] (rev %x)\n", 339 " device found [%04x:%04x] (rev %x)\n",
335 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); 340 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
336 341
337 rc = create_hdev(&hdev, pdev, ASIC_AUTO_DETECT, -1); 342 rc = create_hdev(&hdev, pdev, ASIC_INVALID, -1);
338 if (rc) 343 if (rc)
339 return rc; 344 return rc;
340 345
diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c
index 2c2739a3c5ec..eeefb22023e9 100644
--- a/drivers/misc/habanalabs/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/habanalabs_ioctl.c
@@ -12,6 +12,32 @@
12#include <linux/uaccess.h> 12#include <linux/uaccess.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14 14
15static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
16 [HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
17 [HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
18 [HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
19 [HL_DEBUG_OP_FUNNEL] = 0,
20 [HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
21 [HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
22 [HL_DEBUG_OP_TIMESTAMP] = 0
23
24};
25
26static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
27{
28 struct hl_info_device_status dev_stat = {0};
29 u32 size = args->return_size;
30 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
31
32 if ((!size) || (!out))
33 return -EINVAL;
34
35 dev_stat.status = hl_device_status(hdev);
36
37 return copy_to_user(out, &dev_stat,
38 min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
39}
40
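
device_status_info stays reachable even while the device is disabled or in reset; hl_info_ioctl (below) short-circuits to it before the usual availability check. From userspace the query is a plain INFO ioctl. A hedged sketch with headers and error handling trimmed; field names follow the uapi:

/* Hypothetical userspace caller; fd is an open habanalabs device. */
struct hl_info_device_status dev_stat = {0};
struct hl_info_args args = {
	.return_pointer = (__u64) (uintptr_t) &dev_stat,
	.return_size = sizeof(dev_stat),
	.op = HL_INFO_DEVICE_STATUS,
};

if (!ioctl(fd, HL_IOCTL_INFO, &args))
	printf("device status: %u\n", dev_stat.status);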
15static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args) 41static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
16{ 42{
17 struct hl_info_hw_ip_info hw_ip = {0}; 43 struct hl_info_hw_ip_info hw_ip = {0};
@@ -93,21 +119,91 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
93 if ((!max_size) || (!out)) 119 if ((!max_size) || (!out))
94 return -EINVAL; 120 return -EINVAL;
95 121
96 hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev); 122 hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev, NULL, 0);
97 123
98 return copy_to_user(out, &hw_idle, 124 return copy_to_user(out, &hw_idle,
99 min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0; 125 min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
100} 126}
101 127
128static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
129{
130 struct hl_debug_params *params;
131 void *input = NULL, *output = NULL;
132 int rc;
133
134 params = kzalloc(sizeof(*params), GFP_KERNEL);
135 if (!params)
136 return -ENOMEM;
137
138 params->reg_idx = args->reg_idx;
139 params->enable = args->enable;
140 params->op = args->op;
141
142 if (args->input_ptr && args->input_size) {
143 input = memdup_user((const void __user *) args->input_ptr,
144 args->input_size);
145 if (IS_ERR(input)) {
146 rc = PTR_ERR(input);
147 input = NULL;
148 dev_err(hdev->dev,
149 "error %d when copying input debug data\n", rc);
150 goto out;
151 }
152
153 params->input = input;
154 }
155
156 if (args->output_ptr && args->output_size) {
157 output = kzalloc(args->output_size, GFP_KERNEL);
158 if (!output) {
159 rc = -ENOMEM;
160 goto out;
161 }
162
163 params->output = output;
164 params->output_size = args->output_size;
165 }
166
167 rc = hdev->asic_funcs->debug_coresight(hdev, params);
168 if (rc) {
169 dev_err(hdev->dev,
170 "debug coresight operation failed %d\n", rc);
171 goto out;
172 }
173
174 if (output) {
175 if (copy_to_user((void __user *) (uintptr_t) args->output_ptr,
176 output,
177 args->output_size)) {
178 dev_err(hdev->dev,
179 "copy to user failed in debug ioctl\n");
180 rc = -EFAULT;
181 goto out;
182 }
183 }
184
185out:
186 kfree(params);
187 kfree(output);
188 kfree(input);
189
190 return rc;
191}
192
102static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data) 193static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
103{ 194{
104 struct hl_info_args *args = data; 195 struct hl_info_args *args = data;
105 struct hl_device *hdev = hpriv->hdev; 196 struct hl_device *hdev = hpriv->hdev;
106 int rc; 197 int rc;
107 198
199 /* We want to return device status even if it is disabled or in reset */
200 if (args->op == HL_INFO_DEVICE_STATUS)
201 return device_status_info(hdev, args);
202
108 if (hl_device_disabled_or_in_reset(hdev)) { 203 if (hl_device_disabled_or_in_reset(hdev)) {
109 dev_err(hdev->dev, 204 dev_warn_ratelimited(hdev->dev,
110 "Device is disabled or in reset. Can't execute INFO IOCTL\n"); 205 "Device is %s. Can't execute INFO IOCTL\n",
206 atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
111 return -EBUSY; 207 return -EBUSY;
112 } 208 }
113 209
@@ -137,6 +233,40 @@ static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
137 return rc; 233 return rc;
138} 234}
139 235
236static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
237{
238 struct hl_debug_args *args = data;
239 struct hl_device *hdev = hpriv->hdev;
240 int rc = 0;
241
242 if (hl_device_disabled_or_in_reset(hdev)) {
243 dev_warn_ratelimited(hdev->dev,
244 "Device is %s. Can't execute DEBUG IOCTL\n",
245 atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
246 return -EBUSY;
247 }
248
249 switch (args->op) {
250 case HL_DEBUG_OP_ETR:
251 case HL_DEBUG_OP_ETF:
252 case HL_DEBUG_OP_STM:
253 case HL_DEBUG_OP_FUNNEL:
254 case HL_DEBUG_OP_BMON:
255 case HL_DEBUG_OP_SPMU:
256 case HL_DEBUG_OP_TIMESTAMP:
257 args->input_size =
258 min(args->input_size, hl_debug_struct_size[args->op]);
259 rc = debug_coresight(hdev, args);
260 break;
261 default:
262 dev_err(hdev->dev, "Invalid request %d\n", args->op);
263 rc = -ENOTTY;
264 break;
265 }
266
267 return rc;
268}
269
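
hl_debug_ioctl validates the op, clamps input_size to the matching entry in hl_debug_struct_size, and forwards to debug_coresight. A hedged userspace sketch enabling one component; the ETF parameter contents are invented:

/* Hypothetical userspace invocation of the new DEBUG ioctl. */
struct hl_debug_params_etf etf = {0};	/* component-specific input */
struct hl_debug_args args = {
	.input_ptr = (__u64) (uintptr_t) &etf,
	.input_size = sizeof(etf),
	.op = HL_DEBUG_OP_ETF,
	.reg_idx = 0,
	.enable = 1,
};

if (ioctl(fd, HL_IOCTL_DEBUG, &args))
	perror("HL_IOCTL_DEBUG");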
140#define HL_IOCTL_DEF(ioctl, _func) \ 270#define HL_IOCTL_DEF(ioctl, _func) \
141 [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func} 271 [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
142 272
@@ -145,7 +275,8 @@ static const struct hl_ioctl_desc hl_ioctls[] = {
145 HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl), 275 HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
146 HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl), 276 HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
147 HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl), 277 HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl),
148 HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl) 278 HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
279 HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
149}; 280};
150 281
151#define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls) 282#define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls)
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index ef3bb6951360..2894d8975933 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -82,7 +82,7 @@ static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
82 bd += hl_pi_2_offset(q->pi); 82 bd += hl_pi_2_offset(q->pi);
83 bd->ctl = __cpu_to_le32(ctl); 83 bd->ctl = __cpu_to_le32(ctl);
84 bd->len = __cpu_to_le32(len); 84 bd->len = __cpu_to_le32(len);
85 bd->ptr = __cpu_to_le64(ptr + hdev->asic_prop.host_phys_base_address); 85 bd->ptr = __cpu_to_le64(ptr);
86 86
87 q->pi = hl_queue_inc_ptr(q->pi); 87 q->pi = hl_queue_inc_ptr(q->pi);
88 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); 88 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
@@ -263,9 +263,7 @@ static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
263 * checked in hl_queue_sanity_checks 263 * checked in hl_queue_sanity_checks
264 */ 264 */
265 cq = &hdev->completion_queue[q->hw_queue_id]; 265 cq = &hdev->completion_queue[q->hw_queue_id];
266 cq_addr = cq->bus_address + 266 cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);
267 hdev->asic_prop.host_phys_base_address;
268 cq_addr += cq->pi * sizeof(struct hl_cq_entry);
269 267
270 hdev->asic_funcs->add_end_of_cb_packets(cb->kernel_address, len, 268 hdev->asic_funcs->add_end_of_cb_packets(cb->kernel_address, len,
271 cq_addr, 269 cq_addr,
@@ -415,14 +413,20 @@ void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
415} 413}
416 414
417static int ext_and_cpu_hw_queue_init(struct hl_device *hdev, 415static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
418 struct hl_hw_queue *q) 416 struct hl_hw_queue *q, bool is_cpu_queue)
419{ 417{
420 void *p; 418 void *p;
421 int rc; 419 int rc;
422 420
423 p = hdev->asic_funcs->dma_alloc_coherent(hdev, 421 if (is_cpu_queue)
424 HL_QUEUE_SIZE_IN_BYTES, 422 p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
425 &q->bus_address, GFP_KERNEL | __GFP_ZERO); 423 HL_QUEUE_SIZE_IN_BYTES,
424 &q->bus_address);
425 else
426 p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
427 HL_QUEUE_SIZE_IN_BYTES,
428 &q->bus_address,
429 GFP_KERNEL | __GFP_ZERO);
426 if (!p) 430 if (!p)
427 return -ENOMEM; 431 return -ENOMEM;
428 432
@@ -446,8 +450,15 @@ static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
446 return 0; 450 return 0;
447 451
448free_queue: 452free_queue:
449 hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, 453 if (is_cpu_queue)
450 (void *) (uintptr_t) q->kernel_address, q->bus_address); 454 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
455 HL_QUEUE_SIZE_IN_BYTES,
456 (void *) (uintptr_t) q->kernel_address);
457 else
458 hdev->asic_funcs->asic_dma_free_coherent(hdev,
459 HL_QUEUE_SIZE_IN_BYTES,
460 (void *) (uintptr_t) q->kernel_address,
461 q->bus_address);
451 462
452 return rc; 463 return rc;
453} 464}
@@ -474,12 +485,12 @@ static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
474 485
475static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) 486static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
476{ 487{
477 return ext_and_cpu_hw_queue_init(hdev, q); 488 return ext_and_cpu_hw_queue_init(hdev, q, true);
478} 489}
479 490
480static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) 491static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
481{ 492{
482 return ext_and_cpu_hw_queue_init(hdev, q); 493 return ext_and_cpu_hw_queue_init(hdev, q, false);
483} 494}
484 495
485/* 496/*
@@ -569,8 +580,15 @@ static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
569 580
570 kfree(q->shadow_queue); 581 kfree(q->shadow_queue);
571 582
572 hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, 583 if (q->queue_type == QUEUE_TYPE_CPU)
573 (void *) (uintptr_t) q->kernel_address, q->bus_address); 584 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
585 HL_QUEUE_SIZE_IN_BYTES,
586 (void *) (uintptr_t) q->kernel_address);
587 else
588 hdev->asic_funcs->asic_dma_free_coherent(hdev,
589 HL_QUEUE_SIZE_IN_BYTES,
590 (void *) (uintptr_t) q->kernel_address,
591 q->bus_address);
574} 592}
575 593
576int hl_hw_queues_create(struct hl_device *hdev) 594int hl_hw_queues_create(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h
index 9dddb917e72c..1f1e35e86d84 100644
--- a/drivers/misc/habanalabs/include/armcp_if.h
+++ b/drivers/misc/habanalabs/include/armcp_if.h
@@ -32,8 +32,6 @@ struct hl_eq_entry {
32#define EQ_CTL_EVENT_TYPE_SHIFT 16 32#define EQ_CTL_EVENT_TYPE_SHIFT 16
33#define EQ_CTL_EVENT_TYPE_MASK 0x03FF0000 33#define EQ_CTL_EVENT_TYPE_MASK 0x03FF0000
34 34
35#define EVENT_QUEUE_MSIX_IDX 5
36
37enum pq_init_status { 35enum pq_init_status {
38 PQ_INIT_STATUS_NA = 0, 36 PQ_INIT_STATUS_NA = 0,
39 PQ_INIT_STATUS_READY_FOR_CP, 37 PQ_INIT_STATUS_READY_FOR_CP,
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
index 2cf5c46b6e8e..4e0dbbbbde20 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
@@ -188,4 +188,3 @@
188#define CPU_CA53_CFG_ARM_PMU_EVENT_MASK 0x3FFFFFFF 188#define CPU_CA53_CFG_ARM_PMU_EVENT_MASK 0x3FFFFFFF
189 189
190#endif /* ASIC_REG_CPU_CA53_CFG_MASKS_H_ */ 190#endif /* ASIC_REG_CPU_CA53_CFG_MASKS_H_ */
191
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
index 840ccffa1081..f3faf1aad91a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
@@ -58,4 +58,3 @@
58#define mmCPU_CA53_CFG_ARM_PMU_1 0x441214 58#define mmCPU_CA53_CFG_ARM_PMU_1 0x441214
59 59
60#endif /* ASIC_REG_CPU_CA53_CFG_REGS_H_ */ 60#endif /* ASIC_REG_CPU_CA53_CFG_REGS_H_ */
61
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
index f23cb3e41c30..cf657918962a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
@@ -46,4 +46,3 @@
46#define mmCPU_IF_AXI_SPLIT_INTR 0x442130 46#define mmCPU_IF_AXI_SPLIT_INTR 0x442130
47 47
48#endif /* ASIC_REG_CPU_IF_REGS_H_ */ 48#endif /* ASIC_REG_CPU_IF_REGS_H_ */
49
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
index 8fc97f838ada..8c8f9726d4b9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
@@ -102,4 +102,3 @@
102#define mmCPU_PLL_FREQ_CALC_EN 0x4A2440 102#define mmCPU_PLL_FREQ_CALC_EN 0x4A2440
103 103
104#endif /* ASIC_REG_CPU_PLL_REGS_H_ */ 104#endif /* ASIC_REG_CPU_PLL_REGS_H_ */
105
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
index 61c8cd9ce58b..0b246fe6ad04 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
@@ -206,4 +206,3 @@
206#define mmDMA_CH_0_MEM_INIT_BUSY 0x4011FC 206#define mmDMA_CH_0_MEM_INIT_BUSY 0x4011FC
207 207
208#endif /* ASIC_REG_DMA_CH_0_REGS_H_ */ 208#endif /* ASIC_REG_DMA_CH_0_REGS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
index 92960ef5e308..5449031722f2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
@@ -206,4 +206,3 @@
206#define mmDMA_CH_1_MEM_INIT_BUSY 0x4091FC 206#define mmDMA_CH_1_MEM_INIT_BUSY 0x4091FC
207 207
208#endif /* ASIC_REG_DMA_CH_1_REGS_H_ */ 208#endif /* ASIC_REG_DMA_CH_1_REGS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
index 4e37871a51bb..a4768521d18a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
@@ -206,4 +206,3 @@
206#define mmDMA_CH_2_MEM_INIT_BUSY 0x4111FC 206#define mmDMA_CH_2_MEM_INIT_BUSY 0x4111FC
207 207
208#endif /* ASIC_REG_DMA_CH_2_REGS_H_ */ 208#endif /* ASIC_REG_DMA_CH_2_REGS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
index a2d6aeb32a18..619d01897ff8 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
@@ -206,4 +206,3 @@
206#define mmDMA_CH_3_MEM_INIT_BUSY 0x4191FC 206#define mmDMA_CH_3_MEM_INIT_BUSY 0x4191FC
207 207
208#endif /* ASIC_REG_DMA_CH_3_REGS_H_ */ 208#endif /* ASIC_REG_DMA_CH_3_REGS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
index 400d6fd3acf5..038617e163f1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
@@ -206,4 +206,3 @@
206#define mmDMA_CH_4_MEM_INIT_BUSY 0x4211FC 206#define mmDMA_CH_4_MEM_INIT_BUSY 0x4211FC
207 207
208#endif /* ASIC_REG_DMA_CH_4_REGS_H_ */ 208#endif /* ASIC_REG_DMA_CH_4_REGS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
index 8d965443c51e..f43b564af1be 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
@@ -102,4 +102,3 @@
102#define DMA_MACRO_RAZWI_HBW_RD_ID_R_MASK 0x1FFFFFFF 102#define DMA_MACRO_RAZWI_HBW_RD_ID_R_MASK 0x1FFFFFFF
103 103
104#endif /* ASIC_REG_DMA_MACRO_MASKS_H_ */ 104#endif /* ASIC_REG_DMA_MACRO_MASKS_H_ */
105
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
index 8bfcb001189d..c3bfc1b8e3fd 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
@@ -178,4 +178,3 @@
178#define mmDMA_MACRO_RAZWI_HBW_RD_ID 0x4B0158 178#define mmDMA_MACRO_RAZWI_HBW_RD_ID 0x4B0158
179 179
180#endif /* ASIC_REG_DMA_MACRO_REGS_H_ */ 180#endif /* ASIC_REG_DMA_MACRO_REGS_H_ */
181
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
index 9f33f351a3c1..bc977488c072 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
@@ -206,4 +206,3 @@
206#define DMA_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1 206#define DMA_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
207 207
208#endif /* ASIC_REG_DMA_NRTR_MASKS_H_ */ 208#endif /* ASIC_REG_DMA_NRTR_MASKS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
index d8293745a02b..c4abc7ff1fc6 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
@@ -224,4 +224,3 @@
224#define mmDMA_NRTR_NON_LIN_SCRAMB 0x1C0604 224#define mmDMA_NRTR_NON_LIN_SCRAMB 0x1C0604
225 225
226#endif /* ASIC_REG_DMA_NRTR_REGS_H_ */ 226#endif /* ASIC_REG_DMA_NRTR_REGS_H_ */
227
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
index 10619dbb9b17..b17f72c31ab6 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
@@ -462,4 +462,3 @@
462#define DMA_QM_0_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF 462#define DMA_QM_0_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
463 463
464#endif /* ASIC_REG_DMA_QM_0_MASKS_H_ */ 464#endif /* ASIC_REG_DMA_QM_0_MASKS_H_ */
465
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
index c693bc5dcb22..bf360b301154 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
@@ -176,4 +176,3 @@
176#define mmDMA_QM_0_CQ_BUF_RDATA 0x40030C 176#define mmDMA_QM_0_CQ_BUF_RDATA 0x40030C
177 177
178#endif /* ASIC_REG_DMA_QM_0_REGS_H_ */ 178#endif /* ASIC_REG_DMA_QM_0_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
index da928390f89c..51d432d05ac4 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
@@ -176,4 +176,3 @@
176#define mmDMA_QM_1_CQ_BUF_RDATA 0x40830C 176#define mmDMA_QM_1_CQ_BUF_RDATA 0x40830C
177 177
178#endif /* ASIC_REG_DMA_QM_1_REGS_H_ */ 178#endif /* ASIC_REG_DMA_QM_1_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
index b4f06e9b71d6..18fc0c2b6cc2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
@@ -176,4 +176,3 @@
176#define mmDMA_QM_2_CQ_BUF_RDATA 0x41030C 176#define mmDMA_QM_2_CQ_BUF_RDATA 0x41030C
177 177
178#endif /* ASIC_REG_DMA_QM_2_REGS_H_ */ 178#endif /* ASIC_REG_DMA_QM_2_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
index 53e3cd78a06b..6cf7204bf5cc 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
@@ -176,4 +176,3 @@
176#define mmDMA_QM_3_CQ_BUF_RDATA 0x41830C 176#define mmDMA_QM_3_CQ_BUF_RDATA 0x41830C
177 177
178#endif /* ASIC_REG_DMA_QM_3_REGS_H_ */ 178#endif /* ASIC_REG_DMA_QM_3_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
index e0eb5f260201..36fef2682875 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
@@ -176,4 +176,3 @@
176#define mmDMA_QM_4_CQ_BUF_RDATA 0x42030C 176#define mmDMA_QM_4_CQ_BUF_RDATA 0x42030C
177 177
178#endif /* ASIC_REG_DMA_QM_4_REGS_H_ */ 178#endif /* ASIC_REG_DMA_QM_4_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
index a161ecfe74de..8618891d5afa 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
@@ -189,18 +189,6 @@
189 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\ 189 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\
190 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT) 190 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT)
191 191
192/* PCI CONFIGURATION SPACE */
193#define mmPCI_CONFIG_ELBI_ADDR 0xFF0
194#define mmPCI_CONFIG_ELBI_DATA 0xFF4
195#define mmPCI_CONFIG_ELBI_CTRL 0xFF8
196#define PCI_CONFIG_ELBI_CTRL_WRITE (1 << 31)
197
198#define mmPCI_CONFIG_ELBI_STS 0xFFC
199#define PCI_CONFIG_ELBI_STS_ERR (1 << 30)
200#define PCI_CONFIG_ELBI_STS_DONE (1 << 31)
201#define PCI_CONFIG_ELBI_STS_MASK (PCI_CONFIG_ELBI_STS_ERR | \
202 PCI_CONFIG_ELBI_STS_DONE)
203
204#define GOYA_IRQ_HBW_ID_MASK 0x1FFF 192#define GOYA_IRQ_HBW_ID_MASK 0x1FFF
205#define GOYA_IRQ_HBW_ID_SHIFT 0 193#define GOYA_IRQ_HBW_ID_SHIFT 0
206#define GOYA_IRQ_HBW_INTERNAL_ID_MASK 0xE000 194#define GOYA_IRQ_HBW_INTERNAL_ID_MASK 0xE000
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
index 6cb0b6e54d41..506e71e201e1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0
2 * 2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd. 3 * Copyright 2016-2019 HabanaLabs, Ltd.
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 */ 6 */
@@ -12,6 +12,7 @@
12#include "stlb_regs.h" 12#include "stlb_regs.h"
13#include "mmu_regs.h" 13#include "mmu_regs.h"
14#include "pcie_aux_regs.h" 14#include "pcie_aux_regs.h"
15#include "pcie_wrap_regs.h"
15#include "psoc_global_conf_regs.h" 16#include "psoc_global_conf_regs.h"
16#include "psoc_spi_regs.h" 17#include "psoc_spi_regs.h"
17#include "psoc_mme_pll_regs.h" 18#include "psoc_mme_pll_regs.h"
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
index 0a743817aad7..4ae7fed8b18c 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
@@ -102,4 +102,3 @@
102#define mmIC_PLL_FREQ_CALC_EN 0x4A3440 102#define mmIC_PLL_FREQ_CALC_EN 0x4A3440
103 103
104#endif /* ASIC_REG_IC_PLL_REGS_H_ */ 104#endif /* ASIC_REG_IC_PLL_REGS_H_ */
105
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
index 4408188aa067..6d35d852798b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
@@ -102,4 +102,3 @@
102#define mmMC_PLL_FREQ_CALC_EN 0x4A1440 102#define mmMC_PLL_FREQ_CALC_EN 0x4A1440
103 103
104#endif /* ASIC_REG_MC_PLL_REGS_H_ */ 104#endif /* ASIC_REG_MC_PLL_REGS_H_ */
105
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
index 687bca5c5fe3..6c23f8b96e7e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
@@ -650,4 +650,3 @@
650#define MME1_RTR_NON_LIN_SCRAMB_EN_MASK 0x1 650#define MME1_RTR_NON_LIN_SCRAMB_EN_MASK 0x1
651 651
652#endif /* ASIC_REG_MME1_RTR_MASKS_H_ */ 652#endif /* ASIC_REG_MME1_RTR_MASKS_H_ */
653
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
index c248339a1cbe..122e9d529939 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
@@ -328,4 +328,3 @@
328#define mmMME1_RTR_NON_LIN_SCRAMB 0x40604 328#define mmMME1_RTR_NON_LIN_SCRAMB 0x40604
329 329
330#endif /* ASIC_REG_MME1_RTR_REGS_H_ */ 330#endif /* ASIC_REG_MME1_RTR_REGS_H_ */
331
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
index 7a2b777bdc4f..00ce2252bbfb 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
@@ -328,4 +328,3 @@
328#define mmMME2_RTR_NON_LIN_SCRAMB 0x80604 328#define mmMME2_RTR_NON_LIN_SCRAMB 0x80604
329 329
330#endif /* ASIC_REG_MME2_RTR_REGS_H_ */ 330#endif /* ASIC_REG_MME2_RTR_REGS_H_ */
331
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
index b78f8bc387fc..8e3eb7fd2070 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
@@ -328,4 +328,3 @@
328#define mmMME3_RTR_NON_LIN_SCRAMB 0xC0604 328#define mmMME3_RTR_NON_LIN_SCRAMB 0xC0604
329 329
330#endif /* ASIC_REG_MME3_RTR_REGS_H_ */ 330#endif /* ASIC_REG_MME3_RTR_REGS_H_ */
331
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
index d9a4a02cefa3..79b67bbc8567 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
@@ -328,4 +328,3 @@
328#define mmMME4_RTR_NON_LIN_SCRAMB 0x100604 328#define mmMME4_RTR_NON_LIN_SCRAMB 0x100604
329 329
330#endif /* ASIC_REG_MME4_RTR_REGS_H_ */ 330#endif /* ASIC_REG_MME4_RTR_REGS_H_ */
331
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
index 205adc988407..0ac3c37ce47f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
@@ -328,4 +328,3 @@
328#define mmMME5_RTR_NON_LIN_SCRAMB 0x140604 328#define mmMME5_RTR_NON_LIN_SCRAMB 0x140604
329 329
330#endif /* ASIC_REG_MME5_RTR_REGS_H_ */ 330#endif /* ASIC_REG_MME5_RTR_REGS_H_ */
331
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
index fcec68388278..50c49cce72a6 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
@@ -328,4 +328,3 @@
328#define mmMME6_RTR_NON_LIN_SCRAMB 0x180604 328#define mmMME6_RTR_NON_LIN_SCRAMB 0x180604
329 329
330#endif /* ASIC_REG_MME6_RTR_REGS_H_ */ 330#endif /* ASIC_REG_MME6_RTR_REGS_H_ */
331
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
index a0d4382fbbd0..fe7d95bdcef9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
@@ -370,4 +370,3 @@
370#define MME_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF 370#define MME_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
371 371
372#endif /* ASIC_REG_MME_CMDQ_MASKS_H_ */ 372#endif /* ASIC_REG_MME_CMDQ_MASKS_H_ */
373
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
index 5c2f6b870a58..5f8b85d2b4b1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
@@ -136,4 +136,3 @@
136#define mmMME_CMDQ_CQ_BUF_RDATA 0xD930C 136#define mmMME_CMDQ_CQ_BUF_RDATA 0xD930C
137 137
138#endif /* ASIC_REG_MME_CMDQ_REGS_H_ */ 138#endif /* ASIC_REG_MME_CMDQ_REGS_H_ */
139
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
index c7b1b0bb3384..1882c413cbe0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
@@ -1534,4 +1534,3 @@
1534#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000 1534#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000
1535 1535
1536#endif /* ASIC_REG_MME_MASKS_H_ */ 1536#endif /* ASIC_REG_MME_MASKS_H_ */
1537
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
index d4bfa58dce19..e464e381555c 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
@@ -462,4 +462,3 @@
462#define MME_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF 462#define MME_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF
463 463
464#endif /* ASIC_REG_MME_QM_MASKS_H_ */ 464#endif /* ASIC_REG_MME_QM_MASKS_H_ */
465
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
index b5b1c776f6c3..538708beffc9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
@@ -176,4 +176,3 @@
176#define mmMME_QM_CQ_BUF_RDATA 0xD830C 176#define mmMME_QM_CQ_BUF_RDATA 0xD830C
177 177
178#endif /* ASIC_REG_MME_QM_REGS_H_ */ 178#endif /* ASIC_REG_MME_QM_REGS_H_ */
179
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
index 9436b1e2705a..0396cbfd5c89 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
@@ -1150,4 +1150,3 @@
1150#define mmMME_SHADOW_3_E_BUBBLES_PER_SPLIT 0xD0BAC 1150#define mmMME_SHADOW_3_E_BUBBLES_PER_SPLIT 0xD0BAC
1151 1151
1152#endif /* ASIC_REG_MME_REGS_H_ */ 1152#endif /* ASIC_REG_MME_REGS_H_ */
1153
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
index 3a78078d3c4c..c3e69062b135 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
@@ -140,4 +140,3 @@
140#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF 140#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF
141 141
142#endif /* ASIC_REG_MMU_MASKS_H_ */ 142#endif /* ASIC_REG_MMU_MASKS_H_ */
143
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
index bec6c014135c..7ec81f12031e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
@@ -50,4 +50,3 @@
50#define mmMMU_ACCESS_ERROR_CAPTURE_VA 0x480040 50#define mmMMU_ACCESS_ERROR_CAPTURE_VA 0x480040
51 51
52#endif /* ASIC_REG_MMU_REGS_H_ */ 52#endif /* ASIC_REG_MMU_REGS_H_ */
53
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
index 209e41402a11..ceb59f2e28b3 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
@@ -206,4 +206,3 @@
206#define PCI_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1 206#define PCI_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1
207 207
208#endif /* ASIC_REG_PCI_NRTR_MASKS_H_ */ 208#endif /* ASIC_REG_PCI_NRTR_MASKS_H_ */
209
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
index 447e5d4e7dc8..dd067f301ac2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
@@ -224,4 +224,3 @@
224#define mmPCI_NRTR_NON_LIN_SCRAMB 0x604 224#define mmPCI_NRTR_NON_LIN_SCRAMB 0x604
225 225
226#endif /* ASIC_REG_PCI_NRTR_REGS_H_ */ 226#endif /* ASIC_REG_PCI_NRTR_REGS_H_ */
227
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
index daaf5d9079dc..35b1d8ac6f63 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
@@ -240,4 +240,3 @@
240#define mmPCIE_AUX_PERST 0xC079B8 240#define mmPCIE_AUX_PERST 0xC079B8
241 241
242#endif /* ASIC_REG_PCIE_AUX_REGS_H_ */ 242#endif /* ASIC_REG_PCIE_AUX_REGS_H_ */
243
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
new file mode 100644
index 000000000000..d1e55aace4a0
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
@@ -0,0 +1,306 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright 2016-2018 HabanaLabs, Ltd.
4 * All Rights Reserved.
5 *
6 */
7
8/************************************
9 ** This is an auto-generated file **
10 ** DO NOT EDIT BELOW **
11 ************************************/
12
13#ifndef ASIC_REG_PCIE_WRAP_REGS_H_
14#define ASIC_REG_PCIE_WRAP_REGS_H_
15
16/*
17 *****************************************
18 * PCIE_WRAP (Prototype: PCIE_WRAP)
19 *****************************************
20 */
21
22#define mmPCIE_WRAP_PHY_RST_N 0xC01300
23
24#define mmPCIE_WRAP_OUTSTAND_TRANS 0xC01400
25
26#define mmPCIE_WRAP_MASK_REQ 0xC01404
27
28#define mmPCIE_WRAP_IND_AWADDR_L 0xC01500
29
30#define mmPCIE_WRAP_IND_AWADDR_H 0xC01504
31
32#define mmPCIE_WRAP_IND_AWLEN 0xC01508
33
34#define mmPCIE_WRAP_IND_AWSIZE 0xC0150C
35
36#define mmPCIE_WRAP_IND_AWBURST 0xC01510
37
38#define mmPCIE_WRAP_IND_AWLOCK 0xC01514
39
40#define mmPCIE_WRAP_IND_AWCACHE 0xC01518
41
42#define mmPCIE_WRAP_IND_AWPROT 0xC0151C
43
44#define mmPCIE_WRAP_IND_AWVALID 0xC01520
45
46#define mmPCIE_WRAP_IND_WDATA_0 0xC01524
47
48#define mmPCIE_WRAP_IND_WDATA_1 0xC01528
49
50#define mmPCIE_WRAP_IND_WDATA_2 0xC0152C
51
52#define mmPCIE_WRAP_IND_WDATA_3 0xC01530
53
54#define mmPCIE_WRAP_IND_WSTRB 0xC01544
55
56#define mmPCIE_WRAP_IND_WLAST 0xC01548
57
58#define mmPCIE_WRAP_IND_WVALID 0xC0154C
59
60#define mmPCIE_WRAP_IND_BRESP 0xC01550
61
62#define mmPCIE_WRAP_IND_BVALID 0xC01554
63
64#define mmPCIE_WRAP_IND_ARADDR_0 0xC01558
65
66#define mmPCIE_WRAP_IND_ARADDR_1 0xC0155C
67
68#define mmPCIE_WRAP_IND_ARLEN 0xC01560
69
70#define mmPCIE_WRAP_IND_ARSIZE 0xC01564
71
72#define mmPCIE_WRAP_IND_ARBURST 0xC01568
73
74#define mmPCIE_WRAP_IND_ARLOCK 0xC0156C
75
76#define mmPCIE_WRAP_IND_ARCACHE 0xC01570
77
78#define mmPCIE_WRAP_IND_ARPROT 0xC01574
79
80#define mmPCIE_WRAP_IND_ARVALID 0xC01578
81
82#define mmPCIE_WRAP_IND_RDATA_0 0xC0157C
83
84#define mmPCIE_WRAP_IND_RDATA_1 0xC01580
85
86#define mmPCIE_WRAP_IND_RDATA_2 0xC01584
87
88#define mmPCIE_WRAP_IND_RDATA_3 0xC01588
89
90#define mmPCIE_WRAP_IND_RLAST 0xC0159C
91
92#define mmPCIE_WRAP_IND_RRESP 0xC015A0
93
94#define mmPCIE_WRAP_IND_RVALID 0xC015A4
95
96#define mmPCIE_WRAP_IND_AWMISC_INFO 0xC015A8
97
98#define mmPCIE_WRAP_IND_AWMISC_INFO_HDR_34DW_0 0xC015AC
99
100#define mmPCIE_WRAP_IND_AWMISC_INFO_HDR_34DW_1 0xC015B0
101
102#define mmPCIE_WRAP_IND_AWMISC_INFO_P_TAG 0xC015B4
103
104#define mmPCIE_WRAP_IND_AWMISC_INFO_ATU_BYPAS 0xC015B8
105
+#define mmPCIE_WRAP_IND_AWMISC_INFO_FUNC_NUM	0xC015BC
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_VFUNC_ACT	0xC015C0
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_VFUNC_NUM	0xC015C4
+
+#define mmPCIE_WRAP_IND_AWMISC_INFO_TLPPRFX	0xC015C8
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO	0xC015CC
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_TLPPRFX	0xC015D0
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_ATU_BYP	0xC015D4
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_FUNC_NUM	0xC015D8
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_VFUNC_ACT	0xC015DC
+
+#define mmPCIE_WRAP_IND_ARMISC_INFO_VFUNC_NUM	0xC015E0
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO	0xC01800
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_HDR_34DW_0	0xC01804
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_HDR_34DW_1	0xC01808
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_P_TAG	0xC0180C
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_ATU_BYPAS	0xC01810
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_FUNC_NUM	0xC01814
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_VFUNC_ACT	0xC01818
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_VFUNC_NUM	0xC0181C
+
+#define mmPCIE_WRAP_SLV_AWMISC_INFO_TLPPRFX	0xC01820
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO	0xC01824
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_TLPPRFX	0xC01828
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_ATU_BYP	0xC0182C
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_FUNC_NUM	0xC01830
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_VFUNC_ACT	0xC01834
+
+#define mmPCIE_WRAP_SLV_ARMISC_INFO_VFUNC_NUM	0xC01838
+
+#define mmPCIE_WRAP_MAX_QID	0xC01900
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_0	0xC01910
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_1	0xC01914
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_2	0xC01918
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_L_3	0xC0191C
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_0	0xC01920
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_1	0xC01924
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_2	0xC01928
+
+#define mmPCIE_WRAP_DB_BASE_ADDR_H_3	0xC0192C
+
+#define mmPCIE_WRAP_DB_MASK	0xC01940
+
+#define mmPCIE_WRAP_SQ_BASE_ADDR_H	0xC01A00
+
+#define mmPCIE_WRAP_SQ_BASE_ADDR_L	0xC01A04
+
+#define mmPCIE_WRAP_SQ_STRIDE_ACCRESS	0xC01A08
+
+#define mmPCIE_WRAP_SQ_POP_CMD	0xC01A10
+
+#define mmPCIE_WRAP_SQ_POP_DATA	0xC01A14
+
+#define mmPCIE_WRAP_DB_INTR_0	0xC01A20
+
+#define mmPCIE_WRAP_DB_INTR_1	0xC01A24
+
+#define mmPCIE_WRAP_DB_INTR_2	0xC01A28
+
+#define mmPCIE_WRAP_DB_INTR_3	0xC01A2C
+
+#define mmPCIE_WRAP_DB_INTR_4	0xC01A30
+
+#define mmPCIE_WRAP_DB_INTR_5	0xC01A34
+
+#define mmPCIE_WRAP_DB_INTR_6	0xC01A38
+
+#define mmPCIE_WRAP_DB_INTR_7	0xC01A3C
+
+#define mmPCIE_WRAP_MMU_BYPASS_DMA	0xC01A80
+
+#define mmPCIE_WRAP_MMU_BYPASS_NON_DMA	0xC01A84
+
+#define mmPCIE_WRAP_ASID_NON_DMA	0xC01A90
+
+#define mmPCIE_WRAP_ASID_DMA_0	0xC01AA0
+
+#define mmPCIE_WRAP_ASID_DMA_1	0xC01AA4
+
+#define mmPCIE_WRAP_ASID_DMA_2	0xC01AA8
+
+#define mmPCIE_WRAP_ASID_DMA_3	0xC01AAC
+
+#define mmPCIE_WRAP_ASID_DMA_4	0xC01AB0
+
+#define mmPCIE_WRAP_ASID_DMA_5	0xC01AB4
+
+#define mmPCIE_WRAP_ASID_DMA_6	0xC01AB8
+
+#define mmPCIE_WRAP_ASID_DMA_7	0xC01ABC
+
+#define mmPCIE_WRAP_CPU_HOT_RST	0xC01AE0
+
+#define mmPCIE_WRAP_AXI_PROT_OVR	0xC01AE4
+
+#define mmPCIE_WRAP_CACHE_OVR	0xC01B00
+
+#define mmPCIE_WRAP_LOCK_OVR	0xC01B04
+
+#define mmPCIE_WRAP_PROT_OVR	0xC01B08
+
+#define mmPCIE_WRAP_ARUSER_OVR	0xC01B0C
+
+#define mmPCIE_WRAP_AWUSER_OVR	0xC01B10
+
+#define mmPCIE_WRAP_ARUSER_OVR_EN	0xC01B14
+
+#define mmPCIE_WRAP_AWUSER_OVR_EN	0xC01B18
+
+#define mmPCIE_WRAP_MAX_OUTSTAND	0xC01B20
+
+#define mmPCIE_WRAP_MST_IN	0xC01B24
+
+#define mmPCIE_WRAP_RSP_OK	0xC01B28
+
+#define mmPCIE_WRAP_LBW_CACHE_OVR	0xC01B40
+
+#define mmPCIE_WRAP_LBW_LOCK_OVR	0xC01B44
+
+#define mmPCIE_WRAP_LBW_PROT_OVR	0xC01B48
+
+#define mmPCIE_WRAP_LBW_ARUSER_OVR	0xC01B4C
+
+#define mmPCIE_WRAP_LBW_AWUSER_OVR	0xC01B50
+
+#define mmPCIE_WRAP_LBW_ARUSER_OVR_EN	0xC01B58
+
+#define mmPCIE_WRAP_LBW_AWUSER_OVR_EN	0xC01B5C
+
+#define mmPCIE_WRAP_LBW_MAX_OUTSTAND	0xC01B60
+
+#define mmPCIE_WRAP_LBW_MST_IN	0xC01B64
+
+#define mmPCIE_WRAP_LBW_RSP_OK	0xC01B68
+
+#define mmPCIE_WRAP_QUEUE_INIT	0xC01C00
+
+#define mmPCIE_WRAP_AXI_SPLIT_INTR_0	0xC01C10
+
+#define mmPCIE_WRAP_AXI_SPLIT_INTR_1	0xC01C14
+
+#define mmPCIE_WRAP_DB_AWUSER	0xC01D00
+
+#define mmPCIE_WRAP_DB_ARUSER	0xC01D04
+
+#define mmPCIE_WRAP_PCIE_AWUSER	0xC01D08
+
+#define mmPCIE_WRAP_PCIE_ARUSER	0xC01D0C
+
+#define mmPCIE_WRAP_PSOC_AWUSER	0xC01D10
+
+#define mmPCIE_WRAP_PSOC_ARUSER	0xC01D14
+
+#define mmPCIE_WRAP_SCH_Q_AWUSER	0xC01D18
+
+#define mmPCIE_WRAP_SCH_Q_ARUSER	0xC01D1C
+
+#define mmPCIE_WRAP_PSOC2PCI_AWUSER	0xC01D40
+
+#define mmPCIE_WRAP_PSOC2PCI_ARUSER	0xC01D44
+
+#define mmPCIE_WRAP_DRAIN_TIMEOUT	0xC01D50
+
+#define mmPCIE_WRAP_DRAIN_CFG	0xC01D54
+
+#define mmPCIE_WRAP_DB_AXI_ERR	0xC01DE0
+
+#define mmPCIE_WRAP_SPMU_INTR	0xC01DE4
+
+#define mmPCIE_WRAP_AXI_INTR	0xC01DE8
+
+#define mmPCIE_WRAP_E2E_CTRL	0xC01DF0
+
+#endif /* ASIC_REG_PCIE_WRAP_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
index 8eda4de58788..9271ea95ebe9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
@@ -102,4 +102,3 @@
 #define mmPSOC_EMMC_PLL_FREQ_CALC_EN	0xC70440
 
 #endif /* ASIC_REG_PSOC_EMMC_PLL_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
index d4bf0e1db4df..324266653c9a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
@@ -444,4 +444,3 @@
 #define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK	0x3
 
 #endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
index cfbdd2c9c5c7..8141f422e712 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
@@ -742,4 +742,3 @@
 #define mmPSOC_GLOBAL_CONF_PAD_SEL_81	0xC4BA44
 
 #endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
index 6723d8f76f30..4789ebb9c337 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
@@ -102,4 +102,3 @@
 #define mmPSOC_MME_PLL_FREQ_CALC_EN	0xC71440
 
 #endif /* ASIC_REG_PSOC_MME_PLL_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
index abcded0531c9..27a296ea6c3d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
@@ -102,4 +102,3 @@
 #define mmPSOC_PCI_PLL_FREQ_CALC_EN	0xC72440
 
 #endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
index 5925c7477c25..66aee7fa6b1e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
@@ -140,4 +140,3 @@
 #define mmPSOC_SPI_RSVD_2	0xC430FC
 
 #endif /* ASIC_REG_PSOC_SPI_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
index d56c9fa0e7ba..2ea1770b078f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
@@ -80,4 +80,3 @@
 #define mmSRAM_Y0_X0_RTR_DBG_L_ARB_MAX	0x201330
 
 #endif /* ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
index 5624544303ca..37e0713efa73 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
@@ -80,4 +80,3 @@
 #define mmSRAM_Y0_X1_RTR_DBG_L_ARB_MAX	0x205330
 
 #endif /* ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
index 3322bc0bd1df..d2572279a2b9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
@@ -80,4 +80,3 @@
 #define mmSRAM_Y0_X2_RTR_DBG_L_ARB_MAX	0x209330
 
 #endif /* ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
index 81e393db2027..68c5b402c506 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
@@ -80,4 +80,3 @@
 #define mmSRAM_Y0_X3_RTR_DBG_L_ARB_MAX	0x20D330
 
 #endif /* ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
index b2e11b1de385..a42f1ba06d28 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
@@ -80,4 +80,3 @@
 #define mmSRAM_Y0_X4_RTR_DBG_L_ARB_MAX	0x211330
 
 #endif /* ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
index b4ea8cae2757..94f2ed4a36bd 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
@@ -114,4 +114,3 @@
 #define STLB_SRAM_INIT_BUSY_DATA_MASK	0x10
 
 #endif /* ASIC_REG_STLB_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
index 0f5281d3e65b..35013f65acd2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
@@ -52,4 +52,3 @@
 #define mmSTLB_SRAM_INIT	0x49004C
 
 #endif /* ASIC_REG_STLB_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
index e5587b49eecd..89c9507a512f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
@@ -1604,4 +1604,3 @@
 #define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_MASK	0x70000000
 
 #endif /* ASIC_REG_TPC0_CFG_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
index 2be28a63c50a..7d71c4b73a5e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
@@ -884,4 +884,3 @@
 #define mmTPC0_CFG_FUNC_MBIST_MEM_9	0xE06E2C
 
 #endif /* ASIC_REG_TPC0_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
index 9aa2d8b53207..9395f2458771 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
@@ -370,4 +370,3 @@
 #define TPC0_CMDQ_CQ_BUF_RDATA_VAL_MASK	0xFFFFFFFF
 
 #endif /* ASIC_REG_TPC0_CMDQ_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
index 3572752ba66e..bc51df573bf0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
@@ -136,4 +136,3 @@
 #define mmTPC0_CMDQ_CQ_BUF_RDATA	0xE0930C
 
 #endif /* ASIC_REG_TPC0_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
index ed866d93c440..553c6b6bd5ec 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
@@ -344,4 +344,3 @@
 #define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_MASK	0x1
 
 #endif /* ASIC_REG_TPC0_EML_CFG_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
index f1a1b4fa4841..8495479c3659 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
@@ -310,4 +310,3 @@
 #define mmTPC0_EML_CFG_DBG_INST_INSERT_CTL	0x3040334
 
 #endif /* ASIC_REG_TPC0_EML_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
index 7f86621179a5..43fafcf01041 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
@@ -206,4 +206,3 @@
 #define TPC0_NRTR_NON_LIN_SCRAMB_EN_MASK	0x1
 
 #endif /* ASIC_REG_TPC0_NRTR_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
index dc280f4e6608..ce3346dd2042 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
@@ -224,4 +224,3 @@
 #define mmTPC0_NRTR_NON_LIN_SCRAMB	0xE00604
 
 #endif /* ASIC_REG_TPC0_NRTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
index 80d97ee3d8d6..2e4b45947944 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
@@ -462,4 +462,3 @@
 #define TPC0_QM_CQ_BUF_RDATA_VAL_MASK	0xFFFFFFFF
 
 #endif /* ASIC_REG_TPC0_QM_MASKS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
index 7552d4ba61fe..4fa09eb88878 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
@@ -176,4 +176,3 @@
 #define mmTPC0_QM_CQ_BUF_RDATA	0xE0830C
 
 #endif /* ASIC_REG_TPC0_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
index 19894413474a..928eef1808ae 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
@@ -884,4 +884,3 @@
 #define mmTPC1_CFG_FUNC_MBIST_MEM_9	0xE46E2C
 
 #endif /* ASIC_REG_TPC1_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
index 9099ebd7ab23..30ae0f307328 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
@@ -136,4 +136,3 @@
 #define mmTPC1_CMDQ_CQ_BUF_RDATA	0xE4930C
 
 #endif /* ASIC_REG_TPC1_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
index bc8b9a10391f..b95de4f95ba9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
@@ -176,4 +176,3 @@
 #define mmTPC1_QM_CQ_BUF_RDATA	0xE4830C
 
 #endif /* ASIC_REG_TPC1_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
index ae267f8f457e..0f91e307879e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
@@ -320,4 +320,3 @@
 #define mmTPC1_RTR_NON_LIN_SCRAMB	0xE40604
 
 #endif /* ASIC_REG_TPC1_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
index 9c33fc039036..73421227f35b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
@@ -884,4 +884,3 @@
 #define mmTPC2_CFG_FUNC_MBIST_MEM_9	0xE86E2C
 
 #endif /* ASIC_REG_TPC2_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
index 7a643887d6e1..27b66bf2da9f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
@@ -136,4 +136,3 @@
 #define mmTPC2_CMDQ_CQ_BUF_RDATA	0xE8930C
 
 #endif /* ASIC_REG_TPC2_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
index f3e32c018064..31e5b2f53905 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
@@ -176,4 +176,3 @@
 #define mmTPC2_QM_CQ_BUF_RDATA	0xE8830C
 
 #endif /* ASIC_REG_TPC2_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
index 0eb0cd1fbd19..4eddeaa15d94 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
@@ -320,4 +320,3 @@
 #define mmTPC2_RTR_NON_LIN_SCRAMB	0xE80604
 
 #endif /* ASIC_REG_TPC2_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
index 0baf63c69b25..ce573a1a8361 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
@@ -884,4 +884,3 @@
 #define mmTPC3_CFG_FUNC_MBIST_MEM_9	0xEC6E2C
 
 #endif /* ASIC_REG_TPC3_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
index 82a5261e852f..11d81fca0a0f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
@@ -136,4 +136,3 @@
 #define mmTPC3_CMDQ_CQ_BUF_RDATA	0xEC930C
 
 #endif /* ASIC_REG_TPC3_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
index b05b1e18e664..e41595a19e69 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
@@ -176,4 +176,3 @@
 #define mmTPC3_QM_CQ_BUF_RDATA	0xEC830C
 
 #endif /* ASIC_REG_TPC3_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
index 5a2fd7652650..34a438b1efe5 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
@@ -320,4 +320,3 @@
 #define mmTPC3_RTR_NON_LIN_SCRAMB	0xEC0604
 
 #endif /* ASIC_REG_TPC3_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
index d64a100075f2..d44caf0fc1bb 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
@@ -884,4 +884,3 @@
 #define mmTPC4_CFG_FUNC_MBIST_MEM_9	0xF06E2C
 
 #endif /* ASIC_REG_TPC4_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
index 565b42885b0d..f13a6532961f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
@@ -136,4 +136,3 @@
 #define mmTPC4_CMDQ_CQ_BUF_RDATA	0xF0930C
 
 #endif /* ASIC_REG_TPC4_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
index 196da3f12710..db081fc17cfc 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
@@ -176,4 +176,3 @@
 #define mmTPC4_QM_CQ_BUF_RDATA	0xF0830C
 
 #endif /* ASIC_REG_TPC4_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
index 8b54041d144a..8c5372303b28 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
@@ -320,4 +320,3 @@
 #define mmTPC4_RTR_NON_LIN_SCRAMB	0xF00604
 
 #endif /* ASIC_REG_TPC4_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
index 3f00954fcdba..5139fde71011 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
@@ -884,4 +884,3 @@
 #define mmTPC5_CFG_FUNC_MBIST_MEM_9	0xF46E2C
 
 #endif /* ASIC_REG_TPC5_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
index d8e72a8e18d7..1e7cd6e1e888 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
@@ -136,4 +136,3 @@
 #define mmTPC5_CMDQ_CQ_BUF_RDATA	0xF4930C
 
 #endif /* ASIC_REG_TPC5_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
index be2e68624709..ac0d3820cd6b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
@@ -176,4 +176,3 @@
 #define mmTPC5_QM_CQ_BUF_RDATA	0xF4830C
 
 #endif /* ASIC_REG_TPC5_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
index 6f301c7bbc2f..57f83bc3b17d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
@@ -320,4 +320,3 @@
 #define mmTPC5_RTR_NON_LIN_SCRAMB	0xF40604
 
 #endif /* ASIC_REG_TPC5_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
index 1e1168601c41..94e0191c06c1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
@@ -884,4 +884,3 @@
 #define mmTPC6_CFG_FUNC_MBIST_MEM_9	0xF86E2C
 
 #endif /* ASIC_REG_TPC6_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
index fbca6b47284e..7a1a0e87b225 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
@@ -136,4 +136,3 @@
 #define mmTPC6_CMDQ_CQ_BUF_RDATA	0xF8930C
 
 #endif /* ASIC_REG_TPC6_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
index bf32465dabcb..80fa0fe0f60f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
@@ -176,4 +176,3 @@
 #define mmTPC6_QM_CQ_BUF_RDATA	0xF8830C
 
 #endif /* ASIC_REG_TPC6_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
index 609bb90e1046..d6cae8b8af66 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
@@ -320,4 +320,3 @@
 #define mmTPC6_RTR_NON_LIN_SCRAMB	0xF80604
 
 #endif /* ASIC_REG_TPC6_RTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
index bf2fd0f73906..234147adb779 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
@@ -884,4 +884,3 @@
 #define mmTPC7_CFG_FUNC_MBIST_MEM_9	0xFC6E2C
 
 #endif /* ASIC_REG_TPC7_CFG_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
index 65d83043bf63..4c160632fe7d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
@@ -136,4 +136,3 @@
 #define mmTPC7_CMDQ_CQ_BUF_RDATA	0xFC930C
 
 #endif /* ASIC_REG_TPC7_CMDQ_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
index 3d5848d87304..0c13d4d167aa 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
@@ -224,4 +224,3 @@
 #define mmTPC7_NRTR_NON_LIN_SCRAMB	0xFC0604
 
 #endif /* ASIC_REG_TPC7_NRTR_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
index 25f5095f68fb..cbe11425bfb0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
@@ -176,4 +176,3 @@
 #define mmTPC7_QM_CQ_BUF_RDATA	0xFC830C
 
 #endif /* ASIC_REG_TPC7_QM_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
index 920231d0afa5..e25e19660a9d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
+++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
@@ -102,4 +102,3 @@
 #define mmTPC_PLL_FREQ_CALC_EN	0xE01440
 
 #endif /* ASIC_REG_TPC_PLL_REGS_H_ */
-
diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h
index 614149efa412..3f02a52ba4ce 100644
--- a/drivers/misc/habanalabs/include/goya/goya.h
+++ b/drivers/misc/habanalabs/include/goya/goya.h
@@ -8,10 +8,6 @@
 #ifndef GOYA_H
 #define GOYA_H
 
-#include "asic_reg/goya_regs.h"
-
-#include <linux/types.h>
-
 #define SRAM_CFG_BAR_ID		0
 #define MSIX_BAR_ID		2
 #define DDR_BAR_ID		4
diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/misc/habanalabs/include/goya/goya_async_events.h
index 497937a17ee9..bb7a1aa3279e 100644
--- a/drivers/misc/habanalabs/include/goya/goya_async_events.h
+++ b/drivers/misc/habanalabs/include/goya/goya_async_events.h
@@ -9,7 +9,9 @@
 #define __GOYA_ASYNC_EVENTS_H_
 
 enum goya_async_event_id {
+	GOYA_ASYNC_EVENT_ID_PCIE_CORE = 32,
 	GOYA_ASYNC_EVENT_ID_PCIE_IF = 33,
+	GOYA_ASYNC_EVENT_ID_PCIE_PHY = 34,
 	GOYA_ASYNC_EVENT_ID_TPC0_ECC = 36,
 	GOYA_ASYNC_EVENT_ID_TPC1_ECC = 39,
 	GOYA_ASYNC_EVENT_ID_TPC2_ECC = 42,
@@ -23,6 +25,8 @@ enum goya_async_event_id {
 	GOYA_ASYNC_EVENT_ID_MMU_ECC = 63,
 	GOYA_ASYNC_EVENT_ID_DMA_MACRO = 64,
 	GOYA_ASYNC_EVENT_ID_DMA_ECC = 66,
+	GOYA_ASYNC_EVENT_ID_DDR0_PARITY = 69,
+	GOYA_ASYNC_EVENT_ID_DDR1_PARITY = 72,
 	GOYA_ASYNC_EVENT_ID_CPU_IF_ECC = 75,
 	GOYA_ASYNC_EVENT_ID_PSOC_MEM = 78,
 	GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT = 79,
@@ -72,6 +76,7 @@ enum goya_async_event_id {
 	GOYA_ASYNC_EVENT_ID_MME_WACSD = 142,
 	GOYA_ASYNC_EVENT_ID_PLL0 = 143,
 	GOYA_ASYNC_EVENT_ID_PLL1 = 144,
+	GOYA_ASYNC_EVENT_ID_PLL2 = 145,
 	GOYA_ASYNC_EVENT_ID_PLL3 = 146,
 	GOYA_ASYNC_EVENT_ID_PLL4 = 147,
 	GOYA_ASYNC_EVENT_ID_PLL5 = 148,
@@ -81,6 +86,7 @@ enum goya_async_event_id {
 	GOYA_ASYNC_EVENT_ID_PSOC = 160,
 	GOYA_ASYNC_EVENT_ID_PCIE_FLR = 171,
 	GOYA_ASYNC_EVENT_ID_PCIE_HOT_RESET = 172,
+	GOYA_ASYNC_EVENT_ID_PCIE_PERST = 173,
 	GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG0 = 174,
 	GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG1 = 175,
 	GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG2 = 176,
@@ -144,8 +150,11 @@ enum goya_async_event_id {
 	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_0 = 330,
 	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_1 = 331,
 	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_2 = 332,
+	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_3 = 333,
+	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_4 = 334,
 	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET = 356,
 	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT = 361,
+	GOYA_ASYNC_EVENT_ID_FAN = 425,
 	GOYA_ASYNC_EVENT_ID_TPC0_CMDQ = 430,
 	GOYA_ASYNC_EVENT_ID_TPC1_CMDQ = 431,
 	GOYA_ASYNC_EVENT_ID_TPC2_CMDQ = 432,
diff --git a/drivers/misc/habanalabs/include/goya/goya_coresight.h b/drivers/misc/habanalabs/include/goya/goya_coresight.h
new file mode 100644
index 000000000000..6e933c0ca5cd
--- /dev/null
+++ b/drivers/misc/habanalabs/include/goya/goya_coresight.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2018 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef GOYA_CORESIGHT_H
+#define GOYA_CORESIGHT_H
+
+enum goya_debug_stm_regs_index {
+	GOYA_STM_FIRST = 0,
+	GOYA_STM_CPU = GOYA_STM_FIRST,
+	GOYA_STM_DMA_CH_0_CS,
+	GOYA_STM_DMA_CH_1_CS,
+	GOYA_STM_DMA_CH_2_CS,
+	GOYA_STM_DMA_CH_3_CS,
+	GOYA_STM_DMA_CH_4_CS,
+	GOYA_STM_DMA_MACRO_CS,
+	GOYA_STM_MME1_SBA,
+	GOYA_STM_MME3_SBB,
+	GOYA_STM_MME4_WACS2,
+	GOYA_STM_MME4_WACS,
+	GOYA_STM_MMU_CS,
+	GOYA_STM_PCIE,
+	GOYA_STM_PSOC,
+	GOYA_STM_TPC0_EML,
+	GOYA_STM_TPC1_EML,
+	GOYA_STM_TPC2_EML,
+	GOYA_STM_TPC3_EML,
+	GOYA_STM_TPC4_EML,
+	GOYA_STM_TPC5_EML,
+	GOYA_STM_TPC6_EML,
+	GOYA_STM_TPC7_EML,
+	GOYA_STM_LAST = GOYA_STM_TPC7_EML
+};
+
+enum goya_debug_etf_regs_index {
+	GOYA_ETF_FIRST = 0,
+	GOYA_ETF_CPU_0 = GOYA_ETF_FIRST,
+	GOYA_ETF_CPU_1,
+	GOYA_ETF_CPU_TRACE,
+	GOYA_ETF_DMA_CH_0_CS,
+	GOYA_ETF_DMA_CH_1_CS,
+	GOYA_ETF_DMA_CH_2_CS,
+	GOYA_ETF_DMA_CH_3_CS,
+	GOYA_ETF_DMA_CH_4_CS,
+	GOYA_ETF_DMA_MACRO_CS,
+	GOYA_ETF_MME1_SBA,
+	GOYA_ETF_MME3_SBB,
+	GOYA_ETF_MME4_WACS2,
+	GOYA_ETF_MME4_WACS,
+	GOYA_ETF_MMU_CS,
+	GOYA_ETF_PCIE,
+	GOYA_ETF_PSOC,
+	GOYA_ETF_TPC0_EML,
+	GOYA_ETF_TPC1_EML,
+	GOYA_ETF_TPC2_EML,
+	GOYA_ETF_TPC3_EML,
+	GOYA_ETF_TPC4_EML,
+	GOYA_ETF_TPC5_EML,
+	GOYA_ETF_TPC6_EML,
+	GOYA_ETF_TPC7_EML,
+	GOYA_ETF_LAST = GOYA_ETF_TPC7_EML
+};
+
+enum goya_debug_funnel_regs_index {
+	GOYA_FUNNEL_FIRST = 0,
+	GOYA_FUNNEL_CPU = GOYA_FUNNEL_FIRST,
+	GOYA_FUNNEL_DMA_CH_6_1,
+	GOYA_FUNNEL_DMA_MACRO_3_1,
+	GOYA_FUNNEL_MME0_RTR,
+	GOYA_FUNNEL_MME1_RTR,
+	GOYA_FUNNEL_MME2_RTR,
+	GOYA_FUNNEL_MME3_RTR,
+	GOYA_FUNNEL_MME4_RTR,
+	GOYA_FUNNEL_MME5_RTR,
+	GOYA_FUNNEL_PCIE,
+	GOYA_FUNNEL_PSOC,
+	GOYA_FUNNEL_TPC0_EML,
+	GOYA_FUNNEL_TPC1_EML,
+	GOYA_FUNNEL_TPC1_RTR,
+	GOYA_FUNNEL_TPC2_EML,
+	GOYA_FUNNEL_TPC2_RTR,
+	GOYA_FUNNEL_TPC3_EML,
+	GOYA_FUNNEL_TPC3_RTR,
+	GOYA_FUNNEL_TPC4_EML,
+	GOYA_FUNNEL_TPC4_RTR,
+	GOYA_FUNNEL_TPC5_EML,
+	GOYA_FUNNEL_TPC5_RTR,
+	GOYA_FUNNEL_TPC6_EML,
+	GOYA_FUNNEL_TPC6_RTR,
+	GOYA_FUNNEL_TPC7_EML,
+	GOYA_FUNNEL_LAST = GOYA_FUNNEL_TPC7_EML
+};
+
+enum goya_debug_bmon_regs_index {
+	GOYA_BMON_FIRST = 0,
+	GOYA_BMON_CPU_RD = GOYA_BMON_FIRST,
+	GOYA_BMON_CPU_WR,
+	GOYA_BMON_DMA_CH_0_0,
+	GOYA_BMON_DMA_CH_0_1,
+	GOYA_BMON_DMA_CH_1_0,
+	GOYA_BMON_DMA_CH_1_1,
+	GOYA_BMON_DMA_CH_2_0,
+	GOYA_BMON_DMA_CH_2_1,
+	GOYA_BMON_DMA_CH_3_0,
+	GOYA_BMON_DMA_CH_3_1,
+	GOYA_BMON_DMA_CH_4_0,
+	GOYA_BMON_DMA_CH_4_1,
+	GOYA_BMON_DMA_MACRO_0,
+	GOYA_BMON_DMA_MACRO_1,
+	GOYA_BMON_DMA_MACRO_2,
+	GOYA_BMON_DMA_MACRO_3,
+	GOYA_BMON_DMA_MACRO_4,
+	GOYA_BMON_DMA_MACRO_5,
+	GOYA_BMON_DMA_MACRO_6,
+	GOYA_BMON_DMA_MACRO_7,
+	GOYA_BMON_MME1_SBA_0,
+	GOYA_BMON_MME1_SBA_1,
+	GOYA_BMON_MME3_SBB_0,
+	GOYA_BMON_MME3_SBB_1,
+	GOYA_BMON_MME4_WACS2_0,
+	GOYA_BMON_MME4_WACS2_1,
+	GOYA_BMON_MME4_WACS2_2,
+	GOYA_BMON_MME4_WACS_0,
+	GOYA_BMON_MME4_WACS_1,
+	GOYA_BMON_MME4_WACS_2,
+	GOYA_BMON_MME4_WACS_3,
+	GOYA_BMON_MME4_WACS_4,
+	GOYA_BMON_MME4_WACS_5,
+	GOYA_BMON_MME4_WACS_6,
+	GOYA_BMON_MMU_0,
+	GOYA_BMON_MMU_1,
+	GOYA_BMON_PCIE_MSTR_RD,
+	GOYA_BMON_PCIE_MSTR_WR,
+	GOYA_BMON_PCIE_SLV_RD,
+	GOYA_BMON_PCIE_SLV_WR,
+	GOYA_BMON_TPC0_EML_0,
+	GOYA_BMON_TPC0_EML_1,
+	GOYA_BMON_TPC0_EML_2,
+	GOYA_BMON_TPC0_EML_3,
+	GOYA_BMON_TPC1_EML_0,
+	GOYA_BMON_TPC1_EML_1,
+	GOYA_BMON_TPC1_EML_2,
+	GOYA_BMON_TPC1_EML_3,
+	GOYA_BMON_TPC2_EML_0,
+	GOYA_BMON_TPC2_EML_1,
+	GOYA_BMON_TPC2_EML_2,
+	GOYA_BMON_TPC2_EML_3,
+	GOYA_BMON_TPC3_EML_0,
+	GOYA_BMON_TPC3_EML_1,
+	GOYA_BMON_TPC3_EML_2,
+	GOYA_BMON_TPC3_EML_3,
+	GOYA_BMON_TPC4_EML_0,
+	GOYA_BMON_TPC4_EML_1,
+	GOYA_BMON_TPC4_EML_2,
+	GOYA_BMON_TPC4_EML_3,
+	GOYA_BMON_TPC5_EML_0,
+	GOYA_BMON_TPC5_EML_1,
+	GOYA_BMON_TPC5_EML_2,
+	GOYA_BMON_TPC5_EML_3,
+	GOYA_BMON_TPC6_EML_0,
+	GOYA_BMON_TPC6_EML_1,
+	GOYA_BMON_TPC6_EML_2,
+	GOYA_BMON_TPC6_EML_3,
+	GOYA_BMON_TPC7_EML_0,
+	GOYA_BMON_TPC7_EML_1,
+	GOYA_BMON_TPC7_EML_2,
+	GOYA_BMON_TPC7_EML_3,
+	GOYA_BMON_LAST = GOYA_BMON_TPC7_EML_3
+};
+
+enum goya_debug_spmu_regs_index {
+	GOYA_SPMU_FIRST = 0,
+	GOYA_SPMU_DMA_CH_0_CS = GOYA_SPMU_FIRST,
+	GOYA_SPMU_DMA_CH_1_CS,
+	GOYA_SPMU_DMA_CH_2_CS,
+	GOYA_SPMU_DMA_CH_3_CS,
+	GOYA_SPMU_DMA_CH_4_CS,
+	GOYA_SPMU_DMA_MACRO_CS,
+	GOYA_SPMU_MME1_SBA,
+	GOYA_SPMU_MME3_SBB,
+	GOYA_SPMU_MME4_WACS2,
+	GOYA_SPMU_MME4_WACS,
+	GOYA_SPMU_MMU_CS,
+	GOYA_SPMU_PCIE,
+	GOYA_SPMU_TPC0_EML,
+	GOYA_SPMU_TPC1_EML,
+	GOYA_SPMU_TPC2_EML,
+	GOYA_SPMU_TPC3_EML,
+	GOYA_SPMU_TPC4_EML,
+	GOYA_SPMU_TPC5_EML,
+	GOYA_SPMU_TPC6_EML,
+	GOYA_SPMU_TPC7_EML,
+	GOYA_SPMU_LAST = GOYA_SPMU_TPC7_EML
+};
+
+#endif /* GOYA_CORESIGHT_H */
diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
index a9920cb4a07b..0fa80fe9f6cc 100644
--- a/drivers/misc/habanalabs/include/goya/goya_fw_if.h
+++ b/drivers/misc/habanalabs/include/goya/goya_fw_if.h
@@ -8,6 +8,8 @@
 #ifndef GOYA_FW_IF_H
 #define GOYA_FW_IF_H
 
+#define GOYA_EVENT_QUEUE_MSIX_IDX	5
+
 #define CPU_BOOT_ADDR		0x7FF8040000ull
 
 #define UBOOT_FW_OFFSET		0x100000	/* 1MB in SRAM */
diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h
index 7475732b9996..4cd04c090285 100644
--- a/drivers/misc/habanalabs/include/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/hl_boot_if.h
@@ -18,7 +18,8 @@ enum cpu_boot_status {
 	CPU_BOOT_STATUS_IN_SPL,
 	CPU_BOOT_STATUS_IN_UBOOT,
 	CPU_BOOT_STATUS_DRAM_INIT_FAIL,
-	CPU_BOOT_STATUS_FIT_CORRUPTED
+	CPU_BOOT_STATUS_FIT_CORRUPTED,
+	CPU_BOOT_STATUS_UBOOT_NOT_READY,
 };
 
 enum kmd_msg {
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index b680052ee3f0..71ea3c3e8ba3 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -14,16 +14,16 @@
 #define PAGE_SIZE_4KB			(_AC(1, UL) << PAGE_SHIFT_4KB)
 #define PAGE_MASK_2MB			(~(PAGE_SIZE_2MB - 1))
 
-#define PAGE_PRESENT_MASK		0x0000000000001
-#define SWAP_OUT_MASK			0x0000000000004
-#define LAST_MASK			0x0000000000800
-#define PHYS_ADDR_MASK			0x3FFFFFFFFF000ull
+#define PAGE_PRESENT_MASK		0x0000000000001ull
+#define SWAP_OUT_MASK			0x0000000000004ull
+#define LAST_MASK			0x0000000000800ull
+#define PHYS_ADDR_MASK			0xFFFFFFFFFFFFF000ull
 #define HOP0_MASK			0x3000000000000ull
 #define HOP1_MASK			0x0FF8000000000ull
 #define HOP2_MASK			0x0007FC0000000ull
-#define HOP3_MASK			0x000003FE00000
-#define HOP4_MASK			0x00000001FF000
-#define OFFSET_MASK			0x0000000000FFF
+#define HOP3_MASK			0x000003FE00000ull
+#define HOP4_MASK			0x00000001FF000ull
+#define OFFSET_MASK			0x0000000000FFFull
 
 #define HOP0_SHIFT			48
 #define HOP1_SHIFT			39
@@ -32,7 +32,7 @@
 #define HOP4_SHIFT			12
 
 #define PTE_PHYS_ADDR_SHIFT		12
-#define PTE_PHYS_ADDR_MASK		~0xFFF
+#define PTE_PHYS_ADDR_MASK		~OFFSET_MASK
 
 #define HL_PTE_SIZE			sizeof(u64)
 #define HOP_TABLE_SIZE			PAGE_SIZE_4KB
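
Two things change in the hunk above: every PTE-level mask gains an explicit ull suffix, keeping all page-table arithmetic in unsigned 64-bit however the masks are combined, and PHYS_ADDR_MASK widens to the full 64-bit range, which the shadow page tables introduced in mmu.c further below need because their PTEs hold kernel (shadow) addresses whose upper bits must survive the mask. As a standalone illustration (not driver code) of the hop decomposition these masks and shifts implement, using only values taken from this header:

#include <stdint.h>
#include <stdio.h>

#define HOP3_MASK	0x000003FE00000ull
#define HOP3_SHIFT	21
#define OFFSET_MASK	0x0000000000FFFull

int main(void)
{
	uint64_t vaddr = 0x123456789000ull;	/* arbitrary example address */

	/* index into the hop3 table and the in-page offset */
	printf("hop3 index: %llu\n",
	       (unsigned long long)((vaddr & HOP3_MASK) >> HOP3_SHIFT));
	printf("page offset: 0x%llx\n",
	       (unsigned long long)(vaddr & OFFSET_MASK));
	return 0;
}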
diff --git a/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h b/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h
new file mode 100644
index 000000000000..d232081d4e0f
--- /dev/null
+++ b/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2019 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef INCLUDE_PCI_GENERAL_H_
+#define INCLUDE_PCI_GENERAL_H_
+
+/* PCI CONFIGURATION SPACE */
+#define mmPCI_CONFIG_ELBI_ADDR		0xFF0
+#define mmPCI_CONFIG_ELBI_DATA		0xFF4
+#define mmPCI_CONFIG_ELBI_CTRL		0xFF8
+#define PCI_CONFIG_ELBI_CTRL_WRITE	(1 << 31)
+
+#define mmPCI_CONFIG_ELBI_STS		0xFFC
+#define PCI_CONFIG_ELBI_STS_ERR		(1 << 30)
+#define PCI_CONFIG_ELBI_STS_DONE	(1 << 31)
+#define PCI_CONFIG_ELBI_STS_MASK	(PCI_CONFIG_ELBI_STS_ERR | \
+					 PCI_CONFIG_ELBI_STS_DONE)
+
+#endif /* INCLUDE_PCI_GENERAL_H_ */
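
The ELBI window above is an address/data register pair in PCI configuration space with a done/error handshake in the STS register. A hypothetical sketch of the write handshake these bits imply; cfg_write32()/cfg_read32() stand in for config-space accessors and are assumptions, not functions from this patch:

static int elbi_write(u32 addr, u32 data)
{
	u32 sts;

	cfg_write32(mmPCI_CONFIG_ELBI_ADDR, addr);
	cfg_write32(mmPCI_CONFIG_ELBI_DATA, data);
	cfg_write32(mmPCI_CONFIG_ELBI_CTRL, PCI_CONFIG_ELBI_CTRL_WRITE);

	/* poll until the device reports done or error */
	do {
		sts = cfg_read32(mmPCI_CONFIG_ELBI_STS);
	} while (!(sts & PCI_CONFIG_ELBI_STS_MASK));

	return (sts & PCI_CONFIG_ELBI_STS_ERR) ? -1 : 0;
}

A real implementation would also bound the poll loop with a timeout.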
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
index e69a09c10e3f..ea9f72ff456c 100644
--- a/drivers/misc/habanalabs/irq.c
+++ b/drivers/misc/habanalabs/irq.c
@@ -222,7 +222,7 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
 
 	BUILD_BUG_ON(HL_CQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
 
-	p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
+	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
 				&q->bus_address, GFP_KERNEL | __GFP_ZERO);
 	if (!p)
 		return -ENOMEM;
@@ -248,7 +248,7 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
  */
 void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
 {
-	hdev->asic_funcs->dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
+	hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
 			(void *) (uintptr_t) q->kernel_address, q->bus_address);
 }
 
@@ -284,8 +284,9 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
 
 	BUILD_BUG_ON(HL_EQ_SIZE_IN_BYTES > HL_PAGE_SIZE);
 
-	p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_EQ_SIZE_IN_BYTES,
-					&q->bus_address, GFP_KERNEL | __GFP_ZERO);
+	p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+							HL_EQ_SIZE_IN_BYTES,
+							&q->bus_address);
 	if (!p)
 		return -ENOMEM;
 
@@ -308,8 +309,9 @@ void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
 {
 	flush_workqueue(hdev->eq_wq);
 
-	hdev->asic_funcs->dma_free_coherent(hdev, HL_EQ_SIZE_IN_BYTES,
-			(void *) (uintptr_t) q->kernel_address, q->bus_address);
+	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
+					HL_EQ_SIZE_IN_BYTES,
+					(void *) (uintptr_t) q->kernel_address);
 }
 
 void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index ce1fda40a8b8..d67d24c13efd 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -109,7 +109,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 						page_size);
 		if (!phys_pg_pack->pages[i]) {
 			dev_err(hdev->dev,
-				"ioctl failed to allocate page\n");
+				"Failed to allocate device memory (out of memory)\n");
 			rc = -ENOMEM;
 			goto page_err;
 		}
@@ -759,10 +759,6 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
 	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
 		paddr = phys_pg_pack->pages[i];
 
-		/* For accessing the host we need to turn on bit 39 */
-		if (phys_pg_pack->created_from_userptr)
-			paddr += hdev->asic_prop.host_phys_base_address;
-
 		rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
 		if (rc) {
 			dev_err(hdev->dev,
@@ -1046,10 +1042,17 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
 
 	mutex_lock(&ctx->mmu_lock);
 
-	for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size)
+	for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
 		if (hl_mmu_unmap(ctx, next_vaddr, page_size))
 			dev_warn_ratelimited(hdev->dev,
 				"unmap failed for vaddr: 0x%llx\n", next_vaddr);
+
+		/* unmapping on Palladium can be really long, so avoid a CPU
+		 * soft lockup bug by sleeping a little between unmapping pages
+		 */
+		if (hdev->pldm)
+			usleep_range(500, 1000);
+	}
 
 	hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
 
@@ -1083,6 +1086,64 @@ vm_type_err:
 	return rc;
 }
 
+static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
+{
+	struct hl_device *hdev = hpriv->hdev;
+	struct hl_ctx *ctx = hpriv->ctx;
+	u64 device_addr = 0;
+	u32 handle = 0;
+	int rc;
+
+	switch (args->in.op) {
+	case HL_MEM_OP_ALLOC:
+		if (args->in.alloc.mem_size == 0) {
+			dev_err(hdev->dev,
+				"alloc size must be larger than 0\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		/* Force contiguous as there are no real MMU
+		 * translations to overcome physical memory gaps
+		 */
+		args->in.flags |= HL_MEM_CONTIGUOUS;
+		rc = alloc_device_memory(ctx, &args->in, &handle);
+
+		memset(args, 0, sizeof(*args));
+		args->out.handle = (__u64) handle;
+		break;
+
+	case HL_MEM_OP_FREE:
+		rc = free_device_memory(ctx, args->in.free.handle);
+		break;
+
+	case HL_MEM_OP_MAP:
+		if (args->in.flags & HL_MEM_USERPTR) {
+			device_addr = args->in.map_host.host_virt_addr;
+			rc = 0;
+		} else {
+			rc = get_paddr_from_handle(ctx, &args->in,
+					&device_addr);
+		}
+
+		memset(args, 0, sizeof(*args));
+		args->out.device_virt_addr = device_addr;
+		break;
+
+	case HL_MEM_OP_UNMAP:
+		rc = 0;
+		break;
+
+	default:
+		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
+		rc = -ENOTTY;
+		break;
+	}
+
+out:
+	return rc;
+}
+
 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
 {
 	union hl_mem_args *args = data;
@@ -1094,104 +1155,54 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
 
 	if (hl_device_disabled_or_in_reset(hdev)) {
 		dev_warn_ratelimited(hdev->dev,
-			"Device is disabled or in reset. Can't execute memory IOCTL\n");
+			"Device is %s. Can't execute MEMORY IOCTL\n",
+			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
 		return -EBUSY;
 	}
 
-	if (hdev->mmu_enable) {
-		switch (args->in.op) {
-		case HL_MEM_OP_ALLOC:
-			if (!hdev->dram_supports_virtual_memory) {
-				dev_err(hdev->dev,
-					"DRAM alloc is not supported\n");
-				rc = -EINVAL;
-				goto out;
-			}
-			if (args->in.alloc.mem_size == 0) {
-				dev_err(hdev->dev,
-					"alloc size must be larger than 0\n");
-				rc = -EINVAL;
-				goto out;
-			}
-			rc = alloc_device_memory(ctx, &args->in, &handle);
-
-			memset(args, 0, sizeof(*args));
-			args->out.handle = (__u64) handle;
-			break;
-
-		case HL_MEM_OP_FREE:
-			if (!hdev->dram_supports_virtual_memory) {
-				dev_err(hdev->dev,
-					"DRAM free is not supported\n");
-				rc = -EINVAL;
-				goto out;
-			}
-			rc = free_device_memory(ctx, args->in.free.handle);
-			break;
-
-		case HL_MEM_OP_MAP:
-			rc = map_device_va(ctx, &args->in, &device_addr);
-
-			memset(args, 0, sizeof(*args));
-			args->out.device_virt_addr = device_addr;
-			break;
-
-		case HL_MEM_OP_UNMAP:
-			rc = unmap_device_va(ctx,
-					args->in.unmap.device_virt_addr);
-			break;
-
-		default:
-			dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
-			rc = -ENOTTY;
-			break;
-		}
-	} else {
-		switch (args->in.op) {
-		case HL_MEM_OP_ALLOC:
-			if (args->in.alloc.mem_size == 0) {
-				dev_err(hdev->dev,
-					"alloc size must be larger than 0\n");
-				rc = -EINVAL;
-				goto out;
-			}
-
-			/* Force contiguous as there are no real MMU
-			 * translations to overcome physical memory gaps
-			 */
-			args->in.flags |= HL_MEM_CONTIGUOUS;
-			rc = alloc_device_memory(ctx, &args->in, &handle);
-
-			memset(args, 0, sizeof(*args));
-			args->out.handle = (__u64) handle;
-			break;
-
-		case HL_MEM_OP_FREE:
-			rc = free_device_memory(ctx, args->in.free.handle);
-			break;
-
-		case HL_MEM_OP_MAP:
-			if (args->in.flags & HL_MEM_USERPTR) {
-				device_addr = args->in.map_host.host_virt_addr;
-				rc = 0;
-			} else {
-				rc = get_paddr_from_handle(ctx, &args->in,
-						&device_addr);
-			}
-
-			memset(args, 0, sizeof(*args));
-			args->out.device_virt_addr = device_addr;
-			break;
-
-		case HL_MEM_OP_UNMAP:
-			rc = 0;
-			break;
-
-		default:
-			dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
-			rc = -ENOTTY;
-			break;
-		}
-	}
-
+	if (!hdev->mmu_enable)
+		return mem_ioctl_no_mmu(hpriv, args);
+
+	switch (args->in.op) {
+	case HL_MEM_OP_ALLOC:
+		if (!hdev->dram_supports_virtual_memory) {
+			dev_err(hdev->dev, "DRAM alloc is not supported\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		if (args->in.alloc.mem_size == 0) {
+			dev_err(hdev->dev,
+				"alloc size must be larger than 0\n");
+			rc = -EINVAL;
+			goto out;
+		}
+		rc = alloc_device_memory(ctx, &args->in, &handle);
+
+		memset(args, 0, sizeof(*args));
+		args->out.handle = (__u64) handle;
+		break;
+
+	case HL_MEM_OP_FREE:
+		rc = free_device_memory(ctx, args->in.free.handle);
+		break;
+
+	case HL_MEM_OP_MAP:
+		rc = map_device_va(ctx, &args->in, &device_addr);
+
+		memset(args, 0, sizeof(*args));
+		args->out.device_virt_addr = device_addr;
+		break;
+
+	case HL_MEM_OP_UNMAP:
+		rc = unmap_device_va(ctx,
+				args->in.unmap.device_virt_addr);
+		break;
+
+	default:
+		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
+		rc = -ENOTTY;
+		break;
+	}
+
 out:
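
Both paths of the refactored hl_mem_ioctl() consume the same union hl_mem_args: the caller fills args->in, and on success the driver clears the union and fills args->out. A hedged userspace sketch of an allocation round-trip; the HL_IOCTL_MEMORY request code and the uapi header path are assumptions, as they are not shown in this diff:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* assumed uapi header location */

static int hl_alloc_dram(int fd, uint64_t size, uint64_t *handle)
{
	union hl_mem_args args;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_ALLOC;
	args.in.alloc.mem_size = size;	/* must be non-zero, see check above */

	if (ioctl(fd, HL_IOCTL_MEMORY, &args))
		return -1;

	*handle = args.out.handle;	/* driver memsets args and fills out */
	return 0;
}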
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 3a5a2cec8305..533d9315b6fb 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -11,13 +11,15 @@
 #include <linux/genalloc.h>
 #include <linux/slab.h>
 
-static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 addr)
+static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr);
+
+static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
 {
 	struct pgt_info *pgt_info = NULL;
 
-	hash_for_each_possible(ctx->mmu_hash, pgt_info, node,
-				(unsigned long) addr)
-		if (addr == pgt_info->addr)
+	hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
+				(unsigned long) hop_addr)
+		if (hop_addr == pgt_info->shadow_addr)
 			break;
 
 	return pgt_info;
@@ -25,45 +27,109 @@ static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 addr)
25 27
26static void free_hop(struct hl_ctx *ctx, u64 hop_addr) 28static void free_hop(struct hl_ctx *ctx, u64 hop_addr)
27{ 29{
30 struct hl_device *hdev = ctx->hdev;
28 struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr); 31 struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr);
29 32
30 gen_pool_free(pgt_info->ctx->hdev->mmu_pgt_pool, pgt_info->addr, 33 gen_pool_free(hdev->mmu_pgt_pool, pgt_info->phys_addr,
31 ctx->hdev->asic_prop.mmu_hop_table_size); 34 hdev->asic_prop.mmu_hop_table_size);
32 hash_del(&pgt_info->node); 35 hash_del(&pgt_info->node);
33 36 kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
34 kfree(pgt_info); 37 kfree(pgt_info);
35} 38}
36 39
37static u64 alloc_hop(struct hl_ctx *ctx) 40static u64 alloc_hop(struct hl_ctx *ctx)
38{ 41{
39 struct hl_device *hdev = ctx->hdev; 42 struct hl_device *hdev = ctx->hdev;
43 struct asic_fixed_properties *prop = &hdev->asic_prop;
40 struct pgt_info *pgt_info; 44 struct pgt_info *pgt_info;
41 u64 addr; 45 u64 phys_addr, shadow_addr;
42 46
43 pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL); 47 pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
44 if (!pgt_info) 48 if (!pgt_info)
45 return ULLONG_MAX; 49 return ULLONG_MAX;
46 50
47 addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool, 51 phys_addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool,
48 hdev->asic_prop.mmu_hop_table_size); 52 prop->mmu_hop_table_size);
49 if (!addr) { 53 if (!phys_addr) {
50 dev_err(hdev->dev, "failed to allocate page\n"); 54 dev_err(hdev->dev, "failed to allocate page\n");
51 kfree(pgt_info); 55 goto pool_add_err;
52 return ULLONG_MAX;
53 } 56 }
54 57
55 pgt_info->addr = addr; 58 shadow_addr = (u64) (uintptr_t) kzalloc(prop->mmu_hop_table_size,
59 GFP_KERNEL);
60 if (!shadow_addr)
61 goto shadow_err;
62
63 pgt_info->phys_addr = phys_addr;
64 pgt_info->shadow_addr = shadow_addr;
56 pgt_info->ctx = ctx; 65 pgt_info->ctx = ctx;
57 pgt_info->num_of_ptes = 0; 66 pgt_info->num_of_ptes = 0;
58 hash_add(ctx->mmu_hash, &pgt_info->node, addr); 67 hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);
68
69 return shadow_addr;
70
71shadow_err:
72 gen_pool_free(hdev->mmu_pgt_pool, phys_addr, prop->mmu_hop_table_size);
73pool_add_err:
74 kfree(pgt_info);
75
76 return ULLONG_MAX;
77}
78
79static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx)
80{
81 return ctx->hdev->asic_prop.mmu_pgt_addr +
82 (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
83}
84
85static inline u64 get_hop0_addr(struct hl_ctx *ctx)
86{
87 return (u64) (uintptr_t) ctx->hdev->mmu_shadow_hop0 +
88 (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
89}
90
91static inline void flush(struct hl_ctx *ctx)
92{
93 /* flush all writes from all cores to reach PCI */
94 mb();
95 ctx->hdev->asic_funcs->read_pte(ctx->hdev, get_phys_hop0_addr(ctx));
96}
97
98/* transform the value to physical address when writing to H/W */
99static inline void write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
100{
101 /*
102 * The value to write is actually the address of the next shadow hop +
103 * flags at the 12 LSBs.
104 * Hence in order to get the value to write to the physical PTE, we
105 * clear the 12 LSBs and translate the shadow hop to its associated
106 * physical hop, and add back the original 12 LSBs.
107 */
108 u64 phys_val = get_phys_addr(ctx, val & PTE_PHYS_ADDR_MASK) |
109 (val & OFFSET_MASK);
110
111 ctx->hdev->asic_funcs->write_pte(ctx->hdev,
112 get_phys_addr(ctx, shadow_pte_addr),
113 phys_val);
114
115 *(u64 *) (uintptr_t) shadow_pte_addr = val;
116}
59 117
60 return addr; 118/* do not transform the value to physical address when writing to H/W */
119static inline void write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr,
120 u64 val)
121{
122 ctx->hdev->asic_funcs->write_pte(ctx->hdev,
123 get_phys_addr(ctx, shadow_pte_addr),
124 val);
125 *(u64 *) (uintptr_t) shadow_pte_addr = val;
61} 126}
62 127
63static inline void clear_pte(struct hl_device *hdev, u64 pte_addr) 128/* clear the last and present bits */
129static inline void clear_pte(struct hl_ctx *ctx, u64 pte_addr)
64{ 130{
65 /* clear the last and present bits */ 131 /* no need to transform the value to physical address */
66 hdev->asic_funcs->write_pte(hdev, pte_addr, 0); 132 write_final_pte(ctx, pte_addr, 0);
67} 133}
68 134
69static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr) 135static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr)
@@ -98,12 +164,6 @@ static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr)
98 return num_of_ptes_left; 164 return num_of_ptes_left;
99} 165}
100 166
101static inline u64 get_hop0_addr(struct hl_ctx *ctx)
102{
103 return ctx->hdev->asic_prop.mmu_pgt_addr +
104 (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size);
105}
106
107static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr, 167static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr,
108 u64 virt_addr, u64 mask, u64 shift) 168 u64 virt_addr, u64 mask, u64 shift)
109{ 169{
@@ -136,7 +196,7 @@ static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr)
136 return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT); 196 return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT);
137} 197}
138 198
139static inline u64 get_next_hop_addr(u64 curr_pte) 199static inline u64 get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
140{ 200{
141 if (curr_pte & PAGE_PRESENT_MASK) 201 if (curr_pte & PAGE_PRESENT_MASK)
142 return curr_pte & PHYS_ADDR_MASK; 202 return curr_pte & PHYS_ADDR_MASK;
@@ -147,7 +207,7 @@ static inline u64 get_next_hop_addr(u64 curr_pte)
147static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, 207static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
148 bool *is_new_hop) 208 bool *is_new_hop)
149{ 209{
150 u64 hop_addr = get_next_hop_addr(curr_pte); 210 u64 hop_addr = get_next_hop_addr(ctx, curr_pte);
151 211
152 if (hop_addr == ULLONG_MAX) { 212 if (hop_addr == ULLONG_MAX) {
153 hop_addr = alloc_hop(ctx); 213 hop_addr = alloc_hop(ctx);
@@ -157,106 +217,30 @@ static inline u64 get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte,
157 return hop_addr; 217 return hop_addr;
158} 218}
159 219
160/* 220/* translates shadow address inside hop to a physical address */
161 * hl_mmu_init - init the mmu module 221static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
162 *
163 * @hdev: pointer to the habanalabs device structure
164 *
165 * This function does the following:
166 * - Allocate max_asid zeroed hop0 pgts so no mapping is available
167 * - Enable mmu in hw
168 * - Invalidate the mmu cache
169 * - Create a pool of pages for pgts
170 * - Returns 0 on success
171 *
172 * This function depends on DMA QMAN to be working!
173 */
174int hl_mmu_init(struct hl_device *hdev)
175{ 222{
176 struct asic_fixed_properties *prop = &hdev->asic_prop; 223 u64 page_mask = (ctx->hdev->asic_prop.mmu_hop_table_size - 1);
177 int rc; 224 u64 shadow_hop_addr = shadow_addr & ~page_mask;
225 u64 pte_offset = shadow_addr & page_mask;
226 u64 phys_hop_addr;
178 227
179 if (!hdev->mmu_enable) 228 if (shadow_hop_addr != get_hop0_addr(ctx))
180 return 0; 229 phys_hop_addr = get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
181 230 else
182 /* MMU HW init was already done in device hw_init() */ 231 phys_hop_addr = get_phys_hop0_addr(ctx);
183
184 mutex_init(&hdev->mmu_cache_lock);
185
186 hdev->mmu_pgt_pool =
187 gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
188
189 if (!hdev->mmu_pgt_pool) {
190 dev_err(hdev->dev, "Failed to create page gen pool\n");
191 rc = -ENOMEM;
192 goto err_pool_create;
193 }
194
195 rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
196 prop->mmu_hop0_tables_total_size,
197 prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
198 -1);
199 if (rc) {
200 dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
201 goto err_pool_add;
202 }
203
204 return 0;
205
206err_pool_add:
207 gen_pool_destroy(hdev->mmu_pgt_pool);
208err_pool_create:
209 mutex_destroy(&hdev->mmu_cache_lock);
210 232
211 return rc; 233 return phys_hop_addr + pte_offset;
212} 234}
213 235
214/* 236static int dram_default_mapping_init(struct hl_ctx *ctx)
215 * hl_mmu_fini - release the mmu module.
216 *
217 * @hdev: pointer to the habanalabs device structure
218 *
219 * This function does the following:
220 * - Disable mmu in hw
221 * - free the pgts pool
222 *
223 * All ctxs should be freed before calling this func
224 */
225void hl_mmu_fini(struct hl_device *hdev)
226{
227 if (!hdev->mmu_enable)
228 return;
229
230 gen_pool_destroy(hdev->mmu_pgt_pool);
231
232 mutex_destroy(&hdev->mmu_cache_lock);
233
234 /* MMU HW fini will be done in device hw_fini() */
235}
236
237/**
238 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
239 * @ctx: pointer to the context structure to initialize.
240 *
241 * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
242 * page tables hops related to this context and an optional DRAM default page
243 * mapping.
244 * Return: 0 on success, non-zero otherwise.
245 */
246int hl_mmu_ctx_init(struct hl_ctx *ctx)
247{ 237{
248 struct hl_device *hdev = ctx->hdev; 238 struct hl_device *hdev = ctx->hdev;
249 struct asic_fixed_properties *prop = &hdev->asic_prop; 239 struct asic_fixed_properties *prop = &hdev->asic_prop;
250 u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr, 240 u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
251 hop3_pte_addr, pte_val; 241 hop2_pte_addr, hop3_pte_addr, pte_val;
252 int rc, i, j, hop3_allocated = 0; 242 int rc, i, j, hop3_allocated = 0;
253 243
254 if (!hdev->mmu_enable)
255 return 0;
256
257 mutex_init(&ctx->mmu_lock);
258 hash_init(ctx->mmu_hash);
259
260 if (!hdev->dram_supports_virtual_memory || 244 if (!hdev->dram_supports_virtual_memory ||
261 !hdev->dram_default_page_mapping) 245 !hdev->dram_default_page_mapping)
262 return 0; 246 return 0;
@@ -269,10 +253,10 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
269 total_hops = num_of_hop3 + 2; 253 total_hops = num_of_hop3 + 2;
270 254
271 ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL); 255 ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL);
272 if (!ctx->dram_default_hops) { 256 if (!ctx->dram_default_hops)
273 rc = -ENOMEM; 257 return -ENOMEM;
274 goto alloc_err; 258
275 } 259 hop0_addr = get_hop0_addr(ctx);
276 260
277 hop1_addr = alloc_hop(ctx); 261 hop1_addr = alloc_hop(ctx);
278 if (hop1_addr == ULLONG_MAX) { 262 if (hop1_addr == ULLONG_MAX) {
@@ -304,17 +288,17 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
304 288
305 /* need only pte 0 in hops 0 and 1 */ 289 /* need only pte 0 in hops 0 and 1 */
306 pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; 290 pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
307 hdev->asic_funcs->write_pte(hdev, get_hop0_addr(ctx), pte_val); 291 write_pte(ctx, hop0_addr, pte_val);
308 292
309 pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; 293 pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
310 hdev->asic_funcs->write_pte(hdev, hop1_addr, pte_val); 294 write_pte(ctx, hop1_addr, pte_val);
311 get_pte(ctx, hop1_addr); 295 get_pte(ctx, hop1_addr);
312 296
313 hop2_pte_addr = hop2_addr; 297 hop2_pte_addr = hop2_addr;
314 for (i = 0 ; i < num_of_hop3 ; i++) { 298 for (i = 0 ; i < num_of_hop3 ; i++) {
315 pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) | 299 pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) |
316 PAGE_PRESENT_MASK; 300 PAGE_PRESENT_MASK;
317 hdev->asic_funcs->write_pte(hdev, hop2_pte_addr, pte_val); 301 write_pte(ctx, hop2_pte_addr, pte_val);
318 get_pte(ctx, hop2_addr); 302 get_pte(ctx, hop2_addr);
319 hop2_pte_addr += HL_PTE_SIZE; 303 hop2_pte_addr += HL_PTE_SIZE;
320 } 304 }
@@ -325,33 +309,183 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
325 for (i = 0 ; i < num_of_hop3 ; i++) { 309 for (i = 0 ; i < num_of_hop3 ; i++) {
326 hop3_pte_addr = ctx->dram_default_hops[i]; 310 hop3_pte_addr = ctx->dram_default_hops[i];
327 for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) { 311 for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
328 hdev->asic_funcs->write_pte(hdev, hop3_pte_addr, 312 write_final_pte(ctx, hop3_pte_addr, pte_val);
329 pte_val);
330 get_pte(ctx, ctx->dram_default_hops[i]); 313 get_pte(ctx, ctx->dram_default_hops[i]);
331 hop3_pte_addr += HL_PTE_SIZE; 314 hop3_pte_addr += HL_PTE_SIZE;
332 } 315 }
333 } 316 }
334 317
335 /* flush all writes to reach PCI */ 318 flush(ctx);
336 mb();
337 hdev->asic_funcs->read_pte(hdev, hop2_addr);
338 319
339 return 0; 320 return 0;
340 321
341hop3_err: 322hop3_err:
342 for (i = 0 ; i < hop3_allocated ; i++) 323 for (i = 0 ; i < hop3_allocated ; i++)
343 free_hop(ctx, ctx->dram_default_hops[i]); 324 free_hop(ctx, ctx->dram_default_hops[i]);
325
344 free_hop(ctx, hop2_addr); 326 free_hop(ctx, hop2_addr);
345hop2_err: 327hop2_err:
346 free_hop(ctx, hop1_addr); 328 free_hop(ctx, hop1_addr);
347hop1_err: 329hop1_err:
348 kfree(ctx->dram_default_hops); 330 kfree(ctx->dram_default_hops);
349alloc_err:
350 mutex_destroy(&ctx->mmu_lock);
351 331
352 return rc; 332 return rc;
353} 333}
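
dram_default_mapping_init() sizes its hop3 array with do_div(), which divides a u64 in place and returns the remainder. A sketch of the same calculation with invented sizes (2 GB of default-mapped DRAM, 2 MB pages, 512 PTEs per hop):

        #include <asm/div64.h>

        static u64 sketch_num_of_hop3(void)
        {
        	u64 num_of_hop3 = 2ULL * 1024 * 1024 * 1024;	/* DRAM bytes */

        	do_div(num_of_hop3, 2 * 1024 * 1024);	/* -> 1024 DRAM pages */
        	do_div(num_of_hop3, 512);		/* -> 2 hop3 tables */

        	return num_of_hop3;
        }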
354 334
335static void dram_default_mapping_fini(struct hl_ctx *ctx)
336{
337 struct hl_device *hdev = ctx->hdev;
338 struct asic_fixed_properties *prop = &hdev->asic_prop;
339 u64 num_of_hop3, total_hops, hop0_addr, hop1_addr, hop2_addr,
340 hop2_pte_addr, hop3_pte_addr;
341 int i, j;
342
343 if (!hdev->dram_supports_virtual_memory ||
344 !hdev->dram_default_page_mapping)
345 return;
346
347 num_of_hop3 = prop->dram_size_for_default_page_mapping;
348 do_div(num_of_hop3, prop->dram_page_size);
349 do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
350
351 hop0_addr = get_hop0_addr(ctx);
352 /* add hop1 and hop2 */
353 total_hops = num_of_hop3 + 2;
354 hop1_addr = ctx->dram_default_hops[total_hops - 1];
355 hop2_addr = ctx->dram_default_hops[total_hops - 2];
356
357 for (i = 0 ; i < num_of_hop3 ; i++) {
358 hop3_pte_addr = ctx->dram_default_hops[i];
359 for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
360 clear_pte(ctx, hop3_pte_addr);
361 put_pte(ctx, ctx->dram_default_hops[i]);
362 hop3_pte_addr += HL_PTE_SIZE;
363 }
364 }
365
366 hop2_pte_addr = hop2_addr;
368 for (i = 0 ; i < num_of_hop3 ; i++) {
369 clear_pte(ctx, hop2_pte_addr);
370 put_pte(ctx, hop2_addr);
371 hop2_pte_addr += HL_PTE_SIZE;
372 }
373
374 clear_pte(ctx, hop1_addr);
375 put_pte(ctx, hop1_addr);
376 clear_pte(ctx, hop0_addr);
377
378 kfree(ctx->dram_default_hops);
379
380 flush(ctx);
381}
382
383/**
384 * hl_mmu_init() - initialize the MMU module.
385 * @hdev: habanalabs device structure.
386 *
387 * This function does the following:
388 * - Allocate max_asid zeroed hop0 pgts so no mapping is available.
389 * - Enable MMU in H/W.
390 * - Invalidate the MMU cache.
391 * - Create a pool of pages for pgt_infos.
392 *
393 * This function depends on DMA QMAN to be working!
394 *
395 * Return: 0 for success, non-zero for failure.
396 */
397int hl_mmu_init(struct hl_device *hdev)
398{
399 struct asic_fixed_properties *prop = &hdev->asic_prop;
400 int rc;
401
402 if (!hdev->mmu_enable)
403 return 0;
404
405 /* MMU H/W init was already done in device hw_init() */
406
407 mutex_init(&hdev->mmu_cache_lock);
408
409 hdev->mmu_pgt_pool =
410 gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
411
412 if (!hdev->mmu_pgt_pool) {
413 dev_err(hdev->dev, "Failed to create page gen pool\n");
414 rc = -ENOMEM;
415 goto err_pool_create;
416 }
417
418 rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
419 prop->mmu_hop0_tables_total_size,
420 prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size,
421 -1);
422 if (rc) {
423 dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
424 goto err_pool_add;
425 }
426
427 hdev->mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
428 prop->mmu_hop_table_size,
429 GFP_KERNEL | __GFP_ZERO);
430 if (!hdev->mmu_shadow_hop0) {
431 rc = -ENOMEM;
432 goto err_pool_add;
433 }
434
435 return 0;
436
437err_pool_add:
438 gen_pool_destroy(hdev->mmu_pgt_pool);
439err_pool_create:
440 mutex_destroy(&hdev->mmu_cache_lock);
441
442 return rc;
443}
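
hl_mmu_init() manages hop tables with a genalloc pool: gen_pool_add() donates the device's page-table region once, after which alloc_hop() and free_hop() draw and return fixed-size hop tables. A self-contained sketch of that lifecycle; the base address and region size below are invented:

        #include <linux/genalloc.h>

        static int sketch_pgt_pool(void)
        {
        	struct gen_pool *pool;
        	unsigned long hop;

        	/* allocation granule = one 4 KB hop table, any NUMA node */
        	pool = gen_pool_create(__ffs(4096), -1);
        	if (!pool)
        		return -ENOMEM;

        	/* donate a (hypothetical) 16-table region to the pool */
        	if (gen_pool_add(pool, 0x20000000UL, 16 * 4096, -1)) {
        		gen_pool_destroy(pool);
        		return -ENOMEM;
        	}

        	hop = gen_pool_alloc(pool, 4096);	/* grab one hop table */
        	if (hop)
        		gen_pool_free(pool, hop, 4096);	/* and hand it back */

        	gen_pool_destroy(pool);
        	return 0;
        }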
444
445/**
446 * hl_mmu_fini() - release the MMU module.
447 * @hdev: habanalabs device structure.
448 *
449 * This function does the following:
450 * - Disable MMU in H/W.
451 * - Free the pgt_infos pool.
452 *
453 * All contexts should be freed before calling this function.
454 */
455void hl_mmu_fini(struct hl_device *hdev)
456{
457 if (!hdev->mmu_enable)
458 return;
459
460 kvfree(hdev->mmu_shadow_hop0);
461 gen_pool_destroy(hdev->mmu_pgt_pool);
462 mutex_destroy(&hdev->mmu_cache_lock);
463
464 /* MMU H/W fini will be done in device hw_fini() */
465}
466
467/**
468 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
469 * @ctx: pointer to the context structure to initialize.
470 *
471 * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
472 * page table hops related to this context.
473 * Return: 0 on success, non-zero otherwise.
474 */
475int hl_mmu_ctx_init(struct hl_ctx *ctx)
476{
477 struct hl_device *hdev = ctx->hdev;
478
479 if (!hdev->mmu_enable)
480 return 0;
481
482 mutex_init(&ctx->mmu_lock);
483 hash_init(ctx->mmu_phys_hash);
484 hash_init(ctx->mmu_shadow_hash);
485
486 return dram_default_mapping_init(ctx);
487}
488
355/* 489/*
356 * hl_mmu_ctx_fini - disable a ctx from using the mmu module 490 * hl_mmu_ctx_fini - disable a ctx from using the mmu module
357 * 491 *
@@ -365,63 +499,23 @@ alloc_err:
365void hl_mmu_ctx_fini(struct hl_ctx *ctx) 499void hl_mmu_ctx_fini(struct hl_ctx *ctx)
366{ 500{
367 struct hl_device *hdev = ctx->hdev; 501 struct hl_device *hdev = ctx->hdev;
368 struct asic_fixed_properties *prop = &hdev->asic_prop;
369 struct pgt_info *pgt_info; 502 struct pgt_info *pgt_info;
370 struct hlist_node *tmp; 503 struct hlist_node *tmp;
371 u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr, 504 int i;
372 hop3_pte_addr;
373 int i, j;
374 505
375 if (!ctx->hdev->mmu_enable) 506 if (!hdev->mmu_enable)
376 return; 507 return;
377 508
378 if (hdev->dram_supports_virtual_memory && 509 dram_default_mapping_fini(ctx);
379 hdev->dram_default_page_mapping) {
380
381 num_of_hop3 = prop->dram_size_for_default_page_mapping;
382 do_div(num_of_hop3, prop->dram_page_size);
383 do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
384
385 /* add hop1 and hop2 */
386 total_hops = num_of_hop3 + 2;
387 hop1_addr = ctx->dram_default_hops[total_hops - 1];
388 hop2_addr = ctx->dram_default_hops[total_hops - 2];
389
390 for (i = 0 ; i < num_of_hop3 ; i++) {
391 hop3_pte_addr = ctx->dram_default_hops[i];
392 for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
393 clear_pte(hdev, hop3_pte_addr);
394 put_pte(ctx, ctx->dram_default_hops[i]);
395 hop3_pte_addr += HL_PTE_SIZE;
396 }
397 }
398 510
399 hop2_pte_addr = hop2_addr; 511 if (!hash_empty(ctx->mmu_shadow_hash))
400 for (i = 0 ; i < num_of_hop3 ; i++) {
401 clear_pte(hdev, hop2_pte_addr);
402 put_pte(ctx, hop2_addr);
403 hop2_pte_addr += HL_PTE_SIZE;
404 }
405
406 clear_pte(hdev, hop1_addr);
407 put_pte(ctx, hop1_addr);
408 clear_pte(hdev, get_hop0_addr(ctx));
409
410 kfree(ctx->dram_default_hops);
411
412 /* flush all writes to reach PCI */
413 mb();
414 hdev->asic_funcs->read_pte(hdev, hop2_addr);
415 }
416
417 if (!hash_empty(ctx->mmu_hash))
418 dev_err(hdev->dev, "ctx is freed while it has pgts in use\n"); 512 dev_err(hdev->dev, "ctx is freed while it has pgts in use\n");
419 513
420 hash_for_each_safe(ctx->mmu_hash, i, tmp, pgt_info, node) { 514 hash_for_each_safe(ctx->mmu_shadow_hash, i, tmp, pgt_info, node) {
421 dev_err(hdev->dev, 515 dev_err(hdev->dev,
422 "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n", 516 "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
423 pgt_info->addr, ctx->asid, pgt_info->num_of_ptes); 517 pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
424 free_hop(ctx, pgt_info->addr); 518 free_hop(ctx, pgt_info->shadow_addr);
425 } 519 }
426 520
427 mutex_destroy(&ctx->mmu_lock); 521 mutex_destroy(&ctx->mmu_lock);
@@ -437,45 +531,43 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
437 hop3_addr = 0, hop3_pte_addr = 0, 531 hop3_addr = 0, hop3_pte_addr = 0,
438 hop4_addr = 0, hop4_pte_addr = 0, 532 hop4_addr = 0, hop4_pte_addr = 0,
439 curr_pte; 533 curr_pte;
440 int clear_hop3 = 1; 534 bool is_dram_addr, is_huge, clear_hop3 = true;
441 bool is_dram_addr, is_huge, is_dram_default_page_mapping;
442 535
443 is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB, 536 is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB,
444 prop->va_space_dram_start_address, 537 prop->va_space_dram_start_address,
445 prop->va_space_dram_end_address); 538 prop->va_space_dram_end_address);
446 539
447 hop0_addr = get_hop0_addr(ctx); 540 hop0_addr = get_hop0_addr(ctx);
448
449 hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr); 541 hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
450 542
451 curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr); 543 curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
452 544
453 hop1_addr = get_next_hop_addr(curr_pte); 545 hop1_addr = get_next_hop_addr(ctx, curr_pte);
454 546
455 if (hop1_addr == ULLONG_MAX) 547 if (hop1_addr == ULLONG_MAX)
456 goto not_mapped; 548 goto not_mapped;
457 549
458 hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr); 550 hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
459 551
460 curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr); 552 curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
461 553
462 hop2_addr = get_next_hop_addr(curr_pte); 554 hop2_addr = get_next_hop_addr(ctx, curr_pte);
463 555
464 if (hop2_addr == ULLONG_MAX) 556 if (hop2_addr == ULLONG_MAX)
465 goto not_mapped; 557 goto not_mapped;
466 558
467 hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr); 559 hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
468 560
469 curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr); 561 curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
470 562
471 hop3_addr = get_next_hop_addr(curr_pte); 563 hop3_addr = get_next_hop_addr(ctx, curr_pte);
472 564
473 if (hop3_addr == ULLONG_MAX) 565 if (hop3_addr == ULLONG_MAX)
474 goto not_mapped; 566 goto not_mapped;
475 567
476 hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr); 568 hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
477 569
478 curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr); 570 curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
479 571
480 is_huge = curr_pte & LAST_MASK; 572 is_huge = curr_pte & LAST_MASK;
481 573
@@ -485,27 +577,24 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
485 return -EFAULT; 577 return -EFAULT;
486 } 578 }
487 579
488 is_dram_default_page_mapping =
489 hdev->dram_default_page_mapping && is_dram_addr;
490
491 if (!is_huge) { 580 if (!is_huge) {
492 hop4_addr = get_next_hop_addr(curr_pte); 581 hop4_addr = get_next_hop_addr(ctx, curr_pte);
493 582
494 if (hop4_addr == ULLONG_MAX) 583 if (hop4_addr == ULLONG_MAX)
495 goto not_mapped; 584 goto not_mapped;
496 585
497 hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr); 586 hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
498 587
499 curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr); 588 curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
500 589
501 clear_hop3 = 0; 590 clear_hop3 = false;
502 } 591 }
503 592
504 if (is_dram_default_page_mapping) { 593 if (hdev->dram_default_page_mapping && is_dram_addr) {
505 u64 zero_pte = (prop->mmu_dram_default_page_addr & 594 u64 default_pte = (prop->mmu_dram_default_page_addr &
506 PTE_PHYS_ADDR_MASK) | LAST_MASK | 595 PTE_PHYS_ADDR_MASK) | LAST_MASK |
507 PAGE_PRESENT_MASK; 596 PAGE_PRESENT_MASK;
508 if (curr_pte == zero_pte) { 597 if (curr_pte == default_pte) {
509 dev_err(hdev->dev, 598 dev_err(hdev->dev,
510 "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n", 599 "DRAM: hop3 PTE points to zero page, can't unmap, va: 0x%llx\n",
511 virt_addr); 600 virt_addr);
@@ -519,40 +608,43 @@ static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr)
519 goto not_mapped; 608 goto not_mapped;
520 } 609 }
521 610
522 hdev->asic_funcs->write_pte(hdev, hop3_pte_addr, zero_pte); 611 write_final_pte(ctx, hop3_pte_addr, default_pte);
523 put_pte(ctx, hop3_addr); 612 put_pte(ctx, hop3_addr);
524 } else { 613 } else {
525 if (!(curr_pte & PAGE_PRESENT_MASK)) 614 if (!(curr_pte & PAGE_PRESENT_MASK))
526 goto not_mapped; 615 goto not_mapped;
527 616
528 clear_pte(hdev, hop4_addr ? hop4_pte_addr : hop3_pte_addr); 617 if (hop4_addr)
618 clear_pte(ctx, hop4_pte_addr);
619 else
620 clear_pte(ctx, hop3_pte_addr);
529 621
530 if (hop4_addr && !put_pte(ctx, hop4_addr)) 622 if (hop4_addr && !put_pte(ctx, hop4_addr))
531 clear_hop3 = 1; 623 clear_hop3 = true;
532 624
533 if (!clear_hop3) 625 if (!clear_hop3)
534 goto flush; 626 goto flush;
535 clear_pte(hdev, hop3_pte_addr); 627
628 clear_pte(ctx, hop3_pte_addr);
536 629
537 if (put_pte(ctx, hop3_addr)) 630 if (put_pte(ctx, hop3_addr))
538 goto flush; 631 goto flush;
539 clear_pte(hdev, hop2_pte_addr); 632
633 clear_pte(ctx, hop2_pte_addr);
540 634
541 if (put_pte(ctx, hop2_addr)) 635 if (put_pte(ctx, hop2_addr))
542 goto flush; 636 goto flush;
543 clear_pte(hdev, hop1_pte_addr); 637
638 clear_pte(ctx, hop1_pte_addr);
544 639
545 if (put_pte(ctx, hop1_addr)) 640 if (put_pte(ctx, hop1_addr))
546 goto flush; 641 goto flush;
547 clear_pte(hdev, hop0_pte_addr); 642
643 clear_pte(ctx, hop0_pte_addr);
548 } 644 }
549 645
550flush: 646flush:
551 /* flush all writes from all cores to reach PCI */ 647 flush(ctx);
552 mb();
553
554 hdev->asic_funcs->read_pte(hdev,
555 hop4_addr ? hop4_pte_addr : hop3_pte_addr);
556 648
557 return 0; 649 return 0;
558 650
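
After this change, every step of the unmap walk reads the PTE from host (shadow) memory with a plain load instead of a device read. One step of the walk, condensed into a sketch built from the helpers defined above:

        static u64 sketch_walk_step(struct hl_ctx *ctx, u64 hop_addr, u64 virt_addr,
        			    u64 mask, u64 shift)
        {
        	u64 pte_addr = get_hopN_pte_addr(ctx, hop_addr, virt_addr, mask, shift);
        	u64 curr_pte = *(u64 *) (uintptr_t) pte_addr;	/* host-memory load */

        	/* returns ULLONG_MAX when the PTE is not present; callers bail out */
        	return get_next_hop_addr(ctx, curr_pte);
        }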
@@ -632,8 +724,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
632 hop4_addr = 0, hop4_pte_addr = 0, 724 hop4_addr = 0, hop4_pte_addr = 0,
633 curr_pte = 0; 725 curr_pte = 0;
634 bool hop1_new = false, hop2_new = false, hop3_new = false, 726 bool hop1_new = false, hop2_new = false, hop3_new = false,
635 hop4_new = false, is_huge, is_dram_addr, 727 hop4_new = false, is_huge, is_dram_addr;
636 is_dram_default_page_mapping;
637 int rc = -ENOMEM; 728 int rc = -ENOMEM;
638 729
639 /* 730 /*
@@ -654,59 +745,46 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
654 return -EFAULT; 745 return -EFAULT;
655 } 746 }
656 747
657 is_dram_default_page_mapping =
658 hdev->dram_default_page_mapping && is_dram_addr;
659
660 hop0_addr = get_hop0_addr(ctx); 748 hop0_addr = get_hop0_addr(ctx);
661
662 hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr); 749 hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
663 750 curr_pte = *(u64 *) (uintptr_t) hop0_pte_addr;
664 curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
665 751
666 hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new); 752 hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
667
668 if (hop1_addr == ULLONG_MAX) 753 if (hop1_addr == ULLONG_MAX)
669 goto err; 754 goto err;
670 755
671 hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr); 756 hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
672 757 curr_pte = *(u64 *) (uintptr_t) hop1_pte_addr;
673 curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
674 758
675 hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new); 759 hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
676
677 if (hop2_addr == ULLONG_MAX) 760 if (hop2_addr == ULLONG_MAX)
678 goto err; 761 goto err;
679 762
680 hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr); 763 hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
681 764 curr_pte = *(u64 *) (uintptr_t) hop2_pte_addr;
682 curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
683 765
684 hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new); 766 hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
685
686 if (hop3_addr == ULLONG_MAX) 767 if (hop3_addr == ULLONG_MAX)
687 goto err; 768 goto err;
688 769
689 hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr); 770 hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
690 771 curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
691 curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
692 772
693 if (!is_huge) { 773 if (!is_huge) {
694 hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new); 774 hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
695
696 if (hop4_addr == ULLONG_MAX) 775 if (hop4_addr == ULLONG_MAX)
697 goto err; 776 goto err;
698 777
699 hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr); 778 hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
700 779 curr_pte = *(u64 *) (uintptr_t) hop4_pte_addr;
701 curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
702 } 780 }
703 781
704 if (is_dram_default_page_mapping) { 782 if (hdev->dram_default_page_mapping && is_dram_addr) {
705 u64 zero_pte = (prop->mmu_dram_default_page_addr & 783 u64 default_pte = (prop->mmu_dram_default_page_addr &
706 PTE_PHYS_ADDR_MASK) | LAST_MASK | 784 PTE_PHYS_ADDR_MASK) | LAST_MASK |
707 PAGE_PRESENT_MASK; 785 PAGE_PRESENT_MASK;
708 786
709 if (curr_pte != zero_pte) { 787 if (curr_pte != default_pte) {
710 dev_err(hdev->dev, 788 dev_err(hdev->dev,
711 "DRAM: mapping already exists for virt_addr 0x%llx\n", 789 "DRAM: mapping already exists for virt_addr 0x%llx\n",
712 virt_addr); 790 virt_addr);
@@ -722,27 +800,22 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
722 } 800 }
723 } else if (curr_pte & PAGE_PRESENT_MASK) { 801 } else if (curr_pte & PAGE_PRESENT_MASK) {
724 dev_err(hdev->dev, 802 dev_err(hdev->dev,
725 "mapping already exists for virt_addr 0x%llx\n", 803 "mapping already exists for virt_addr 0x%llx\n",
726 virt_addr); 804 virt_addr);
727 805
728 dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n", 806 dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
729 hdev->asic_funcs->read_pte(hdev, hop0_pte_addr), 807 *(u64 *) (uintptr_t) hop0_pte_addr, hop0_pte_addr);
730 hop0_pte_addr);
731 dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n", 808 dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
732 hdev->asic_funcs->read_pte(hdev, hop1_pte_addr), 809 *(u64 *) (uintptr_t) hop1_pte_addr, hop1_pte_addr);
733 hop1_pte_addr);
734 dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n", 810 dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
735 hdev->asic_funcs->read_pte(hdev, hop2_pte_addr), 811 *(u64 *) (uintptr_t) hop2_pte_addr, hop2_pte_addr);
736 hop2_pte_addr);
737 dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n", 812 dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
738 hdev->asic_funcs->read_pte(hdev, hop3_pte_addr), 813 *(u64 *) (uintptr_t) hop3_pte_addr, hop3_pte_addr);
739 hop3_pte_addr);
740 814
741 if (!is_huge) 815 if (!is_huge)
742 dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n", 816 dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
743 hdev->asic_funcs->read_pte(hdev, 817 *(u64 *) (uintptr_t) hop4_pte_addr,
744 hop4_pte_addr), 818 hop4_pte_addr);
745 hop4_pte_addr);
746 819
747 rc = -EINVAL; 820 rc = -EINVAL;
748 goto err; 821 goto err;
@@ -751,28 +824,26 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
751 curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK 824 curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
752 | PAGE_PRESENT_MASK; 825 | PAGE_PRESENT_MASK;
753 826
754 hdev->asic_funcs->write_pte(hdev, 827 if (is_huge)
755 is_huge ? hop3_pte_addr : hop4_pte_addr, 828 write_final_pte(ctx, hop3_pte_addr, curr_pte);
756 curr_pte); 829 else
830 write_final_pte(ctx, hop4_pte_addr, curr_pte);
757 831
758 if (hop1_new) { 832 if (hop1_new) {
759 curr_pte = (hop1_addr & PTE_PHYS_ADDR_MASK) | 833 curr_pte =
760 PAGE_PRESENT_MASK; 834 (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
761 ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop0_pte_addr, 835 write_pte(ctx, hop0_pte_addr, curr_pte);
762 curr_pte);
763 } 836 }
764 if (hop2_new) { 837 if (hop2_new) {
765 curr_pte = (hop2_addr & PTE_PHYS_ADDR_MASK) | 838 curr_pte =
766 PAGE_PRESENT_MASK; 839 (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
767 ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop1_pte_addr, 840 write_pte(ctx, hop1_pte_addr, curr_pte);
768 curr_pte);
769 get_pte(ctx, hop1_addr); 841 get_pte(ctx, hop1_addr);
770 } 842 }
771 if (hop3_new) { 843 if (hop3_new) {
772 curr_pte = (hop3_addr & PTE_PHYS_ADDR_MASK) | 844 curr_pte =
773 PAGE_PRESENT_MASK; 845 (hop3_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK;
774 ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop2_pte_addr, 846 write_pte(ctx, hop2_pte_addr, curr_pte);
775 curr_pte);
776 get_pte(ctx, hop2_addr); 847 get_pte(ctx, hop2_addr);
777 } 848 }
778 849
@@ -780,8 +851,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
780 if (hop4_new) { 851 if (hop4_new) {
781 curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) | 852 curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) |
782 PAGE_PRESENT_MASK; 853 PAGE_PRESENT_MASK;
783 ctx->hdev->asic_funcs->write_pte(ctx->hdev, 854 write_pte(ctx, hop3_pte_addr, curr_pte);
784 hop3_pte_addr, curr_pte);
785 get_pte(ctx, hop3_addr); 855 get_pte(ctx, hop3_addr);
786 } 856 }
787 857
@@ -790,11 +860,7 @@ static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
790 get_pte(ctx, hop3_addr); 860 get_pte(ctx, hop3_addr);
791 } 861 }
792 862
793 /* flush all writes from all cores to reach PCI */ 863 flush(ctx);
794 mb();
795
796 hdev->asic_funcs->read_pte(hdev,
797 is_huge ? hop3_pte_addr : hop4_pte_addr);
798 864
799 return 0; 865 return 0;
800 866
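
The mapping itself reduces to composing one leaf PTE and letting write_final_pte()/write_pte() mirror it to the device. A sketch of the leaf composition, with the masks standing in for the driver's definitions:

        /* sketch: the leaf PTE value handed to write_final_pte() above */
        static u64 sketch_leaf_pte(u64 phys_addr)
        {
        	return (phys_addr & PTE_PHYS_ADDR_MASK)	/* page frame bits */
        		| LAST_MASK			/* leaf: walk stops here */
        		| PAGE_PRESENT_MASK;		/* mapping is valid */
        }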
diff --git a/drivers/misc/habanalabs/pci.c b/drivers/misc/habanalabs/pci.c
new file mode 100644
index 000000000000..0e78a04d63f4
--- /dev/null
+++ b/drivers/misc/habanalabs/pci.c
@@ -0,0 +1,408 @@
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Copyright 2016-2019 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8#include "habanalabs.h"
9#include "include/hw_ip/pci/pci_general.h"
10
11#include <linux/pci.h>
12
13/**
14 * hl_pci_bars_map() - Map PCI BARs.
15 * @hdev: Pointer to hl_device structure.
16 * @name: Array of BAR names.
17 * @is_wc: Array of per-BAR flags: true if a write-combined mapping is needed.
18 *
19 * Request PCI regions and map them to kernel virtual addresses.
20 *
21 * Return: 0 on success, non-zero for failure.
22 */
23int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
24 bool is_wc[3])
25{
26 struct pci_dev *pdev = hdev->pdev;
27 int rc, i, bar;
28
29 rc = pci_request_regions(pdev, HL_NAME);
30 if (rc) {
31 dev_err(hdev->dev, "Cannot obtain PCI resources\n");
32 return rc;
33 }
34
35 for (i = 0 ; i < 3 ; i++) {
36 bar = i * 2; /* 64-bit BARs */
37 hdev->pcie_bar[bar] = is_wc[i] ?
38 pci_ioremap_wc_bar(pdev, bar) :
39 pci_ioremap_bar(pdev, bar);
40 if (!hdev->pcie_bar[bar]) {
41 dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n",
42 is_wc[i] ? "_wc" : "", name[i]);
43 rc = -ENODEV;
44 goto err;
45 }
46 }
47
48 return 0;
49
50err:
51 for (i = 2 ; i >= 0 ; i--) {
52 bar = i * 2; /* 64-bit BARs */
53 if (hdev->pcie_bar[bar])
54 iounmap(hdev->pcie_bar[bar]);
55 }
56
57 pci_release_regions(pdev);
58
59 return rc;
60}
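
A hedged usage sketch of hl_pci_bars_map(); the BAR names and write-combine choices below are invented for illustration (each ASIC back-end supplies its own):

        static int sketch_bars_map(struct hl_device *hdev)
        {
        	static const char * const name[] = {"CFG", "MSP", "DDR"};
        	bool is_wc[3] = {false, false, true};	/* write-combine DDR only */

        	return hl_pci_bars_map(hdev, name, is_wc);
        }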
61
62/*
63 * hl_pci_bars_unmap() - Unmap PCI BARS.
64 * @hdev: Pointer to hl_device structure.
65 *
66 * Release all PCI BARs and unmap their virtual addresses.
67 */
68static void hl_pci_bars_unmap(struct hl_device *hdev)
69{
70 struct pci_dev *pdev = hdev->pdev;
71 int i, bar;
72
73 for (i = 2 ; i >= 0 ; i--) {
74 bar = i * 2; /* 64-bit BARs */
75 iounmap(hdev->pcie_bar[bar]);
76 }
77
78 pci_release_regions(pdev);
79}
80
81/*
82 * hl_pci_elbi_write() - Write through the ELBI interface.
83 * @hdev: Pointer to hl_device structure.
84 *
85 * Return: 0 on success, negative value for failure.
86 */
87static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
88{
89 struct pci_dev *pdev = hdev->pdev;
90 ktime_t timeout;
91 u32 val;
92
93 /* Clear previous status */
94 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);
95
96 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
97 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
98 pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
99 PCI_CONFIG_ELBI_CTRL_WRITE);
100
101 timeout = ktime_add_ms(ktime_get(), 10);
102 for (;;) {
103 pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
104 if (val & PCI_CONFIG_ELBI_STS_MASK)
105 break;
106 if (ktime_compare(ktime_get(), timeout) > 0) {
107 pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
108 &val);
109 break;
110 }
111
112 usleep_range(300, 500);
113 }
114
115 if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
116 return 0;
117
118 if (val & PCI_CONFIG_ELBI_STS_ERR) {
119 dev_err(hdev->dev, "Error writing to ELBI\n");
120 return -EIO;
121 }
122
123 if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
124 dev_err(hdev->dev, "ELBI write didn't finish in time\n");
125 return -EIO;
126 }
127
128 dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
129 return -EIO;
130}
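
The completion loop above is an instance of the common ktime-based poll-with-deadline idiom. Its skeleton, with read_status() as a placeholder for the register read:

        static int sketch_poll_done(u32 (*read_status)(void), u32 done_mask)
        {
        	ktime_t timeout = ktime_add_ms(ktime_get(), 10);	/* 10 ms budget */
        	u32 val;

        	for (;;) {
        		val = read_status();
        		if (val & done_mask)
        			return 0;
        		if (ktime_compare(ktime_get(), timeout) > 0)
        			return -ETIMEDOUT;
        		usleep_range(300, 500);	/* be kind to the bus */
        	}
        }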
131
132/**
133 * hl_pci_iatu_write() - iATU write routine.
134 * @hdev: Pointer to hl_device structure.
135 *
136 * Return: 0 on success, negative value for failure.
137 */
138int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
139{
140 struct asic_fixed_properties *prop = &hdev->asic_prop;
141 u32 dbi_offset;
142 int rc;
143
144 dbi_offset = addr & 0xFFF;
145
146 rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
147 rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
148 data);
149
150 if (rc)
151 return -EIO;
152
153 return 0;
154}
155
156/*
157 * hl_pci_reset_link_through_bridge() - Reset PCI link.
158 * @hdev: Pointer to hl_device structure.
159 */
160static void hl_pci_reset_link_through_bridge(struct hl_device *hdev)
161{
162 struct pci_dev *pdev = hdev->pdev;
163 struct pci_dev *parent_port;
164 u16 val;
165
166 parent_port = pdev->bus->self;
167 pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
168 val |= PCI_BRIDGE_CTL_BUS_RESET;
169 pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
170 ssleep(1);
171
172 val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
173 pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
174 ssleep(3);
175}
176
177/**
178 * hl_pci_set_dram_bar_base() - Set DDR BAR to map a specific device address.
179 * @hdev: Pointer to hl_device structure.
180 * @inbound_region: Inbound region number.
181 * @bar: PCI BAR number.
182 * @addr: Address in DRAM. Must be aligned to the DRAM BAR size.
183 *
184 * Configure the iATU so that the DRAM BAR will start at the specified address.
185 *
186 * Return: 0 on success, negative value for failure.
187 */
188int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
189 u64 addr)
190{
191 struct asic_fixed_properties *prop = &hdev->asic_prop;
192 u32 offset;
193 int rc;
194
195 switch (inbound_region) {
196 case 0:
197 offset = 0x100;
198 break;
199 case 1:
200 offset = 0x300;
201 break;
202 case 2:
203 offset = 0x500;
204 break;
205 default:
206 dev_err(hdev->dev, "Invalid inbound region %d\n",
207 inbound_region);
208 return -EINVAL;
209 }
210
211 if (bar != 0 && bar != 2 && bar != 4) {
212 dev_err(hdev->dev, "Invalid PCI BAR %d\n", bar);
213 return -EINVAL;
214 }
215
216 /* Point to the specified address */
217 rc = hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(addr));
218 rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(addr));
219 rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);
220 /* Enable + BAR match + match enable + BAR number */
221 rc |= hl_pci_iatu_write(hdev, offset + 0x4, 0xC0080000 | (bar << 8));
222
223 /* Return the DBI window to the default location */
224 rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
225 rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
226
227 if (rc)
228 dev_err(hdev->dev, "failed to map DRAM bar to 0x%08llx\n",
229 addr);
230
231 return rc;
232}
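
A usage sketch; inbound region 2 and BAR 4 as the DRAM BAR are hypothetical choices (the real numbers are per-ASIC):

        static int sketch_move_dram_bar(struct hl_device *hdev, u64 dram_addr)
        {
        	/* dram_addr must be aligned to the DRAM BAR size */
        	return hl_pci_set_dram_bar_base(hdev, 2, 4, dram_addr);
        }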
233
234/**
235 * hl_pci_init_iatu() - Initialize the iATU unit inside the PCI controller.
236 * @hdev: Pointer to hl_device structure.
237 * @sram_base_address: SRAM base address.
238 * @dram_base_address: DRAM base address.
239 * @host_phys_base_address: Base physical address of host memory for device
240 * transactions.
241 * @host_phys_size: Size of host memory for device transactions.
242 *
243 * This is needed in case the firmware doesn't initialize the iATU.
244 *
245 * Return: 0 on success, negative value for failure.
246 */
247int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
248 u64 dram_base_address, u64 host_phys_base_address,
249 u64 host_phys_size)
250{
251 struct asic_fixed_properties *prop = &hdev->asic_prop;
252 u64 host_phys_end_addr;
253 int rc = 0;
254
255 /* Inbound Region 0 - Bar 0 - Point to SRAM base address */
256 rc = hl_pci_iatu_write(hdev, 0x114, lower_32_bits(sram_base_address));
257 rc |= hl_pci_iatu_write(hdev, 0x118, upper_32_bits(sram_base_address));
258 rc |= hl_pci_iatu_write(hdev, 0x100, 0);
259 /* Enable + Bar match + match enable */
260 rc |= hl_pci_iatu_write(hdev, 0x104, 0xC0080000);
261
262 /* Point to DRAM */
263 if (!hdev->asic_funcs->set_dram_bar_base)
264 return -EINVAL;
265 if (hdev->asic_funcs->set_dram_bar_base(hdev, dram_base_address) ==
266 U64_MAX)
267 return -EIO;
268
270 /* Outbound Region 0 - Point to Host */
271 host_phys_end_addr = host_phys_base_address + host_phys_size - 1;
272 rc |= hl_pci_iatu_write(hdev, 0x008,
273 lower_32_bits(host_phys_base_address));
274 rc |= hl_pci_iatu_write(hdev, 0x00C,
275 upper_32_bits(host_phys_base_address));
276 rc |= hl_pci_iatu_write(hdev, 0x010, lower_32_bits(host_phys_end_addr));
277 rc |= hl_pci_iatu_write(hdev, 0x014, 0);
278 rc |= hl_pci_iatu_write(hdev, 0x018, 0);
279 rc |= hl_pci_iatu_write(hdev, 0x020, upper_32_bits(host_phys_end_addr));
280 /* Increase region size */
281 rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
282 /* Enable */
283 rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
284
285 /* Return the DBI window to the default location */
286 rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
287 rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
288
289 if (rc)
290 return -EIO;
291
292 return 0;
293}
294
295/**
296 * hl_pci_set_dma_mask() - Set DMA masks for the device.
297 * @hdev: Pointer to hl_device structure.
298 * @dma_mask: number of bits for the requested dma mask.
299 *
300 * This function sets the DMA masks (regular and consistent) to the specified
301 * value. If that fails, it falls back to the mask stored in hdev->dma_mask.
302 *
303 * Return: 0 on success, non-zero for failure.
304 */
305int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask)
306{
307 struct pci_dev *pdev = hdev->pdev;
308 int rc;
309
310 /* set DMA mask */
311 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
312 if (rc) {
313 dev_warn(hdev->dev,
314 "Failed to set pci dma mask to %d bits, error %d\n",
315 dma_mask, rc);
316
317 dma_mask = hdev->dma_mask;
318
319 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
320 if (rc) {
321 dev_err(hdev->dev,
322 "Failed to set pci dma mask to %d bits, error %d\n",
323 dma_mask, rc);
324 return rc;
325 }
326 }
327
328 /*
329 * We managed to set the dma mask, so update the dma mask field. If
330 * setting the coherent mask with that value fails, we will fail the
331 * entire function.
332 */
333 hdev->dma_mask = dma_mask;
334
335 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
336 if (rc) {
337 dev_err(hdev->dev,
338 "Failed to set pci consistent dma mask to %d bits, error %d\n",
339 dma_mask, rc);
340 return rc;
341 }
342
343 return 0;
344}
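
The try-then-fall-back idiom above, reduced to a sketch with illustrative widths (48 bits preferred, 32 as the fallback):

        static int sketch_dma_mask(struct pci_dev *pdev)
        {
        	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(48)) &&
        	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)))
        		return 0;

        	/* wide mask rejected: retry with a narrower one */
        	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
        	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
        		return 0;

        	return -EIO;
        }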
345
346/**
347 * hl_pci_init() - PCI initialization code.
348 * @hdev: Pointer to hl_device structure.
349 * @dma_mask: number of bits for the requested dma mask.
350 *
351 * Set DMA masks, initialize the PCI controller and map the PCI BARs.
352 *
353 * Return: 0 on success, non-zero for failure.
354 */
355int hl_pci_init(struct hl_device *hdev, u8 dma_mask)
356{
357 struct pci_dev *pdev = hdev->pdev;
358 int rc;
359
360 rc = hl_pci_set_dma_mask(hdev, dma_mask);
361 if (rc)
362 return rc;
363
364 if (hdev->reset_pcilink)
365 hl_pci_reset_link_through_bridge(hdev);
366
367 rc = pci_enable_device_mem(pdev);
368 if (rc) {
369 dev_err(hdev->dev, "can't enable PCI device\n");
370 return rc;
371 }
372
373 pci_set_master(pdev);
374
375 rc = hdev->asic_funcs->init_iatu(hdev);
376 if (rc) {
377 dev_err(hdev->dev, "Failed to initialize iATU\n");
378 goto disable_device;
379 }
380
381 rc = hdev->asic_funcs->pci_bars_map(hdev);
382 if (rc) {
383 dev_err(hdev->dev, "Failed to initialize PCI BARs\n");
384 goto disable_device;
385 }
386
387 return 0;
388
389disable_device:
390 pci_clear_master(pdev);
391 pci_disable_device(pdev);
392
393 return rc;
394}
395
396/**
397 * hl_pci_fini() - PCI finalization code.
398 * @hdev: Pointer to hl_device structure.
399 *
400 * Unmap PCI BARs and disable the PCI device.
401 */
402void hl_pci_fini(struct hl_device *hdev)
403{
404 hl_pci_bars_unmap(hdev);
405
406 pci_clear_master(hdev->pdev);
407 pci_disable_device(hdev->pdev);
408}
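
hl_pci_init() and hl_pci_fini() are meant to bracket a device's lifetime. A hedged sketch of how an ASIC probe path might pair them (the 48-bit mask is illustrative):

        static int sketch_probe_pci(struct hl_device *hdev)
        {
        	int rc;

        	rc = hl_pci_init(hdev, 48);	/* DMA masks, iATU, BAR mapping */
        	if (rc)
        		return rc;

        	/*
        	 * ... remainder of device bring-up; any later failure must
        	 * unwind with hl_pci_fini(hdev).
        	 */
        	return 0;
        }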
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index de20bdaa148d..8b01257783dd 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -1135,7 +1135,7 @@ static void kgdbts_put_char(u8 chr)
1135static int param_set_kgdbts_var(const char *kmessage, 1135static int param_set_kgdbts_var(const char *kmessage,
1136 const struct kernel_param *kp) 1136 const struct kernel_param *kp)
1137{ 1137{
1138 int len = strlen(kmessage); 1138 size_t len = strlen(kmessage);
1139 1139
1140 if (len >= MAX_CONFIG_LEN) { 1140 if (len >= MAX_CONFIG_LEN) {
1141 printk(KERN_ERR "kgdbts: config string too long\n"); 1141 printk(KERN_ERR "kgdbts: config string too long\n");
@@ -1155,7 +1155,7 @@ static int param_set_kgdbts_var(const char *kmessage,
1155 1155
1156 strcpy(config, kmessage); 1156 strcpy(config, kmessage);
1157 /* Chop out \n char as a result of echo */ 1157 /* Chop out \n char as a result of echo */
1158 if (config[len - 1] == '\n') 1158 if (len && config[len - 1] == '\n')
1159 config[len - 1] = '\0'; 1159 config[len - 1] = '\0';
1160 1160
1161 /* Go and configure with the new params. */ 1161 /* Go and configure with the new params. */
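
The kgdbts fix guards two hazards at once: strlen() returns size_t, and for an empty string config[len - 1] would index out of bounds (with an unsigned len, len - 1 wraps to SIZE_MAX). The safe trailing-newline chop, as a standalone sketch:

        #include <string.h>

        static void chop_newline(char *s)
        {
        	size_t len = strlen(s);

        	/* the len check keeps len - 1 from wrapping on an empty string */
        	if (len && s[len - 1] == '\n')
        		s[len - 1] = '\0';
        }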
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 74e2c667dce0..9d7b3719bfa0 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -1,3 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0
2# Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
1config INTEL_MEI 3config INTEL_MEI
2 tristate "Intel Management Engine Interface" 4 tristate "Intel Management Engine Interface"
3 depends on X86 && PCI 5 depends on X86 && PCI
@@ -44,12 +46,4 @@ config INTEL_MEI_TXE
44 Supported SoCs: 46 Supported SoCs:
45 Intel Bay Trail 47 Intel Bay Trail
46 48
47config INTEL_MEI_HDCP 49source "drivers/misc/mei/hdcp/Kconfig"
48 tristate "Intel HDCP2.2 services of ME Interface"
49 select INTEL_MEI_ME
50 depends on DRM_I915
51 help
52 MEI Support for HDCP2.2 Services on Intel platforms.
53
54 Enables the ME FW services required for HDCP2.2 support through
55 I915 display driver of Intel.
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 8c2d9565a4cb..f1c76f7ee804 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -1,7 +1,7 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# 2#
3# Copyright (c) 2010-2019, Intel Corporation. All rights reserved.
3# Makefile - Intel Management Engine Interface (Intel MEI) Linux driver 4# Makefile - Intel Management Engine Interface (Intel MEI) Linux driver
4# Copyright (c) 2010-2014, Intel Corporation.
5# 5#
6obj-$(CONFIG_INTEL_MEI) += mei.o 6obj-$(CONFIG_INTEL_MEI) += mei.o
7mei-objs := init.o 7mei-objs := init.o
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 5fcac02233af..32e9b1aed2ca 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -1,17 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * 3 * Copyright (c) 2013-2019, Intel Corporation. All rights reserved.
3 * Intel Management Engine Interface (Intel MEI) Linux driver 4 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2018, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */ 5 */
16 6
17#include <linux/kernel.h> 7#include <linux/kernel.h>
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 65bec998eb6e..985bd4fd3328 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -1,16 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
3 * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
2 * Intel Management Engine Interface (Intel MEI) Linux driver 4 * Intel Management Engine Interface (Intel MEI) Linux driver
3 * Copyright (c) 2012-2013, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */ 5 */
15 6
16#include <linux/module.h> 7#include <linux/module.h>
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index ca4c9cc218a2..1e3edbbacb1e 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1,17 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * 3 * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
3 * Intel Management Engine Interface (Intel MEI) Linux driver 4 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */ 5 */
16 6
17#include <linux/sched/signal.h> 7#include <linux/sched/signal.h>
@@ -679,7 +669,7 @@ int mei_cl_unlink(struct mei_cl *cl)
679 669
680void mei_host_client_init(struct mei_device *dev) 670void mei_host_client_init(struct mei_device *dev)
681{ 671{
682 dev->dev_state = MEI_DEV_ENABLED; 672 mei_set_devstate(dev, MEI_DEV_ENABLED);
683 dev->reset_count = 0; 673 dev->reset_count = 0;
684 674
685 schedule_work(&dev->bus_rescan_work); 675 schedule_work(&dev->bus_rescan_work);
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 64e318f589b4..c1f9e810cf81 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -1,17 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * 3 * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
3 * Intel Management Engine Interface (Intel MEI) Linux driver 4 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */ 5 */
16 6
17#ifndef _MEI_CLIENT_H_ 7#ifndef _MEI_CLIENT_H_
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 7b5df8fd6c5a..0970142bcace 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * 3 * Copyright (c) 2012-2016, Intel Corporation. All rights reserved
3 * Intel Management Engine Interface (Intel MEI) Linux driver 4 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2012-2013, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */ 5 */
6
16#include <linux/slab.h> 7#include <linux/slab.h>
17#include <linux/kernel.h> 8#include <linux/kernel.h>
18#include <linux/device.h> 9#include <linux/device.h>
diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c
index 795641b82181..ef56f849b251 100644
--- a/drivers/misc/mei/dma-ring.c
+++ b/drivers/misc/mei/dma-ring.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 3 * Copyright(c) 2016-2018 Intel Corporation. All rights reserved.
4 */ 4 */
5#include <linux/dma-mapping.h> 5#include <linux/dma-mapping.h>
6#include <linux/mei.h> 6#include <linux/mei.h>
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index e6207f614816..a44094cdbc36 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1,19 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * 3 * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
3 * Intel Management Engine Interface (Intel MEI) Linux driver 4 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */ 5 */
16
17#include <linux/export.h> 6#include <linux/export.h>
18#include <linux/sched.h> 7#include <linux/sched.h>
19#include <linux/wait.h> 8#include <linux/wait.h>
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 0171a7e79bab..5aa58cffdd2e 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -1,17 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * 3 * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
3 * Intel Management Engine Interface (Intel MEI) Linux driver 4 * Intel Management Engine Interface (Intel MEI) Linux driver
4 * Copyright (c) 2003-2012, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */ 5 */
16 6
17#ifndef _MEI_HBM_H_ 7#ifndef _MEI_HBM_H_
diff --git a/drivers/misc/mei/hdcp/Kconfig b/drivers/misc/mei/hdcp/Kconfig
new file mode 100644
index 000000000000..95b2d6d37f10
--- /dev/null
+++ b/drivers/misc/mei/hdcp/Kconfig
@@ -0,0 +1,13 @@
2# SPDX-License-Identifier: GPL-2.0
3# Copyright (c) 2019, Intel Corporation. All rights reserved.
4#
5config INTEL_MEI_HDCP
6 tristate "Intel HDCP2.2 services of ME Interface"
7 select INTEL_MEI_ME
8 depends on DRM_I915
9 help
10 MEI Support for HDCP2.2 Services on Intel platforms.
11
12 Enables the ME FW services required for HDCP2.2 support through
13 I915 display driver of Intel.
diff --git a/drivers/misc/mei/hdcp/Makefile b/drivers/misc/mei/hdcp/Makefile
index adbe7506282d..3fbb56485ce8 100644
--- a/drivers/misc/mei/hdcp/Makefile
+++ b/drivers/misc/mei/hdcp/Makefile
@@ -1,6 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# 2#
3# Copyright (c) 2019, Intel Corporation. 3# Copyright (c) 2019, Intel Corporation. All rights reserved.
4# 4#
5# Makefile - HDCP client driver for Intel MEI Bus Driver. 5# Makefile - HDCP client driver for Intel MEI Bus Driver.
6 6
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 90b6ae8e9dae..b07000202d4a 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: (GPL-2.0) 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Copyright © 2019 Intel Corporation 3 * Copyright © 2019 Intel Corporation
4 * 4 *
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h
index 5f74b908e486..e4b1cd54c853 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.h
+++ b/drivers/misc/mei/hdcp/mei_hdcp.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: (GPL-2.0+) */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright © 2019 Intel Corporation 3 * Copyright © 2019 Intel Corporation
4 * 4 *
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index bb1ee9834a02..d74b182e19f3 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -1,68 +1,8 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Intel MEI Interface Header
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *	Intel Corporation.
- *	linux-mei@linux.intel.com
- *	http://www.intel.com
- *
- * BSD LICENSE
- *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
+ */
 #ifndef _MEI_HW_MEI_REGS_H_
 #define _MEI_HW_MEI_REGS_H_
 
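
For reference, the tag forms used throughout this series: each conversion collapses sixty-plus lines of license boilerplate into one machine-readable identifier. Per the kernel's license-rules documentation (this summary is not part of the patch itself), C sources carry the tag in a C++-style comment on line one, headers use a C comment, and dual-licensed files combine identifiers with OR:

	// SPDX-License-Identifier: GPL-2.0                      (.c source)
	/* SPDX-License-Identifier: GPL-2.0 */                   (header)
	/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */   (dual-licensed header)
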
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 8a47a6fc3fc7..abe1b1f4362f 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
 
 #include <linux/pci.h>
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index bbcc5fc106cd..08c84a0de4a8 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- *
+ * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
 
-
-
 #ifndef _MEI_INTERFACE_H_
 #define _MEI_INTERFACE_H_
 
diff --git a/drivers/misc/mei/hw-txe-regs.h b/drivers/misc/mei/hw-txe-regs.h
index f19229c4e655..a92b306dac8b 100644
--- a/drivers/misc/mei/hw-txe-regs.h
+++ b/drivers/misc/mei/hw-txe-regs.h
@@ -1,63 +1,8 @@
-/******************************************************************************
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (c) 2013-2014, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Intel MEI Interface Header
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING
- *
- * Contact Information:
- *	Intel Corporation.
- *	linux-mei@linux.intel.com
- *	http://www.intel.com
- *
- * BSD LICENSE
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
+ */
 #ifndef _MEI_HW_TXE_REGS_H_
 #define _MEI_HW_TXE_REGS_H_
 
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 8449fe0367ff..5e58656b8e19 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2013-2014, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
 
 #include <linux/pci.h>
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h
index e1e8b66d7648..96511b04bf88 100644
--- a/drivers/misc/mei/hw-txe.h
+++ b/drivers/misc/mei/hw-txe.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- *
+ * Copyright (c) 2013-2016, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
 
 #ifndef _MEI_HW_TXE_H_
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index b7d2487b8409..d025a5f8317e 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
 
 #ifndef _MEI_HW_TYPES_H_
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index eb026e2a0537..b9fef773e71b 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2012-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
 
 #include <linux/export.h>
@@ -133,12 +123,12 @@ int mei_reset(struct mei_device *dev)
 
 	/* enter reset flow */
 	interrupts_enabled = state != MEI_DEV_POWER_DOWN;
-	dev->dev_state = MEI_DEV_RESETTING;
+	mei_set_devstate(dev, MEI_DEV_RESETTING);
 
 	dev->reset_count++;
 	if (dev->reset_count > MEI_MAX_CONSEC_RESET) {
 		dev_err(dev->dev, "reset: reached maximal consecutive resets: disabling the device\n");
-		dev->dev_state = MEI_DEV_DISABLED;
+		mei_set_devstate(dev, MEI_DEV_DISABLED);
 		return -ENODEV;
 	}
 
@@ -160,7 +150,7 @@ int mei_reset(struct mei_device *dev)
 
 	if (state == MEI_DEV_POWER_DOWN) {
 		dev_dbg(dev->dev, "powering down: end of reset\n");
-		dev->dev_state = MEI_DEV_DISABLED;
+		mei_set_devstate(dev, MEI_DEV_DISABLED);
 		return 0;
 	}
 
@@ -172,11 +162,11 @@ int mei_reset(struct mei_device *dev)
 
 	dev_dbg(dev->dev, "link is established start sending messages.\n");
 
-	dev->dev_state = MEI_DEV_INIT_CLIENTS;
+	mei_set_devstate(dev, MEI_DEV_INIT_CLIENTS);
 	ret = mei_hbm_start_req(dev);
 	if (ret) {
 		dev_err(dev->dev, "hbm_start failed ret = %d\n", ret);
-		dev->dev_state = MEI_DEV_RESETTING;
+		mei_set_devstate(dev, MEI_DEV_RESETTING);
 		return ret;
 	}
 
@@ -206,7 +196,7 @@ int mei_start(struct mei_device *dev)
 
 	dev->reset_count = 0;
 	do {
-		dev->dev_state = MEI_DEV_INITIALIZING;
+		mei_set_devstate(dev, MEI_DEV_INITIALIZING);
 		ret = mei_reset(dev);
 
 		if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
@@ -241,7 +231,7 @@ int mei_start(struct mei_device *dev)
 	return 0;
 err:
 	dev_err(dev->dev, "link layer initialization failed.\n");
-	dev->dev_state = MEI_DEV_DISABLED;
+	mei_set_devstate(dev, MEI_DEV_DISABLED);
 	mutex_unlock(&dev->device_lock);
 	return -ENODEV;
 }
@@ -260,7 +250,7 @@ int mei_restart(struct mei_device *dev)
 
 	mutex_lock(&dev->device_lock);
 
-	dev->dev_state = MEI_DEV_POWER_UP;
+	mei_set_devstate(dev, MEI_DEV_POWER_UP);
 	dev->reset_count = 0;
 
 	err = mei_reset(dev);
@@ -311,7 +301,7 @@ void mei_stop(struct mei_device *dev)
 	dev_dbg(dev->dev, "stopping the device.\n");
 
 	mutex_lock(&dev->device_lock);
-	dev->dev_state = MEI_DEV_POWER_DOWN;
+	mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
 	mutex_unlock(&dev->device_lock);
 	mei_cl_bus_remove_devices(dev);
 
@@ -324,7 +314,7 @@ void mei_stop(struct mei_device *dev)
 
 	mei_reset(dev);
 	/* move device to disabled state unconditionally */
-	dev->dev_state = MEI_DEV_DISABLED;
+	mei_set_devstate(dev, MEI_DEV_DISABLED);
 
 	mutex_unlock(&dev->device_lock);
 }
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 055c2d89b310..c70a8c74cc57 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
 
-
 #include <linux/export.h>
 #include <linux/kthread.h>
 #include <linux/interrupt.h>
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 87281b3695e6..ad02097d7fee 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2018, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
@@ -37,6 +28,12 @@
 #include "mei_dev.h"
 #include "client.h"
 
+static struct class *mei_class;
+static dev_t mei_devt;
+#define MEI_MAX_DEVS MINORMASK
+static DEFINE_MUTEX(mei_minor_lock);
+static DEFINE_IDR(mei_idr);
+
 /**
  * mei_open - the open function
  *
@@ -838,12 +835,65 @@ static ssize_t fw_ver_show(struct device *device,
 }
 static DEVICE_ATTR_RO(fw_ver);
 
+/**
+ * dev_state_show - display device state
+ *
+ * @device: device pointer
+ * @attr: attribute pointer
+ * @buf: char out buffer
+ *
+ * Return: number of the bytes printed into buf or error
+ */
+static ssize_t dev_state_show(struct device *device,
+			      struct device_attribute *attr, char *buf)
+{
+	struct mei_device *dev = dev_get_drvdata(device);
+	enum mei_dev_state dev_state;
+
+	mutex_lock(&dev->device_lock);
+	dev_state = dev->dev_state;
+	mutex_unlock(&dev->device_lock);
+
+	return sprintf(buf, "%s", mei_dev_state_str(dev_state));
+}
+static DEVICE_ATTR_RO(dev_state);
+
+static int match_devt(struct device *dev, const void *data)
+{
+	const dev_t *devt = data;
+
+	return dev->devt == *devt;
+}
+
+/**
+ * dev_set_devstate: set to new device state and notify sysfs file.
+ *
+ * @dev: mei_device
+ * @state: new device state
+ */
+void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
+{
+	struct device *clsdev;
+
+	if (dev->dev_state == state)
+		return;
+
+	dev->dev_state = state;
+
+	clsdev = class_find_device(mei_class, NULL, &dev->cdev.dev, match_devt);
+	if (clsdev) {
+		sysfs_notify(&clsdev->kobj, NULL, "dev_state");
+		put_device(clsdev);
+	}
+}
+
 static struct attribute *mei_attrs[] = {
 	&dev_attr_fw_status.attr,
 	&dev_attr_hbm_ver.attr,
 	&dev_attr_hbm_ver_drv.attr,
 	&dev_attr_tx_queue_limit.attr,
 	&dev_attr_fw_ver.attr,
+	&dev_attr_dev_state.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(mei);
@@ -867,12 +917,6 @@ static const struct file_operations mei_fops = {
 	.llseek = no_llseek
 };
 
-static struct class *mei_class;
-static dev_t mei_devt;
-#define MEI_MAX_DEVS MINORMASK
-static DEFINE_MUTEX(mei_minor_lock);
-static DEFINE_IDR(mei_idr);
-
 /**
  * mei_minor_get - obtain next free device minor number
  *
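
The mei_set_devstate() helper added above pairs sysfs_notify() with the new read-only dev_state attribute, so userspace can sleep until the device changes state instead of re-reading the file. A minimal, illustrative watcher, assuming a single device exposed as /sys/class/mei/mei0 (the path is an assumption for the example, not part of this patch):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[32];
		int fd = open("/sys/class/mei/mei0/dev_state", O_RDONLY);
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
		ssize_t n;

		if (fd < 0)
			return 1;
		for (;;) {
			/* reading (re)arms the notification, then poll() blocks */
			lseek(fd, 0, SEEK_SET);
			n = read(fd, buf, sizeof(buf) - 1);
			if (n < 0)
				break;
			buf[n] = '\0';
			printf("dev_state: %s\n", buf);
			poll(&pfd, 1, -1);	/* woken by sysfs_notify() */
		}
		close(fd);
		return 0;
	}
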
diff --git a/drivers/misc/mei/mei-trace.c b/drivers/misc/mei/mei-trace.c
index 374edde72a14..48d4c4fcefd2 100644
--- a/drivers/misc/mei/mei-trace.c
+++ b/drivers/misc/mei/mei-trace.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2015-2016, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
 #include <linux/module.h>
 
diff --git a/drivers/misc/mei/mei-trace.h b/drivers/misc/mei/mei-trace.h
index b52e9b97a7c0..df758033dc93 100644
--- a/drivers/misc/mei/mei-trace.h
+++ b/drivers/misc/mei/mei-trace.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- *
+ * Copyright (c) 2015-2016, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
 
 #if !defined(_MEI_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 685b78ce30a5..fca832fcac57 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -1,17 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
- *
+ * Copyright (c) 2003-2018, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2018, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
 
 #ifndef _MEI_DEV_H_
@@ -535,7 +525,6 @@ struct mei_device {
 	struct dentry *dbgfs_dir;
 #endif /* CONFIG_DEBUG_FS */
 
-
 	const struct mei_hw_ops *ops;
 	char hw[0] __aligned(sizeof(void *));
 };
@@ -594,6 +583,8 @@ int mei_restart(struct mei_device *dev);
 void mei_stop(struct mei_device *dev);
 void mei_cancel_work(struct mei_device *dev);
 
+void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state);
+
 int mei_dmam_ring_alloc(struct mei_device *dev);
 void mei_dmam_ring_free(struct mei_device *dev);
 bool mei_dma_ring_is_allocated(struct mei_device *dev);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 3ab946ad3257..7a2b3545a7f9 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index e1b909123fb0..2e37fc2e0fa8 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2013-2017, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2013-2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
 */
 
 #include <linux/module.h>
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 9e443df44b3b..0c6de97dd347 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -572,6 +572,7 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
 
 		xpc_wakeup_channel_mgr(part);
 	}
+	/* fall through */
 	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
 		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
 		part_uv->flags |= XPC_P_ENGAGED_UV;
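
For context: with -Wimplicit-fallthrough enabled, GCC warns on an unannotated case-to-case fall, and a "fall through" comment (or, in later kernels, the fallthrough pseudo-keyword) marks the missing break as intentional. A hedged, generic sketch of the pattern with invented names, not code from this driver:

	static void do_activate(void) { }
	static void do_engage(void) { }

	static void dispatch(int type)
	{
		switch (type) {
		case 1:
			do_activate();
			/* fall through */	/* silences -Wimplicit-fallthrough */
		case 2:
			do_engage();
			break;
		default:
			break;
		}
	}
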
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index c48c3a1eb1f8..fcf31335a8b6 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1282,6 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev)
 		tbnet_tear_down(net, true);
 	}
 
+	tb_unregister_protocol_handler(&net->handler);
 	return 0;
 }
 
@@ -1290,6 +1291,8 @@ static int __maybe_unused tbnet_resume(struct device *dev)
 	struct tb_service *svc = tb_to_service(dev);
 	struct tbnet *net = tb_service_get_drvdata(svc);
 
+	tb_register_protocol_handler(&net->handler);
+
 	netif_carrier_off(net->dev);
 	if (netif_running(net->dev)) {
 		netif_device_attach(net->dev);
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 8a04c5e02999..0f43bb389566 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * MEI Library for mei bus nfc device access
- *
- * Copyright (C) 2013 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
+ * Copyright (c) 2013, Intel Corporation.
  *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * MEI Library for mei bus nfc device access
 */
-
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index eb5eddf1794e..5dad8847a9b3 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * HCI based Driver for Inside Secure microread NFC Chip
- *
- * Copyright (C) 2013 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
+ * Copyright (C) 2013 Intel Corporation. All rights reserved.
  *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * HCI based Driver for Inside Secure microread NFC Chip
 */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index ad57a8ec00d6..579bc599f545 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * HCI based Driver for NXP pn544 NFC Chip
- *
  * Copyright (C) 2013 Intel Corporation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * HCI based Driver for NXP pn544 NFC Chip
 */
 
 #include <linux/module.h>
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 530d570724c9..6b2c4254c2fb 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -13,6 +13,16 @@ menuconfig NVMEM
 
 if NVMEM
 
+config NVMEM_SYSFS
+	bool "/sys/bus/nvmem/devices/*/nvmem (sysfs interface)"
+	depends on SYSFS
+	default y
+	help
+	  Say Y here to add a sysfs interface for NVMEM.
+
+	  This interface is mostly used by userspace applications to
+	  read/write directly into nvmem.
+
 config NVMEM_IMX_IIM
 	tristate "i.MX IC Identification Module support"
 	depends on ARCH_MXC || COMPILE_TEST
@@ -25,8 +35,8 @@ config NVMEM_IMX_IIM
 	  will be called nvmem-imx-iim.
 
 config NVMEM_IMX_OCOTP
-	tristate "i.MX6 On-Chip OTP Controller support"
-	depends on SOC_IMX6 || SOC_IMX7D || COMPILE_TEST
+	tristate "i.MX 6/7/8 On-Chip OTP Controller support"
+	depends on ARCH_MXC || COMPILE_TEST
 	depends on HAS_IOMEM
 	help
 	  This is a driver for the On-Chip OTP Controller (OCOTP) available on
@@ -113,6 +123,16 @@ config NVMEM_BCM_OCOTP
 	  This driver can also be built as a module. If so, the module
 	  will be called nvmem-bcm-ocotp.
 
+config NVMEM_STM32_ROMEM
+	tristate "STMicroelectronics STM32 factory-programmed memory support"
+	depends on ARCH_STM32 || COMPILE_TEST
+	help
+	  Say y here to enable read-only access for STMicroelectronics STM32
+	  factory-programmed memory area.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem-stm32-romem.
+
 config NVMEM_SUNXI_SID
 	tristate "Allwinner SoCs SID support"
 	depends on ARCH_SUNXI
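
As the NVMEM_SYSFS help above says, the binary attribute is mostly consumed from userspace. A minimal, illustrative reader, assuming an i.MX OCOTP provider registered as "imx-ocotp0" (the device name is an assumption, not something this patch guarantees):

	#include <stdio.h>

	int main(void)
	{
		unsigned char buf[16];
		size_t n, i;
		FILE *f = fopen("/sys/bus/nvmem/devices/imx-ocotp0/nvmem", "rb");

		if (!f)
			return 1;
		/* the file exposes the raw device content, word-aligned */
		n = fread(buf, 1, sizeof(buf), f);
		for (i = 0; i < n; i++)
			printf("%02x ", buf[i]);
		printf("\n");
		fclose(f);
		return 0;
	}
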
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 2ece8ffffdda..c1fe4768dfef 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -6,6 +6,9 @@
 obj-$(CONFIG_NVMEM)		+= nvmem_core.o
 nvmem_core-y			:= core.o
 
+obj-$(CONFIG_NVMEM_SYSFS)	+= nvmem_sysfs.o
+nvmem_sysfs-y			:= nvmem-sysfs.o
+
 # Devices
 obj-$(CONFIG_NVMEM_BCM_OCOTP)	+= nvmem-bcm-ocotp.o
 nvmem-bcm-ocotp-y		:= bcm-ocotp.o
@@ -26,6 +29,8 @@ nvmem_qfprom-y := qfprom.o
 obj-$(CONFIG_ROCKCHIP_EFUSE)	+= nvmem_rockchip_efuse.o
 nvmem_rockchip_efuse-y		:= rockchip-efuse.o
 obj-$(CONFIG_NVMEM_SUNXI_SID)	+= nvmem_sunxi_sid.o
+nvmem_stm32_romem-y		:= stm32-romem.o
+obj-$(CONFIG_NVMEM_STM32_ROMEM)	+= nvmem_stm32_romem.o
 nvmem_sunxi_sid-y		:= sunxi_sid.o
 obj-$(CONFIG_UNIPHIER_EFUSE)	+= nvmem-uniphier-efuse.o
 nvmem-uniphier-efuse-y		:= uniphier-efuse.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index f24008b66826..c7892c3da91f 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -17,27 +17,7 @@
 #include <linux/nvmem-provider.h>
 #include <linux/of.h>
 #include <linux/slab.h>
-
-struct nvmem_device {
-	struct module *owner;
-	struct device dev;
-	int stride;
-	int word_size;
-	int id;
-	struct kref refcnt;
-	size_t size;
-	bool read_only;
-	int flags;
-	enum nvmem_type type;
-	struct bin_attribute eeprom;
-	struct device *base_dev;
-	struct list_head cells;
-	nvmem_reg_read_t reg_read;
-	nvmem_reg_write_t reg_write;
-	void *priv;
-};
-
-#define FLAG_COMPAT BIT(0)
+#include "nvmem.h"
 
 struct nvmem_cell {
 	const char *name;
@@ -61,18 +41,7 @@ static LIST_HEAD(nvmem_lookup_list);
 
 static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
 
-static const char * const nvmem_type_str[] = {
-	[NVMEM_TYPE_UNKNOWN] = "Unknown",
-	[NVMEM_TYPE_EEPROM] = "EEPROM",
-	[NVMEM_TYPE_OTP] = "OTP",
-	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
-};
-
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key eeprom_lock_key;
-#endif
-
-#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
 static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
 			  void *val, size_t bytes)
 {
@@ -91,187 +60,6 @@ static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
 	return -EINVAL;
 }
 
-static ssize_t type_show(struct device *dev,
-			 struct device_attribute *attr, char *buf)
-{
-	struct nvmem_device *nvmem = to_nvmem_device(dev);
-
-	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
-}
-
-static DEVICE_ATTR_RO(type);
-
-static struct attribute *nvmem_attrs[] = {
-	&dev_attr_type.attr,
-	NULL,
-};
-
-static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
-				   struct bin_attribute *attr,
-				   char *buf, loff_t pos, size_t count)
-{
-	struct device *dev;
-	struct nvmem_device *nvmem;
-	int rc;
-
-	if (attr->private)
-		dev = attr->private;
-	else
-		dev = container_of(kobj, struct device, kobj);
-	nvmem = to_nvmem_device(dev);
-
-	/* Stop the user from reading */
-	if (pos >= nvmem->size)
-		return 0;
-
-	if (count < nvmem->word_size)
-		return -EINVAL;
-
-	if (pos + count > nvmem->size)
-		count = nvmem->size - pos;
-
-	count = round_down(count, nvmem->word_size);
-
-	rc = nvmem_reg_read(nvmem, pos, buf, count);
-
-	if (rc)
-		return rc;
-
-	return count;
-}
-
-static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
-				    struct bin_attribute *attr,
-				    char *buf, loff_t pos, size_t count)
-{
-	struct device *dev;
-	struct nvmem_device *nvmem;
-	int rc;
-
-	if (attr->private)
-		dev = attr->private;
-	else
-		dev = container_of(kobj, struct device, kobj);
-	nvmem = to_nvmem_device(dev);
-
-	/* Stop the user from writing */
-	if (pos >= nvmem->size)
-		return -EFBIG;
-
-	if (count < nvmem->word_size)
-		return -EINVAL;
-
-	if (pos + count > nvmem->size)
-		count = nvmem->size - pos;
-
-	count = round_down(count, nvmem->word_size);
-
-	rc = nvmem_reg_write(nvmem, pos, buf, count);
-
-	if (rc)
-		return rc;
-
-	return count;
-}
-
-/* default read/write permissions */
-static struct bin_attribute bin_attr_rw_nvmem = {
-	.attr = {
-		.name = "nvmem",
-		.mode = 0644,
-	},
-	.read = bin_attr_nvmem_read,
-	.write = bin_attr_nvmem_write,
-};
-
-static struct bin_attribute *nvmem_bin_rw_attributes[] = {
-	&bin_attr_rw_nvmem,
-	NULL,
-};
-
-static const struct attribute_group nvmem_bin_rw_group = {
-	.bin_attrs = nvmem_bin_rw_attributes,
-	.attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_rw_dev_groups[] = {
-	&nvmem_bin_rw_group,
-	NULL,
-};
-
-/* read only permission */
-static struct bin_attribute bin_attr_ro_nvmem = {
-	.attr = {
-		.name = "nvmem",
-		.mode = 0444,
-	},
-	.read = bin_attr_nvmem_read,
-};
-
-static struct bin_attribute *nvmem_bin_ro_attributes[] = {
-	&bin_attr_ro_nvmem,
-	NULL,
-};
-
-static const struct attribute_group nvmem_bin_ro_group = {
-	.bin_attrs = nvmem_bin_ro_attributes,
-	.attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_ro_dev_groups[] = {
-	&nvmem_bin_ro_group,
-	NULL,
-};
-
-/* default read/write permissions, root only */
-static struct bin_attribute bin_attr_rw_root_nvmem = {
-	.attr = {
-		.name = "nvmem",
-		.mode = 0600,
-	},
-	.read = bin_attr_nvmem_read,
-	.write = bin_attr_nvmem_write,
-};
-
-static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
-	&bin_attr_rw_root_nvmem,
-	NULL,
-};
-
-static const struct attribute_group nvmem_bin_rw_root_group = {
-	.bin_attrs = nvmem_bin_rw_root_attributes,
-	.attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
-	&nvmem_bin_rw_root_group,
-	NULL,
-};
-
-/* read only permission, root only */
-static struct bin_attribute bin_attr_ro_root_nvmem = {
-	.attr = {
-		.name = "nvmem",
-		.mode = 0400,
-	},
-	.read = bin_attr_nvmem_read,
-};
-
-static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
-	&bin_attr_ro_root_nvmem,
-	NULL,
-};
-
-static const struct attribute_group nvmem_bin_ro_root_group = {
-	.bin_attrs = nvmem_bin_ro_root_attributes,
-	.attrs = nvmem_attrs,
-};
-
-static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
-	&nvmem_bin_ro_root_group,
-	NULL,
-};
-
 static void nvmem_release(struct device *dev)
 {
 	struct nvmem_device *nvmem = to_nvmem_device(dev);
@@ -422,43 +210,6 @@ err:
 	return rval;
 }
 
-/*
- * nvmem_setup_compat() - Create an additional binary entry in
- * drivers sys directory, to be backwards compatible with the older
- * drivers/misc/eeprom drivers.
- */
-static int nvmem_setup_compat(struct nvmem_device *nvmem,
-			      const struct nvmem_config *config)
-{
-	int rval;
-
-	if (!config->base_dev)
-		return -EINVAL;
-
-	if (nvmem->read_only)
-		nvmem->eeprom = bin_attr_ro_root_nvmem;
-	else
-		nvmem->eeprom = bin_attr_rw_root_nvmem;
-	nvmem->eeprom.attr.name = "eeprom";
-	nvmem->eeprom.size = nvmem->size;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	nvmem->eeprom.attr.key = &eeprom_lock_key;
-#endif
-	nvmem->eeprom.private = &nvmem->dev;
-	nvmem->base_dev = config->base_dev;
-
-	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
-	if (rval) {
-		dev_err(&nvmem->dev,
-			"Failed to create eeprom binary file %d\n", rval);
-		return rval;
-	}
-
-	nvmem->flags |= FLAG_COMPAT;
-
-	return 0;
-}
-
 /**
  * nvmem_register_notifier() - Register a notifier block for nvmem events.
  *
@@ -651,14 +402,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 	nvmem->read_only = device_property_present(config->dev, "read-only") ||
 			   config->read_only || !nvmem->reg_write;
 
-	if (config->root_only)
-		nvmem->dev.groups = nvmem->read_only ?
-			nvmem_ro_root_dev_groups :
-			nvmem_rw_root_dev_groups;
-	else
-		nvmem->dev.groups = nvmem->read_only ?
-			nvmem_ro_dev_groups :
-			nvmem_rw_dev_groups;
+	nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);
 
 	device_initialize(&nvmem->dev);
 
@@ -669,7 +413,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 		goto err_put_device;
 
 	if (config->compat) {
-		rval = nvmem_setup_compat(nvmem, config);
+		rval = nvmem_sysfs_setup_compat(nvmem, config);
 		if (rval)
 			goto err_device_del;
 	}
@@ -696,7 +440,7 @@ err_remove_cells:
 	nvmem_device_remove_all_cells(nvmem);
 err_teardown_compat:
 	if (config->compat)
-		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
+		nvmem_sysfs_remove_compat(nvmem, config);
 err_device_del:
 	device_del(&nvmem->dev);
 err_put_device:
@@ -1166,7 +910,7 @@ EXPORT_SYMBOL_GPL(nvmem_cell_put);
 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
 {
 	u8 *p, *b;
-	int i, bit_offset = cell->bit_offset;
+	int i, extra, bit_offset = cell->bit_offset;
 
 	p = b = buf;
 	if (bit_offset) {
@@ -1181,11 +925,16 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
 			p = b;
 			*b++ >>= bit_offset;
 		}
-
-		/* result fits in less bytes */
-		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
-			*p-- = 0;
+	} else {
+		/* point to the msb */
+		p += cell->bytes - 1;
 	}
+
+	/* result fits in less bytes */
+	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
+	while (--extra >= 0)
+		*p-- = 0;
+
 	/* clear msb bits if any leftover in the last byte */
 	*p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
 }
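
The hunk above fixes two problems in the old code: with bit_offset == 0, p still pointed at the LSB, so the single "*p-- = 0" zeroed live data, and even in the shifted case only one surplus byte could ever be cleared. A standalone, hedged sketch of the fixed normalization (the helper and test values are invented for illustration; a 6-bit value packed at bit offset 4 across two raw bytes):

	#include <stdio.h>

	#define BITS_PER_BYTE 8
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	static void shift_in_place(unsigned char *buf, size_t bytes,
				   unsigned int bit_offset, unsigned int nbits)
	{
		unsigned char *p = buf, *b = buf;
		size_t i;
		int extra;

		if (bit_offset) {
			*b++ >>= bit_offset;
			for (i = 1; i < bytes; i++) {
				*p |= *b << (BITS_PER_BYTE - bit_offset);
				p = b;
				*b++ >>= bit_offset;
			}
		} else {
			p += bytes - 1;	/* point to the msb, as the fix does */
		}

		/* zero every byte the value no longer occupies */
		extra = bytes - DIV_ROUND_UP(nbits, BITS_PER_BYTE);
		while (--extra >= 0)
			*p-- = 0;

		/* guard added here; the kernel uses GENMASK() unconditionally */
		if (nbits % BITS_PER_BYTE)
			*p &= (1u << (nbits % BITS_PER_BYTE)) - 1;
	}

	int main(void)
	{
		unsigned char buf[2] = { 0xa0, 0x02 };

		shift_in_place(buf, sizeof(buf), 4, 6);
		printf("%02x %02x\n", buf[0], buf[1]);	/* prints: 2a 00 */
		return 0;
	}
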
@@ -1335,6 +1084,43 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
 EXPORT_SYMBOL_GPL(nvmem_cell_write);
 
 /**
+ * nvmem_cell_read_u16() - Read a cell value as an u16
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+ * @val: pointer to output value.
+ *
+ * Return: 0 on success or negative errno.
+ */
+int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
+{
+	struct nvmem_cell *cell;
+	void *buf;
+	size_t len;
+
+	cell = nvmem_cell_get(dev, cell_id);
+	if (IS_ERR(cell))
+		return PTR_ERR(cell);
+
+	buf = nvmem_cell_read(cell, &len);
+	if (IS_ERR(buf)) {
+		nvmem_cell_put(cell);
+		return PTR_ERR(buf);
+	}
+	if (len != sizeof(*val)) {
+		kfree(buf);
+		nvmem_cell_put(cell);
+		return -EINVAL;
+	}
+	memcpy(val, buf, sizeof(*val));
+	kfree(buf);
+	nvmem_cell_put(cell);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
+
+/**
  * nvmem_cell_read_u32() - Read a cell value as an u32
  *
  * @dev: Device that requests the nvmem cell.
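
A hedged consumer sketch for the new helper (declared for consumers in linux/nvmem-consumer.h; the driver, device and cell names here are invented). The helper handles get/read/put and length checking in one call:

	#include <linux/module.h>
	#include <linux/nvmem-consumer.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		u16 cal;
		int ret;

		/* "calib" must be a 2-byte cell, or the helper returns -EINVAL */
		ret = nvmem_cell_read_u16(&pdev->dev, "calib", &cal);
		if (ret)
			return ret;

		dev_info(&pdev->dev, "calibration word: %#x\n", cal);
		return 0;
	}
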
diff --git a/drivers/nvmem/imx-iim.c b/drivers/nvmem/imx-iim.c
index 6651e4cdc002..34582293b985 100644
--- a/drivers/nvmem/imx-iim.c
+++ b/drivers/nvmem/imx-iim.c
@@ -104,7 +104,6 @@ static int imx_iim_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
 	struct device *dev = &pdev->dev;
-	struct resource *res;
 	struct iim_priv *iim;
 	struct nvmem_device *nvmem;
 	struct nvmem_config cfg = {};
@@ -114,8 +113,7 @@ static int imx_iim_probe(struct platform_device *pdev)
 	if (!iim)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	iim->base = devm_ioremap_resource(dev, res);
+	iim->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(iim->base))
 		return PTR_ERR(iim->base);
 
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 08a9b1ef8ae4..4cf7b61e4bf5 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -444,6 +444,12 @@ static const struct ocotp_params imx7ulp_params = {
 	.bank_address_words = 0,
 };
 
+static const struct ocotp_params imx8mq_params = {
+	.nregs = 256,
+	.bank_address_words = 4,
+	.set_timing = imx_ocotp_set_imx7_timing,
+};
+
 static const struct of_device_id imx_ocotp_dt_ids[] = {
 	{ .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
 	{ .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
@@ -453,6 +459,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
 	{ .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
 	{ .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params },
 	{ .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params },
+	{ .compatible = "fsl,imx8mq-ocotp", .data = &imx8mq_params },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
@@ -460,7 +467,6 @@ MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
 static int imx_ocotp_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct resource *res;
 	struct ocotp_priv *priv;
 	struct nvmem_device *nvmem;
 
@@ -470,8 +476,7 @@ static int imx_ocotp_probe(struct platform_device *pdev)
 
 	priv->dev = dev;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	priv->base = devm_ioremap_resource(dev, res);
+	priv->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(priv->base))
 		return PTR_ERR(priv->base);
 
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 53122f59c4b2..fbb7db6ee1f5 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -145,7 +145,6 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	const struct mxs_data *data;
 	struct mxs_ocotp *otp;
-	struct resource *res;
 	const struct of_device_id *match;
 	int ret;
 
@@ -157,8 +156,7 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
 	if (!otp)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	otp->base = devm_ioremap_resource(dev, res);
+	otp->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(otp->base))
 		return PTR_ERR(otp->base);
 
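
The three conversions above all apply the same helper, which folds the platform_get_resource()/devm_ioremap_resource() pair into one call with identical semantics (including the error pointer on a missing or unmappable resource). A hedged sketch with an invented probe name; the old form is kept in a comment, since mapping the same region twice in one probe would fail:

	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		/*
		 * Old two-step form replaced by this series:
		 *
		 *   res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		 *   base = devm_ioremap_resource(&pdev->dev, res);
		 */
		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		return 0;
	}
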
diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c
new file mode 100644
index 000000000000..6f303b91f6e7
--- /dev/null
+++ b/drivers/nvmem/nvmem-sysfs.c
@@ -0,0 +1,256 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2019, Linaro Limited
4 */
5#include "nvmem.h"
6
7static const char * const nvmem_type_str[] = {
8 [NVMEM_TYPE_UNKNOWN] = "Unknown",
9 [NVMEM_TYPE_EEPROM] = "EEPROM",
10 [NVMEM_TYPE_OTP] = "OTP",
11 [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
12};
13
14#ifdef CONFIG_DEBUG_LOCK_ALLOC
15static struct lock_class_key eeprom_lock_key;
16#endif
17
18static ssize_t type_show(struct device *dev,
19 struct device_attribute *attr, char *buf)
20{
21 struct nvmem_device *nvmem = to_nvmem_device(dev);
22
23 return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
24}
25
26static DEVICE_ATTR_RO(type);
27
28static struct attribute *nvmem_attrs[] = {
29 &dev_attr_type.attr,
30 NULL,
31};
32
33static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
34 struct bin_attribute *attr,
35 char *buf, loff_t pos, size_t count)
36{
37 struct device *dev;
38 struct nvmem_device *nvmem;
39 int rc;
40
41 if (attr->private)
42 dev = attr->private;
43 else
44 dev = container_of(kobj, struct device, kobj);
45 nvmem = to_nvmem_device(dev);
46
47 /* Stop the user from reading */
48 if (pos >= nvmem->size)
49 return 0;
50
51 if (count < nvmem->word_size)
52 return -EINVAL;
53
54 if (pos + count > nvmem->size)
55 count = nvmem->size - pos;
56
57 count = round_down(count, nvmem->word_size);
58
59 rc = nvmem->reg_read(nvmem->priv, pos, buf, count);
60
61 if (rc)
62 return rc;
63
64 return count;
65}
66
67static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
68 struct bin_attribute *attr,
69 char *buf, loff_t pos, size_t count)
70{
71 struct device *dev;
72 struct nvmem_device *nvmem;
73 int rc;
74
75 if (attr->private)
76 dev = attr->private;
77 else
78 dev = container_of(kobj, struct device, kobj);
79 nvmem = to_nvmem_device(dev);
80
81 /* Stop the user from writing */
82 if (pos >= nvmem->size)
83 return -EFBIG;
84
85 if (count < nvmem->word_size)
86 return -EINVAL;
87
88 if (pos + count > nvmem->size)
89 count = nvmem->size - pos;
90
91 count = round_down(count, nvmem->word_size);
92
93 rc = nvmem->reg_write(nvmem->priv, pos, buf, count);
94
95 if (rc)
96 return rc;
97
98 return count;
99}
100
101/* default read/write permissions */
102static struct bin_attribute bin_attr_rw_nvmem = {
103 .attr = {
104 .name = "nvmem",
105 .mode = 0644,
106 },
107 .read = bin_attr_nvmem_read,
108 .write = bin_attr_nvmem_write,
109};
110
111static struct bin_attribute *nvmem_bin_rw_attributes[] = {
112 &bin_attr_rw_nvmem,
113 NULL,
114};
115
116static const struct attribute_group nvmem_bin_rw_group = {
117 .bin_attrs = nvmem_bin_rw_attributes,
118 .attrs = nvmem_attrs,
119};
120
121static const struct attribute_group *nvmem_rw_dev_groups[] = {
122 &nvmem_bin_rw_group,
123 NULL,
124};
125
126/* read only permission */
127static struct bin_attribute bin_attr_ro_nvmem = {
128 .attr = {
129 .name = "nvmem",
130 .mode = 0444,
131 },
132 .read = bin_attr_nvmem_read,
133};
134
135static struct bin_attribute *nvmem_bin_ro_attributes[] = {
136 &bin_attr_ro_nvmem,
137 NULL,
138};
139
140static const struct attribute_group nvmem_bin_ro_group = {
141 .bin_attrs = nvmem_bin_ro_attributes,
142 .attrs = nvmem_attrs,
143};
144
145static const struct attribute_group *nvmem_ro_dev_groups[] = {
146 &nvmem_bin_ro_group,
147 NULL,
148};
149
150/* default read/write permissions, root only */
151static struct bin_attribute bin_attr_rw_root_nvmem = {
152 .attr = {
153 .name = "nvmem",
154 .mode = 0600,
155 },
156 .read = bin_attr_nvmem_read,
157 .write = bin_attr_nvmem_write,
158};
159
160static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
161 &bin_attr_rw_root_nvmem,
162 NULL,
163};
164
165static const struct attribute_group nvmem_bin_rw_root_group = {
166 .bin_attrs = nvmem_bin_rw_root_attributes,
167 .attrs = nvmem_attrs,
168};
169
170static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
171 &nvmem_bin_rw_root_group,
172 NULL,
173};
174
175/* read only permission, root only */
176static struct bin_attribute bin_attr_ro_root_nvmem = {
177 .attr = {
178 .name = "nvmem",
179 .mode = 0400,
180 },
181 .read = bin_attr_nvmem_read,
182};
183
184static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
185 &bin_attr_ro_root_nvmem,
186 NULL,
187};
188
189static const struct attribute_group nvmem_bin_ro_root_group = {
190 .bin_attrs = nvmem_bin_ro_root_attributes,
191 .attrs = nvmem_attrs,
192};
193
194static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
195 &nvmem_bin_ro_root_group,
196 NULL,
197};
198
199const struct attribute_group **nvmem_sysfs_get_groups(
200 struct nvmem_device *nvmem,
201 const struct nvmem_config *config)
202{
203 if (config->root_only)
204 return nvmem->read_only ?
205 nvmem_ro_root_dev_groups :
206 nvmem_rw_root_dev_groups;
207
208 return nvmem->read_only ? nvmem_ro_dev_groups : nvmem_rw_dev_groups;
209}
210
211/*
212 * nvmem_setup_compat() - Create an additional binary entry in
213 * drivers sys directory, to be backwards compatible with the older
214 * drivers/misc/eeprom drivers.
215 */
216int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
217 const struct nvmem_config *config)
218{
219 int rval;
220
221 if (!config->compat)
222 return 0;
223
224 if (!config->base_dev)
225 return -EINVAL;
226
227 if (nvmem->read_only)
228 nvmem->eeprom = bin_attr_ro_root_nvmem;
229 else
230 nvmem->eeprom = bin_attr_rw_root_nvmem;
231 nvmem->eeprom.attr.name = "eeprom";
232 nvmem->eeprom.size = nvmem->size;
233#ifdef CONFIG_DEBUG_LOCK_ALLOC
234 nvmem->eeprom.attr.key = &eeprom_lock_key;
235#endif
236 nvmem->eeprom.private = &nvmem->dev;
237 nvmem->base_dev = config->base_dev;
238
239 rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
240 if (rval) {
241 dev_err(&nvmem->dev,
242 "Failed to create eeprom binary file %d\n", rval);
243 return rval;
244 }
245
246 nvmem->flags |= FLAG_COMPAT;
247
248 return 0;
249}
250
251void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
252 const struct nvmem_config *config)
253{
254 if (config->compat)
255 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
256}
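
The four bin_attribute variants above simply map the (read_only, root_only) pair from nvmem_config onto sysfs modes 0644, 0444, 0600 and 0400, and nvmem_sysfs_get_groups() returns the matching group array. A minimal provider sketch follows, assuming hypothetical "acme-otp" names; nothing here besides the nvmem_config fields is from the patch:

#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>

static int acme_otp_reg_read(void *priv, unsigned int off, void *val,
			     size_t bytes)
{
	/* copy 'bytes' bytes starting at 'off' from the hardware into 'val' */
	return 0;
}

static int acme_otp_probe(struct platform_device *pdev)
{
	struct nvmem_config cfg = {
		.name = "acme-otp",
		.dev = &pdev->dev,
		.read_only = true,	/* picks the 0444/0400 attribute */
		.root_only = true,	/* picks the *_root_dev_groups */
		.compat = true,		/* also create the legacy "eeprom" file */
		.base_dev = &pdev->dev,
		.size = 128,
		.word_size = 1,
		.stride = 1,
		.reg_read = acme_otp_reg_read,
	};

	return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &cfg));
}

With both flags set, the "nvmem" file is created with mode 0400, and nvmem_sysfs_setup_compat() additionally creates an "eeprom" file because .compat and .base_dev are both set.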
diff --git a/drivers/nvmem/nvmem.h b/drivers/nvmem/nvmem.h
new file mode 100644
index 000000000000..eb8ed7121fa3
--- /dev/null
+++ b/drivers/nvmem/nvmem.h
@@ -0,0 +1,62 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef _DRIVERS_NVMEM_H
4#define _DRIVERS_NVMEM_H
5
6#include <linux/device.h>
7#include <linux/fs.h>
8#include <linux/kref.h>
9#include <linux/list.h>
10#include <linux/nvmem-consumer.h>
11#include <linux/nvmem-provider.h>
12
13struct nvmem_device {
14 struct module *owner;
15 struct device dev;
16 int stride;
17 int word_size;
18 int id;
19 struct kref refcnt;
20 size_t size;
21 bool read_only;
22 int flags;
23 enum nvmem_type type;
24 struct bin_attribute eeprom;
25 struct device *base_dev;
26 struct list_head cells;
27 nvmem_reg_read_t reg_read;
28 nvmem_reg_write_t reg_write;
29 void *priv;
30};
31
32#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
33#define FLAG_COMPAT BIT(0)
34
35#ifdef CONFIG_NVMEM_SYSFS
36const struct attribute_group **nvmem_sysfs_get_groups(
37 struct nvmem_device *nvmem,
38 const struct nvmem_config *config);
39int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
40 const struct nvmem_config *config);
41void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
42 const struct nvmem_config *config);
43#else
44static inline const struct attribute_group **nvmem_sysfs_get_groups(
45 struct nvmem_device *nvmem,
46 const struct nvmem_config *config)
47{
48 return NULL;
49}
50
51static inline int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
52 const struct nvmem_config *config)
53{
54 return -ENOSYS;
55}
56static inline void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
57 const struct nvmem_config *config)
58{
59}
60#endif /* CONFIG_NVMEM_SYSFS */
61
62#endif /* _DRIVERS_NVMEM_H */
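
The #else branch above is the usual kernel stub idiom: when CONFIG_NVMEM_SYSFS is off, static inline no-ops let the core call the sysfs helpers unconditionally, with no #ifdef at the call sites. In generic form (illustrative only, not part of the patch):

#ifdef CONFIG_FOO
int foo_setup(struct foo *f);	/* real implementation compiled in foo.c */
#else
static inline int foo_setup(struct foo *f)
{
	return 0;	/* compiles away entirely; callers stay #ifdef-free */
}
#endif

Note that the compat stub returns -ENOSYS rather than 0, signalling to any caller that the legacy "eeprom" file cannot be provided on a sysfs-less build.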
diff --git a/drivers/nvmem/stm32-romem.c b/drivers/nvmem/stm32-romem.c
new file mode 100644
index 000000000000..354be526897f
--- /dev/null
+++ b/drivers/nvmem/stm32-romem.c
@@ -0,0 +1,202 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * STM32 Factory-programmed memory read access driver
4 *
5 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
6 * Author: Fabrice Gasnier <fabrice.gasnier@st.com> for STMicroelectronics.
7 */
8
9#include <linux/arm-smccc.h>
10#include <linux/io.h>
11#include <linux/module.h>
12#include <linux/nvmem-provider.h>
13#include <linux/of_device.h>
14
15/* BSEC secure service access from non-secure */
16#define STM32_SMC_BSEC 0x82001003
17#define STM32_SMC_READ_SHADOW 0x01
18#define STM32_SMC_PROG_OTP 0x02
19#define STM32_SMC_WRITE_SHADOW 0x03
20#define STM32_SMC_READ_OTP 0x04
21
22/* shadow registers offset */
23#define STM32MP15_BSEC_DATA0 0x200
24
25/* 32 (x 32-bits) lower shadow registers */
26#define STM32MP15_BSEC_NUM_LOWER 32
27
28struct stm32_romem_cfg {
29 int size;
30};
31
32struct stm32_romem_priv {
33 void __iomem *base;
34 struct nvmem_config cfg;
35};
36
37static int stm32_romem_read(void *context, unsigned int offset, void *buf,
38 size_t bytes)
39{
40 struct stm32_romem_priv *priv = context;
41 u8 *buf8 = buf;
42 int i;
43
44 for (i = offset; i < offset + bytes; i++)
45 *buf8++ = readb_relaxed(priv->base + i);
46
47 return 0;
48}
49
50static int stm32_bsec_smc(u8 op, u32 otp, u32 data, u32 *result)
51{
52#if IS_ENABLED(CONFIG_HAVE_ARM_SMCCC)
53 struct arm_smccc_res res;
54
55 arm_smccc_smc(STM32_SMC_BSEC, op, otp, data, 0, 0, 0, 0, &res);
56 if (res.a0)
57 return -EIO;
58
59 if (result)
60 *result = (u32)res.a1;
61
62 return 0;
63#else
64 return -ENXIO;
65#endif
66}
67
68static int stm32_bsec_read(void *context, unsigned int offset, void *buf,
69 size_t bytes)
70{
71 struct stm32_romem_priv *priv = context;
72 struct device *dev = priv->cfg.dev;
73 u32 roffset, rbytes, val;
74 u8 *buf8 = buf, *val8 = (u8 *)&val;
75 int i, j = 0, ret, skip_bytes, size;
76
77 /* Round unaligned access to 32-bits */
78 roffset = rounddown(offset, 4);
79 skip_bytes = offset & 0x3;
80 rbytes = roundup(bytes + skip_bytes, 4);
81
82 if (roffset + rbytes > priv->cfg.size)
83 return -EINVAL;
84
85 for (i = roffset; (i < roffset + rbytes); i += 4) {
86 u32 otp = i >> 2;
87
88 if (otp < STM32MP15_BSEC_NUM_LOWER) {
89 /* read lower data from shadow registers */
90 val = readl_relaxed(
91 priv->base + STM32MP15_BSEC_DATA0 + i);
92 } else {
93 ret = stm32_bsec_smc(STM32_SMC_READ_SHADOW, otp, 0,
94 &val);
95 if (ret) {
96 dev_err(dev, "Can't read data%d (%d)\n", otp,
97 ret);
98 return ret;
99 }
100 }
101 /* skip first bytes in case of unaligned read */
102 if (skip_bytes)
103 size = min(bytes, (size_t)(4 - skip_bytes));
104 else
105 size = min(bytes, (size_t)4);
106 memcpy(&buf8[j], &val8[skip_bytes], size);
107 bytes -= size;
108 j += size;
109 skip_bytes = 0;
110 }
111
112 return 0;
113}
114
115static int stm32_bsec_write(void *context, unsigned int offset, void *buf,
116 size_t bytes)
117{
118 struct stm32_romem_priv *priv = context;
119 struct device *dev = priv->cfg.dev;
120 u32 *buf32 = buf;
121 int ret, i;
122
123 /* Allow only writing complete 32-bit aligned words */
124 if ((bytes % 4) || (offset % 4))
125 return -EINVAL;
126
127 for (i = offset; i < offset + bytes; i += 4) {
128 ret = stm32_bsec_smc(STM32_SMC_PROG_OTP, i >> 2, *buf32++,
129 NULL);
130 if (ret) {
131 dev_err(dev, "Can't write data%d (%d)\n", i >> 2, ret);
132 return ret;
133 }
134 }
135
136 return 0;
137}
138
139static int stm32_romem_probe(struct platform_device *pdev)
140{
141 const struct stm32_romem_cfg *cfg;
142 struct device *dev = &pdev->dev;
143 struct stm32_romem_priv *priv;
144 struct resource *res;
145
146 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
147 if (!priv)
148 return -ENOMEM;
149
150 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
151 priv->base = devm_ioremap_resource(dev, res);
152 if (IS_ERR(priv->base))
153 return PTR_ERR(priv->base);
154
155 priv->cfg.name = "stm32-romem";
156 priv->cfg.word_size = 1;
157 priv->cfg.stride = 1;
158 priv->cfg.dev = dev;
159 priv->cfg.priv = priv;
160 priv->cfg.owner = THIS_MODULE;
161
162 cfg = (const struct stm32_romem_cfg *)
163 of_match_device(dev->driver->of_match_table, dev)->data;
164 if (!cfg) {
165 priv->cfg.read_only = true;
166 priv->cfg.size = resource_size(res);
167 priv->cfg.reg_read = stm32_romem_read;
168 } else {
169 priv->cfg.size = cfg->size;
170 priv->cfg.reg_read = stm32_bsec_read;
171 priv->cfg.reg_write = stm32_bsec_write;
172 }
173
174 return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &priv->cfg));
175}
176
177static const struct stm32_romem_cfg stm32mp15_bsec_cfg = {
178 .size = 384, /* 96 x 32-bit data words */
179};
180
181static const struct of_device_id stm32_romem_of_match[] = {
182 { .compatible = "st,stm32f4-otp", }, {
183 .compatible = "st,stm32mp15-bsec",
184 .data = (void *)&stm32mp15_bsec_cfg,
185 }, {
186 },
187};
188MODULE_DEVICE_TABLE(of, stm32_romem_of_match);
189
190static struct platform_driver stm32_romem_driver = {
191 .probe = stm32_romem_probe,
192 .driver = {
193 .name = "stm32-romem",
194 .of_match_table = of_match_ptr(stm32_romem_of_match),
195 },
196};
197module_platform_driver(stm32_romem_driver);
198
199MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
200MODULE_DESCRIPTION("STMicroelectronics STM32 RO-MEM");
201MODULE_ALIAS("platform:nvmem-stm32-romem");
202MODULE_LICENSE("GPL v2");
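
stm32_bsec_read() widens an arbitrary byte range to whole 32-bit OTP words: rounddown() aligns the start, skip_bytes records how far into the first word the caller's offset falls, and roundup() stretches the length so the tail word is read in full. A standalone sketch of the same arithmetic, with the rounding macros restated since the kernel's rounddown()/roundup() are not available in userspace:

/* Standalone sketch of the alignment math in stm32_bsec_read(). */
#include <stdio.h>
#include <stddef.h>

#define ROUNDDOWN(x, y)	((x) - ((x) % (y)))
#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int offset = 6;	/* caller asks for 7 bytes at offset 6 */
	size_t bytes = 7;

	unsigned int roffset = ROUNDDOWN(offset, 4);	/* 4  */
	unsigned int skip = offset & 0x3;		/* 2  */
	unsigned int rbytes = ROUNDUP(bytes + skip, 4);	/* 12 */

	/* Words read: OTP indices roffset/4 .. (roffset + rbytes)/4 - 1 */
	printf("read words %u..%u, skip %u bytes of the first word\n",
	       roffset / 4, (roffset + rbytes) / 4 - 1, skip);
	return 0;
}

For offset 6 and 7 bytes this reads OTP words 1..3 and discards the first 2 bytes of word 1, exactly what the skip_bytes/memcpy logic in the driver does.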
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 570a2e354f30..a079a80ddf2c 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -1,18 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Allwinner sunXi SoCs Security ID support. 3 * Allwinner sunXi SoCs Security ID support.
3 * 4 *
4 * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl> 5 * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
5 * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com> 6 * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */ 7 */
17 8
18#include <linux/device.h> 9#include <linux/device.h>
@@ -35,13 +26,6 @@
35#define SUN8I_SID_OP_LOCK (0xAC << 8) 26#define SUN8I_SID_OP_LOCK (0xAC << 8)
36#define SUN8I_SID_READ BIT(1) 27#define SUN8I_SID_READ BIT(1)
37 28
38static struct nvmem_config econfig = {
39 .name = "sunxi-sid",
40 .read_only = true,
41 .stride = 4,
42 .word_size = 1,
43};
44
45struct sunxi_sid_cfg { 29struct sunxi_sid_cfg {
46 u32 value_offset; 30 u32 value_offset;
47 u32 size; 31 u32 size;
@@ -53,33 +37,12 @@ struct sunxi_sid {
53 u32 value_offset; 37 u32 value_offset;
54}; 38};
55 39
56/* We read the entire key, due to a 32 bit read alignment requirement. Since we
57 * want to return the requested byte, this results in somewhat slower code and
58 * uses 4 times more reads as needed but keeps code simpler. Since the SID is
59 * only very rarely probed, this is not really an issue.
60 */
61static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid,
62 const unsigned int offset)
63{
64 u32 sid_key;
65
66 sid_key = ioread32be(sid->base + round_down(offset, 4));
67 sid_key >>= (offset % 4) * 8;
68
69 return sid_key; /* Only return the last byte */
70}
71
72static int sunxi_sid_read(void *context, unsigned int offset, 40static int sunxi_sid_read(void *context, unsigned int offset,
73 void *val, size_t bytes) 41 void *val, size_t bytes)
74{ 42{
75 struct sunxi_sid *sid = context; 43 struct sunxi_sid *sid = context;
76 u8 *buf = val;
77 44
78 /* Offset the read operation to the real position of SID */ 45 memcpy_fromio(val, sid->base + sid->value_offset + offset, bytes);
79 offset += sid->value_offset;
80
81 while (bytes--)
82 *buf++ = sunxi_sid_read_byte(sid, offset++);
83 46
84 return 0; 47 return 0;
85} 48}
@@ -115,36 +78,34 @@ static int sun8i_sid_register_readout(const struct sunxi_sid *sid,
115 * to be not reliable at all. 78 * to be not reliable at all.
116 * Read by the registers instead. 79 * Read by the registers instead.
117 */ 80 */
118static int sun8i_sid_read_byte_by_reg(const struct sunxi_sid *sid,
119 const unsigned int offset,
120 u8 *out)
121{
122 u32 word;
123 int ret;
124
125 ret = sun8i_sid_register_readout(sid, offset & ~0x03, &word);
126
127 if (ret)
128 return ret;
129
130 *out = (word >> ((offset & 0x3) * 8)) & 0xff;
131
132 return 0;
133}
134
135static int sun8i_sid_read_by_reg(void *context, unsigned int offset, 81static int sun8i_sid_read_by_reg(void *context, unsigned int offset,
136 void *val, size_t bytes) 82 void *val, size_t bytes)
137{ 83{
138 struct sunxi_sid *sid = context; 84 struct sunxi_sid *sid = context;
139 u8 *buf = val; 85 u32 word;
140 int ret; 86 int ret;
141 87
142 while (bytes--) { 88 /* .stride = 4 so offset is guaranteed to be aligned */
143 ret = sun8i_sid_read_byte_by_reg(sid, offset++, buf++); 89 while (bytes >= 4) {
90 ret = sun8i_sid_register_readout(sid, offset, val);
144 if (ret) 91 if (ret)
145 return ret; 92 return ret;
93
94 val += 4;
95 offset += 4;
96 bytes -= 4;
146 } 97 }
147 98
99 if (!bytes)
100 return 0;
101
102 /* Handle any trailing bytes */
103 ret = sun8i_sid_register_readout(sid, offset, &word);
104 if (ret)
105 return ret;
106
107 memcpy(val, &word, bytes);
108
148 return 0; 109 return 0;
149} 110}
150 111
@@ -152,9 +113,10 @@ static int sunxi_sid_probe(struct platform_device *pdev)
152{ 113{
153 struct device *dev = &pdev->dev; 114 struct device *dev = &pdev->dev;
154 struct resource *res; 115 struct resource *res;
116 struct nvmem_config *nvmem_cfg;
155 struct nvmem_device *nvmem; 117 struct nvmem_device *nvmem;
156 struct sunxi_sid *sid; 118 struct sunxi_sid *sid;
157 int i, size; 119 int size;
158 char *randomness; 120 char *randomness;
159 const struct sunxi_sid_cfg *cfg; 121 const struct sunxi_sid_cfg *cfg;
160 122
@@ -174,14 +136,23 @@ static int sunxi_sid_probe(struct platform_device *pdev)
174 136
175 size = cfg->size; 137 size = cfg->size;
176 138
177 econfig.size = size; 139 nvmem_cfg = devm_kzalloc(dev, sizeof(*nvmem_cfg), GFP_KERNEL);
178 econfig.dev = dev; 140 if (!nvmem_cfg)
141 return -ENOMEM;
142
143 nvmem_cfg->dev = dev;
144 nvmem_cfg->name = "sunxi-sid";
145 nvmem_cfg->read_only = true;
146 nvmem_cfg->size = cfg->size;
147 nvmem_cfg->word_size = 1;
148 nvmem_cfg->stride = 4;
149 nvmem_cfg->priv = sid;
179 if (cfg->need_register_readout) 150 if (cfg->need_register_readout)
180 econfig.reg_read = sun8i_sid_read_by_reg; 151 nvmem_cfg->reg_read = sun8i_sid_read_by_reg;
181 else 152 else
182 econfig.reg_read = sunxi_sid_read; 153 nvmem_cfg->reg_read = sunxi_sid_read;
183 econfig.priv = sid; 154
184 nvmem = devm_nvmem_register(dev, &econfig); 155 nvmem = devm_nvmem_register(dev, nvmem_cfg);
185 if (IS_ERR(nvmem)) 156 if (IS_ERR(nvmem))
186 return PTR_ERR(nvmem); 157 return PTR_ERR(nvmem);
187 158
@@ -189,9 +160,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
189 if (!randomness) 160 if (!randomness)
190 return -ENOMEM; 161 return -ENOMEM;
191 162
192 for (i = 0; i < size; i++) 163 nvmem_cfg->reg_read(sid, 0, randomness, size);
193 econfig.reg_read(sid, i, &randomness[i], 1);
194
195 add_device_randomness(randomness, size); 164 add_device_randomness(randomness, size);
196 kfree(randomness); 165 kfree(randomness);
197 166
@@ -219,11 +188,19 @@ static const struct sunxi_sid_cfg sun50i_a64_cfg = {
219 .size = 0x100, 188 .size = 0x100,
220}; 189};
221 190
191static const struct sunxi_sid_cfg sun50i_h6_cfg = {
192 .value_offset = 0x200,
193 .size = 0x200,
194};
195
222static const struct of_device_id sunxi_sid_of_match[] = { 196static const struct of_device_id sunxi_sid_of_match[] = {
223 { .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg }, 197 { .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg },
224 { .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg }, 198 { .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
199 { .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg },
225 { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg }, 200 { .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
226 { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg }, 201 { .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg },
202 { .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg },
203 { .compatible = "allwinner,sun50i-h6-sid", .data = &sun50i_h6_cfg },
227 {/* sentinel */}, 204 {/* sentinel */},
228}; 205};
229MODULE_DEVICE_TABLE(of, sunxi_sid_of_match); 206MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
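
The rewritten sun8i_sid_read_by_reg() leans on .stride = 4 for an aligned start offset, copies whole words into the caller's buffer, and handles a 1-3 byte tail by reading one more word into a local and memcpy()ing only what was asked for. The same loop shape as a standalone sketch; read_word() stands in for sun8i_sid_register_readout() and is an assumption, not a real API:

#include <stdint.h>
#include <string.h>

/* Assumed stand-in for sun8i_sid_register_readout(). */
extern int read_word(unsigned int offset, uint32_t *out);

static int read_bytes(unsigned int offset, void *val, size_t bytes)
{
	uint32_t word;
	int ret;

	while (bytes >= 4) {			/* full 32-bit words */
		ret = read_word(offset, &word);
		if (ret)
			return ret;
		memcpy(val, &word, 4);
		val = (uint8_t *)val + 4;
		offset += 4;
		bytes -= 4;
	}

	if (!bytes)
		return 0;

	ret = read_word(offset, &word);		/* trailing 1..3 bytes */
	if (ret)
		return ret;
	memcpy(val, &word, bytes);
	return 0;
}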
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index f12b9da69255..90fb73575495 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -722,7 +722,7 @@ ssize_t parport_read (struct parport *port, void *buffer, size_t len)
722 if (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) { 722 if (parport_negotiate (port, IEEE1284_MODE_NIBBLE)) {
723 return -EIO; 723 return -EIO;
724 } 724 }
725 /* fall through to NIBBLE */ 725 /* fall through - to NIBBLE */
726 case IEEE1284_MODE_NIBBLE: 726 case IEEE1284_MODE_NIBBLE:
727 DPRINTK (KERN_DEBUG "%s: Using nibble mode\n", port->name); 727 DPRINTK (KERN_DEBUG "%s: Using nibble mode\n", port->name);
728 fn = port->ops->nibble_read_data; 728 fn = port->ops->nibble_read_data;
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index e9b52e4a4648..e77044c2bf62 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -158,8 +158,9 @@ static int parport_config(struct pcmcia_device *link)
158 return 0; 158 return 0;
159 159
160failed: 160failed:
161 parport_cs_release(link); 161 parport_cs_release(link);
162 return -ENODEV; 162 kfree(link->priv);
163 return -ENODEV;
163} /* parport_config */ 164} /* parport_config */
164 165
165static void parport_cs_release(struct pcmcia_device *link) 166static void parport_cs_release(struct pcmcia_device *link)
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 71f094c9ec68..f3585777324c 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1342,6 +1342,10 @@ static int of_qcom_slim_ngd_register(struct device *parent,
1342 return -ENOMEM; 1342 return -ENOMEM;
1343 1343
1344 ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id); 1344 ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
1345 if (!ngd->pdev) {
1346 kfree(ngd);
1347 return -ENOMEM;
1348 }
1345 ngd->id = id; 1349 ngd->id = id;
1346 ngd->pdev->dev.parent = parent; 1350 ngd->pdev->dev.parent = parent;
1347 ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME; 1351 ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index 19c8efb9a5ee..53b55b79c4af 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -4,7 +4,7 @@
4 4
5menuconfig SOUNDWIRE 5menuconfig SOUNDWIRE
6 bool "SoundWire support" 6 bool "SoundWire support"
7 ---help--- 7 help
8 SoundWire is a 2-Pin interface with data and clock line ratified 8 SoundWire is a 2-Pin interface with data and clock line ratified
9 by the MIPI Alliance. SoundWire is used for transporting data 9 by the MIPI Alliance. SoundWire is used for transporting data
10 typically related to audio functions. SoundWire interface is 10 typically related to audio functions. SoundWire interface is
@@ -28,7 +28,7 @@ config SOUNDWIRE_INTEL
28 select SOUNDWIRE_CADENCE 28 select SOUNDWIRE_CADENCE
29 select SOUNDWIRE_BUS 29 select SOUNDWIRE_BUS
30 depends on X86 && ACPI && SND_SOC 30 depends on X86 && ACPI && SND_SOC
31 ---help--- 31 help
32 SoundWire Intel Master driver. 32 SoundWire Intel Master driver.
33 If you have an Intel platform which has a SoundWire Master then 33 If you have an Intel platform which has a SoundWire Master then
34 enable this config option to get the SoundWire support for that 34 enable this config option to get the SoundWire support for that
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 1cbfedfc20ef..aac35fc3cf22 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -21,12 +21,12 @@ int sdw_add_bus_master(struct sdw_bus *bus)
21 int ret; 21 int ret;
22 22
23 if (!bus->dev) { 23 if (!bus->dev) {
24 pr_err("SoundWire bus has no device"); 24 pr_err("SoundWire bus has no device\n");
25 return -ENODEV; 25 return -ENODEV;
26 } 26 }
27 27
28 if (!bus->ops) { 28 if (!bus->ops) {
29 dev_err(bus->dev, "SoundWire Bus ops are not set"); 29 dev_err(bus->dev, "SoundWire Bus ops are not set\n");
30 return -EINVAL; 30 return -EINVAL;
31 } 31 }
32 32
@@ -43,13 +43,14 @@ int sdw_add_bus_master(struct sdw_bus *bus)
43 if (bus->ops->read_prop) { 43 if (bus->ops->read_prop) {
44 ret = bus->ops->read_prop(bus); 44 ret = bus->ops->read_prop(bus);
45 if (ret < 0) { 45 if (ret < 0) {
46 dev_err(bus->dev, "Bus read properties failed:%d", ret); 46 dev_err(bus->dev,
47 "Bus read properties failed:%d\n", ret);
47 return ret; 48 return ret;
48 } 49 }
49 } 50 }
50 51
51 /* 52 /*
52 * Device numbers in SoundWire are 0 thru 15. Enumeration device 53 * Device numbers in SoundWire are 0 through 15. Enumeration device
53 * number (0), Broadcast device number (15), Group numbers (12 and 54 * number (0), Broadcast device number (15), Group numbers (12 and
54 * 13) and Master device number (14) are not used for assignment so 55 * 13) and Master device number (14) are not used for assignment so
55 * mask these and other higher bits. 56 * mask these and other higher bits.
@@ -172,7 +173,8 @@ static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
172} 173}
173 174
174static inline int do_transfer_defer(struct sdw_bus *bus, 175static inline int do_transfer_defer(struct sdw_bus *bus,
175 struct sdw_msg *msg, struct sdw_defer *defer) 176 struct sdw_msg *msg,
177 struct sdw_defer *defer)
176{ 178{
177 int retry = bus->prop.err_threshold; 179 int retry = bus->prop.err_threshold;
178 enum sdw_command_response resp; 180 enum sdw_command_response resp;
@@ -224,7 +226,7 @@ int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
224 ret = do_transfer(bus, msg); 226 ret = do_transfer(bus, msg);
225 if (ret != 0 && ret != -ENODATA) 227 if (ret != 0 && ret != -ENODATA)
226 dev_err(bus->dev, "trf on Slave %d failed:%d\n", 228 dev_err(bus->dev, "trf on Slave %d failed:%d\n",
227 msg->dev_num, ret); 229 msg->dev_num, ret);
228 230
229 if (msg->page) 231 if (msg->page)
230 sdw_reset_page(bus, msg->dev_num); 232 sdw_reset_page(bus, msg->dev_num);
@@ -243,7 +245,7 @@ int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
243 * Caller needs to hold the msg_lock lock while calling this 245 * Caller needs to hold the msg_lock lock while calling this
244 */ 246 */
245int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg, 247int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
246 struct sdw_defer *defer) 248 struct sdw_defer *defer)
247{ 249{
248 int ret; 250 int ret;
249 251
@@ -253,7 +255,7 @@ int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
253 ret = do_transfer_defer(bus, msg, defer); 255 ret = do_transfer_defer(bus, msg, defer);
254 if (ret != 0 && ret != -ENODATA) 256 if (ret != 0 && ret != -ENODATA)
255 dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n", 257 dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
256 msg->dev_num, ret); 258 msg->dev_num, ret);
257 259
258 if (msg->page) 260 if (msg->page)
259 sdw_reset_page(bus, msg->dev_num); 261 sdw_reset_page(bus, msg->dev_num);
@@ -261,9 +263,8 @@ int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
261 return ret; 263 return ret;
262} 264}
263 265
264
265int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave, 266int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
266 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf) 267 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
267{ 268{
268 memset(msg, 0, sizeof(*msg)); 269 memset(msg, 0, sizeof(*msg));
269 msg->addr = addr; /* addr is 16 bit and truncated here */ 270 msg->addr = addr; /* addr is 16 bit and truncated here */
@@ -271,8 +272,6 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
271 msg->dev_num = dev_num; 272 msg->dev_num = dev_num;
272 msg->flags = flags; 273 msg->flags = flags;
273 msg->buf = buf; 274 msg->buf = buf;
274 msg->ssp_sync = false;
275 msg->page = false;
276 275
277 if (addr < SDW_REG_NO_PAGE) { /* no paging area */ 276 if (addr < SDW_REG_NO_PAGE) { /* no paging area */
278 return 0; 277 return 0;
@@ -284,7 +283,7 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
284 if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */ 283 if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
285 if (slave && !slave->prop.paging_support) 284 if (slave && !slave->prop.paging_support)
286 return 0; 285 return 0;
287 /* no need for else as that will fall thru to paging */ 286 /* no need for else as that will fall-through to paging */
288 } 287 }
289 288
290 /* paging mandatory */ 289 /* paging mandatory */
@@ -298,7 +297,7 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
298 return -EINVAL; 297 return -EINVAL;
299 } else if (!slave->prop.paging_support) { 298 } else if (!slave->prop.paging_support) {
300 dev_err(&slave->dev, 299 dev_err(&slave->dev,
301 "address %x needs paging but no support", addr); 300 "address %x needs paging but no support\n", addr);
302 return -EINVAL; 301 return -EINVAL;
303 } 302 }
304 303
@@ -323,7 +322,7 @@ int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
323 int ret; 322 int ret;
324 323
325 ret = sdw_fill_msg(&msg, slave, addr, count, 324 ret = sdw_fill_msg(&msg, slave, addr, count,
326 slave->dev_num, SDW_MSG_FLAG_READ, val); 325 slave->dev_num, SDW_MSG_FLAG_READ, val);
327 if (ret < 0) 326 if (ret < 0)
328 return ret; 327 return ret;
329 328
@@ -351,7 +350,7 @@ int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
351 int ret; 350 int ret;
352 351
353 ret = sdw_fill_msg(&msg, slave, addr, count, 352 ret = sdw_fill_msg(&msg, slave, addr, count,
354 slave->dev_num, SDW_MSG_FLAG_WRITE, val); 353 slave->dev_num, SDW_MSG_FLAG_WRITE, val);
355 if (ret < 0) 354 if (ret < 0)
356 return ret; 355 return ret;
357 356
@@ -393,7 +392,6 @@ EXPORT_SYMBOL(sdw_read);
393int sdw_write(struct sdw_slave *slave, u32 addr, u8 value) 392int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
394{ 393{
395 return sdw_nwrite(slave, addr, 1, &value); 394 return sdw_nwrite(slave, addr, 1, &value);
396
397} 395}
398EXPORT_SYMBOL(sdw_write); 396EXPORT_SYMBOL(sdw_write);
399 397
@@ -416,11 +414,10 @@ static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
416 414
417static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id) 415static int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
418{ 416{
419 417 if (slave->id.unique_id != id.unique_id ||
420 if ((slave->id.unique_id != id.unique_id) || 418 slave->id.mfg_id != id.mfg_id ||
421 (slave->id.mfg_id != id.mfg_id) || 419 slave->id.part_id != id.part_id ||
422 (slave->id.part_id != id.part_id) || 420 slave->id.class_id != id.class_id)
423 (slave->id.class_id != id.class_id))
424 return -ENODEV; 421 return -ENODEV;
425 422
426 return 0; 423 return 0;
@@ -457,24 +454,23 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
457 dev_num = sdw_get_device_num(slave); 454 dev_num = sdw_get_device_num(slave);
458 mutex_unlock(&slave->bus->bus_lock); 455 mutex_unlock(&slave->bus->bus_lock);
459 if (dev_num < 0) { 456 if (dev_num < 0) {
460 dev_err(slave->bus->dev, "Get dev_num failed: %d", 457 dev_err(slave->bus->dev, "Get dev_num failed: %d\n",
461 dev_num); 458 dev_num);
462 return dev_num; 459 return dev_num;
463 } 460 }
464 } else { 461 } else {
465 dev_info(slave->bus->dev, 462 dev_info(slave->bus->dev,
466 "Slave already registered dev_num:%d", 463 "Slave already registered dev_num:%d\n",
467 slave->dev_num); 464 slave->dev_num);
468 465
469 /* Clear the slave->dev_num to transfer message on device 0 */ 466 /* Clear the slave->dev_num to transfer message on device 0 */
470 dev_num = slave->dev_num; 467 dev_num = slave->dev_num;
471 slave->dev_num = 0; 468 slave->dev_num = 0;
472
473 } 469 }
474 470
475 ret = sdw_write(slave, SDW_SCP_DEVNUMBER, dev_num); 471 ret = sdw_write(slave, SDW_SCP_DEVNUMBER, dev_num);
476 if (ret < 0) { 472 if (ret < 0) {
477 dev_err(&slave->dev, "Program device_num failed: %d", ret); 473 dev_err(&slave->dev, "Program device_num failed: %d\n", ret);
478 return ret; 474 return ret;
479 } 475 }
480 476
@@ -485,9 +481,9 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
485} 481}
486 482
487void sdw_extract_slave_id(struct sdw_bus *bus, 483void sdw_extract_slave_id(struct sdw_bus *bus,
488 u64 addr, struct sdw_slave_id *id) 484 u64 addr, struct sdw_slave_id *id)
489{ 485{
490 dev_dbg(bus->dev, "SDW Slave Addr: %llx", addr); 486 dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);
491 487
492 /* 488 /*
493 * Spec definition 489 * Spec definition
@@ -507,10 +503,9 @@ void sdw_extract_slave_id(struct sdw_bus *bus,
507 id->class_id = addr & GENMASK(7, 0); 503 id->class_id = addr & GENMASK(7, 0);
508 504
509 dev_dbg(bus->dev, 505 dev_dbg(bus->dev,
510 "SDW Slave class_id %x, part_id %x, mfg_id %x, unique_id %x, version %x", 506 "SDW Slave class_id %x, part_id %x, mfg_id %x, unique_id %x, version %x\n",
511 id->class_id, id->part_id, id->mfg_id, 507 id->class_id, id->part_id, id->mfg_id,
512 id->unique_id, id->sdw_version); 508 id->unique_id, id->sdw_version);
513
514} 509}
515 510
516static int sdw_program_device_num(struct sdw_bus *bus) 511static int sdw_program_device_num(struct sdw_bus *bus)
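
Only the class_id line of sdw_extract_slave_id() is visible in this hunk; per the "Spec definition" comment, the 48-bit DevId packs version, unique ID, manufacturer ID, part ID and class ID from high bits to low. Restated as a sketch, with the shift/width values for the upper fields inferred rather than shown in the diff:

#include <linux/bits.h>
#include <linux/types.h>

static void extract_devid_sketch(u64 addr, struct sdw_slave_id *id)
{
	id->sdw_version = (addr >> 44) & GENMASK(3, 0);  /* 4-bit version  */
	id->unique_id   = (addr >> 40) & GENMASK(3, 0);  /* 4-bit instance */
	id->mfg_id      = (addr >> 24) & GENMASK(15, 0); /* 16-bit mfg ID  */
	id->part_id     = (addr >> 8)  & GENMASK(15, 0); /* 16-bit part ID */
	id->class_id    = addr         & GENMASK(7, 0);  /* 8-bit class ID */
}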
@@ -525,7 +520,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
525 520
526 /* No Slave, so use raw xfer api */ 521 /* No Slave, so use raw xfer api */
527 ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0, 522 ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
528 SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf); 523 SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
529 if (ret < 0) 524 if (ret < 0)
530 return ret; 525 return ret;
531 526
@@ -564,7 +559,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
564 ret = sdw_assign_device_num(slave); 559 ret = sdw_assign_device_num(slave);
565 if (ret) { 560 if (ret) {
566 dev_err(slave->bus->dev, 561 dev_err(slave->bus->dev,
567 "Assign dev_num failed:%d", 562 "Assign dev_num failed:%d\n",
568 ret); 563 ret);
569 return ret; 564 return ret;
570 } 565 }
@@ -573,9 +568,9 @@ static int sdw_program_device_num(struct sdw_bus *bus)
573 } 568 }
574 } 569 }
575 570
576 if (found == false) { 571 if (!found) {
577 /* TODO: Park this device in Group 13 */ 572 /* TODO: Park this device in Group 13 */
578 dev_err(bus->dev, "Slave Entry not found"); 573 dev_err(bus->dev, "Slave Entry not found\n");
579 } 574 }
580 575
581 count++; 576 count++;
@@ -592,7 +587,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
592} 587}
593 588
594static void sdw_modify_slave_status(struct sdw_slave *slave, 589static void sdw_modify_slave_status(struct sdw_slave *slave,
595 enum sdw_slave_status status) 590 enum sdw_slave_status status)
596{ 591{
597 mutex_lock(&slave->bus->bus_lock); 592 mutex_lock(&slave->bus->bus_lock);
598 slave->status = status; 593 slave->status = status;
@@ -600,7 +595,7 @@ static void sdw_modify_slave_status(struct sdw_slave *slave,
600} 595}
601 596
602int sdw_configure_dpn_intr(struct sdw_slave *slave, 597int sdw_configure_dpn_intr(struct sdw_slave *slave,
603 int port, bool enable, int mask) 598 int port, bool enable, int mask)
604{ 599{
605 u32 addr; 600 u32 addr;
606 int ret; 601 int ret;
@@ -620,7 +615,7 @@ int sdw_configure_dpn_intr(struct sdw_slave *slave,
620 ret = sdw_update(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val); 615 ret = sdw_update(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
621 if (ret < 0) 616 if (ret < 0)
622 dev_err(slave->bus->dev, 617 dev_err(slave->bus->dev,
623 "SDW_DPN_INTMASK write failed:%d", val); 618 "SDW_DPN_INTMASK write failed:%d\n", val);
624 619
625 return ret; 620 return ret;
626} 621}
@@ -644,7 +639,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
644 ret = sdw_update(slave, SDW_SCP_INTMASK1, val, val); 639 ret = sdw_update(slave, SDW_SCP_INTMASK1, val, val);
645 if (ret < 0) { 640 if (ret < 0) {
646 dev_err(slave->bus->dev, 641 dev_err(slave->bus->dev,
647 "SDW_SCP_INTMASK1 write failed:%d", ret); 642 "SDW_SCP_INTMASK1 write failed:%d\n", ret);
648 return ret; 643 return ret;
649 } 644 }
650 645
@@ -659,7 +654,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave)
659 ret = sdw_update(slave, SDW_DP0_INTMASK, val, val); 654 ret = sdw_update(slave, SDW_DP0_INTMASK, val, val);
660 if (ret < 0) { 655 if (ret < 0) {
661 dev_err(slave->bus->dev, 656 dev_err(slave->bus->dev,
662 "SDW_DP0_INTMASK read failed:%d", ret); 657 "SDW_DP0_INTMASK read failed:%d\n", ret);
663 return val; 658 return val;
664 } 659 }
665 660
@@ -674,14 +669,13 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
674 status = sdw_read(slave, SDW_DP0_INT); 669 status = sdw_read(slave, SDW_DP0_INT);
675 if (status < 0) { 670 if (status < 0) {
676 dev_err(slave->bus->dev, 671 dev_err(slave->bus->dev,
677 "SDW_DP0_INT read failed:%d", status); 672 "SDW_DP0_INT read failed:%d\n", status);
678 return status; 673 return status;
679 } 674 }
680 675
681 do { 676 do {
682
683 if (status & SDW_DP0_INT_TEST_FAIL) { 677 if (status & SDW_DP0_INT_TEST_FAIL) {
684 dev_err(&slave->dev, "Test fail for port 0"); 678 dev_err(&slave->dev, "Test fail for port 0\n");
685 clear |= SDW_DP0_INT_TEST_FAIL; 679 clear |= SDW_DP0_INT_TEST_FAIL;
686 } 680 }
687 681
@@ -696,7 +690,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
696 } 690 }
697 691
698 if (status & SDW_DP0_INT_BRA_FAILURE) { 692 if (status & SDW_DP0_INT_BRA_FAILURE) {
699 dev_err(&slave->dev, "BRA failed"); 693 dev_err(&slave->dev, "BRA failed\n");
700 clear |= SDW_DP0_INT_BRA_FAILURE; 694 clear |= SDW_DP0_INT_BRA_FAILURE;
701 } 695 }
702 696
@@ -712,7 +706,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
712 ret = sdw_write(slave, SDW_DP0_INT, clear); 706 ret = sdw_write(slave, SDW_DP0_INT, clear);
713 if (ret < 0) { 707 if (ret < 0) {
714 dev_err(slave->bus->dev, 708 dev_err(slave->bus->dev,
715 "SDW_DP0_INT write failed:%d", ret); 709 "SDW_DP0_INT write failed:%d\n", ret);
716 return ret; 710 return ret;
717 } 711 }
718 712
@@ -720,7 +714,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
720 status2 = sdw_read(slave, SDW_DP0_INT); 714 status2 = sdw_read(slave, SDW_DP0_INT);
721 if (status2 < 0) { 715 if (status2 < 0) {
722 dev_err(slave->bus->dev, 716 dev_err(slave->bus->dev,
723 "SDW_DP0_INT read failed:%d", status2); 717 "SDW_DP0_INT read failed:%d\n", status2);
724 return status2; 718 return status2;
725 } 719 }
726 status &= status2; 720 status &= status2;
@@ -731,13 +725,13 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
731 } while (status != 0 && count < SDW_READ_INTR_CLEAR_RETRY); 725 } while (status != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
732 726
733 if (count == SDW_READ_INTR_CLEAR_RETRY) 727 if (count == SDW_READ_INTR_CLEAR_RETRY)
734 dev_warn(slave->bus->dev, "Reached MAX_RETRY on DP0 read"); 728 dev_warn(slave->bus->dev, "Reached MAX_RETRY on DP0 read\n");
735 729
736 return ret; 730 return ret;
737} 731}
738 732
739static int sdw_handle_port_interrupt(struct sdw_slave *slave, 733static int sdw_handle_port_interrupt(struct sdw_slave *slave,
740 int port, u8 *slave_status) 734 int port, u8 *slave_status)
741{ 735{
742 u8 clear = 0, impl_int_mask; 736 u8 clear = 0, impl_int_mask;
743 int status, status2, ret, count = 0; 737 int status, status2, ret, count = 0;
@@ -750,15 +744,14 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
750 status = sdw_read(slave, addr); 744 status = sdw_read(slave, addr);
751 if (status < 0) { 745 if (status < 0) {
752 dev_err(slave->bus->dev, 746 dev_err(slave->bus->dev,
753 "SDW_DPN_INT read failed:%d", status); 747 "SDW_DPN_INT read failed:%d\n", status);
754 748
755 return status; 749 return status;
756 } 750 }
757 751
758 do { 752 do {
759
760 if (status & SDW_DPN_INT_TEST_FAIL) { 753 if (status & SDW_DPN_INT_TEST_FAIL) {
761 dev_err(&slave->dev, "Test fail for port:%d", port); 754 dev_err(&slave->dev, "Test fail for port:%d\n", port);
762 clear |= SDW_DPN_INT_TEST_FAIL; 755 clear |= SDW_DPN_INT_TEST_FAIL;
763 } 756 }
764 757
@@ -774,7 +767,6 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
774 impl_int_mask = SDW_DPN_INT_IMPDEF1 | 767 impl_int_mask = SDW_DPN_INT_IMPDEF1 |
775 SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3; 768 SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;
776 769
777
778 if (status & impl_int_mask) { 770 if (status & impl_int_mask) {
779 clear |= impl_int_mask; 771 clear |= impl_int_mask;
780 *slave_status = clear; 772 *slave_status = clear;
@@ -784,7 +776,7 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
784 ret = sdw_write(slave, addr, clear); 776 ret = sdw_write(slave, addr, clear);
785 if (ret < 0) { 777 if (ret < 0) {
786 dev_err(slave->bus->dev, 778 dev_err(slave->bus->dev,
787 "SDW_DPN_INT write failed:%d", ret); 779 "SDW_DPN_INT write failed:%d\n", ret);
788 return ret; 780 return ret;
789 } 781 }
790 782
@@ -792,7 +784,7 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave,
792 status2 = sdw_read(slave, addr); 784 status2 = sdw_read(slave, addr);
793 if (status2 < 0) { 785 if (status2 < 0) {
794 dev_err(slave->bus->dev, 786 dev_err(slave->bus->dev,
795 "SDW_DPN_INT read failed:%d", status2); 787 "SDW_DPN_INT read failed:%d\n", status2);
796 return status2; 788 return status2;
797 } 789 }
798 status &= status2; 790 status &= status2;
@@ -820,17 +812,18 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
820 sdw_modify_slave_status(slave, SDW_SLAVE_ALERT); 812 sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
821 813
822 /* Read Instat 1, Instat 2 and Instat 3 registers */ 814 /* Read Instat 1, Instat 2 and Instat 3 registers */
823 buf = ret = sdw_read(slave, SDW_SCP_INT1); 815 ret = sdw_read(slave, SDW_SCP_INT1);
824 if (ret < 0) { 816 if (ret < 0) {
825 dev_err(slave->bus->dev, 817 dev_err(slave->bus->dev,
826 "SDW_SCP_INT1 read failed:%d", ret); 818 "SDW_SCP_INT1 read failed:%d\n", ret);
827 return ret; 819 return ret;
828 } 820 }
821 buf = ret;
829 822
830 ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, buf2); 823 ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, buf2);
831 if (ret < 0) { 824 if (ret < 0) {
832 dev_err(slave->bus->dev, 825 dev_err(slave->bus->dev,
833 "SDW_SCP_INT2/3 read failed:%d", ret); 826 "SDW_SCP_INT2/3 read failed:%d\n", ret);
834 return ret; 827 return ret;
835 } 828 }
836 829
@@ -840,12 +833,12 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
840 * interrupt 833 * interrupt
841 */ 834 */
842 if (buf & SDW_SCP_INT1_PARITY) { 835 if (buf & SDW_SCP_INT1_PARITY) {
843 dev_err(&slave->dev, "Parity error detected"); 836 dev_err(&slave->dev, "Parity error detected\n");
844 clear |= SDW_SCP_INT1_PARITY; 837 clear |= SDW_SCP_INT1_PARITY;
845 } 838 }
846 839
847 if (buf & SDW_SCP_INT1_BUS_CLASH) { 840 if (buf & SDW_SCP_INT1_BUS_CLASH) {
848 dev_err(&slave->dev, "Bus clash error detected"); 841 dev_err(&slave->dev, "Bus clash error detected\n");
849 clear |= SDW_SCP_INT1_BUS_CLASH; 842 clear |= SDW_SCP_INT1_BUS_CLASH;
850 } 843 }
851 844
@@ -869,8 +862,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
869 port = port >> SDW_REG_SHIFT(SDW_SCP_INT1_PORT0_3); 862 port = port >> SDW_REG_SHIFT(SDW_SCP_INT1_PORT0_3);
870 for_each_set_bit(bit, &port, 8) { 863 for_each_set_bit(bit, &port, 8) {
871 sdw_handle_port_interrupt(slave, bit, 864 sdw_handle_port_interrupt(slave, bit,
872 &port_status[bit]); 865 &port_status[bit]);
873
874 } 866 }
875 867
876 /* Check if cascade 2 interrupt is present */ 868 /* Check if cascade 2 interrupt is present */
@@ -898,11 +890,11 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
898 } 890 }
899 891
900 /* Update the Slave driver */ 892 /* Update the Slave driver */
901 if (slave_notify && (slave->ops) && 893 if (slave_notify && slave->ops &&
902 (slave->ops->interrupt_callback)) { 894 slave->ops->interrupt_callback) {
903 slave_intr.control_port = clear; 895 slave_intr.control_port = clear;
904 memcpy(slave_intr.port, &port_status, 896 memcpy(slave_intr.port, &port_status,
905 sizeof(slave_intr.port)); 897 sizeof(slave_intr.port));
906 898
907 slave->ops->interrupt_callback(slave, &slave_intr); 899 slave->ops->interrupt_callback(slave, &slave_intr);
908 } 900 }
@@ -911,7 +903,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
911 ret = sdw_write(slave, SDW_SCP_INT1, clear); 903 ret = sdw_write(slave, SDW_SCP_INT1, clear);
912 if (ret < 0) { 904 if (ret < 0) {
913 dev_err(slave->bus->dev, 905 dev_err(slave->bus->dev,
914 "SDW_SCP_INT1 write failed:%d", ret); 906 "SDW_SCP_INT1 write failed:%d\n", ret);
915 return ret; 907 return ret;
916 } 908 }
917 909
@@ -919,17 +911,18 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
919 * Read status again to ensure no new interrupts arrived 911 * Read status again to ensure no new interrupts arrived
920 * while servicing interrupts. 912 * while servicing interrupts.
921 */ 913 */
922 _buf = ret = sdw_read(slave, SDW_SCP_INT1); 914 ret = sdw_read(slave, SDW_SCP_INT1);
923 if (ret < 0) { 915 if (ret < 0) {
924 dev_err(slave->bus->dev, 916 dev_err(slave->bus->dev,
925 "SDW_SCP_INT1 read failed:%d", ret); 917 "SDW_SCP_INT1 read failed:%d\n", ret);
926 return ret; 918 return ret;
927 } 919 }
920 _buf = ret;
928 921
929 ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, _buf2); 922 ret = sdw_nread(slave, SDW_SCP_INTSTAT2, 2, _buf2);
930 if (ret < 0) { 923 if (ret < 0) {
931 dev_err(slave->bus->dev, 924 dev_err(slave->bus->dev,
932 "SDW_SCP_INT2/3 read failed:%d", ret); 925 "SDW_SCP_INT2/3 read failed:%d\n", ret);
933 return ret; 926 return ret;
934 } 927 }
935 928
@@ -949,15 +942,15 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
949 } while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY); 942 } while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
950 943
951 if (count == SDW_READ_INTR_CLEAR_RETRY) 944 if (count == SDW_READ_INTR_CLEAR_RETRY)
952 dev_warn(slave->bus->dev, "Reached MAX_RETRY on alert read"); 945 dev_warn(slave->bus->dev, "Reached MAX_RETRY on alert read\n");
953 946
954 return ret; 947 return ret;
955} 948}
956 949
957static int sdw_update_slave_status(struct sdw_slave *slave, 950static int sdw_update_slave_status(struct sdw_slave *slave,
958 enum sdw_slave_status status) 951 enum sdw_slave_status status)
959{ 952{
960 if ((slave->ops) && (slave->ops->update_status)) 953 if (slave->ops && slave->ops->update_status)
961 return slave->ops->update_status(slave, status); 954 return slave->ops->update_status(slave, status);
962 955
963 return 0; 956 return 0;
@@ -969,7 +962,7 @@ static int sdw_update_slave_status(struct sdw_slave *slave,
969 * @status: Status for all Slave(s) 962 * @status: Status for all Slave(s)
970 */ 963 */
971int sdw_handle_slave_status(struct sdw_bus *bus, 964int sdw_handle_slave_status(struct sdw_bus *bus,
972 enum sdw_slave_status status[]) 965 enum sdw_slave_status status[])
973{ 966{
974 enum sdw_slave_status prev_status; 967 enum sdw_slave_status prev_status;
975 struct sdw_slave *slave; 968 struct sdw_slave *slave;
@@ -978,7 +971,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
978 if (status[0] == SDW_SLAVE_ATTACHED) { 971 if (status[0] == SDW_SLAVE_ATTACHED) {
979 ret = sdw_program_device_num(bus); 972 ret = sdw_program_device_num(bus);
980 if (ret) 973 if (ret)
981 dev_err(bus->dev, "Slave attach failed: %d", ret); 974 dev_err(bus->dev, "Slave attach failed: %d\n", ret);
982 } 975 }
983 976
984 /* Continue to check other slave statuses */ 977 /* Continue to check other slave statuses */
@@ -1006,7 +999,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
1006 ret = sdw_handle_slave_alerts(slave); 999 ret = sdw_handle_slave_alerts(slave);
1007 if (ret) 1000 if (ret)
1008 dev_err(bus->dev, 1001 dev_err(bus->dev,
1009 "Slave %d alert handling failed: %d", 1002 "Slave %d alert handling failed: %d\n",
1010 i, ret); 1003 i, ret);
1011 break; 1004 break;
1012 1005
@@ -1023,22 +1016,21 @@ int sdw_handle_slave_status(struct sdw_bus *bus,
1023 ret = sdw_initialize_slave(slave); 1016 ret = sdw_initialize_slave(slave);
1024 if (ret) 1017 if (ret)
1025 dev_err(bus->dev, 1018 dev_err(bus->dev,
1026 "Slave %d initialization failed: %d", 1019 "Slave %d initialization failed: %d\n",
1027 i, ret); 1020 i, ret);
1028 1021
1029 break; 1022 break;
1030 1023
1031 default: 1024 default:
1032 dev_err(bus->dev, "Invalid slave %d status:%d", 1025 dev_err(bus->dev, "Invalid slave %d status:%d\n",
1033 i, status[i]); 1026 i, status[i]);
1034 break; 1027 break;
1035 } 1028 }
1036 1029
1037 ret = sdw_update_slave_status(slave, status[i]); 1030 ret = sdw_update_slave_status(slave, status[i]);
1038 if (ret) 1031 if (ret)
1039 dev_err(slave->bus->dev, 1032 dev_err(slave->bus->dev,
1040 "Update Slave status failed:%d", ret); 1033 "Update Slave status failed:%d\n", ret);
1041
1042 } 1034 }
1043 1035
1044 return ret; 1036 return ret;
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index c77de05b8100..3048ca153f22 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2// Copyright(c) 2015-17 Intel Corporation. 2/* Copyright(c) 2015-17 Intel Corporation. */
3 3
4#ifndef __SDW_BUS_H 4#ifndef __SDW_BUS_H
5#define __SDW_BUS_H 5#define __SDW_BUS_H
@@ -16,7 +16,7 @@ static inline int sdw_acpi_find_slaves(struct sdw_bus *bus)
16#endif 16#endif
17 17
18void sdw_extract_slave_id(struct sdw_bus *bus, 18void sdw_extract_slave_id(struct sdw_bus *bus,
19 u64 addr, struct sdw_slave_id *id); 19 u64 addr, struct sdw_slave_id *id);
20 20
21enum { 21enum {
22 SDW_MSG_FLAG_READ = 0, 22 SDW_MSG_FLAG_READ = 0,
@@ -116,19 +116,19 @@ struct sdw_master_runtime {
116}; 116};
117 117
118struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave, 118struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
119 enum sdw_data_direction direction, 119 enum sdw_data_direction direction,
120 unsigned int port_num); 120 unsigned int port_num);
121int sdw_configure_dpn_intr(struct sdw_slave *slave, int port, 121int sdw_configure_dpn_intr(struct sdw_slave *slave, int port,
122 bool enable, int mask); 122 bool enable, int mask);
123 123
124int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg); 124int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg);
125int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg, 125int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
126 struct sdw_defer *defer); 126 struct sdw_defer *defer);
127 127
128#define SDW_READ_INTR_CLEAR_RETRY 10 128#define SDW_READ_INTR_CLEAR_RETRY 10
129 129
130int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave, 130int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
131 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf); 131 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf);
132 132
133/* Read-Modify-Write Slave register */ 133/* Read-Modify-Write Slave register */
134static inline int 134static inline int
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 283b2832728e..2655602f0cfb 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -107,7 +107,7 @@ static int sdw_drv_probe(struct device *dev)
107 slave->prop.clk_stop_timeout = 300; 107 slave->prop.clk_stop_timeout = 300;
108 108
109 slave->bus->clk_stop_timeout = max_t(u32, slave->bus->clk_stop_timeout, 109 slave->bus->clk_stop_timeout = max_t(u32, slave->bus->clk_stop_timeout,
110 slave->prop.clk_stop_timeout); 110 slave->prop.clk_stop_timeout);
111 111
112 return 0; 112 return 0;
113} 113}
@@ -148,7 +148,7 @@ int __sdw_register_driver(struct sdw_driver *drv, struct module *owner)
148 148
149 if (!drv->probe) { 149 if (!drv->probe) {
150 pr_err("driver %s didn't provide SDW probe routine\n", 150 pr_err("driver %s didn't provide SDW probe routine\n",
151 drv->name); 151 drv->name);
152 return -EINVAL; 152 return -EINVAL;
153 } 153 }
154 154
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index cb6a331f448a..682789bb8ab3 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -42,7 +42,6 @@
42#define CDNS_MCP_CONTROL_CMD_ACCEPT BIT(1) 42#define CDNS_MCP_CONTROL_CMD_ACCEPT BIT(1)
43#define CDNS_MCP_CONTROL_BLOCK_WAKEUP BIT(0) 43#define CDNS_MCP_CONTROL_BLOCK_WAKEUP BIT(0)
44 44
45
46#define CDNS_MCP_CMDCTRL 0x8 45#define CDNS_MCP_CMDCTRL 0x8
47#define CDNS_MCP_SSPSTAT 0xC 46#define CDNS_MCP_SSPSTAT 0xC
48#define CDNS_MCP_FRAME_SHAPE 0x10 47#define CDNS_MCP_FRAME_SHAPE 0x10
@@ -226,9 +225,9 @@ static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
226/* 225/*
227 * IO Calls 226 * IO Calls
228 */ 227 */
229static enum sdw_command_response cdns_fill_msg_resp( 228static enum sdw_command_response
230 struct sdw_cdns *cdns, 229cdns_fill_msg_resp(struct sdw_cdns *cdns,
231 struct sdw_msg *msg, int count, int offset) 230 struct sdw_msg *msg, int count, int offset)
232{ 231{
233 int nack = 0, no_ack = 0; 232 int nack = 0, no_ack = 0;
234 int i; 233 int i;
@@ -263,7 +262,7 @@ static enum sdw_command_response cdns_fill_msg_resp(
263 262
264static enum sdw_command_response 263static enum sdw_command_response
265_cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd, 264_cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
266 int offset, int count, bool defer) 265 int offset, int count, bool defer)
267{ 266{
268 unsigned long time; 267 unsigned long time;
269 u32 base, i, data; 268 u32 base, i, data;
@@ -296,7 +295,7 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
296 295
297 /* wait for timeout or response */ 296 /* wait for timeout or response */
298 time = wait_for_completion_timeout(&cdns->tx_complete, 297 time = wait_for_completion_timeout(&cdns->tx_complete,
299 msecs_to_jiffies(CDNS_TX_TIMEOUT)); 298 msecs_to_jiffies(CDNS_TX_TIMEOUT));
300 if (!time) { 299 if (!time) {
301 dev_err(cdns->dev, "IO transfer timed out\n"); 300 dev_err(cdns->dev, "IO transfer timed out\n");
302 msg->len = 0; 301 msg->len = 0;
@@ -306,8 +305,8 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
306 return cdns_fill_msg_resp(cdns, msg, count, offset); 305 return cdns_fill_msg_resp(cdns, msg, count, offset);
307} 306}
308 307
309static enum sdw_command_response cdns_program_scp_addr( 308static enum sdw_command_response
310 struct sdw_cdns *cdns, struct sdw_msg *msg) 309cdns_program_scp_addr(struct sdw_cdns *cdns, struct sdw_msg *msg)
311{ 310{
312 int nack = 0, no_ack = 0; 311 int nack = 0, no_ack = 0;
313 unsigned long time; 312 unsigned long time;
@@ -336,7 +335,7 @@ static enum sdw_command_response cdns_program_scp_addr(
336 cdns_writel(cdns, base, data[1]); 335 cdns_writel(cdns, base, data[1]);
337 336
338 time = wait_for_completion_timeout(&cdns->tx_complete, 337 time = wait_for_completion_timeout(&cdns->tx_complete,
339 msecs_to_jiffies(CDNS_TX_TIMEOUT)); 338 msecs_to_jiffies(CDNS_TX_TIMEOUT));
340 if (!time) { 339 if (!time) {
341 dev_err(cdns->dev, "SCP Msg trf timed out\n"); 340 dev_err(cdns->dev, "SCP Msg trf timed out\n");
342 msg->len = 0; 341 msg->len = 0;
@@ -347,10 +346,10 @@ static enum sdw_command_response cdns_program_scp_addr(
347 for (i = 0; i < 2; i++) { 346 for (i = 0; i < 2; i++) {
348 if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) { 347 if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
349 no_ack = 1; 348 no_ack = 1;
350 dev_err(cdns->dev, "Program SCP Ack not received"); 349 dev_err(cdns->dev, "Program SCP Ack not received\n");
351 if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) { 350 if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
352 nack = 1; 351 nack = 1;
353 dev_err(cdns->dev, "Program SCP NACK received"); 352 dev_err(cdns->dev, "Program SCP NACK received\n");
354 } 353 }
355 } 354 }
356 } 355 }
@@ -358,11 +357,11 @@ static enum sdw_command_response cdns_program_scp_addr(
358 /* For NACK, NO ack, don't return err if we are in Broadcast mode */ 357 /* For NACK, NO ack, don't return err if we are in Broadcast mode */
359 if (nack) { 358 if (nack) {
360 dev_err(cdns->dev, 359 dev_err(cdns->dev,
361 "SCP_addrpage NACKed for Slave %d", msg->dev_num); 360 "SCP_addrpage NACKed for Slave %d\n", msg->dev_num);
362 return SDW_CMD_FAIL; 361 return SDW_CMD_FAIL;
363 } else if (no_ack) { 362 } else if (no_ack) {
364 dev_dbg(cdns->dev, 363 dev_dbg(cdns->dev,
365 "SCP_addrpage ignored for Slave %d", msg->dev_num); 364 "SCP_addrpage ignored for Slave %d\n", msg->dev_num);
366 return SDW_CMD_IGNORED; 365 return SDW_CMD_IGNORED;
367 } 366 }
368 367
@@ -410,7 +409,7 @@ cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
410 409
411 for (i = 0; i < msg->len / CDNS_MCP_CMD_LEN; i++) { 410 for (i = 0; i < msg->len / CDNS_MCP_CMD_LEN; i++) {
412 ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN, 411 ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
413 CDNS_MCP_CMD_LEN, false); 412 CDNS_MCP_CMD_LEN, false);
414 if (ret < 0) 413 if (ret < 0)
415 goto exit; 414 goto exit;
416 } 415 }
@@ -419,7 +418,7 @@ cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
419 goto exit; 418 goto exit;
420 419
421 ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN, 420 ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
422 msg->len % CDNS_MCP_CMD_LEN, false); 421 msg->len % CDNS_MCP_CMD_LEN, false);
423 422
424exit: 423exit:
425 return ret; 424 return ret;
@@ -428,7 +427,7 @@ EXPORT_SYMBOL(cdns_xfer_msg);
428 427
429enum sdw_command_response 428enum sdw_command_response
430cdns_xfer_msg_defer(struct sdw_bus *bus, 429cdns_xfer_msg_defer(struct sdw_bus *bus,
431 struct sdw_msg *msg, struct sdw_defer *defer) 430 struct sdw_msg *msg, struct sdw_defer *defer)
432{ 431{
433 struct sdw_cdns *cdns = bus_to_cdns(bus); 432 struct sdw_cdns *cdns = bus_to_cdns(bus);
434 int cmd = 0, ret; 433 int cmd = 0, ret;
@@ -483,7 +482,7 @@ static void cdns_read_response(struct sdw_cdns *cdns)
483} 482}
484 483
485static int cdns_update_slave_status(struct sdw_cdns *cdns, 484static int cdns_update_slave_status(struct sdw_cdns *cdns,
486 u32 slave0, u32 slave1) 485 u32 slave0, u32 slave1)
487{ 486{
488 enum sdw_slave_status status[SDW_MAX_DEVICES + 1]; 487 enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
489 bool is_slave = false; 488 bool is_slave = false;
@@ -526,8 +525,8 @@ static int cdns_update_slave_status(struct sdw_cdns *cdns,
526 /* first check if Slave reported multiple status */ 525 /* first check if Slave reported multiple status */
527 if (set_status > 1) { 526 if (set_status > 1) {
528 dev_warn(cdns->dev, 527 dev_warn(cdns->dev,
529 "Slave reported multiple Status: %d\n", 528 "Slave reported multiple Status: %d\n",
530 status[i]); 529 status[i]);
531 /* 530 /*
532 * TODO: we need to reread the status here by 531 * TODO: we need to reread the status here by
533 * issuing a PING cmd 532 * issuing a PING cmd
@@ -566,15 +565,15 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
566 565
567 if (cdns->defer) { 566 if (cdns->defer) {
568 cdns_fill_msg_resp(cdns, cdns->defer->msg, 567 cdns_fill_msg_resp(cdns, cdns->defer->msg,
569 cdns->defer->length, 0); 568 cdns->defer->length, 0);
570 complete(&cdns->defer->complete); 569 complete(&cdns->defer->complete);
571 cdns->defer = NULL; 570 cdns->defer = NULL;
572 } else 571 } else {
573 complete(&cdns->tx_complete); 572 complete(&cdns->tx_complete);
573 }
574 } 574 }
575 575
576 if (int_status & CDNS_MCP_INT_CTRL_CLASH) { 576 if (int_status & CDNS_MCP_INT_CTRL_CLASH) {
577
578 /* Slave is driving bit slot during control word */ 577 /* Slave is driving bit slot during control word */
579 dev_err_ratelimited(cdns->dev, "Bus clash for control word\n"); 578 dev_err_ratelimited(cdns->dev, "Bus clash for control word\n");
580 int_status |= CDNS_MCP_INT_CTRL_CLASH; 579 int_status |= CDNS_MCP_INT_CTRL_CLASH;
@@ -592,7 +591,7 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
592 if (int_status & CDNS_MCP_INT_SLAVE_MASK) { 591 if (int_status & CDNS_MCP_INT_SLAVE_MASK) {
593 /* Mask the Slave interrupt and wake thread */ 592 /* Mask the Slave interrupt and wake thread */
594 cdns_updatel(cdns, CDNS_MCP_INTMASK, 593 cdns_updatel(cdns, CDNS_MCP_INTMASK,
595 CDNS_MCP_INT_SLAVE_MASK, 0); 594 CDNS_MCP_INT_SLAVE_MASK, 0);
596 595
597 int_status &= ~CDNS_MCP_INT_SLAVE_MASK; 596 int_status &= ~CDNS_MCP_INT_SLAVE_MASK;
598 ret = IRQ_WAKE_THREAD; 597 ret = IRQ_WAKE_THREAD;
@@ -625,7 +624,7 @@ irqreturn_t sdw_cdns_thread(int irq, void *dev_id)
625 /* clear and unmask Slave interrupt now */ 624 /* clear and unmask Slave interrupt now */
626 cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK); 625 cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
627 cdns_updatel(cdns, CDNS_MCP_INTMASK, 626 cdns_updatel(cdns, CDNS_MCP_INTMASK,
628 CDNS_MCP_INT_SLAVE_MASK, CDNS_MCP_INT_SLAVE_MASK); 627 CDNS_MCP_INT_SLAVE_MASK, CDNS_MCP_INT_SLAVE_MASK);
629 628
630 return IRQ_HANDLED; 629 return IRQ_HANDLED;
631} 630}
@@ -639,9 +638,9 @@ static int _cdns_enable_interrupt(struct sdw_cdns *cdns)
639 u32 mask; 638 u32 mask;
640 639
641 cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, 640 cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0,
642 CDNS_MCP_SLAVE_INTMASK0_MASK); 641 CDNS_MCP_SLAVE_INTMASK0_MASK);
643 cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, 642 cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1,
644 CDNS_MCP_SLAVE_INTMASK1_MASK); 643 CDNS_MCP_SLAVE_INTMASK1_MASK);
645 644
646 mask = CDNS_MCP_INT_SLAVE_RSVD | CDNS_MCP_INT_SLAVE_ALERT | 645 mask = CDNS_MCP_INT_SLAVE_RSVD | CDNS_MCP_INT_SLAVE_ALERT |
647 CDNS_MCP_INT_SLAVE_ATTACH | CDNS_MCP_INT_SLAVE_NATTACH | 646 CDNS_MCP_INT_SLAVE_ATTACH | CDNS_MCP_INT_SLAVE_NATTACH |
@@ -663,17 +662,17 @@ int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns)
663 662
664 _cdns_enable_interrupt(cdns); 663 _cdns_enable_interrupt(cdns);
665 ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE, 664 ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
666 CDNS_MCP_CONFIG_UPDATE_BIT); 665 CDNS_MCP_CONFIG_UPDATE_BIT);
667 if (ret < 0) 666 if (ret < 0)
668 dev_err(cdns->dev, "Config update timedout"); 667 dev_err(cdns->dev, "Config update timedout\n");
669 668
670 return ret; 669 return ret;
671} 670}
672EXPORT_SYMBOL(sdw_cdns_enable_interrupt); 671EXPORT_SYMBOL(sdw_cdns_enable_interrupt);
673 672
674static int cdns_allocate_pdi(struct sdw_cdns *cdns, 673static int cdns_allocate_pdi(struct sdw_cdns *cdns,
675 struct sdw_cdns_pdi **stream, 674 struct sdw_cdns_pdi **stream,
676 u32 num, u32 pdi_offset) 675 u32 num, u32 pdi_offset)
677{ 676{
678 struct sdw_cdns_pdi *pdi; 677 struct sdw_cdns_pdi *pdi;
679 int i; 678 int i;
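
Several hunks in this series append a trailing '\n' to dev_err() strings ("Config update timedout" above, plus the bank-switch, DAI and port-register messages below). A printk-family message without a terminating newline may be buffered and glued to the next message as a continuation line, so each complete diagnostic should end in '\n'. Illustrative calls, not taken from the driver:

dev_err(dev, "Config update timed out\n");  /* complete line, emitted as-is */
dev_err(dev, "status: %d", status);         /* risks merging with the next printk */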
@@ -701,7 +700,7 @@ static int cdns_allocate_pdi(struct sdw_cdns *cdns,
701 * @config: Stream configurations 700 * @config: Stream configurations
702 */ 701 */
703int sdw_cdns_pdi_init(struct sdw_cdns *cdns, 702int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
704 struct sdw_cdns_stream_config config) 703 struct sdw_cdns_stream_config config)
705{ 704{
706 struct sdw_cdns_streams *stream; 705 struct sdw_cdns_streams *stream;
707 int offset, i, ret; 706 int offset, i, ret;
@@ -770,7 +769,7 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
770 cdns->num_ports += stream->num_pdi; 769 cdns->num_ports += stream->num_pdi;
771 770
772 cdns->ports = devm_kcalloc(cdns->dev, cdns->num_ports, 771 cdns->ports = devm_kcalloc(cdns->dev, cdns->num_ports,
773 sizeof(*cdns->ports), GFP_KERNEL); 772 sizeof(*cdns->ports), GFP_KERNEL);
774 if (!cdns->ports) { 773 if (!cdns->ports) {
775 ret = -ENOMEM; 774 ret = -ENOMEM;
776 return ret; 775 return ret;
@@ -796,7 +795,7 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
796 795
797 /* Exit clock stop */ 796 /* Exit clock stop */
798 ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL, 797 ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
799 CDNS_MCP_CONTROL_CLK_STOP_CLR); 798 CDNS_MCP_CONTROL_CLK_STOP_CLR);
800 if (ret < 0) { 799 if (ret < 0) {
801 dev_err(cdns->dev, "Couldn't exit from clock stop\n"); 800 dev_err(cdns->dev, "Couldn't exit from clock stop\n");
802 return ret; 801 return ret;
@@ -816,7 +815,7 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
816 815
817 /* Set cmd accept mode */ 816 /* Set cmd accept mode */
818 cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT, 817 cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
819 CDNS_MCP_CONTROL_CMD_ACCEPT); 818 CDNS_MCP_CONTROL_CMD_ACCEPT);
820 819
821 /* Configure mcp config */ 820 /* Configure mcp config */
822 val = cdns_readl(cdns, CDNS_MCP_CONFIG); 821 val = cdns_readl(cdns, CDNS_MCP_CONFIG);
@@ -853,7 +852,7 @@ int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params)
853 int divider; 852 int divider;
854 853
855 if (!params->curr_dr_freq) { 854 if (!params->curr_dr_freq) {
856 dev_err(cdns->dev, "NULL curr_dr_freq"); 855 dev_err(cdns->dev, "NULL curr_dr_freq\n");
857 return -EINVAL; 856 return -EINVAL;
858 } 857 }
859 858
@@ -873,7 +872,7 @@ int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params)
873EXPORT_SYMBOL(cdns_bus_conf); 872EXPORT_SYMBOL(cdns_bus_conf);
874 873
875static int cdns_port_params(struct sdw_bus *bus, 874static int cdns_port_params(struct sdw_bus *bus,
876 struct sdw_port_params *p_params, unsigned int bank) 875 struct sdw_port_params *p_params, unsigned int bank)
877{ 876{
878 struct sdw_cdns *cdns = bus_to_cdns(bus); 877 struct sdw_cdns *cdns = bus_to_cdns(bus);
879 int dpn_config = 0, dpn_config_off; 878 int dpn_config = 0, dpn_config_off;
@@ -898,8 +897,8 @@ static int cdns_port_params(struct sdw_bus *bus,
898} 897}
899 898
900static int cdns_transport_params(struct sdw_bus *bus, 899static int cdns_transport_params(struct sdw_bus *bus,
901 struct sdw_transport_params *t_params, 900 struct sdw_transport_params *t_params,
902 enum sdw_reg_bank bank) 901 enum sdw_reg_bank bank)
903{ 902{
904 struct sdw_cdns *cdns = bus_to_cdns(bus); 903 struct sdw_cdns *cdns = bus_to_cdns(bus);
905 int dpn_offsetctrl = 0, dpn_offsetctrl_off; 904 int dpn_offsetctrl = 0, dpn_offsetctrl_off;
@@ -952,7 +951,7 @@ static int cdns_transport_params(struct sdw_bus *bus,
952} 951}
953 952
954static int cdns_port_enable(struct sdw_bus *bus, 953static int cdns_port_enable(struct sdw_bus *bus,
955 struct sdw_enable_ch *enable_ch, unsigned int bank) 954 struct sdw_enable_ch *enable_ch, unsigned int bank)
956{ 955{
957 struct sdw_cdns *cdns = bus_to_cdns(bus); 956 struct sdw_cdns *cdns = bus_to_cdns(bus);
958 int dpn_chnen_off, ch_mask; 957 int dpn_chnen_off, ch_mask;
@@ -988,7 +987,7 @@ int sdw_cdns_probe(struct sdw_cdns *cdns)
988EXPORT_SYMBOL(sdw_cdns_probe); 987EXPORT_SYMBOL(sdw_cdns_probe);
989 988
990int cdns_set_sdw_stream(struct snd_soc_dai *dai, 989int cdns_set_sdw_stream(struct snd_soc_dai *dai,
991 void *stream, bool pcm, int direction) 990 void *stream, bool pcm, int direction)
992{ 991{
993 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); 992 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
994 struct sdw_cdns_dma_data *dma; 993 struct sdw_cdns_dma_data *dma;
@@ -1026,12 +1025,13 @@ EXPORT_SYMBOL(cdns_set_sdw_stream);
1026 * Find and return a free PDI for a given PDI array 1025 * Find and return a free PDI for a given PDI array
1027 */ 1026 */
1028static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns, 1027static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
1029 unsigned int num, struct sdw_cdns_pdi *pdi) 1028 unsigned int num,
1029 struct sdw_cdns_pdi *pdi)
1030{ 1030{
1031 int i; 1031 int i;
1032 1032
1033 for (i = 0; i < num; i++) { 1033 for (i = 0; i < num; i++) {
1034 if (pdi[i].assigned == true) 1034 if (pdi[i].assigned)
1035 continue; 1035 continue;
1036 pdi[i].assigned = true; 1036 pdi[i].assigned = true;
1037 return &pdi[i]; 1037 return &pdi[i];
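
The hunk drops the "== true" comparison in favour of testing the flag directly. Besides matching kernel style, the direct test is the safer idiom: for any scalar that is not a real bool, every non-zero value is truthy, but only the value 1 compares equal to true. A small standalone demonstration:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        int flag = 2;                       /* "set", but not the value 1 */

        if (flag)
                printf("truthy\n");         /* printed */
        if (flag == true)
                printf("equals true\n");    /* not printed: 2 != 1 */
        return 0;
}

With a genuine C99 bool the implicit conversion to 0/1 makes the comparison harmless, but the direct test works for both and reads better.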
@@ -1050,8 +1050,8 @@ static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
1050 * @pdi: PDI to be used 1050 * @pdi: PDI to be used
1051 */ 1051 */
1052void sdw_cdns_config_stream(struct sdw_cdns *cdns, 1052void sdw_cdns_config_stream(struct sdw_cdns *cdns,
1053 struct sdw_cdns_port *port, 1053 struct sdw_cdns_port *port,
1054 u32 ch, u32 dir, struct sdw_cdns_pdi *pdi) 1054 u32 ch, u32 dir, struct sdw_cdns_pdi *pdi)
1055{ 1055{
1056 u32 offset, val = 0; 1056 u32 offset, val = 0;
1057 1057
@@ -1076,13 +1076,13 @@ EXPORT_SYMBOL(sdw_cdns_config_stream);
1076 * @ch_count: Channel count 1076 * @ch_count: Channel count
1077 */ 1077 */
1078static int cdns_get_num_pdi(struct sdw_cdns *cdns, 1078static int cdns_get_num_pdi(struct sdw_cdns *cdns,
1079 struct sdw_cdns_pdi *pdi, 1079 struct sdw_cdns_pdi *pdi,
1080 unsigned int num, u32 ch_count) 1080 unsigned int num, u32 ch_count)
1081{ 1081{
1082 int i, pdis = 0; 1082 int i, pdis = 0;
1083 1083
1084 for (i = 0; i < num; i++) { 1084 for (i = 0; i < num; i++) {
1085 if (pdi[i].assigned == true) 1085 if (pdi[i].assigned)
1086 continue; 1086 continue;
1087 1087
1088 if (pdi[i].ch_count < ch_count) 1088 if (pdi[i].ch_count < ch_count)
@@ -1139,8 +1139,8 @@ EXPORT_SYMBOL(sdw_cdns_get_stream);
1139 * @dir: Data direction 1139 * @dir: Data direction
1140 */ 1140 */
1141int sdw_cdns_alloc_stream(struct sdw_cdns *cdns, 1141int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
1142 struct sdw_cdns_streams *stream, 1142 struct sdw_cdns_streams *stream,
1143 struct sdw_cdns_port *port, u32 ch, u32 dir) 1143 struct sdw_cdns_port *port, u32 ch, u32 dir)
1144{ 1144{
1145 struct sdw_cdns_pdi *pdi = NULL; 1145 struct sdw_cdns_pdi *pdi = NULL;
1146 1146
@@ -1167,7 +1167,7 @@ int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
1167EXPORT_SYMBOL(sdw_cdns_alloc_stream); 1167EXPORT_SYMBOL(sdw_cdns_alloc_stream);
1168 1168
1169void sdw_cdns_shutdown(struct snd_pcm_substream *substream, 1169void sdw_cdns_shutdown(struct snd_pcm_substream *substream,
1170 struct snd_soc_dai *dai) 1170 struct snd_soc_dai *dai)
1171{ 1171{
1172 struct sdw_cdns_dma_data *dma; 1172 struct sdw_cdns_dma_data *dma;
1173 1173
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index eb902b19c5a4..fe2af62958b1 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2// Copyright(c) 2015-17 Intel Corporation. 2/* Copyright(c) 2015-17 Intel Corporation. */
3#include <sound/soc.h> 3#include <sound/soc.h>
4 4
5#ifndef __SDW_CADENCE_H 5#ifndef __SDW_CADENCE_H
@@ -160,24 +160,24 @@ irqreturn_t sdw_cdns_thread(int irq, void *dev_id);
160 160
161int sdw_cdns_init(struct sdw_cdns *cdns); 161int sdw_cdns_init(struct sdw_cdns *cdns);
162int sdw_cdns_pdi_init(struct sdw_cdns *cdns, 162int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
163 struct sdw_cdns_stream_config config); 163 struct sdw_cdns_stream_config config);
164int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns); 164int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns);
165 165
166int sdw_cdns_get_stream(struct sdw_cdns *cdns, 166int sdw_cdns_get_stream(struct sdw_cdns *cdns,
167 struct sdw_cdns_streams *stream, 167 struct sdw_cdns_streams *stream,
168 u32 ch, u32 dir); 168 u32 ch, u32 dir);
169int sdw_cdns_alloc_stream(struct sdw_cdns *cdns, 169int sdw_cdns_alloc_stream(struct sdw_cdns *cdns,
170 struct sdw_cdns_streams *stream, 170 struct sdw_cdns_streams *stream,
171 struct sdw_cdns_port *port, u32 ch, u32 dir); 171 struct sdw_cdns_port *port, u32 ch, u32 dir);
172void sdw_cdns_config_stream(struct sdw_cdns *cdns, struct sdw_cdns_port *port, 172void sdw_cdns_config_stream(struct sdw_cdns *cdns, struct sdw_cdns_port *port,
173 u32 ch, u32 dir, struct sdw_cdns_pdi *pdi); 173 u32 ch, u32 dir, struct sdw_cdns_pdi *pdi);
174 174
175void sdw_cdns_shutdown(struct snd_pcm_substream *substream, 175void sdw_cdns_shutdown(struct snd_pcm_substream *substream,
176 struct snd_soc_dai *dai); 176 struct snd_soc_dai *dai);
177int sdw_cdns_pcm_set_stream(struct snd_soc_dai *dai, 177int sdw_cdns_pcm_set_stream(struct snd_soc_dai *dai,
178 void *stream, int direction); 178 void *stream, int direction);
179int sdw_cdns_pdm_set_stream(struct snd_soc_dai *dai, 179int sdw_cdns_pdm_set_stream(struct snd_soc_dai *dai,
180 void *stream, int direction); 180 void *stream, int direction);
181 181
182enum sdw_command_response 182enum sdw_command_response
183cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num); 183cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num);
@@ -187,7 +187,7 @@ cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg);
187 187
188enum sdw_command_response 188enum sdw_command_response
189cdns_xfer_msg_defer(struct sdw_bus *bus, 189cdns_xfer_msg_defer(struct sdw_bus *bus,
190 struct sdw_msg *msg, struct sdw_defer *defer); 190 struct sdw_msg *msg, struct sdw_defer *defer);
191 191
192enum sdw_command_response 192enum sdw_command_response
193cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num); 193cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num);
@@ -195,5 +195,5 @@ cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num);
195int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params); 195int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params);
196 196
197int cdns_set_sdw_stream(struct snd_soc_dai *dai, 197int cdns_set_sdw_stream(struct snd_soc_dai *dai,
198 void *stream, bool pcm, int direction); 198 void *stream, bool pcm, int direction);
199#endif /* __SDW_CADENCE_H */ 199#endif /* __SDW_CADENCE_H */
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index fd8d034cfec1..31336b0271b0 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -7,6 +7,7 @@
7 7
8#include <linux/acpi.h> 8#include <linux/acpi.h>
9#include <linux/delay.h> 9#include <linux/delay.h>
10#include <linux/module.h>
10#include <linux/interrupt.h> 11#include <linux/interrupt.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12#include <sound/pcm_params.h> 13#include <sound/pcm_params.h>
@@ -23,18 +24,18 @@
23#define SDW_SHIM_IPPTR 0x8 24#define SDW_SHIM_IPPTR 0x8
24#define SDW_SHIM_SYNC 0xC 25#define SDW_SHIM_SYNC 0xC
25 26
26#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * x) 27#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * (x))
27#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * x) 28#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * (x))
28#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * x) 29#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * (x))
29#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * x) 30#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * (x))
30#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * x) 31#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * (x))
31#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * x) 32#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * (x))
32 33
33#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * x) + (0x2 * y)) 34#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * (x)) + (0x2 * (y)))
34#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * x) + (0x2 * y)) 35#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * (x)) + (0x2 * (y)))
35#define SDW_SHIM_PDMSCAP(x) (0x062 + 0x60 * x) 36#define SDW_SHIM_PDMSCAP(x) (0x062 + 0x60 * (x))
36#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * x) 37#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * (x))
37#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * x) 38#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * (x))
38 39
39#define SDW_SHIM_WAKEEN 0x190 40#define SDW_SHIM_WAKEEN 0x190
40#define SDW_SHIM_WAKESTS 0x192 41#define SDW_SHIM_WAKESTS 0x192
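
Every SDW_SHIM_* accessor above gains parentheses around its macro parameters. Without them, an expression argument associates with the surrounding + and * operators and silently yields the wrong register offset. A standalone illustration with the same shape of macro (constants borrowed from the hunk, the scenario itself is made up):

#include <stdio.h>

#define CTLSCAP_BAD(x)  (0x010 + 0x60 * x)      /* unparenthesized parameter */
#define CTLSCAP_GOOD(x) (0x010 + 0x60 * (x))    /* as fixed in the hunk */

int main(void)
{
        int i = 1;

        /* expands to 0x010 + 0x60 * i + 1 == 0x71: the wrong offset */
        printf("bad:  0x%x\n", CTLSCAP_BAD(i + 1));
        /* expands to 0x010 + 0x60 * (i + 1) == 0xd0: the intended offset */
        printf("good: 0x%x\n", CTLSCAP_GOOD(i + 1));
        return 0;
}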
@@ -81,7 +82,7 @@
81#define SDW_SHIM_WAKESTS_STATUS BIT(0) 82#define SDW_SHIM_WAKESTS_STATUS BIT(0)
82 83
83/* Intel ALH Register definitions */ 84/* Intel ALH Register definitions */
84#define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * x)) 85#define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * (x)))
85 86
86#define SDW_ALH_STRMZCFG_DMAT_VAL 0x3 87#define SDW_ALH_STRMZCFG_DMAT_VAL 0x3
87#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0) 88#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0)
@@ -235,9 +236,9 @@ static int intel_shim_init(struct sdw_intel *sdw)
235 /* Set SyncCPU bit */ 236 /* Set SyncCPU bit */
236 sync_reg |= SDW_SHIM_SYNC_SYNCCPU; 237 sync_reg |= SDW_SHIM_SYNC_SYNCCPU;
237 ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg, 238 ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
238 SDW_SHIM_SYNC_SYNCCPU); 239 SDW_SHIM_SYNC_SYNCCPU);
239 if (ret < 0) 240 if (ret < 0)
240 dev_err(sdw->cdns.dev, "Failed to set sync period: %d", ret); 241 dev_err(sdw->cdns.dev, "Failed to set sync period: %d\n", ret);
241 242
242 return ret; 243 return ret;
243} 244}
@@ -246,7 +247,7 @@ static int intel_shim_init(struct sdw_intel *sdw)
246 * PDI routines 247 * PDI routines
247 */ 248 */
248static void intel_pdi_init(struct sdw_intel *sdw, 249static void intel_pdi_init(struct sdw_intel *sdw,
249 struct sdw_cdns_stream_config *config) 250 struct sdw_cdns_stream_config *config)
250{ 251{
251 void __iomem *shim = sdw->res->shim; 252 void __iomem *shim = sdw->res->shim;
252 unsigned int link_id = sdw->instance; 253 unsigned int link_id = sdw->instance;
@@ -295,9 +296,9 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
295} 296}
296 297
297static int intel_pdi_get_ch_update(struct sdw_intel *sdw, 298static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
298 struct sdw_cdns_pdi *pdi, 299 struct sdw_cdns_pdi *pdi,
299 unsigned int num_pdi, 300 unsigned int num_pdi,
300 unsigned int *num_ch, bool pcm) 301 unsigned int *num_ch, bool pcm)
301{ 302{
302 int i, ch_count = 0; 303 int i, ch_count = 0;
303 304
@@ -312,16 +313,16 @@ static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
312} 313}
313 314
314static int intel_pdi_stream_ch_update(struct sdw_intel *sdw, 315static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
315 struct sdw_cdns_streams *stream, bool pcm) 316 struct sdw_cdns_streams *stream, bool pcm)
316{ 317{
317 intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd, 318 intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
318 &stream->num_ch_bd, pcm); 319 &stream->num_ch_bd, pcm);
319 320
320 intel_pdi_get_ch_update(sdw, stream->in, stream->num_in, 321 intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
321 &stream->num_ch_in, pcm); 322 &stream->num_ch_in, pcm);
322 323
323 intel_pdi_get_ch_update(sdw, stream->out, stream->num_out, 324 intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
324 &stream->num_ch_out, pcm); 325 &stream->num_ch_out, pcm);
325 326
326 return 0; 327 return 0;
327} 328}
@@ -386,9 +387,9 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
386} 387}
387 388
388static int intel_config_stream(struct sdw_intel *sdw, 389static int intel_config_stream(struct sdw_intel *sdw,
389 struct snd_pcm_substream *substream, 390 struct snd_pcm_substream *substream,
390 struct snd_soc_dai *dai, 391 struct snd_soc_dai *dai,
391 struct snd_pcm_hw_params *hw_params, int link_id) 392 struct snd_pcm_hw_params *hw_params, int link_id)
392{ 393{
393 if (sdw->res->ops && sdw->res->ops->config_stream) 394 if (sdw->res->ops && sdw->res->ops->config_stream)
394 return sdw->res->ops->config_stream(sdw->res->arg, 395 return sdw->res->ops->config_stream(sdw->res->arg,
@@ -453,9 +454,9 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
453 sync_reg |= SDW_SHIM_SYNC_SYNCGO; 454 sync_reg |= SDW_SHIM_SYNC_SYNCGO;
454 455
455 ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg, 456 ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
456 SDW_SHIM_SYNC_SYNCGO); 457 SDW_SHIM_SYNC_SYNCGO);
457 if (ret < 0) 458 if (ret < 0)
458 dev_err(sdw->cdns.dev, "Post bank switch failed: %d", ret); 459 dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
459 460
460 return ret; 461 return ret;
461} 462}
@@ -465,14 +466,14 @@ static int intel_post_bank_switch(struct sdw_bus *bus)
465 */ 466 */
466 467
467static struct sdw_cdns_port *intel_alloc_port(struct sdw_intel *sdw, 468static struct sdw_cdns_port *intel_alloc_port(struct sdw_intel *sdw,
468 u32 ch, u32 dir, bool pcm) 469 u32 ch, u32 dir, bool pcm)
469{ 470{
470 struct sdw_cdns *cdns = &sdw->cdns; 471 struct sdw_cdns *cdns = &sdw->cdns;
471 struct sdw_cdns_port *port = NULL; 472 struct sdw_cdns_port *port = NULL;
472 int i, ret = 0; 473 int i, ret = 0;
473 474
474 for (i = 0; i < cdns->num_ports; i++) { 475 for (i = 0; i < cdns->num_ports; i++) {
475 if (cdns->ports[i].assigned == true) 476 if (cdns->ports[i].assigned)
476 continue; 477 continue;
477 478
478 port = &cdns->ports[i]; 479 port = &cdns->ports[i];
@@ -525,8 +526,8 @@ static void intel_port_cleanup(struct sdw_cdns_dma_data *dma)
525} 526}
526 527
527static int intel_hw_params(struct snd_pcm_substream *substream, 528static int intel_hw_params(struct snd_pcm_substream *substream,
528 struct snd_pcm_hw_params *params, 529 struct snd_pcm_hw_params *params,
529 struct snd_soc_dai *dai) 530 struct snd_soc_dai *dai)
530{ 531{
531 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai); 532 struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
532 struct sdw_intel *sdw = cdns_to_intel(cdns); 533 struct sdw_intel *sdw = cdns_to_intel(cdns);
@@ -555,7 +556,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
555 } 556 }
556 557
557 if (!dma->nr_ports) { 558 if (!dma->nr_ports) {
558 dev_err(dai->dev, "ports/resources not available"); 559 dev_err(dai->dev, "ports/resources not available\n");
559 return -EINVAL; 560 return -EINVAL;
560 } 561 }
561 562
@@ -574,7 +575,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
574 /* Inform DSP about PDI stream number */ 575 /* Inform DSP about PDI stream number */
575 for (i = 0; i < dma->nr_ports; i++) { 576 for (i = 0; i < dma->nr_ports; i++) {
576 ret = intel_config_stream(sdw, substream, dai, params, 577 ret = intel_config_stream(sdw, substream, dai, params,
577 dma->port[i]->pdi->intel_alh_id); 578 dma->port[i]->pdi->intel_alh_id);
578 if (ret) 579 if (ret)
579 goto port_error; 580 goto port_error;
580 } 581 }
@@ -604,9 +605,9 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
604 } 605 }
605 606
606 ret = sdw_stream_add_master(&cdns->bus, &sconfig, 607 ret = sdw_stream_add_master(&cdns->bus, &sconfig,
607 pconfig, dma->nr_ports, dma->stream); 608 pconfig, dma->nr_ports, dma->stream);
608 if (ret) { 609 if (ret) {
609 dev_err(cdns->dev, "add master to stream failed:%d", ret); 610 dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
610 goto stream_error; 611 goto stream_error;
611 } 612 }
612 613
@@ -634,8 +635,8 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
634 635
635 ret = sdw_stream_remove_master(&cdns->bus, dma->stream); 636 ret = sdw_stream_remove_master(&cdns->bus, dma->stream);
636 if (ret < 0) 637 if (ret < 0)
637 dev_err(dai->dev, "remove master from stream %s failed: %d", 638 dev_err(dai->dev, "remove master from stream %s failed: %d\n",
638 dma->stream->name, ret); 639 dma->stream->name, ret);
639 640
640 intel_port_cleanup(dma); 641 intel_port_cleanup(dma);
641 kfree(dma->port); 642 kfree(dma->port);
@@ -643,13 +644,13 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
643} 644}
644 645
645static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai, 646static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
646 void *stream, int direction) 647 void *stream, int direction)
647{ 648{
648 return cdns_set_sdw_stream(dai, stream, true, direction); 649 return cdns_set_sdw_stream(dai, stream, true, direction);
649} 650}
650 651
651static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai, 652static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
652 void *stream, int direction) 653 void *stream, int direction)
653{ 654{
654 return cdns_set_sdw_stream(dai, stream, false, direction); 655 return cdns_set_sdw_stream(dai, stream, false, direction);
655} 656}
@@ -673,9 +674,9 @@ static const struct snd_soc_component_driver dai_component = {
673}; 674};
674 675
675static int intel_create_dai(struct sdw_cdns *cdns, 676static int intel_create_dai(struct sdw_cdns *cdns,
676 struct snd_soc_dai_driver *dais, 677 struct snd_soc_dai_driver *dais,
677 enum intel_pdi_type type, 678 enum intel_pdi_type type,
678 u32 num, u32 off, u32 max_ch, bool pcm) 679 u32 num, u32 off, u32 max_ch, bool pcm)
679{ 680{
680 int i; 681 int i;
681 682
@@ -685,14 +686,14 @@ static int intel_create_dai(struct sdw_cdns *cdns,
685 /* TODO: Read supported rates/formats from hardware */ 686 /* TODO: Read supported rates/formats from hardware */
686 for (i = off; i < (off + num); i++) { 687 for (i = off; i < (off + num); i++) {
687 dais[i].name = kasprintf(GFP_KERNEL, "SDW%d Pin%d", 688 dais[i].name = kasprintf(GFP_KERNEL, "SDW%d Pin%d",
688 cdns->instance, i); 689 cdns->instance, i);
689 if (!dais[i].name) 690 if (!dais[i].name)
690 return -ENOMEM; 691 return -ENOMEM;
691 692
692 if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) { 693 if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
693 dais[i].playback.stream_name = kasprintf(GFP_KERNEL, 694 dais[i].playback.stream_name =
694 "SDW%d Tx%d", 695 kasprintf(GFP_KERNEL, "SDW%d Tx%d",
695 cdns->instance, i); 696 cdns->instance, i);
696 if (!dais[i].playback.stream_name) { 697 if (!dais[i].playback.stream_name) {
697 kfree(dais[i].name); 698 kfree(dais[i].name);
698 return -ENOMEM; 699 return -ENOMEM;
@@ -705,9 +706,9 @@ static int intel_create_dai(struct sdw_cdns *cdns,
705 } 706 }
706 707
707 if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) { 708 if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
708 dais[i].capture.stream_name = kasprintf(GFP_KERNEL, 709 dais[i].capture.stream_name =
709 "SDW%d Rx%d", 710 kasprintf(GFP_KERNEL, "SDW%d Rx%d",
710 cdns->instance, i); 711 cdns->instance, i);
711 if (!dais[i].capture.stream_name) { 712 if (!dais[i].capture.stream_name) {
712 kfree(dais[i].name); 713 kfree(dais[i].name);
713 kfree(dais[i].playback.stream_name); 714 kfree(dais[i].playback.stream_name);
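
intel_create_dai() builds each DAI's strings with kasprintf() and, when a later allocation fails, frees everything already allocated for that entry before returning -ENOMEM. A portable userspace analogue of the same allocate-then-unwind pattern; all names here are illustrative:

#include <stdio.h>
#include <stdlib.h>

/* asprintf-style helper built on snprintf for portability */
static char *strfmt(const char *fmt, int a, int b)
{
        int n = snprintf(NULL, 0, fmt, a, b);
        char *s = malloc(n + 1);

        if (s)
                snprintf(s, n + 1, fmt, a, b);
        return s;
}

static int make_names(int inst, int i, char **name, char **tx)
{
        *name = strfmt("SDW%d Pin%d", inst, i);
        if (!*name)
                return -1;

        *tx = strfmt("SDW%d Tx%d", inst, i);
        if (!*tx) {
                free(*name);            /* unwind the earlier allocation */
                *name = NULL;
                return -1;
        }
        return 0;
}

int main(void)
{
        char *name, *tx;

        if (make_names(0, 3, &name, &tx) == 0) {
                printf("%s / %s\n", name, tx);
                free(name);
                free(tx);
        }
        return 0;
}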
@@ -748,45 +749,45 @@ static int intel_register_dai(struct sdw_intel *sdw)
748 /* Create PCM DAIs */ 749 /* Create PCM DAIs */
749 stream = &cdns->pcm; 750 stream = &cdns->pcm;
750 751
751 ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, 752 ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, stream->num_in,
752 stream->num_in, off, stream->num_ch_in, true); 753 off, stream->num_ch_in, true);
753 if (ret) 754 if (ret)
754 return ret; 755 return ret;
755 756
756 off += cdns->pcm.num_in; 757 off += cdns->pcm.num_in;
757 ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, 758 ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
758 cdns->pcm.num_out, off, stream->num_ch_out, true); 759 off, stream->num_ch_out, true);
759 if (ret) 760 if (ret)
760 return ret; 761 return ret;
761 762
762 off += cdns->pcm.num_out; 763 off += cdns->pcm.num_out;
763 ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, 764 ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
764 cdns->pcm.num_bd, off, stream->num_ch_bd, true); 765 off, stream->num_ch_bd, true);
765 if (ret) 766 if (ret)
766 return ret; 767 return ret;
767 768
768 /* Create PDM DAIs */ 769 /* Create PDM DAIs */
769 stream = &cdns->pdm; 770 stream = &cdns->pdm;
770 off += cdns->pcm.num_bd; 771 off += cdns->pcm.num_bd;
771 ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, 772 ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pdm.num_in,
772 cdns->pdm.num_in, off, stream->num_ch_in, false); 773 off, stream->num_ch_in, false);
773 if (ret) 774 if (ret)
774 return ret; 775 return ret;
775 776
776 off += cdns->pdm.num_in; 777 off += cdns->pdm.num_in;
777 ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, 778 ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pdm.num_out,
778 cdns->pdm.num_out, off, stream->num_ch_out, false); 779 off, stream->num_ch_out, false);
779 if (ret) 780 if (ret)
780 return ret; 781 return ret;
781 782
782 off += cdns->pdm.num_bd; 783 off += cdns->pdm.num_bd;
783 ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, 784 ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd,
784 cdns->pdm.num_bd, off, stream->num_ch_bd, false); 785 off, stream->num_ch_bd, false);
785 if (ret) 786 if (ret)
786 return ret; 787 return ret;
787 788
788 return snd_soc_register_component(cdns->dev, &dai_component, 789 return snd_soc_register_component(cdns->dev, &dai_component,
789 dais, num_dai); 790 dais, num_dai);
790} 791}
791 792
792static int intel_prop_read(struct sdw_bus *bus) 793static int intel_prop_read(struct sdw_bus *bus)
@@ -796,8 +797,8 @@ static int intel_prop_read(struct sdw_bus *bus)
796 797
797 /* BIOS is not giving some values correctly. So, lets override them */ 798 /* BIOS is not giving some values correctly. So, lets override them */
798 bus->prop.num_freq = 1; 799 bus->prop.num_freq = 1;
799 bus->prop.freq = devm_kcalloc(bus->dev, sizeof(*bus->prop.freq), 800 bus->prop.freq = devm_kcalloc(bus->dev, bus->prop.num_freq,
800 bus->prop.num_freq, GFP_KERNEL); 801 sizeof(*bus->prop.freq), GFP_KERNEL);
801 if (!bus->prop.freq) 802 if (!bus->prop.freq)
802 return -ENOMEM; 803 return -ENOMEM;
803 804
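
This hunk reorders the devm_kcalloc() arguments into the documented (device, count, element size) order. The swapped form happened to allocate the same number of bytes, but it reads wrongly and trips static checkers that expect the calloc() convention. The userspace convention it mirrors, including the sizeof(*ptr) idiom that stays correct if the element type ever changes:

#include <stdio.h>
#include <stdlib.h>

struct freq { unsigned int hz; };

int main(void)
{
        size_t num = 4;
        /* calloc convention: (number of elements, size of each element) */
        struct freq *freqs = calloc(num, sizeof(*freqs));

        if (!freqs)
                return 1;
        printf("allocated %zu zeroed elements\n", num);
        free(freqs);
        return 0;
}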
@@ -872,19 +873,18 @@ static int intel_probe(struct platform_device *pdev)
872 intel_pdi_ch_update(sdw); 873 intel_pdi_ch_update(sdw);
873 874
874 /* Acquire IRQ */ 875 /* Acquire IRQ */
875 ret = request_threaded_irq(sdw->res->irq, sdw_cdns_irq, 876 ret = request_threaded_irq(sdw->res->irq, sdw_cdns_irq, sdw_cdns_thread,
876 sdw_cdns_thread, IRQF_SHARED, KBUILD_MODNAME, 877 IRQF_SHARED, KBUILD_MODNAME, &sdw->cdns);
877 &sdw->cdns);
878 if (ret < 0) { 878 if (ret < 0) {
879 dev_err(sdw->cdns.dev, "unable to grab IRQ %d, disabling device\n", 879 dev_err(sdw->cdns.dev, "unable to grab IRQ %d, disabling device\n",
880 sdw->res->irq); 880 sdw->res->irq);
881 goto err_init; 881 goto err_init;
882 } 882 }
883 883
884 /* Register DAIs */ 884 /* Register DAIs */
885 ret = intel_register_dai(sdw); 885 ret = intel_register_dai(sdw);
886 if (ret) { 886 if (ret) {
887 dev_err(sdw->cdns.dev, "DAI registration failed: %d", ret); 887 dev_err(sdw->cdns.dev, "DAI registration failed: %d\n", ret);
888 snd_soc_unregister_component(sdw->cdns.dev); 888 snd_soc_unregister_component(sdw->cdns.dev);
889 goto err_dai; 889 goto err_dai;
890 } 890 }
diff --git a/drivers/soundwire/intel.h b/drivers/soundwire/intel.h
index c1a5bac6212e..71050e5f643d 100644
--- a/drivers/soundwire/intel.h
+++ b/drivers/soundwire/intel.h
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2// Copyright(c) 2015-17 Intel Corporation. 2/* Copyright(c) 2015-17 Intel Corporation. */
3 3
4#ifndef __SDW_INTEL_LOCAL_H 4#ifndef __SDW_INTEL_LOCAL_H
5#define __SDW_INTEL_LOCAL_H 5#define __SDW_INTEL_LOCAL_H
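
Both header files switch their SPDX and copyright lines from // to /* */. The kernel's license-rules documentation calls for //-style identifiers in .c files but block comments in headers, the usual rationale being that headers can end up included from contexts that do not accept C++-style comments. The resulting header skeleton looks like this (guard name illustrative):

/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/* Copyright(c) 2015-17 Intel Corporation. */

#ifndef __EXAMPLE_LOCAL_H
#define __EXAMPLE_LOCAL_H

/* declarations ... */

#endif /* __EXAMPLE_LOCAL_H */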
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index 5c8a20d99878..d3d6b54c5791 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -8,6 +8,8 @@
8 */ 8 */
9 9
10#include <linux/acpi.h> 10#include <linux/acpi.h>
11#include <linux/export.h>
12#include <linux/module.h>
11#include <linux/platform_device.h> 13#include <linux/platform_device.h>
12#include <linux/soundwire/sdw_intel.h> 14#include <linux/soundwire/sdw_intel.h>
13#include "intel.h" 15#include "intel.h"
@@ -67,7 +69,7 @@ static struct sdw_intel_ctx
67 /* Found controller, find links supported */ 69 /* Found controller, find links supported */
68 count = 0; 70 count = 0;
69 ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev), 71 ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
70 "mipi-sdw-master-count", &count, 1); 72 "mipi-sdw-master-count", &count, 1);
71 73
72 /* Don't fail on error, continue and use hw value */ 74 /* Don't fail on error, continue and use hw value */
73 if (ret) { 75 if (ret) {
@@ -85,7 +87,7 @@ static struct sdw_intel_ctx
85 /* Check count is within bounds */ 87 /* Check count is within bounds */
86 if (count > SDW_MAX_LINKS) { 88 if (count > SDW_MAX_LINKS) {
87 dev_err(&adev->dev, "Link count %d exceeds max %d\n", 89 dev_err(&adev->dev, "Link count %d exceeds max %d\n",
88 count, SDW_MAX_LINKS); 90 count, SDW_MAX_LINKS);
89 return NULL; 91 return NULL;
90 } 92 }
91 93
@@ -104,7 +106,6 @@ static struct sdw_intel_ctx
104 106
105 /* Create SDW Master devices */ 107 /* Create SDW Master devices */
106 for (i = 0; i < count; i++) { 108 for (i = 0; i < count; i++) {
107
108 link->res.irq = res->irq; 109 link->res.irq = res->irq;
109 link->res.registers = res->mmio_base + SDW_LINK_BASE 110 link->res.registers = res->mmio_base + SDW_LINK_BASE
110 + (SDW_LINK_SIZE * i); 111 + (SDW_LINK_SIZE * i);
@@ -145,7 +146,7 @@ link_err:
145} 146}
146 147
147static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level, 148static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
148 void *cdata, void **return_value) 149 void *cdata, void **return_value)
149{ 150{
150 struct sdw_intel_res *res = cdata; 151 struct sdw_intel_res *res = cdata;
151 struct acpi_device *adev; 152 struct acpi_device *adev;
@@ -172,9 +173,9 @@ void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res)
172 acpi_status status; 173 acpi_status status;
173 174
174 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, 175 status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
175 parent_handle, 1, 176 parent_handle, 1,
176 sdw_intel_acpi_cb, 177 sdw_intel_acpi_cb,
177 NULL, res, NULL); 178 NULL, res, NULL);
178 if (ACPI_FAILURE(status)) 179 if (ACPI_FAILURE(status))
179 return NULL; 180 return NULL;
180 181
diff --git a/drivers/soundwire/mipi_disco.c b/drivers/soundwire/mipi_disco.c
index fdeba0c3b589..c1f51d6a23d2 100644
--- a/drivers/soundwire/mipi_disco.c
+++ b/drivers/soundwire/mipi_disco.c
@@ -35,11 +35,12 @@ int sdw_master_read_prop(struct sdw_bus *bus)
35 int nval, i; 35 int nval, i;
36 36
37 device_property_read_u32(bus->dev, 37 device_property_read_u32(bus->dev,
38 "mipi-sdw-sw-interface-revision", &prop->revision); 38 "mipi-sdw-sw-interface-revision",
39 &prop->revision);
39 40
40 /* Find master handle */ 41 /* Find master handle */
41 snprintf(name, sizeof(name), 42 snprintf(name, sizeof(name),
42 "mipi-sdw-master-%d-subproperties", bus->link_id); 43 "mipi-sdw-master-%d-subproperties", bus->link_id);
43 44
44 link = device_get_named_child_node(bus->dev, name); 45 link = device_get_named_child_node(bus->dev, name);
45 if (!link) { 46 if (!link) {
@@ -48,23 +49,23 @@ int sdw_master_read_prop(struct sdw_bus *bus)
48 } 49 }
49 50
50 if (fwnode_property_read_bool(link, 51 if (fwnode_property_read_bool(link,
51 "mipi-sdw-clock-stop-mode0-supported") == true) 52 "mipi-sdw-clock-stop-mode0-supported"))
52 prop->clk_stop_mode = SDW_CLK_STOP_MODE0; 53 prop->clk_stop_mode = SDW_CLK_STOP_MODE0;
53 54
54 if (fwnode_property_read_bool(link, 55 if (fwnode_property_read_bool(link,
55 "mipi-sdw-clock-stop-mode1-supported") == true) 56 "mipi-sdw-clock-stop-mode1-supported"))
56 prop->clk_stop_mode |= SDW_CLK_STOP_MODE1; 57 prop->clk_stop_mode |= SDW_CLK_STOP_MODE1;
57 58
58 fwnode_property_read_u32(link, 59 fwnode_property_read_u32(link,
59 "mipi-sdw-max-clock-frequency", &prop->max_freq); 60 "mipi-sdw-max-clock-frequency",
61 &prop->max_freq);
60 62
61 nval = fwnode_property_read_u32_array(link, 63 nval = fwnode_property_read_u32_array(link,
62 "mipi-sdw-clock-frequencies-supported", NULL, 0); 64 "mipi-sdw-clock-frequencies-supported", NULL, 0);
63 if (nval > 0) { 65 if (nval > 0) {
64
65 prop->num_freq = nval; 66 prop->num_freq = nval;
66 prop->freq = devm_kcalloc(bus->dev, prop->num_freq, 67 prop->freq = devm_kcalloc(bus->dev, prop->num_freq,
67 sizeof(*prop->freq), GFP_KERNEL); 68 sizeof(*prop->freq), GFP_KERNEL);
68 if (!prop->freq) 69 if (!prop->freq)
69 return -ENOMEM; 70 return -ENOMEM;
70 71
@@ -88,47 +89,49 @@ int sdw_master_read_prop(struct sdw_bus *bus)
88 nval = fwnode_property_read_u32_array(link, 89 nval = fwnode_property_read_u32_array(link,
89 "mipi-sdw-supported-clock-gears", NULL, 0); 90 "mipi-sdw-supported-clock-gears", NULL, 0);
90 if (nval > 0) { 91 if (nval > 0) {
91
92 prop->num_clk_gears = nval; 92 prop->num_clk_gears = nval;
93 prop->clk_gears = devm_kcalloc(bus->dev, prop->num_clk_gears, 93 prop->clk_gears = devm_kcalloc(bus->dev, prop->num_clk_gears,
94 sizeof(*prop->clk_gears), GFP_KERNEL); 94 sizeof(*prop->clk_gears),
95 GFP_KERNEL);
95 if (!prop->clk_gears) 96 if (!prop->clk_gears)
96 return -ENOMEM; 97 return -ENOMEM;
97 98
98 fwnode_property_read_u32_array(link, 99 fwnode_property_read_u32_array(link,
99 "mipi-sdw-supported-clock-gears", 100 "mipi-sdw-supported-clock-gears",
100 prop->clk_gears, prop->num_clk_gears); 101 prop->clk_gears,
102 prop->num_clk_gears);
101 } 103 }
102 104
103 fwnode_property_read_u32(link, "mipi-sdw-default-frame-rate", 105 fwnode_property_read_u32(link, "mipi-sdw-default-frame-rate",
104 &prop->default_frame_rate); 106 &prop->default_frame_rate);
105 107
106 fwnode_property_read_u32(link, "mipi-sdw-default-frame-row-size", 108 fwnode_property_read_u32(link, "mipi-sdw-default-frame-row-size",
107 &prop->default_row); 109 &prop->default_row);
108 110
109 fwnode_property_read_u32(link, "mipi-sdw-default-frame-col-size", 111 fwnode_property_read_u32(link, "mipi-sdw-default-frame-col-size",
110 &prop->default_col); 112 &prop->default_col);
111 113
112 prop->dynamic_frame = fwnode_property_read_bool(link, 114 prop->dynamic_frame = fwnode_property_read_bool(link,
113 "mipi-sdw-dynamic-frame-shape"); 115 "mipi-sdw-dynamic-frame-shape");
114 116
115 fwnode_property_read_u32(link, "mipi-sdw-command-error-threshold", 117 fwnode_property_read_u32(link, "mipi-sdw-command-error-threshold",
116 &prop->err_threshold); 118 &prop->err_threshold);
117 119
118 return 0; 120 return 0;
119} 121}
120EXPORT_SYMBOL(sdw_master_read_prop); 122EXPORT_SYMBOL(sdw_master_read_prop);
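
sdw_master_read_prop() reads each variable-length firmware array in two passes: a call with a NULL buffer returns the element count, the caller allocates exactly that many elements, and a second call fills them in. A self-contained sketch of the protocol with a mocked property source; the mock merely stands in for fwnode_property_read_u32_array(), it is not that API:

#include <stdio.h>
#include <stdlib.h>

/* Mock count-or-fill query: returns the element count when buf is NULL,
 * otherwise copies up to n elements. Illustrative only. */
static int read_u32_array(unsigned int *buf, int n)
{
        static const unsigned int freqs[] = { 9600000, 12000000, 24000000 };
        int count = sizeof(freqs) / sizeof(freqs[0]);
        int i;

        if (!buf)
                return count;
        for (i = 0; i < n && i < count; i++)
                buf[i] = freqs[i];
        return i;
}

int main(void)
{
        int nval = read_u32_array(NULL, 0);     /* pass 1: size query */
        unsigned int *freq;

        if (nval <= 0)
                return 1;
        freq = calloc(nval, sizeof(*freq));     /* allocate exactly nval */
        if (!freq)
                return 1;
        read_u32_array(freq, nval);             /* pass 2: fill the buffer */
        printf("%d supported clock frequencies\n", nval);
        free(freq);
        return 0;
}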
121 123
122static int sdw_slave_read_dp0(struct sdw_slave *slave, 124static int sdw_slave_read_dp0(struct sdw_slave *slave,
123 struct fwnode_handle *port, struct sdw_dp0_prop *dp0) 125 struct fwnode_handle *port,
126 struct sdw_dp0_prop *dp0)
124{ 127{
125 int nval; 128 int nval;
126 129
127 fwnode_property_read_u32(port, "mipi-sdw-port-max-wordlength", 130 fwnode_property_read_u32(port, "mipi-sdw-port-max-wordlength",
128 &dp0->max_word); 131 &dp0->max_word);
129 132
130 fwnode_property_read_u32(port, "mipi-sdw-port-min-wordlength", 133 fwnode_property_read_u32(port, "mipi-sdw-port-min-wordlength",
131 &dp0->min_word); 134 &dp0->min_word);
132 135
133 nval = fwnode_property_read_u32_array(port, 136 nval = fwnode_property_read_u32_array(port,
134 "mipi-sdw-port-wordlength-configs", NULL, 0); 137 "mipi-sdw-port-wordlength-configs", NULL, 0);
@@ -136,8 +139,8 @@ static int sdw_slave_read_dp0(struct sdw_slave *slave,
136 139
137 dp0->num_words = nval; 140 dp0->num_words = nval;
138 dp0->words = devm_kcalloc(&slave->dev, 141 dp0->words = devm_kcalloc(&slave->dev,
139 dp0->num_words, sizeof(*dp0->words), 142 dp0->num_words, sizeof(*dp0->words),
140 GFP_KERNEL); 143 GFP_KERNEL);
141 if (!dp0->words) 144 if (!dp0->words)
142 return -ENOMEM; 145 return -ENOMEM;
143 146
@@ -146,20 +149,21 @@ static int sdw_slave_read_dp0(struct sdw_slave *slave,
146 dp0->words, dp0->num_words); 149 dp0->words, dp0->num_words);
147 } 150 }
148 151
149 dp0->flow_controlled = fwnode_property_read_bool( 152 dp0->flow_controlled = fwnode_property_read_bool(port,
150 port, "mipi-sdw-bra-flow-controlled"); 153 "mipi-sdw-bra-flow-controlled");
151 154
152 dp0->simple_ch_prep_sm = fwnode_property_read_bool( 155 dp0->simple_ch_prep_sm = fwnode_property_read_bool(port,
153 port, "mipi-sdw-simplified-channel-prepare-sm"); 156 "mipi-sdw-simplified-channel-prepare-sm");
154 157
155 dp0->device_interrupts = fwnode_property_read_bool( 158 dp0->device_interrupts = fwnode_property_read_bool(port,
156 port, "mipi-sdw-imp-def-dp0-interrupts-supported"); 159 "mipi-sdw-imp-def-dp0-interrupts-supported");
157 160
158 return 0; 161 return 0;
159} 162}
160 163
161static int sdw_slave_read_dpn(struct sdw_slave *slave, 164static int sdw_slave_read_dpn(struct sdw_slave *slave,
162 struct sdw_dpn_prop *dpn, int count, int ports, char *type) 165 struct sdw_dpn_prop *dpn, int count, int ports,
166 char *type)
163{ 167{
164 struct fwnode_handle *node; 168 struct fwnode_handle *node;
165 u32 bit, i = 0; 169 u32 bit, i = 0;
@@ -173,7 +177,7 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
173 177
174 for_each_set_bit(bit, &addr, 32) { 178 for_each_set_bit(bit, &addr, 32) {
175 snprintf(name, sizeof(name), 179 snprintf(name, sizeof(name),
176 "mipi-sdw-dp-%d-%s-subproperties", bit, type); 180 "mipi-sdw-dp-%d-%s-subproperties", bit, type);
177 181
178 dpn[i].num = bit; 182 dpn[i].num = bit;
179 183
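
sdw_slave_read_dpn() walks the 32-bit port mask with for_each_set_bit(), visiting only the ports the firmware actually declared and deriving each node name from the bit position. An open-coded userspace equivalent of that loop:

#include <stdio.h>

int main(void)
{
        unsigned long ports = 0x16;     /* ports 1, 2 and 4 are present */
        unsigned int bit;

        /* equivalent of for_each_set_bit(bit, &ports, 32) */
        for (bit = 0; bit < 32; bit++) {
                if (!(ports & (1UL << bit)))
                        continue;
                printf("mipi-sdw-dp-%u-source-subproperties\n", bit);
        }
        return 0;
}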
@@ -184,18 +188,18 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
184 } 188 }
185 189
186 fwnode_property_read_u32(node, "mipi-sdw-port-max-wordlength", 190 fwnode_property_read_u32(node, "mipi-sdw-port-max-wordlength",
187 &dpn[i].max_word); 191 &dpn[i].max_word);
188 fwnode_property_read_u32(node, "mipi-sdw-port-min-wordlength", 192 fwnode_property_read_u32(node, "mipi-sdw-port-min-wordlength",
189 &dpn[i].min_word); 193 &dpn[i].min_word);
190 194
191 nval = fwnode_property_read_u32_array(node, 195 nval = fwnode_property_read_u32_array(node,
192 "mipi-sdw-port-wordlength-configs", NULL, 0); 196 "mipi-sdw-port-wordlength-configs", NULL, 0);
193 if (nval > 0) { 197 if (nval > 0) {
194
195 dpn[i].num_words = nval; 198 dpn[i].num_words = nval;
196 dpn[i].words = devm_kcalloc(&slave->dev, 199 dpn[i].words = devm_kcalloc(&slave->dev,
197 dpn[i].num_words, 200 dpn[i].num_words,
198 sizeof(*dpn[i].words), GFP_KERNEL); 201 sizeof(*dpn[i].words),
202 GFP_KERNEL);
199 if (!dpn[i].words) 203 if (!dpn[i].words)
200 return -ENOMEM; 204 return -ENOMEM;
201 205
@@ -205,36 +209,36 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
205 } 209 }
206 210
207 fwnode_property_read_u32(node, "mipi-sdw-data-port-type", 211 fwnode_property_read_u32(node, "mipi-sdw-data-port-type",
208 &dpn[i].type); 212 &dpn[i].type);
209 213
210 fwnode_property_read_u32(node, 214 fwnode_property_read_u32(node,
211 "mipi-sdw-max-grouping-supported", 215 "mipi-sdw-max-grouping-supported",
212 &dpn[i].max_grouping); 216 &dpn[i].max_grouping);
213 217
214 dpn[i].simple_ch_prep_sm = fwnode_property_read_bool(node, 218 dpn[i].simple_ch_prep_sm = fwnode_property_read_bool(node,
215 "mipi-sdw-simplified-channelprepare-sm"); 219 "mipi-sdw-simplified-channelprepare-sm");
216 220
217 fwnode_property_read_u32(node, 221 fwnode_property_read_u32(node,
218 "mipi-sdw-port-channelprepare-timeout", 222 "mipi-sdw-port-channelprepare-timeout",
219 &dpn[i].ch_prep_timeout); 223 &dpn[i].ch_prep_timeout);
220 224
221 fwnode_property_read_u32(node, 225 fwnode_property_read_u32(node,
222 "mipi-sdw-imp-def-dpn-interrupts-supported", 226 "mipi-sdw-imp-def-dpn-interrupts-supported",
223 &dpn[i].device_interrupts); 227 &dpn[i].device_interrupts);
224 228
225 fwnode_property_read_u32(node, "mipi-sdw-min-channel-number", 229 fwnode_property_read_u32(node, "mipi-sdw-min-channel-number",
226 &dpn[i].min_ch); 230 &dpn[i].min_ch);
227 231
228 fwnode_property_read_u32(node, "mipi-sdw-max-channel-number", 232 fwnode_property_read_u32(node, "mipi-sdw-max-channel-number",
229 &dpn[i].max_ch); 233 &dpn[i].max_ch);
230 234
231 nval = fwnode_property_read_u32_array(node, 235 nval = fwnode_property_read_u32_array(node,
232 "mipi-sdw-channel-number-list", NULL, 0); 236 "mipi-sdw-channel-number-list", NULL, 0);
233 if (nval > 0) { 237 if (nval > 0) {
234
235 dpn[i].num_ch = nval; 238 dpn[i].num_ch = nval;
236 dpn[i].ch = devm_kcalloc(&slave->dev, dpn[i].num_ch, 239 dpn[i].ch = devm_kcalloc(&slave->dev, dpn[i].num_ch,
237 sizeof(*dpn[i].ch), GFP_KERNEL); 240 sizeof(*dpn[i].ch),
241 GFP_KERNEL);
238 if (!dpn[i].ch) 242 if (!dpn[i].ch)
239 return -ENOMEM; 243 return -ENOMEM;
240 244
@@ -246,7 +250,6 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
246 nval = fwnode_property_read_u32_array(node, 250 nval = fwnode_property_read_u32_array(node,
247 "mipi-sdw-channel-combination-list", NULL, 0); 251 "mipi-sdw-channel-combination-list", NULL, 0);
248 if (nval > 0) { 252 if (nval > 0) {
249
250 dpn[i].num_ch_combinations = nval; 253 dpn[i].num_ch_combinations = nval;
251 dpn[i].ch_combinations = devm_kcalloc(&slave->dev, 254 dpn[i].ch_combinations = devm_kcalloc(&slave->dev,
252 dpn[i].num_ch_combinations, 255 dpn[i].num_ch_combinations,
@@ -265,13 +268,13 @@ static int sdw_slave_read_dpn(struct sdw_slave *slave,
265 "mipi-sdw-modes-supported", &dpn[i].modes); 268 "mipi-sdw-modes-supported", &dpn[i].modes);
266 269
267 fwnode_property_read_u32(node, "mipi-sdw-max-async-buffer", 270 fwnode_property_read_u32(node, "mipi-sdw-max-async-buffer",
268 &dpn[i].max_async_buffer); 271 &dpn[i].max_async_buffer);
269 272
270 dpn[i].block_pack_mode = fwnode_property_read_bool(node, 273 dpn[i].block_pack_mode = fwnode_property_read_bool(node,
271 "mipi-sdw-block-packing-mode"); 274 "mipi-sdw-block-packing-mode");
272 275
273 fwnode_property_read_u32(node, "mipi-sdw-port-encoding-type", 276 fwnode_property_read_u32(node, "mipi-sdw-port-encoding-type",
274 &dpn[i].port_encoding); 277 &dpn[i].port_encoding);
275 278
276 /* TODO: Read audio mode */ 279 /* TODO: Read audio mode */
277 280
@@ -293,7 +296,7 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
293 int num_of_ports, nval, i, dp0 = 0; 296 int num_of_ports, nval, i, dp0 = 0;
294 297
295 device_property_read_u32(dev, "mipi-sdw-sw-interface-revision", 298 device_property_read_u32(dev, "mipi-sdw-sw-interface-revision",
296 &prop->mipi_revision); 299 &prop->mipi_revision);
297 300
298 prop->wake_capable = device_property_read_bool(dev, 301 prop->wake_capable = device_property_read_bool(dev,
299 "mipi-sdw-wake-up-unavailable"); 302 "mipi-sdw-wake-up-unavailable");
@@ -311,10 +314,10 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
311 "mipi-sdw-simplified-clockstopprepare-sm-supported"); 314 "mipi-sdw-simplified-clockstopprepare-sm-supported");
312 315
313 device_property_read_u32(dev, "mipi-sdw-clockstopprepare-timeout", 316 device_property_read_u32(dev, "mipi-sdw-clockstopprepare-timeout",
314 &prop->clk_stop_timeout); 317 &prop->clk_stop_timeout);
315 318
316 device_property_read_u32(dev, "mipi-sdw-slave-channelprepare-timeout", 319 device_property_read_u32(dev, "mipi-sdw-slave-channelprepare-timeout",
317 &prop->ch_prep_timeout); 320 &prop->ch_prep_timeout);
318 321
319 device_property_read_u32(dev, 322 device_property_read_u32(dev,
320 "mipi-sdw-clockstopprepare-hard-reset-behavior", 323 "mipi-sdw-clockstopprepare-hard-reset-behavior",
@@ -333,22 +336,22 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
333 "mipi-sdw-port15-read-behavior", &prop->p15_behave); 336 "mipi-sdw-port15-read-behavior", &prop->p15_behave);
334 337
335 device_property_read_u32(dev, "mipi-sdw-master-count", 338 device_property_read_u32(dev, "mipi-sdw-master-count",
336 &prop->master_count); 339 &prop->master_count);
337 340
338 device_property_read_u32(dev, "mipi-sdw-source-port-list", 341 device_property_read_u32(dev, "mipi-sdw-source-port-list",
339 &prop->source_ports); 342 &prop->source_ports);
340 343
341 device_property_read_u32(dev, "mipi-sdw-sink-port-list", 344 device_property_read_u32(dev, "mipi-sdw-sink-port-list",
342 &prop->sink_ports); 345 &prop->sink_ports);
343 346
344 /* Read dp0 properties */ 347 /* Read dp0 properties */
345 port = device_get_named_child_node(dev, "mipi-sdw-dp-0-subproperties"); 348 port = device_get_named_child_node(dev, "mipi-sdw-dp-0-subproperties");
346 if (!port) { 349 if (!port) {
347 dev_dbg(dev, "DP0 node not found!!\n"); 350 dev_dbg(dev, "DP0 node not found!!\n");
348 } else { 351 } else {
349
350 prop->dp0_prop = devm_kzalloc(&slave->dev, 352 prop->dp0_prop = devm_kzalloc(&slave->dev,
351 sizeof(*prop->dp0_prop), GFP_KERNEL); 353 sizeof(*prop->dp0_prop),
354 GFP_KERNEL);
352 if (!prop->dp0_prop) 355 if (!prop->dp0_prop)
353 return -ENOMEM; 356 return -ENOMEM;
354 357
@@ -364,23 +367,25 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
364 /* Allocate memory for set bits in port lists */ 367 /* Allocate memory for set bits in port lists */
365 nval = hweight32(prop->source_ports); 368 nval = hweight32(prop->source_ports);
366 prop->src_dpn_prop = devm_kcalloc(&slave->dev, nval, 369 prop->src_dpn_prop = devm_kcalloc(&slave->dev, nval,
367 sizeof(*prop->src_dpn_prop), GFP_KERNEL); 370 sizeof(*prop->src_dpn_prop),
371 GFP_KERNEL);
368 if (!prop->src_dpn_prop) 372 if (!prop->src_dpn_prop)
369 return -ENOMEM; 373 return -ENOMEM;
370 374
371 /* Read dpn properties for source port(s) */ 375 /* Read dpn properties for source port(s) */
372 sdw_slave_read_dpn(slave, prop->src_dpn_prop, nval, 376 sdw_slave_read_dpn(slave, prop->src_dpn_prop, nval,
373 prop->source_ports, "source"); 377 prop->source_ports, "source");
374 378
375 nval = hweight32(prop->sink_ports); 379 nval = hweight32(prop->sink_ports);
376 prop->sink_dpn_prop = devm_kcalloc(&slave->dev, nval, 380 prop->sink_dpn_prop = devm_kcalloc(&slave->dev, nval,
377 sizeof(*prop->sink_dpn_prop), GFP_KERNEL); 381 sizeof(*prop->sink_dpn_prop),
382 GFP_KERNEL);
378 if (!prop->sink_dpn_prop) 383 if (!prop->sink_dpn_prop)
379 return -ENOMEM; 384 return -ENOMEM;
380 385
381 /* Read dpn properties for sink port(s) */ 386 /* Read dpn properties for sink port(s) */
382 sdw_slave_read_dpn(slave, prop->sink_dpn_prop, nval, 387 sdw_slave_read_dpn(slave, prop->sink_dpn_prop, nval,
383 prop->sink_ports, "sink"); 388 prop->sink_ports, "sink");
384 389
385 /* some ports are bidirectional so check total ports by ORing */ 390 /* some ports are bidirectional so check total ports by ORing */
386 nval = prop->source_ports | prop->sink_ports; 391 nval = prop->source_ports | prop->sink_ports;
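
Because a port may appear in both the source and the sink list, the total port count is the population count of the OR of the two masks, not the sum of the individual counts. A quick check, using the GCC/Clang builtin as a stand-in for the kernel's hweight32():

#include <stdio.h>

int main(void)
{
        unsigned int source_ports = 0x06;  /* ports 1, 2 */
        unsigned int sink_ports   = 0x0c;  /* ports 2, 3: port 2 is bidirectional */
        unsigned int all = source_ports | sink_ports;

        printf("src=%d sink=%d total=%d\n",
               __builtin_popcount(source_ports),
               __builtin_popcount(sink_ports),
               __builtin_popcount(all));   /* 2 2 3, not 4 */
        return 0;
}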
@@ -388,7 +393,8 @@ int sdw_slave_read_prop(struct sdw_slave *slave)
388 393
389 /* Allocate port_ready based on num_of_ports */ 394 /* Allocate port_ready based on num_of_ports */
390 slave->port_ready = devm_kcalloc(&slave->dev, num_of_ports, 395 slave->port_ready = devm_kcalloc(&slave->dev, num_of_ports,
391 sizeof(*slave->port_ready), GFP_KERNEL); 396 sizeof(*slave->port_ready),
397 GFP_KERNEL);
392 if (!slave->port_ready) 398 if (!slave->port_ready)
393 return -ENOMEM; 399 return -ENOMEM;
394 400
diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c
index ac103bd0c176..f39a5815e25d 100644
--- a/drivers/soundwire/slave.c
+++ b/drivers/soundwire/slave.c
@@ -14,7 +14,7 @@ static void sdw_slave_release(struct device *dev)
14} 14}
15 15
16static int sdw_slave_add(struct sdw_bus *bus, 16static int sdw_slave_add(struct sdw_bus *bus,
17 struct sdw_slave_id *id, struct fwnode_handle *fwnode) 17 struct sdw_slave_id *id, struct fwnode_handle *fwnode)
18{ 18{
19 struct sdw_slave *slave; 19 struct sdw_slave *slave;
20 int ret; 20 int ret;
@@ -30,8 +30,8 @@ static int sdw_slave_add(struct sdw_bus *bus,
30 30
31 /* name shall be sdw:link:mfg:part:class:unique */ 31 /* name shall be sdw:link:mfg:part:class:unique */
32 dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x", 32 dev_set_name(&slave->dev, "sdw:%x:%x:%x:%x:%x",
33 bus->link_id, id->mfg_id, id->part_id, 33 bus->link_id, id->mfg_id, id->part_id,
34 id->class_id, id->unique_id); 34 id->class_id, id->unique_id);
35 35
36 slave->dev.release = sdw_slave_release; 36 slave->dev.release = sdw_slave_release;
37 slave->dev.bus = &sdw_bus_type; 37 slave->dev.bus = &sdw_bus_type;
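
sdw_slave_add() derives the child device name from the enumeration address fields using the fixed sdw:link:mfg:part:class:unique format noted in the comment. The same format string in isolation; the ID values below are made up for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int link_id = 0, mfg_id = 0x25d, part_id = 0x700,
                     class_id = 0, unique_id = 1;
        char name[32];

        snprintf(name, sizeof(name), "sdw:%x:%x:%x:%x:%x",
                 link_id, mfg_id, part_id, class_id, unique_id);
        printf("%s\n", name);   /* sdw:0:25d:700:0:1 */
        return 0;
}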
@@ -84,11 +84,11 @@ int sdw_acpi_find_slaves(struct sdw_bus *bus)
84 acpi_status status; 84 acpi_status status;
85 85
86 status = acpi_evaluate_integer(adev->handle, 86 status = acpi_evaluate_integer(adev->handle,
87 METHOD_NAME__ADR, NULL, &addr); 87 METHOD_NAME__ADR, NULL, &addr);
88 88
89 if (ACPI_FAILURE(status)) { 89 if (ACPI_FAILURE(status)) {
90 dev_err(bus->dev, "_ADR resolution failed: %x\n", 90 dev_err(bus->dev, "_ADR resolution failed: %x\n",
91 status); 91 status);
92 return status; 92 return status;
93 } 93 }
94 94
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index bd879b1a76c8..d01060dbee96 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -52,10 +52,11 @@ static int sdw_find_row_index(int row)
52 pr_warn("Requested row not found, selecting lowest row no: 48\n"); 52 pr_warn("Requested row not found, selecting lowest row no: 48\n");
53 return 0; 53 return 0;
54} 54}
55
55static int _sdw_program_slave_port_params(struct sdw_bus *bus, 56static int _sdw_program_slave_port_params(struct sdw_bus *bus,
56 struct sdw_slave *slave, 57 struct sdw_slave *slave,
57 struct sdw_transport_params *t_params, 58 struct sdw_transport_params *t_params,
58 enum sdw_dpn_type type) 59 enum sdw_dpn_type type)
59{ 60{
60 u32 addr1, addr2, addr3, addr4; 61 u32 addr1, addr2, addr3, addr4;
61 int ret; 62 int ret;
@@ -76,20 +77,20 @@ static int _sdw_program_slave_port_params(struct sdw_bus *bus,
76 /* Program DPN_OffsetCtrl2 registers */ 77 /* Program DPN_OffsetCtrl2 registers */
77 ret = sdw_write(slave, addr1, t_params->offset2); 78 ret = sdw_write(slave, addr1, t_params->offset2);
78 if (ret < 0) { 79 if (ret < 0) {
79 dev_err(bus->dev, "DPN_OffsetCtrl2 register write failed"); 80 dev_err(bus->dev, "DPN_OffsetCtrl2 register write failed\n");
80 return ret; 81 return ret;
81 } 82 }
82 83
83 /* Program DPN_BlockCtrl3 register */ 84 /* Program DPN_BlockCtrl3 register */
84 ret = sdw_write(slave, addr2, t_params->blk_pkg_mode); 85 ret = sdw_write(slave, addr2, t_params->blk_pkg_mode);
85 if (ret < 0) { 86 if (ret < 0) {
86 dev_err(bus->dev, "DPN_BlockCtrl3 register write failed"); 87 dev_err(bus->dev, "DPN_BlockCtrl3 register write failed\n");
87 return ret; 88 return ret;
88 } 89 }
89 90
90 /* 91 /*
91 * Data ports are FULL, SIMPLE and REDUCED. This function handles 92 * Data ports are FULL, SIMPLE and REDUCED. This function handles
92 * FULL and REDUCED only and and beyond this point only FULL is 93 * FULL and REDUCED only and beyond this point only FULL is
93 * handled, so bail out if we are not FULL data port type 94 * handled, so bail out if we are not FULL data port type
94 */ 95 */
95 if (type != SDW_DPN_FULL) 96 if (type != SDW_DPN_FULL)
@@ -102,7 +103,7 @@ static int _sdw_program_slave_port_params(struct sdw_bus *bus,
102 103
103 ret = sdw_write(slave, addr3, wbuf); 104 ret = sdw_write(slave, addr3, wbuf);
104 if (ret < 0) { 105 if (ret < 0) {
105 dev_err(bus->dev, "DPN_SampleCtrl2 register write failed"); 106 dev_err(bus->dev, "DPN_SampleCtrl2 register write failed\n");
106 return ret; 107 return ret;
107 } 108 }
108 109
@@ -113,14 +114,14 @@ static int _sdw_program_slave_port_params(struct sdw_bus *bus,
113 114
114 ret = sdw_write(slave, addr4, wbuf); 115 ret = sdw_write(slave, addr4, wbuf);
115 if (ret < 0) 116 if (ret < 0)
116 dev_err(bus->dev, "DPN_HCtrl register write failed"); 117 dev_err(bus->dev, "DPN_HCtrl register write failed\n");
117 118
118 return ret; 119 return ret;
119} 120}
120 121
121static int sdw_program_slave_port_params(struct sdw_bus *bus, 122static int sdw_program_slave_port_params(struct sdw_bus *bus,
122 struct sdw_slave_runtime *s_rt, 123 struct sdw_slave_runtime *s_rt,
123 struct sdw_port_runtime *p_rt) 124 struct sdw_port_runtime *p_rt)
124{ 125{
125 struct sdw_transport_params *t_params = &p_rt->transport_params; 126 struct sdw_transport_params *t_params = &p_rt->transport_params;
126 struct sdw_port_params *p_params = &p_rt->port_params; 127 struct sdw_port_params *p_params = &p_rt->port_params;
@@ -131,8 +132,8 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
131 u8 wbuf; 132 u8 wbuf;
132 133
133 dpn_prop = sdw_get_slave_dpn_prop(s_rt->slave, 134 dpn_prop = sdw_get_slave_dpn_prop(s_rt->slave,
134 s_rt->direction, 135 s_rt->direction,
135 t_params->port_num); 136 t_params->port_num);
136 if (!dpn_prop) 137 if (!dpn_prop)
137 return -EINVAL; 138 return -EINVAL;
138 139
@@ -159,7 +160,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
159 ret = sdw_update(s_rt->slave, addr1, 0xF, wbuf); 160 ret = sdw_update(s_rt->slave, addr1, 0xF, wbuf);
160 if (ret < 0) { 161 if (ret < 0) {
161 dev_err(&s_rt->slave->dev, 162 dev_err(&s_rt->slave->dev,
162 "DPN_PortCtrl register write failed for port %d", 163 "DPN_PortCtrl register write failed for port %d\n",
163 t_params->port_num); 164 t_params->port_num);
164 return ret; 165 return ret;
165 } 166 }
@@ -168,7 +169,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
168 ret = sdw_write(s_rt->slave, addr2, (p_params->bps - 1)); 169 ret = sdw_write(s_rt->slave, addr2, (p_params->bps - 1));
169 if (ret < 0) { 170 if (ret < 0) {
170 dev_err(&s_rt->slave->dev, 171 dev_err(&s_rt->slave->dev,
171 "DPN_BlockCtrl1 register write failed for port %d", 172 "DPN_BlockCtrl1 register write failed for port %d\n",
172 t_params->port_num); 173 t_params->port_num);
173 return ret; 174 return ret;
174 } 175 }
@@ -178,7 +179,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
178 ret = sdw_write(s_rt->slave, addr3, wbuf); 179 ret = sdw_write(s_rt->slave, addr3, wbuf);
179 if (ret < 0) { 180 if (ret < 0) {
180 dev_err(&s_rt->slave->dev, 181 dev_err(&s_rt->slave->dev,
181 "DPN_SampleCtrl1 register write failed for port %d", 182 "DPN_SampleCtrl1 register write failed for port %d\n",
182 t_params->port_num); 183 t_params->port_num);
183 return ret; 184 return ret;
184 } 185 }
@@ -187,7 +188,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
187 ret = sdw_write(s_rt->slave, addr4, t_params->offset1); 188 ret = sdw_write(s_rt->slave, addr4, t_params->offset1);
188 if (ret < 0) { 189 if (ret < 0) {
189 dev_err(&s_rt->slave->dev, 190 dev_err(&s_rt->slave->dev,
190 "DPN_OffsetCtrl1 register write failed for port %d", 191 "DPN_OffsetCtrl1 register write failed for port %d\n",
191 t_params->port_num); 192 t_params->port_num);
192 return ret; 193 return ret;
193 } 194 }
@@ -197,7 +198,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
197 ret = sdw_write(s_rt->slave, addr5, t_params->blk_grp_ctrl); 198 ret = sdw_write(s_rt->slave, addr5, t_params->blk_grp_ctrl);
198 if (ret < 0) { 199 if (ret < 0) {
199 dev_err(&s_rt->slave->dev, 200 dev_err(&s_rt->slave->dev,
200 "DPN_BlockCtrl2 reg write failed for port %d", 201 "DPN_BlockCtrl2 reg write failed for port %d\n",
201 t_params->port_num); 202 t_params->port_num);
202 return ret; 203 return ret;
203 } 204 }
@@ -208,7 +209,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
208 ret = sdw_write(s_rt->slave, addr6, t_params->lane_ctrl); 209 ret = sdw_write(s_rt->slave, addr6, t_params->lane_ctrl);
209 if (ret < 0) { 210 if (ret < 0) {
210 dev_err(&s_rt->slave->dev, 211 dev_err(&s_rt->slave->dev,
211 "DPN_LaneCtrl register write failed for port %d", 212 "DPN_LaneCtrl register write failed for port %d\n",
212 t_params->port_num); 213 t_params->port_num);
213 return ret; 214 return ret;
214 } 215 }
@@ -216,10 +217,10 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
216 217
217 if (dpn_prop->type != SDW_DPN_SIMPLE) { 218 if (dpn_prop->type != SDW_DPN_SIMPLE) {
218 ret = _sdw_program_slave_port_params(bus, s_rt->slave, 219 ret = _sdw_program_slave_port_params(bus, s_rt->slave,
219 t_params, dpn_prop->type); 220 t_params, dpn_prop->type);
220 if (ret < 0) 221 if (ret < 0)
221 dev_err(&s_rt->slave->dev, 222 dev_err(&s_rt->slave->dev,
222 "Transport reg write failed for port: %d", 223 "Transport reg write failed for port: %d\n",
223 t_params->port_num); 224 t_params->port_num);
224 } 225 }
225 226
@@ -227,13 +228,13 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
227} 228}
228 229
229static int sdw_program_master_port_params(struct sdw_bus *bus, 230static int sdw_program_master_port_params(struct sdw_bus *bus,
230 struct sdw_port_runtime *p_rt) 231 struct sdw_port_runtime *p_rt)
231{ 232{
232 int ret; 233 int ret;
233 234
234 /* 235 /*
235 * we need to set transport and port parameters for the port. 236 * we need to set transport and port parameters for the port.
236 * Transport parameters refers to the smaple interval, offsets and 237 * Transport parameters refers to the sample interval, offsets and
237 * hstart/stop etc of the data. Port parameters refers to word 238 * hstart/stop etc of the data. Port parameters refers to word
238 * length, flow mode etc of the port 239 * length, flow mode etc of the port
239 */ 240 */
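A controller driver supplies the master-side port hooks through sdw_master_port_ops; the dpn_set_port_params signature can be read off the call site in the next hunk. A minimal sketch of such a callback, assuming a hypothetical controller register layout (MY_DPN_CONFIG, the shift macros and my_ctrl_writel are illustrative, not part of this patch):

static int my_dpn_set_port_params(struct sdw_bus *bus,
				  struct sdw_port_params *params,
				  unsigned int bank)
{
	/*
	 * Program word length and flow mode into the inactive bank so
	 * the new values only take effect at the next bank switch.
	 */
	u32 val = ((params->bps - 1) << MY_DPN_WORDLEN_SHIFT) |
		  (params->flow_mode << MY_DPN_FLOW_SHIFT);

	my_ctrl_writel(bus, MY_DPN_CONFIG(params->num, bank), val);
	return 0;
}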
@@ -244,8 +245,8 @@ static int sdw_program_master_port_params(struct sdw_bus *bus,
244 return ret; 245 return ret;
245 246
246 return bus->port_ops->dpn_set_port_params(bus, 247 return bus->port_ops->dpn_set_port_params(bus,
247 &p_rt->port_params, 248 &p_rt->port_params,
248 bus->params.next_bank); 249 bus->params.next_bank);
249} 250}
250 251
251/** 252/**
@@ -292,8 +293,9 @@ static int sdw_program_port_params(struct sdw_master_runtime *m_rt)
292 * actual enable/disable is done with a bank switch 293 * actual enable/disable is done with a bank switch
293 */ 294 */
294static int sdw_enable_disable_slave_ports(struct sdw_bus *bus, 295static int sdw_enable_disable_slave_ports(struct sdw_bus *bus,
295 struct sdw_slave_runtime *s_rt, 296 struct sdw_slave_runtime *s_rt,
296 struct sdw_port_runtime *p_rt, bool en) 297 struct sdw_port_runtime *p_rt,
298 bool en)
297{ 299{
298 struct sdw_transport_params *t_params = &p_rt->transport_params; 300 struct sdw_transport_params *t_params = &p_rt->transport_params;
299 u32 addr; 301 u32 addr;
@@ -315,19 +317,20 @@ static int sdw_enable_disable_slave_ports(struct sdw_bus *bus,
315 317
316 if (ret < 0) 318 if (ret < 0)
317 dev_err(&s_rt->slave->dev, 319 dev_err(&s_rt->slave->dev,
318 "Slave chn_en reg write failed:%d port:%d", 320 "Slave chn_en reg write failed:%d port:%d\n",
319 ret, t_params->port_num); 321 ret, t_params->port_num);
320 322
321 return ret; 323 return ret;
322} 324}
323 325
324static int sdw_enable_disable_master_ports(struct sdw_master_runtime *m_rt, 326static int sdw_enable_disable_master_ports(struct sdw_master_runtime *m_rt,
325 struct sdw_port_runtime *p_rt, bool en) 327 struct sdw_port_runtime *p_rt,
328 bool en)
326{ 329{
327 struct sdw_transport_params *t_params = &p_rt->transport_params; 330 struct sdw_transport_params *t_params = &p_rt->transport_params;
328 struct sdw_bus *bus = m_rt->bus; 331 struct sdw_bus *bus = m_rt->bus;
329 struct sdw_enable_ch enable_ch; 332 struct sdw_enable_ch enable_ch;
330 int ret = 0; 333 int ret;
331 334
332 enable_ch.port_num = p_rt->num; 335 enable_ch.port_num = p_rt->num;
333 enable_ch.ch_mask = p_rt->ch_mask; 336 enable_ch.ch_mask = p_rt->ch_mask;
@@ -336,10 +339,11 @@ static int sdw_enable_disable_master_ports(struct sdw_master_runtime *m_rt,
336 /* Perform Master port channel(s) enable/disable */ 339 /* Perform Master port channel(s) enable/disable */
337 if (bus->port_ops->dpn_port_enable_ch) { 340 if (bus->port_ops->dpn_port_enable_ch) {
338 ret = bus->port_ops->dpn_port_enable_ch(bus, 341 ret = bus->port_ops->dpn_port_enable_ch(bus,
339 &enable_ch, bus->params.next_bank); 342 &enable_ch,
343 bus->params.next_bank);
340 if (ret < 0) { 344 if (ret < 0) {
341 dev_err(bus->dev, 345 dev_err(bus->dev,
342 "Master chn_en write failed:%d port:%d", 346 "Master chn_en write failed:%d port:%d\n",
343 ret, t_params->port_num); 347 ret, t_params->port_num);
344 return ret; 348 return ret;
345 } 349 }
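The dpn_port_enable_ch hook used above is the master-side counterpart of the Slave channel-enable write; its signature follows from the call site (bus, &enable_ch, bank). A hedged controller-side sketch, again with assumed MY_* helpers:

static int my_dpn_port_enable_ch(struct sdw_bus *bus,
				 struct sdw_enable_ch *enable_ch,
				 unsigned int bank)
{
	/* write the channel mask on enable, clear it on disable */
	u32 mask = enable_ch->enable ? enable_ch->ch_mask : 0;

	my_ctrl_writel(bus, MY_DPN_CHEN(enable_ch->port_num, bank), mask);
	return 0;
}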
@@ -370,7 +374,7 @@ static int sdw_enable_disable_ports(struct sdw_master_runtime *m_rt, bool en)
370 list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) { 374 list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
371 list_for_each_entry(s_port, &s_rt->port_list, port_node) { 375 list_for_each_entry(s_port, &s_rt->port_list, port_node) {
372 ret = sdw_enable_disable_slave_ports(m_rt->bus, s_rt, 376 ret = sdw_enable_disable_slave_ports(m_rt->bus, s_rt,
373 s_port, en); 377 s_port, en);
374 if (ret < 0) 378 if (ret < 0)
375 return ret; 379 return ret;
376 } 380 }
@@ -387,7 +391,8 @@ static int sdw_enable_disable_ports(struct sdw_master_runtime *m_rt, bool en)
387} 391}
388 392
389static int sdw_do_port_prep(struct sdw_slave_runtime *s_rt, 393static int sdw_do_port_prep(struct sdw_slave_runtime *s_rt,
390 struct sdw_prepare_ch prep_ch, enum sdw_port_prep_ops cmd) 394 struct sdw_prepare_ch prep_ch,
395 enum sdw_port_prep_ops cmd)
391{ 396{
392 const struct sdw_slave_ops *ops = s_rt->slave->ops; 397 const struct sdw_slave_ops *ops = s_rt->slave->ops;
393 int ret; 398 int ret;
@@ -396,7 +401,8 @@ static int sdw_do_port_prep(struct sdw_slave_runtime *s_rt,
396 ret = ops->port_prep(s_rt->slave, &prep_ch, cmd); 401 ret = ops->port_prep(s_rt->slave, &prep_ch, cmd);
397 if (ret < 0) { 402 if (ret < 0) {
398 dev_err(&s_rt->slave->dev, 403 dev_err(&s_rt->slave->dev,
399 "Slave Port Prep cmd %d failed: %d", cmd, ret); 404 "Slave Port Prep cmd %d failed: %d\n",
405 cmd, ret);
400 return ret; 406 return ret;
401 } 407 }
402 } 408 }
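sdw_do_port_prep() gives codec drivers a callback before, at and after channel prepare via the optional sdw_slave_ops.port_prep hook. A minimal Slave-side sketch; the three stage labels are assumed to be the enum sdw_port_prep_ops values from the SoundWire header, and my_codec_unmute() is hypothetical:

static int my_codec_port_prep(struct sdw_slave *slave,
			      struct sdw_prepare_ch *prep_ch,
			      enum sdw_port_prep_ops cmd)
{
	switch (cmd) {
	case SDW_OPS_PORT_PRE_PREP:
	case SDW_OPS_PORT_PREP:
		return 0;	/* nothing to do at these stages */
	case SDW_OPS_PORT_POST_PREP:
		/* unmute only once the port is fully prepared */
		return my_codec_unmute(slave, prep_ch->num, prep_ch->ch_mask);
	}
	return -EINVAL;
}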
@@ -405,8 +411,9 @@ static int sdw_do_port_prep(struct sdw_slave_runtime *s_rt,
405} 411}
406 412
407static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus, 413static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
408 struct sdw_slave_runtime *s_rt, 414 struct sdw_slave_runtime *s_rt,
409 struct sdw_port_runtime *p_rt, bool prep) 415 struct sdw_port_runtime *p_rt,
416 bool prep)
410{ 417{
411 struct completion *port_ready = NULL; 418 struct completion *port_ready = NULL;
412 struct sdw_dpn_prop *dpn_prop; 419 struct sdw_dpn_prop *dpn_prop;
@@ -420,11 +427,11 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
420 prep_ch.ch_mask = p_rt->ch_mask; 427 prep_ch.ch_mask = p_rt->ch_mask;
421 428
422 dpn_prop = sdw_get_slave_dpn_prop(s_rt->slave, 429 dpn_prop = sdw_get_slave_dpn_prop(s_rt->slave,
423 s_rt->direction, 430 s_rt->direction,
424 prep_ch.num); 431 prep_ch.num);
425 if (!dpn_prop) { 432 if (!dpn_prop) {
426 dev_err(bus->dev, 433 dev_err(bus->dev,
427 "Slave Port:%d properties not found", prep_ch.num); 434 "Slave Port:%d properties not found\n", prep_ch.num);
428 return -EINVAL; 435 return -EINVAL;
429 } 436 }
430 437
@@ -442,7 +449,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
442 */ 449 */
443 if (prep && intr) { 450 if (prep && intr) {
444 ret = sdw_configure_dpn_intr(s_rt->slave, p_rt->num, prep, 451 ret = sdw_configure_dpn_intr(s_rt->slave, p_rt->num, prep,
445 dpn_prop->device_interrupts); 452 dpn_prop->device_interrupts);
446 if (ret < 0) 453 if (ret < 0)
447 return ret; 454 return ret;
448 } 455 }
@@ -456,13 +463,13 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
456 463
457 if (prep) 464 if (prep)
458 ret = sdw_update(s_rt->slave, addr, 465 ret = sdw_update(s_rt->slave, addr,
459 0xFF, p_rt->ch_mask); 466 0xFF, p_rt->ch_mask);
460 else 467 else
461 ret = sdw_update(s_rt->slave, addr, 0xFF, 0x0); 468 ret = sdw_update(s_rt->slave, addr, 0xFF, 0x0);
462 469
463 if (ret < 0) { 470 if (ret < 0) {
464 dev_err(&s_rt->slave->dev, 471 dev_err(&s_rt->slave->dev,
465 "Slave prep_ctrl reg write failed"); 472 "Slave prep_ctrl reg write failed\n");
466 return ret; 473 return ret;
467 } 474 }
468 475
@@ -475,7 +482,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
475 val &= p_rt->ch_mask; 482 val &= p_rt->ch_mask;
476 if (!time_left || val) { 483 if (!time_left || val) {
477 dev_err(&s_rt->slave->dev, 484 dev_err(&s_rt->slave->dev,
478 "Chn prep failed for port:%d", prep_ch.num); 485 "Chn prep failed for port:%d\n", prep_ch.num);
479 return -ETIMEDOUT; 486 return -ETIMEDOUT;
480 } 487 }
481 } 488 }
@@ -486,13 +493,14 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
486 /* Disable interrupt after Port de-prepare */ 493 /* Disable interrupt after Port de-prepare */
487 if (!prep && intr) 494 if (!prep && intr)
488 ret = sdw_configure_dpn_intr(s_rt->slave, p_rt->num, prep, 495 ret = sdw_configure_dpn_intr(s_rt->slave, p_rt->num, prep,
489 dpn_prop->device_interrupts); 496 dpn_prop->device_interrupts);
490 497
491 return ret; 498 return ret;
492} 499}
493 500
494static int sdw_prep_deprep_master_ports(struct sdw_master_runtime *m_rt, 501static int sdw_prep_deprep_master_ports(struct sdw_master_runtime *m_rt,
495 struct sdw_port_runtime *p_rt, bool prep) 502 struct sdw_port_runtime *p_rt,
503 bool prep)
496{ 504{
497 struct sdw_transport_params *t_params = &p_rt->transport_params; 505 struct sdw_transport_params *t_params = &p_rt->transport_params;
498 struct sdw_bus *bus = m_rt->bus; 506 struct sdw_bus *bus = m_rt->bus;
@@ -509,8 +517,8 @@ static int sdw_prep_deprep_master_ports(struct sdw_master_runtime *m_rt,
509 if (ops->dpn_port_prep) { 517 if (ops->dpn_port_prep) {
510 ret = ops->dpn_port_prep(bus, &prep_ch); 518 ret = ops->dpn_port_prep(bus, &prep_ch);
511 if (ret < 0) { 519 if (ret < 0) {
512 dev_err(bus->dev, "Port prepare failed for port:%d", 520 dev_err(bus->dev, "Port prepare failed for port:%d\n",
513 t_params->port_num); 521 t_params->port_num);
514 return ret; 522 return ret;
515 } 523 }
516 } 524 }
@@ -535,7 +543,7 @@ static int sdw_prep_deprep_ports(struct sdw_master_runtime *m_rt, bool prep)
535 list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) { 543 list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
536 list_for_each_entry(p_rt, &s_rt->port_list, port_node) { 544 list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
537 ret = sdw_prep_deprep_slave_ports(m_rt->bus, s_rt, 545 ret = sdw_prep_deprep_slave_ports(m_rt->bus, s_rt,
538 p_rt, prep); 546 p_rt, prep);
539 if (ret < 0) 547 if (ret < 0)
540 return ret; 548 return ret;
541 } 549 }
@@ -578,8 +586,8 @@ static int sdw_notify_config(struct sdw_master_runtime *m_rt)
578 if (slave->ops->bus_config) { 586 if (slave->ops->bus_config) {
579 ret = slave->ops->bus_config(slave, &bus->params); 587 ret = slave->ops->bus_config(slave, &bus->params);
580 if (ret < 0) 588 if (ret < 0)
581 dev_err(bus->dev, "Notify Slave: %d failed", 589 dev_err(bus->dev, "Notify Slave: %d failed\n",
582 slave->dev_num); 590 slave->dev_num);
583 return ret; 591 return ret;
584 } 592 }
585 } 593 }
@@ -602,13 +610,14 @@ static int sdw_program_params(struct sdw_bus *bus)
602 ret = sdw_program_port_params(m_rt); 610 ret = sdw_program_port_params(m_rt);
603 if (ret < 0) { 611 if (ret < 0) {
604 dev_err(bus->dev, 612 dev_err(bus->dev,
605 "Program transport params failed: %d", ret); 613 "Program transport params failed: %d\n", ret);
606 return ret; 614 return ret;
607 } 615 }
608 616
609 ret = sdw_notify_config(m_rt); 617 ret = sdw_notify_config(m_rt);
610 if (ret < 0) { 618 if (ret < 0) {
611 dev_err(bus->dev, "Notify bus config failed: %d", ret); 619 dev_err(bus->dev,
620 "Notify bus config failed: %d\n", ret);
612 return ret; 621 return ret;
613 } 622 }
614 623
@@ -618,7 +627,7 @@ static int sdw_program_params(struct sdw_bus *bus)
618 627
619 ret = sdw_enable_disable_ports(m_rt, true); 628 ret = sdw_enable_disable_ports(m_rt, true);
620 if (ret < 0) { 629 if (ret < 0) {
621 dev_err(bus->dev, "Enable channel failed: %d", ret); 630 dev_err(bus->dev, "Enable channel failed: %d\n", ret);
622 return ret; 631 return ret;
623 } 632 }
624 } 633 }
@@ -658,7 +667,7 @@ static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count)
658 addr = SDW_SCP_FRAMECTRL_B0; 667 addr = SDW_SCP_FRAMECTRL_B0;
659 668
660 sdw_fill_msg(wr_msg, NULL, addr, 1, SDW_BROADCAST_DEV_NUM, 669 sdw_fill_msg(wr_msg, NULL, addr, 1, SDW_BROADCAST_DEV_NUM,
661 SDW_MSG_FLAG_WRITE, wbuf); 670 SDW_MSG_FLAG_WRITE, wbuf);
662 wr_msg->ssp_sync = true; 671 wr_msg->ssp_sync = true;
663 672
664 /* 673 /*
@@ -673,7 +682,7 @@ static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count)
673 ret = sdw_transfer(bus, wr_msg); 682 ret = sdw_transfer(bus, wr_msg);
674 683
675 if (ret < 0) { 684 if (ret < 0) {
676 dev_err(bus->dev, "Slave frame_ctrl reg write failed"); 685 dev_err(bus->dev, "Slave frame_ctrl reg write failed\n");
677 goto error; 686 goto error;
678 } 687 }
679 688
@@ -713,7 +722,7 @@ static int sdw_ml_sync_bank_switch(struct sdw_bus *bus)
713 bus->bank_switch_timeout); 722 bus->bank_switch_timeout);
714 723
715 if (!time_left) { 724 if (!time_left) {
716 dev_err(bus->dev, "Controller Timed out on bank switch"); 725 dev_err(bus->dev, "Controller Timed out on bank switch\n");
717 return -ETIMEDOUT; 726 return -ETIMEDOUT;
718 } 727 }
719 728
@@ -750,7 +759,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
750 ret = ops->pre_bank_switch(bus); 759 ret = ops->pre_bank_switch(bus);
751 if (ret < 0) { 760 if (ret < 0) {
752 dev_err(bus->dev, 761 dev_err(bus->dev,
753 "Pre bank switch op failed: %d", ret); 762 "Pre bank switch op failed: %d\n", ret);
754 goto msg_unlock; 763 goto msg_unlock;
755 } 764 }
756 } 765 }
@@ -763,9 +772,8 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
763 */ 772 */
764 ret = sdw_bank_switch(bus, stream->m_rt_count); 773 ret = sdw_bank_switch(bus, stream->m_rt_count);
765 if (ret < 0) { 774 if (ret < 0) {
766 dev_err(bus->dev, "Bank switch failed: %d", ret); 775 dev_err(bus->dev, "Bank switch failed: %d\n", ret);
767 goto error; 776 goto error;
768
769 } 777 }
770 } 778 }
771 779
@@ -784,12 +792,13 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
784 ret = ops->post_bank_switch(bus); 792 ret = ops->post_bank_switch(bus);
785 if (ret < 0) { 793 if (ret < 0) {
786 dev_err(bus->dev, 794 dev_err(bus->dev,
787 "Post bank switch op failed: %d", ret); 795 "Post bank switch op failed: %d\n",
796 ret);
788 goto error; 797 goto error;
789 } 798 }
790 } else if (bus->multi_link && stream->m_rt_count > 1) { 799 } else if (bus->multi_link && stream->m_rt_count > 1) {
791 dev_err(bus->dev, 800 dev_err(bus->dev,
792 "Post bank switch ops not implemented"); 801 "Post bank switch ops not implemented\n");
793 goto error; 802 goto error;
794 } 803 }
795 804
@@ -801,7 +810,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
801 ret = sdw_ml_sync_bank_switch(bus); 810 ret = sdw_ml_sync_bank_switch(bus);
802 if (ret < 0) { 811 if (ret < 0) {
803 dev_err(bus->dev, 812 dev_err(bus->dev,
804 "multi link bank switch failed: %d", ret); 813 "multi link bank switch failed: %d\n", ret);
805 goto error; 814 goto error;
806 } 815 }
807 816
@@ -812,7 +821,6 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
812 821
813error: 822error:
814 list_for_each_entry(m_rt, &stream->master_list, stream_node) { 823 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
815
816 bus = m_rt->bus; 824 bus = m_rt->bus;
817 825
818 kfree(bus->defer_msg.msg->buf); 826 kfree(bus->defer_msg.msg->buf);
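On a multi-link stream, do_bank_switch() brackets the deferred SCP_FrameCtrl broadcasts with the controller's pre_bank_switch/post_bank_switch ops so that every link flips banks in the same frame. What those hooks do is hardware specific; a sketch for a controller with a sync gate, where the MY_SYNC_* register and helpers are assumptions:

static int my_pre_bank_switch(struct sdw_bus *bus)
{
	/* arm the gate so queued frame-control writes are held back */
	return my_ctrl_setbits(bus, MY_SYNC_CTRL, MY_SYNC_ARM);
}

static int my_post_bank_switch(struct sdw_bus *bus)
{
	/* release the gate; all links switch banks together */
	return my_ctrl_clearbits(bus, MY_SYNC_CTRL, MY_SYNC_ARM);
}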
@@ -873,7 +881,7 @@ EXPORT_SYMBOL(sdw_alloc_stream);
873 881
874static struct sdw_master_runtime 882static struct sdw_master_runtime
875*sdw_find_master_rt(struct sdw_bus *bus, 883*sdw_find_master_rt(struct sdw_bus *bus,
876 struct sdw_stream_runtime *stream) 884 struct sdw_stream_runtime *stream)
877{ 885{
878 struct sdw_master_runtime *m_rt = NULL; 886 struct sdw_master_runtime *m_rt = NULL;
879 887
@@ -897,8 +905,8 @@ static struct sdw_master_runtime
897 */ 905 */
898static struct sdw_master_runtime 906static struct sdw_master_runtime
899*sdw_alloc_master_rt(struct sdw_bus *bus, 907*sdw_alloc_master_rt(struct sdw_bus *bus,
900 struct sdw_stream_config *stream_config, 908 struct sdw_stream_config *stream_config,
901 struct sdw_stream_runtime *stream) 909 struct sdw_stream_runtime *stream)
902{ 910{
903 struct sdw_master_runtime *m_rt; 911 struct sdw_master_runtime *m_rt;
904 912
@@ -941,8 +949,8 @@ stream_config:
941 */ 949 */
942static struct sdw_slave_runtime 950static struct sdw_slave_runtime
943*sdw_alloc_slave_rt(struct sdw_slave *slave, 951*sdw_alloc_slave_rt(struct sdw_slave *slave,
944 struct sdw_stream_config *stream_config, 952 struct sdw_stream_config *stream_config,
945 struct sdw_stream_runtime *stream) 953 struct sdw_stream_runtime *stream)
946{ 954{
947 struct sdw_slave_runtime *s_rt = NULL; 955 struct sdw_slave_runtime *s_rt = NULL;
948 956
@@ -959,20 +967,19 @@ static struct sdw_slave_runtime
959} 967}
960 968
961static void sdw_master_port_release(struct sdw_bus *bus, 969static void sdw_master_port_release(struct sdw_bus *bus,
962 struct sdw_master_runtime *m_rt) 970 struct sdw_master_runtime *m_rt)
963{ 971{
964 struct sdw_port_runtime *p_rt, *_p_rt; 972 struct sdw_port_runtime *p_rt, *_p_rt;
965 973
966 list_for_each_entry_safe(p_rt, _p_rt, 974 list_for_each_entry_safe(p_rt, _p_rt, &m_rt->port_list, port_node) {
967 &m_rt->port_list, port_node) {
968 list_del(&p_rt->port_node); 975 list_del(&p_rt->port_node);
969 kfree(p_rt); 976 kfree(p_rt);
970 } 977 }
971} 978}
972 979
973static void sdw_slave_port_release(struct sdw_bus *bus, 980static void sdw_slave_port_release(struct sdw_bus *bus,
974 struct sdw_slave *slave, 981 struct sdw_slave *slave,
975 struct sdw_stream_runtime *stream) 982 struct sdw_stream_runtime *stream)
976{ 983{
977 struct sdw_port_runtime *p_rt, *_p_rt; 984 struct sdw_port_runtime *p_rt, *_p_rt;
978 struct sdw_master_runtime *m_rt; 985 struct sdw_master_runtime *m_rt;
@@ -980,13 +987,11 @@ static void sdw_slave_port_release(struct sdw_bus *bus,
980 987
981 list_for_each_entry(m_rt, &stream->master_list, stream_node) { 988 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
982 list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) { 989 list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
983
984 if (s_rt->slave != slave) 990 if (s_rt->slave != slave)
985 continue; 991 continue;
986 992
987 list_for_each_entry_safe(p_rt, _p_rt, 993 list_for_each_entry_safe(p_rt, _p_rt,
988 &s_rt->port_list, port_node) { 994 &s_rt->port_list, port_node) {
989
990 list_del(&p_rt->port_node); 995 list_del(&p_rt->port_node);
991 kfree(p_rt); 996 kfree(p_rt);
992 } 997 }
@@ -1003,7 +1008,7 @@ static void sdw_slave_port_release(struct sdw_bus *bus,
1003 * This function is to be called with bus_lock held. 1008 * This function is to be called with bus_lock held.
1004 */ 1009 */
1005static void sdw_release_slave_stream(struct sdw_slave *slave, 1010static void sdw_release_slave_stream(struct sdw_slave *slave,
1006 struct sdw_stream_runtime *stream) 1011 struct sdw_stream_runtime *stream)
1007{ 1012{
1008 struct sdw_slave_runtime *s_rt, *_s_rt; 1013 struct sdw_slave_runtime *s_rt, *_s_rt;
1009 struct sdw_master_runtime *m_rt; 1014 struct sdw_master_runtime *m_rt;
@@ -1011,8 +1016,7 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
1011 list_for_each_entry(m_rt, &stream->master_list, stream_node) { 1016 list_for_each_entry(m_rt, &stream->master_list, stream_node) {
1012 /* Retrieve Slave runtime handle */ 1017 /* Retrieve Slave runtime handle */
1013 list_for_each_entry_safe(s_rt, _s_rt, 1018 list_for_each_entry_safe(s_rt, _s_rt,
1014 &m_rt->slave_rt_list, m_rt_node) { 1019 &m_rt->slave_rt_list, m_rt_node) {
1015
1016 if (s_rt->slave == slave) { 1020 if (s_rt->slave == slave) {
1017 list_del(&s_rt->m_rt_node); 1021 list_del(&s_rt->m_rt_node);
1018 kfree(s_rt); 1022 kfree(s_rt);
@@ -1034,7 +1038,7 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
1034 * no effect as Slave(s) runtime handle would already be freed up. 1038 * no effect as Slave(s) runtime handle would already be freed up.
1035 */ 1039 */
1036static void sdw_release_master_stream(struct sdw_master_runtime *m_rt, 1040static void sdw_release_master_stream(struct sdw_master_runtime *m_rt,
1037 struct sdw_stream_runtime *stream) 1041 struct sdw_stream_runtime *stream)
1038{ 1042{
1039 struct sdw_slave_runtime *s_rt, *_s_rt; 1043 struct sdw_slave_runtime *s_rt, *_s_rt;
1040 1044
@@ -1057,15 +1061,14 @@ static void sdw_release_master_stream(struct sdw_master_runtime *m_rt,
1057 * This removes and frees port_rt and master_rt from a stream 1061 * This removes and frees port_rt and master_rt from a stream
1058 */ 1062 */
1059int sdw_stream_remove_master(struct sdw_bus *bus, 1063int sdw_stream_remove_master(struct sdw_bus *bus,
1060 struct sdw_stream_runtime *stream) 1064 struct sdw_stream_runtime *stream)
1061{ 1065{
1062 struct sdw_master_runtime *m_rt, *_m_rt; 1066 struct sdw_master_runtime *m_rt, *_m_rt;
1063 1067
1064 mutex_lock(&bus->bus_lock); 1068 mutex_lock(&bus->bus_lock);
1065 1069
1066 list_for_each_entry_safe(m_rt, _m_rt, 1070 list_for_each_entry_safe(m_rt, _m_rt,
1067 &stream->master_list, stream_node) { 1071 &stream->master_list, stream_node) {
1068
1069 if (m_rt->bus != bus) 1072 if (m_rt->bus != bus)
1070 continue; 1073 continue;
1071 1074
@@ -1092,7 +1095,7 @@ EXPORT_SYMBOL(sdw_stream_remove_master);
1092 * This removes and frees port_rt and slave_rt from a stream 1095 * This removes and frees port_rt and slave_rt from a stream
1093 */ 1096 */
1094int sdw_stream_remove_slave(struct sdw_slave *slave, 1097int sdw_stream_remove_slave(struct sdw_slave *slave,
1095 struct sdw_stream_runtime *stream) 1098 struct sdw_stream_runtime *stream)
1096{ 1099{
1097 mutex_lock(&slave->bus->bus_lock); 1100 mutex_lock(&slave->bus->bus_lock);
1098 1101
@@ -1116,8 +1119,9 @@ EXPORT_SYMBOL(sdw_stream_remove_slave);
1116 * This function is to be called with bus_lock held. 1119 * This function is to be called with bus_lock held.
1117 */ 1120 */
1118static int sdw_config_stream(struct device *dev, 1121static int sdw_config_stream(struct device *dev,
1119 struct sdw_stream_runtime *stream, 1122 struct sdw_stream_runtime *stream,
1120 struct sdw_stream_config *stream_config, bool is_slave) 1123 struct sdw_stream_config *stream_config,
1124 bool is_slave)
1121{ 1125{
1122 /* 1126 /*
1123 * Update the stream rate, channel and bps based on data 1127 * Update the stream rate, channel and bps based on data
@@ -1128,14 +1132,14 @@ static int sdw_config_stream(struct device *dev,
1128 * comparison and allow the value to be set and stored in stream 1132 * comparison and allow the value to be set and stored in stream
1129 */ 1133 */
1130 if (stream->params.rate && 1134 if (stream->params.rate &&
1131 stream->params.rate != stream_config->frame_rate) { 1135 stream->params.rate != stream_config->frame_rate) {
1132 dev_err(dev, "rate not matching, stream:%s", stream->name); 1136 dev_err(dev, "rate not matching, stream:%s\n", stream->name);
1133 return -EINVAL; 1137 return -EINVAL;
1134 } 1138 }
1135 1139
1136 if (stream->params.bps && 1140 if (stream->params.bps &&
1137 stream->params.bps != stream_config->bps) { 1141 stream->params.bps != stream_config->bps) {
1138 dev_err(dev, "bps not matching, stream:%s", stream->name); 1142 dev_err(dev, "bps not matching, stream:%s\n", stream->name);
1139 return -EINVAL; 1143 return -EINVAL;
1140 } 1144 }
1141 1145
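sdw_config_stream() only checks that every add_master/add_slave call on a stream agrees on frame rate and word length. For reference, a caller-side configuration for 48 kHz, 16-bit stereo PCM might look like the following; field names are taken from struct sdw_stream_config in the SoundWire header, with the standard direction/type enum labels:

struct sdw_stream_config sconfig = {
	.frame_rate = 48000,
	.ch_count = 2,
	.bps = 16,
	.direction = SDW_DATA_DIR_TX,
	.type = SDW_STREAM_PCM,
};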
@@ -1151,20 +1155,21 @@ static int sdw_config_stream(struct device *dev,
1151} 1155}
1152 1156
1153static int sdw_is_valid_port_range(struct device *dev, 1157static int sdw_is_valid_port_range(struct device *dev,
1154 struct sdw_port_runtime *p_rt) 1158 struct sdw_port_runtime *p_rt)
1155{ 1159{
1156 if (!SDW_VALID_PORT_RANGE(p_rt->num)) { 1160 if (!SDW_VALID_PORT_RANGE(p_rt->num)) {
1157 dev_err(dev, 1161 dev_err(dev,
1158 "SoundWire: Invalid port number :%d", p_rt->num); 1162 "SoundWire: Invalid port number :%d\n", p_rt->num);
1159 return -EINVAL; 1163 return -EINVAL;
1160 } 1164 }
1161 1165
1162 return 0; 1166 return 0;
1163} 1167}
1164 1168
1165static struct sdw_port_runtime *sdw_port_alloc(struct device *dev, 1169static struct sdw_port_runtime
1166 struct sdw_port_config *port_config, 1170*sdw_port_alloc(struct device *dev,
1167 int port_index) 1171 struct sdw_port_config *port_config,
1172 int port_index)
1168{ 1173{
1169 struct sdw_port_runtime *p_rt; 1174 struct sdw_port_runtime *p_rt;
1170 1175
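sdw_port_alloc() records the caller's port number and channel mask in a runtime entry; callers pass an array with one sdw_port_config per data port, for example:

struct sdw_port_config pconfig = {
	.num = 1,	/* DPn number on the Master or Slave */
	.ch_mask = 0x3,	/* two channels on that port */
};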
@@ -1179,9 +1184,9 @@ static struct sdw_port_runtime *sdw_port_alloc(struct device *dev,
1179} 1184}
1180 1185
1181static int sdw_master_port_config(struct sdw_bus *bus, 1186static int sdw_master_port_config(struct sdw_bus *bus,
1182 struct sdw_master_runtime *m_rt, 1187 struct sdw_master_runtime *m_rt,
1183 struct sdw_port_config *port_config, 1188 struct sdw_port_config *port_config,
1184 unsigned int num_ports) 1189 unsigned int num_ports)
1185{ 1190{
1186 struct sdw_port_runtime *p_rt; 1191 struct sdw_port_runtime *p_rt;
1187 int i; 1192 int i;
@@ -1204,9 +1209,9 @@ static int sdw_master_port_config(struct sdw_bus *bus,
1204} 1209}
1205 1210
1206static int sdw_slave_port_config(struct sdw_slave *slave, 1211static int sdw_slave_port_config(struct sdw_slave *slave,
1207 struct sdw_slave_runtime *s_rt, 1212 struct sdw_slave_runtime *s_rt,
1208 struct sdw_port_config *port_config, 1213 struct sdw_port_config *port_config,
1209 unsigned int num_config) 1214 unsigned int num_config)
1210{ 1215{
1211 struct sdw_port_runtime *p_rt; 1216 struct sdw_port_runtime *p_rt;
1212 int i, ret; 1217 int i, ret;
@@ -1248,10 +1253,10 @@ static int sdw_slave_port_config(struct sdw_slave *slave,
1248 * @stream: SoundWire stream 1253 * @stream: SoundWire stream
1249 */ 1254 */
1250int sdw_stream_add_master(struct sdw_bus *bus, 1255int sdw_stream_add_master(struct sdw_bus *bus,
1251 struct sdw_stream_config *stream_config, 1256 struct sdw_stream_config *stream_config,
1252 struct sdw_port_config *port_config, 1257 struct sdw_port_config *port_config,
1253 unsigned int num_ports, 1258 unsigned int num_ports,
1254 struct sdw_stream_runtime *stream) 1259 struct sdw_stream_runtime *stream)
1255{ 1260{
1256 struct sdw_master_runtime *m_rt = NULL; 1261 struct sdw_master_runtime *m_rt = NULL;
1257 int ret; 1262 int ret;
@@ -1265,7 +1270,7 @@ int sdw_stream_add_master(struct sdw_bus *bus,
1265 */ 1270 */
1266 if (!bus->multi_link && stream->m_rt_count > 0) { 1271 if (!bus->multi_link && stream->m_rt_count > 0) {
1267 dev_err(bus->dev, 1272 dev_err(bus->dev,
1268 "Multilink not supported, link %d", bus->link_id); 1273 "Multilink not supported, link %d\n", bus->link_id);
1269 ret = -EINVAL; 1274 ret = -EINVAL;
1270 goto unlock; 1275 goto unlock;
1271 } 1276 }
@@ -1273,8 +1278,8 @@ int sdw_stream_add_master(struct sdw_bus *bus,
1273 m_rt = sdw_alloc_master_rt(bus, stream_config, stream); 1278 m_rt = sdw_alloc_master_rt(bus, stream_config, stream);
1274 if (!m_rt) { 1279 if (!m_rt) {
1275 dev_err(bus->dev, 1280 dev_err(bus->dev,
1276 "Master runtime config failed for stream:%s", 1281 "Master runtime config failed for stream:%s\n",
1277 stream->name); 1282 stream->name);
1278 ret = -ENOMEM; 1283 ret = -ENOMEM;
1279 goto unlock; 1284 goto unlock;
1280 } 1285 }
@@ -1313,10 +1318,10 @@ EXPORT_SYMBOL(sdw_stream_add_master);
1313 * 1318 *
1314 */ 1319 */
1315int sdw_stream_add_slave(struct sdw_slave *slave, 1320int sdw_stream_add_slave(struct sdw_slave *slave,
1316 struct sdw_stream_config *stream_config, 1321 struct sdw_stream_config *stream_config,
1317 struct sdw_port_config *port_config, 1322 struct sdw_port_config *port_config,
1318 unsigned int num_ports, 1323 unsigned int num_ports,
1319 struct sdw_stream_runtime *stream) 1324 struct sdw_stream_runtime *stream)
1320{ 1325{
1321 struct sdw_slave_runtime *s_rt; 1326 struct sdw_slave_runtime *s_rt;
1322 struct sdw_master_runtime *m_rt; 1327 struct sdw_master_runtime *m_rt;
@@ -1331,8 +1336,8 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
1331 m_rt = sdw_alloc_master_rt(slave->bus, stream_config, stream); 1336 m_rt = sdw_alloc_master_rt(slave->bus, stream_config, stream);
1332 if (!m_rt) { 1337 if (!m_rt) {
1333 dev_err(&slave->dev, 1338 dev_err(&slave->dev,
1334 "alloc master runtime failed for stream:%s", 1339 "alloc master runtime failed for stream:%s\n",
1335 stream->name); 1340 stream->name);
1336 ret = -ENOMEM; 1341 ret = -ENOMEM;
1337 goto error; 1342 goto error;
1338 } 1343 }
@@ -1340,8 +1345,8 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
1340 s_rt = sdw_alloc_slave_rt(slave, stream_config, stream); 1345 s_rt = sdw_alloc_slave_rt(slave, stream_config, stream);
1341 if (!s_rt) { 1346 if (!s_rt) {
1342 dev_err(&slave->dev, 1347 dev_err(&slave->dev,
1343 "Slave runtime config failed for stream:%s", 1348 "Slave runtime config failed for stream:%s\n",
1344 stream->name); 1349 stream->name);
1345 ret = -ENOMEM; 1350 ret = -ENOMEM;
1346 goto stream_error; 1351 goto stream_error;
1347 } 1352 }
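Taken together, these exported entry points give the usual machine-driver flow: allocate a stream, attach the Master and Slave runtimes, then prepare and enable (each of the latter programs parameters and performs a bank switch). A condensed usage sketch reusing the sconfig/pconfig shapes shown earlier; only the sdw_* calls are real APIs from this file, the wrapper is illustrative:

#include <linux/soundwire/sdw.h>

static int my_start_stream(struct sdw_bus *bus, struct sdw_slave *slave,
			   struct sdw_stream_config *sconfig,
			   struct sdw_port_config *pconfig,
			   unsigned int num_ports)
{
	struct sdw_stream_runtime *stream;
	int ret;

	stream = sdw_alloc_stream("my-playback");
	if (!stream)
		return -ENOMEM;

	ret = sdw_stream_add_master(bus, sconfig, pconfig, num_ports, stream);
	if (ret < 0)
		goto release;

	ret = sdw_stream_add_slave(slave, sconfig, pconfig, num_ports, stream);
	if (ret < 0)
		goto remove_master;

	ret = sdw_prepare_stream(stream);
	if (ret < 0)
		goto remove_slave;

	ret = sdw_enable_stream(stream);
	if (ret < 0)
		goto remove_slave;

	return 0;

remove_slave:
	sdw_stream_remove_slave(slave, stream);
remove_master:
	sdw_stream_remove_master(bus, stream);
release:
	sdw_release_stream(stream);
	return ret;
}

Teardown mirrors this in reverse: sdw_disable_stream(), sdw_deprepare_stream(), then the remove and release calls.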
@@ -1385,8 +1390,8 @@ EXPORT_SYMBOL(sdw_stream_add_slave);
1385 * @port_num: Port number 1390 * @port_num: Port number
1386 */ 1391 */
1387struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave, 1392struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
1388 enum sdw_data_direction direction, 1393 enum sdw_data_direction direction,
1389 unsigned int port_num) 1394 unsigned int port_num)
1390{ 1395{
1391 struct sdw_dpn_prop *dpn_prop; 1396 struct sdw_dpn_prop *dpn_prop;
1392 u8 num_ports; 1397 u8 num_ports;
@@ -1470,7 +1475,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
1470 1475
1471 /* TODO: Support Asynchronous mode */ 1476 /* TODO: Support Asynchronous mode */
1472 if ((prop->max_freq % stream->params.rate) != 0) { 1477 if ((prop->max_freq % stream->params.rate) != 0) {
1473 dev_err(bus->dev, "Async mode not supported"); 1478 dev_err(bus->dev, "Async mode not supported\n");
1474 return -EINVAL; 1479 return -EINVAL;
1475 } 1480 }
1476 1481
@@ -1482,15 +1487,14 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
1482 /* Program params */ 1487 /* Program params */
1483 ret = sdw_program_params(bus); 1488 ret = sdw_program_params(bus);
1484 if (ret < 0) { 1489 if (ret < 0) {
1485 dev_err(bus->dev, "Program params failed: %d", ret); 1490 dev_err(bus->dev, "Program params failed: %d\n", ret);
1486 goto restore_params; 1491 goto restore_params;
1487 } 1492 }
1488
1489 } 1493 }
1490 1494
1491 ret = do_bank_switch(stream); 1495 ret = do_bank_switch(stream);
1492 if (ret < 0) { 1496 if (ret < 0) {
1493 dev_err(bus->dev, "Bank switch failed: %d", ret); 1497 dev_err(bus->dev, "Bank switch failed: %d\n", ret);
1494 goto restore_params; 1498 goto restore_params;
1495 } 1499 }
1496 1500
@@ -1500,8 +1504,8 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
1500 /* Prepare port(s) on the new clock configuration */ 1504 /* Prepare port(s) on the new clock configuration */
1501 ret = sdw_prep_deprep_ports(m_rt, true); 1505 ret = sdw_prep_deprep_ports(m_rt, true);
1502 if (ret < 0) { 1506 if (ret < 0) {
1503 dev_err(bus->dev, "Prepare port(s) failed ret = %d", 1507 dev_err(bus->dev, "Prepare port(s) failed ret = %d\n",
1504 ret); 1508 ret);
1505 return ret; 1509 return ret;
1506 } 1510 }
1507 } 1511 }
@@ -1527,7 +1531,7 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream)
1527 int ret = 0; 1531 int ret = 0;
1528 1532
1529 if (!stream) { 1533 if (!stream) {
1530 pr_err("SoundWire: Handle not found for stream"); 1534 pr_err("SoundWire: Handle not found for stream\n");
1531 return -EINVAL; 1535 return -EINVAL;
1532 } 1536 }
1533 1537
@@ -1535,7 +1539,7 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream)
1535 1539
1536 ret = _sdw_prepare_stream(stream); 1540 ret = _sdw_prepare_stream(stream);
1537 if (ret < 0) 1541 if (ret < 0)
1538 pr_err("Prepare for stream:%s failed: %d", stream->name, ret); 1542 pr_err("Prepare for stream:%s failed: %d\n", stream->name, ret);
1539 1543
1540 sdw_release_bus_lock(stream); 1544 sdw_release_bus_lock(stream);
1541 return ret; 1545 return ret;
@@ -1555,21 +1559,22 @@ static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
1555 /* Program params */ 1559 /* Program params */
1556 ret = sdw_program_params(bus); 1560 ret = sdw_program_params(bus);
1557 if (ret < 0) { 1561 if (ret < 0) {
1558 dev_err(bus->dev, "Program params failed: %d", ret); 1562 dev_err(bus->dev, "Program params failed: %d\n", ret);
1559 return ret; 1563 return ret;
1560 } 1564 }
1561 1565
1562 /* Enable port(s) */ 1566 /* Enable port(s) */
1563 ret = sdw_enable_disable_ports(m_rt, true); 1567 ret = sdw_enable_disable_ports(m_rt, true);
1564 if (ret < 0) { 1568 if (ret < 0) {
1565 dev_err(bus->dev, "Enable port(s) failed ret: %d", ret); 1569 dev_err(bus->dev,
1570 "Enable port(s) failed ret: %d\n", ret);
1566 return ret; 1571 return ret;
1567 } 1572 }
1568 } 1573 }
1569 1574
1570 ret = do_bank_switch(stream); 1575 ret = do_bank_switch(stream);
1571 if (ret < 0) { 1576 if (ret < 0) {
1572 dev_err(bus->dev, "Bank switch failed: %d", ret); 1577 dev_err(bus->dev, "Bank switch failed: %d\n", ret);
1573 return ret; 1578 return ret;
1574 } 1579 }
1575 1580
@@ -1589,7 +1594,7 @@ int sdw_enable_stream(struct sdw_stream_runtime *stream)
1589 int ret = 0; 1594 int ret = 0;
1590 1595
1591 if (!stream) { 1596 if (!stream) {
1592 pr_err("SoundWire: Handle not found for stream"); 1597 pr_err("SoundWire: Handle not found for stream\n");
1593 return -EINVAL; 1598 return -EINVAL;
1594 } 1599 }
1595 1600
@@ -1597,7 +1602,7 @@ int sdw_enable_stream(struct sdw_stream_runtime *stream)
1597 1602
1598 ret = _sdw_enable_stream(stream); 1603 ret = _sdw_enable_stream(stream);
1599 if (ret < 0) 1604 if (ret < 0)
1600 pr_err("Enable for stream:%s failed: %d", stream->name, ret); 1605 pr_err("Enable for stream:%s failed: %d\n", stream->name, ret);
1601 1606
1602 sdw_release_bus_lock(stream); 1607 sdw_release_bus_lock(stream);
1603 return ret; 1608 return ret;
@@ -1615,7 +1620,7 @@ static int _sdw_disable_stream(struct sdw_stream_runtime *stream)
1615 /* Disable port(s) */ 1620 /* Disable port(s) */
1616 ret = sdw_enable_disable_ports(m_rt, false); 1621 ret = sdw_enable_disable_ports(m_rt, false);
1617 if (ret < 0) { 1622 if (ret < 0) {
1618 dev_err(bus->dev, "Disable port(s) failed: %d", ret); 1623 dev_err(bus->dev, "Disable port(s) failed: %d\n", ret);
1619 return ret; 1624 return ret;
1620 } 1625 }
1621 } 1626 }
@@ -1626,7 +1631,7 @@ static int _sdw_disable_stream(struct sdw_stream_runtime *stream)
1626 /* Program params */ 1631 /* Program params */
1627 ret = sdw_program_params(bus); 1632 ret = sdw_program_params(bus);
1628 if (ret < 0) { 1633 if (ret < 0) {
1629 dev_err(bus->dev, "Program params failed: %d", ret); 1634 dev_err(bus->dev, "Program params failed: %d\n", ret);
1630 return ret; 1635 return ret;
1631 } 1636 }
1632 } 1637 }
@@ -1646,7 +1651,7 @@ int sdw_disable_stream(struct sdw_stream_runtime *stream)
1646 int ret = 0; 1651 int ret = 0;
1647 1652
1648 if (!stream) { 1653 if (!stream) {
1649 pr_err("SoundWire: Handle not found for stream"); 1654 pr_err("SoundWire: Handle not found for stream\n");
1650 return -EINVAL; 1655 return -EINVAL;
1651 } 1656 }
1652 1657
@@ -1654,7 +1659,7 @@ int sdw_disable_stream(struct sdw_stream_runtime *stream)
1654 1659
1655 ret = _sdw_disable_stream(stream); 1660 ret = _sdw_disable_stream(stream);
1656 if (ret < 0) 1661 if (ret < 0)
1657 pr_err("Disable for stream:%s failed: %d", stream->name, ret); 1662 pr_err("Disable for stream:%s failed: %d\n", stream->name, ret);
1658 1663
1659 sdw_release_bus_lock(stream); 1664 sdw_release_bus_lock(stream);
1660 return ret; 1665 return ret;
@@ -1672,7 +1677,8 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
1672 /* De-prepare port(s) */ 1677 /* De-prepare port(s) */
1673 ret = sdw_prep_deprep_ports(m_rt, false); 1678 ret = sdw_prep_deprep_ports(m_rt, false);
1674 if (ret < 0) { 1679 if (ret < 0) {
1675 dev_err(bus->dev, "De-prepare port(s) failed: %d", ret); 1680 dev_err(bus->dev,
1681 "De-prepare port(s) failed: %d\n", ret);
1676 return ret; 1682 return ret;
1677 } 1683 }
1678 1684
@@ -1683,10 +1689,9 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
1683 /* Program params */ 1689 /* Program params */
1684 ret = sdw_program_params(bus); 1690 ret = sdw_program_params(bus);
1685 if (ret < 0) { 1691 if (ret < 0) {
1686 dev_err(bus->dev, "Program params failed: %d", ret); 1692 dev_err(bus->dev, "Program params failed: %d\n", ret);
1687 return ret; 1693 return ret;
1688 } 1694 }
1689
1690 } 1695 }
1691 1696
1692 stream->state = SDW_STREAM_DEPREPARED; 1697 stream->state = SDW_STREAM_DEPREPARED;
@@ -1705,14 +1710,14 @@ int sdw_deprepare_stream(struct sdw_stream_runtime *stream)
1705 int ret = 0; 1710 int ret = 0;
1706 1711
1707 if (!stream) { 1712 if (!stream) {
1708 pr_err("SoundWire: Handle not found for stream"); 1713 pr_err("SoundWire: Handle not found for stream\n");
1709 return -EINVAL; 1714 return -EINVAL;
1710 } 1715 }
1711 1716
1712 sdw_acquire_bus_lock(stream); 1717 sdw_acquire_bus_lock(stream);
1713 ret = _sdw_deprepare_stream(stream); 1718 ret = _sdw_deprepare_stream(stream);
1714 if (ret < 0) 1719 if (ret < 0)
1715 pr_err("De-prepare for stream:%d failed: %d", ret, ret); 1720 pr_err("De-prepare for stream:%d failed: %d\n", ret, ret);
1716 1721
1717 sdw_release_bus_lock(stream); 1722 sdw_release_bus_lock(stream);
1718 return ret; 1723 return ret;
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index f2f0de27252b..833bdee3cec7 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
1obj-${CONFIG_THUNDERBOLT} := thunderbolt.o 1obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
2thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o 2thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
3thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o 3thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index 9553305c63ea..8bf8e031f0bc 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -13,6 +13,7 @@
13 13
14#define CAP_OFFSET_MAX 0xff 14#define CAP_OFFSET_MAX 0xff
15#define VSE_CAP_OFFSET_MAX 0xffff 15#define VSE_CAP_OFFSET_MAX 0xffff
16#define TMU_ACCESS_EN BIT(20)
16 17
17struct tb_cap_any { 18struct tb_cap_any {
18 union { 19 union {
@@ -22,28 +23,53 @@ struct tb_cap_any {
22 }; 23 };
23} __packed; 24} __packed;
24 25
25/** 26static int tb_port_enable_tmu(struct tb_port *port, bool enable)
26 * tb_port_find_cap() - Find port capability
27 * @port: Port to find the capability for
28 * @cap: Capability to look
29 *
30 * Returns offset to start of capability or %-ENOENT if no such
31 * capability was found. Negative errno is returned if there was an
32 * error.
33 */
34int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
35{ 27{
36 u32 offset; 28 struct tb_switch *sw = port->sw;
29 u32 value, offset;
30 int ret;
37 31
38 /* 32 /*
39 * DP out adapters claim to implement TMU capability but in 33 * Legacy devices need to have TMU access enabled before port
40 * reality they do not so we hard code the adapter specific 34 * space can be fully accessed.
41 * capability offset here.
42 */ 35 */
43 if (port->config.type == TB_TYPE_DP_HDMI_OUT) 36 if (tb_switch_is_lr(sw))
44 offset = 0x39; 37 offset = 0x26;
38 else if (tb_switch_is_er(sw))
39 offset = 0x2a;
45 else 40 else
46 offset = 0x1; 41 return 0;
42
43 ret = tb_sw_read(sw, &value, TB_CFG_SWITCH, offset, 1);
44 if (ret)
45 return ret;
46
47 if (enable)
48 value |= TMU_ACCESS_EN;
49 else
50 value &= ~TMU_ACCESS_EN;
51
52 return tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
53}
54
55static void tb_port_dummy_read(struct tb_port *port)
56{
57 /*
58 * When reading from next capability pointer location in port
59 * config space the read data is not cleared on LR. To avoid
60 * reading stale data on next read perform one dummy read after
61 * port capabilities are walked.
62 */
63 if (tb_switch_is_lr(port->sw)) {
64 u32 dummy;
65
66 tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1);
67 }
68}
69
70static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
71{
72 u32 offset = 1;
47 73
48 do { 74 do {
49 struct tb_cap_any header; 75 struct tb_cap_any header;
@@ -62,6 +88,31 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
62 return -ENOENT; 88 return -ENOENT;
63} 89}
64 90
91/**
92 * tb_port_find_cap() - Find port capability
93 * @port: Port to find the capability for
94 * @cap: Capability to look
95 *
96 * Returns offset to start of capability or %-ENOENT if no such
97 * capability was found. Negative errno is returned if there was an
98 * error.
99 */
100int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
101{
102 int ret;
103
104 ret = tb_port_enable_tmu(port, true);
105 if (ret)
106 return ret;
107
108 ret = __tb_port_find_cap(port, cap);
109
110 tb_port_dummy_read(port);
111 tb_port_enable_tmu(port, false);
112
113 return ret;
114}
115
65static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap) 116static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
66{ 117{
67 int offset = sw->config.first_cap_offset; 118 int offset = sw->config.first_cap_offset;
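The hunk above elides the body of the capability walk; for orientation, it is a linked-list traversal of one-dword capability headers in port config space. A sketch of the elided loop, assuming the tb_cap_any basic-header field names:

/* inside __tb_port_find_cap(), after offset is initialized */
do {
	struct tb_cap_any header;
	int ret;

	ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
	if (ret)
		return ret;

	if (header.basic.cap == cap)
		return offset;

	offset = header.basic.next;
} while (offset);

return -ENOENT;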
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 73b386de4d15..2427d73be731 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -720,7 +720,7 @@ int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
720 .port = port, 720 .port = port,
721 .error = error, 721 .error = error,
722 }; 722 };
723 tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port); 723 tb_ctl_dbg(ctl, "resetting error on %llx:%x.\n", route, port);
724 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR); 724 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
725} 725}
726 726
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index e3fc920af682..f1c10378fa3e 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -42,7 +42,6 @@
42#define ICM_TIMEOUT 5000 /* ms */ 42#define ICM_TIMEOUT 5000 /* ms */
43#define ICM_APPROVE_TIMEOUT 10000 /* ms */ 43#define ICM_APPROVE_TIMEOUT 10000 /* ms */
44#define ICM_MAX_LINK 4 44#define ICM_MAX_LINK 4
45#define ICM_MAX_DEPTH 6
46 45
47/** 46/**
48 * struct icm - Internal connection manager private data 47 * struct icm - Internal connection manager private data
@@ -469,10 +468,15 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
469 pm_runtime_get_sync(&parent_sw->dev); 468 pm_runtime_get_sync(&parent_sw->dev);
470 469
471 sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route); 470 sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
472 if (!sw) 471 if (IS_ERR(sw))
473 goto out; 472 goto out;
474 473
475 sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); 474 sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
475 if (!sw->uuid) {
476 tb_sw_warn(sw, "cannot allocate memory for switch\n");
477 tb_switch_put(sw);
478 goto out;
479 }
476 sw->connection_id = connection_id; 480 sw->connection_id = connection_id;
477 sw->connection_key = connection_key; 481 sw->connection_key = connection_key;
478 sw->link = link; 482 sw->link = link;
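Note the error-contract change threaded through this file: tb_switch_alloc() now returns an ERR_PTR()-encoded error instead of NULL, so callers test with IS_ERR()/PTR_ERR() from <linux/err.h>. A caller-side sketch of the new contract:

sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route);
if (IS_ERR(sw))
	return PTR_ERR(sw);	/* e.g. -ENOMEM, propagated upward */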
@@ -709,7 +713,7 @@ icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
709 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 713 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
710 ICM_LINK_INFO_DEPTH_SHIFT; 714 ICM_LINK_INFO_DEPTH_SHIFT;
711 715
712 if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) { 716 if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
713 tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); 717 tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
714 return; 718 return;
715 } 719 }
@@ -739,7 +743,7 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
739 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> 743 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
740 ICM_LINK_INFO_DEPTH_SHIFT; 744 ICM_LINK_INFO_DEPTH_SHIFT;
741 745
742 if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) { 746 if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
743 tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); 747 tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
744 return; 748 return;
745 } 749 }
@@ -793,9 +797,11 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
793 * connected another host to the same port, remove the switch 797 * connected another host to the same port, remove the switch
794 * first. 798 * first.
795 */ 799 */
796 sw = get_switch_at_route(tb->root_switch, route); 800 sw = tb_switch_find_by_route(tb, route);
797 if (sw) 801 if (sw) {
798 remove_switch(sw); 802 remove_switch(sw);
803 tb_switch_put(sw);
804 }
799 805
800 sw = tb_switch_find_by_link_depth(tb, link, depth); 806 sw = tb_switch_find_by_link_depth(tb, link, depth);
801 if (!sw) { 807 if (!sw) {
@@ -1138,9 +1144,11 @@ icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
1138 * connected another host to the same port, remove the switch 1144 * connected another host to the same port, remove the switch
1139 * first. 1145 * first.
1140 */ 1146 */
1141 sw = get_switch_at_route(tb->root_switch, route); 1147 sw = tb_switch_find_by_route(tb, route);
1142 if (sw) 1148 if (sw) {
1143 remove_switch(sw); 1149 remove_switch(sw);
1150 tb_switch_put(sw);
1151 }
1144 1152
1145 sw = tb_switch_find_by_route(tb, get_parent_route(route)); 1153 sw = tb_switch_find_by_route(tb, get_parent_route(route));
1146 if (!sw) { 1154 if (!sw) {
@@ -1191,6 +1199,8 @@ static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
1191 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: 1199 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
1192 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: 1200 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
1193 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: 1201 case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
1202 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
1203 case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
1194 return parent; 1204 return parent;
1195 } 1205 }
1196 1206
@@ -1560,7 +1570,7 @@ static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
1560 if (val & REG_FW_STS_ICM_EN) 1570 if (val & REG_FW_STS_ICM_EN)
1561 return 0; 1571 return 0;
1562 1572
1563 dev_info(&nhi->pdev->dev, "starting ICM firmware\n"); 1573 dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
1564 1574
1565 ret = icm_firmware_reset(tb, nhi); 1575 ret = icm_firmware_reset(tb, nhi);
1566 if (ret) 1576 if (ret)
@@ -1753,16 +1763,10 @@ static void icm_unplug_children(struct tb_switch *sw)
1753 for (i = 1; i <= sw->config.max_port_number; i++) { 1763 for (i = 1; i <= sw->config.max_port_number; i++) {
1754 struct tb_port *port = &sw->ports[i]; 1764 struct tb_port *port = &sw->ports[i];
1755 1765
1756 if (tb_is_upstream_port(port)) 1766 if (port->xdomain)
1757 continue;
1758 if (port->xdomain) {
1759 port->xdomain->is_unplugged = true; 1767 port->xdomain->is_unplugged = true;
1760 continue; 1768 else if (tb_port_has_remote(port))
1761 } 1769 icm_unplug_children(port->remote->sw);
1762 if (!port->remote)
1763 continue;
1764
1765 icm_unplug_children(port->remote->sw);
1766 } 1770 }
1767} 1771}
1768 1772
@@ -1773,23 +1777,16 @@ static void icm_free_unplugged_children(struct tb_switch *sw)
1773 for (i = 1; i <= sw->config.max_port_number; i++) { 1777 for (i = 1; i <= sw->config.max_port_number; i++) {
1774 struct tb_port *port = &sw->ports[i]; 1778 struct tb_port *port = &sw->ports[i];
1775 1779
1776 if (tb_is_upstream_port(port))
1777 continue;
1778
1779 if (port->xdomain && port->xdomain->is_unplugged) { 1780 if (port->xdomain && port->xdomain->is_unplugged) {
1780 tb_xdomain_remove(port->xdomain); 1781 tb_xdomain_remove(port->xdomain);
1781 port->xdomain = NULL; 1782 port->xdomain = NULL;
1782 continue; 1783 } else if (tb_port_has_remote(port)) {
1783 } 1784 if (port->remote->sw->is_unplugged) {
1784 1785 tb_switch_remove(port->remote->sw);
1785 if (!port->remote) 1786 port->remote = NULL;
1786 continue; 1787 } else {
1787 1788 icm_free_unplugged_children(port->remote->sw);
1788 if (port->remote->sw->is_unplugged) { 1789 }
1789 tb_switch_remove(port->remote->sw);
1790 port->remote = NULL;
1791 } else {
1792 icm_free_unplugged_children(port->remote->sw);
1793 } 1790 }
1794 } 1791 }
1795} 1792}
@@ -1853,8 +1850,8 @@ static int icm_start(struct tb *tb)
1853 tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0); 1850 tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
1854 else 1851 else
1855 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); 1852 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
1856 if (!tb->root_switch) 1853 if (IS_ERR(tb->root_switch))
1857 return -ENODEV; 1854 return PTR_ERR(tb->root_switch);
1858 1855
1859 /* 1856 /*
1860 * NVM upgrade has not been tested on Apple systems and they 1857 * NVM upgrade has not been tested on Apple systems and they
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
new file mode 100644
index 000000000000..ae1e92611c3e
--- /dev/null
+++ b/drivers/thunderbolt/lc.c
@@ -0,0 +1,179 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Thunderbolt link controller support
4 *
5 * Copyright (C) 2019, Intel Corporation
6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
7 */
8
9#include "tb.h"
10
11/**
12 * tb_lc_read_uuid() - Read switch UUID from link controller common register
13 * @sw: Switch whose UUID is read
14 * @uuid: UUID is placed here
15 */
16int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
17{
18 if (!sw->cap_lc)
19 return -EINVAL;
20 return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
21}
22
23static int read_lc_desc(struct tb_switch *sw, u32 *desc)
24{
25 if (!sw->cap_lc)
26 return -EINVAL;
27 return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
28}
29
30static int find_port_lc_cap(struct tb_port *port)
31{
32 struct tb_switch *sw = port->sw;
33 int start, phys, ret, size;
34 u32 desc;
35
36 ret = read_lc_desc(sw, &desc);
37 if (ret)
38 return ret;
39
40 /* Start of port LC registers */
41 start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
42 size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
43 phys = tb_phy_port_from_link(port->port);
44
45 return sw->cap_lc + start + phys * size;
46}
47
48static int tb_lc_configure_lane(struct tb_port *port, bool configure)
49{
50 bool upstream = tb_is_upstream_port(port);
51 struct tb_switch *sw = port->sw;
52 u32 ctrl, lane;
53 int cap, ret;
54
55 if (sw->generation < 2)
56 return 0;
57
58 cap = find_port_lc_cap(port);
59 if (cap < 0)
60 return cap;
61
62 ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
63 if (ret)
64 return ret;
65
66 /* Resolve correct lane */
67 if (port->port % 2)
68 lane = TB_LC_SX_CTRL_L1C;
69 else
70 lane = TB_LC_SX_CTRL_L2C;
71
72 if (configure) {
73 ctrl |= lane;
74 if (upstream)
75 ctrl |= TB_LC_SX_CTRL_UPSTREAM;
76 } else {
77 ctrl &= ~lane;
78 if (upstream)
79 ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
80 }
81
82 return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
83}
84
85/**
86 * tb_lc_configure_link() - Let LC know about configured link
87 * @sw: Switch that is being added
88 *
89 * Informs LC of both parent switch and @sw that there is established
90 * link between the two.
91 */
92int tb_lc_configure_link(struct tb_switch *sw)
93{
94 struct tb_port *up, *down;
95 int ret;
96
97 if (!sw->config.enabled || !tb_route(sw))
98 return 0;
99
100 up = tb_upstream_port(sw);
101 down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
102
103 /* Configure parent link toward this switch */
104 ret = tb_lc_configure_lane(down, true);
105 if (ret)
106 return ret;
107
108 /* Configure upstream link from this switch to the parent */
109 ret = tb_lc_configure_lane(up, true);
110 if (ret)
111 tb_lc_configure_lane(down, false);
112
113 return ret;
114}
115
116/**
117 * tb_lc_unconfigure_link() - Let LC know about unconfigured link
118 * @sw: Switch to unconfigure
119 *
120 * Informs LC of both parent switch and @sw that the link between the
121 * two does not exist anymore.
122 */
123void tb_lc_unconfigure_link(struct tb_switch *sw)
124{
125 struct tb_port *up, *down;
126
127 if (sw->is_unplugged || !sw->config.enabled || !tb_route(sw))
128 return;
129
130 up = tb_upstream_port(sw);
131 down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent));
132
133 tb_lc_configure_lane(up, false);
134 tb_lc_configure_lane(down, false);
135}
136
137/**
138 * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
139 * @sw: Switch to set sleep
140 *
141 * Let the switch link controllers know that the switch is going to
142 * sleep.
143 */
144int tb_lc_set_sleep(struct tb_switch *sw)
145{
146 int start, size, nlc, ret, i;
147 u32 desc;
148
149 if (sw->generation < 2)
150 return 0;
151
152 ret = read_lc_desc(sw, &desc);
153 if (ret)
154 return ret;
155
156 /* Figure out number of link controllers */
157 nlc = desc & TB_LC_DESC_NLC_MASK;
158 start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
159 size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
160
161 /* For each link controller set sleep bit */
162 for (i = 0; i < nlc; i++) {
163 unsigned int offset = sw->cap_lc + start + i * size;
164 u32 ctrl;
165
166 ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
167 offset + TB_LC_SX_CTRL, 1);
168 if (ret)
169 return ret;
170
171 ctrl |= TB_LC_SX_CTRL_SLP;
172 ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
173 offset + TB_LC_SX_CTRL, 1);
174 if (ret)
175 return ret;
176 }
177
178 return 0;
179}
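These helpers are meant to be driven from the switch core rather than used standalone. A condensed, hypothetical view of where they slot in; the my_* wrappers stand in for the real callers, which live in switch.c:

static int my_switch_add(struct tb_switch *sw)
{
	int ret;

	ret = tb_lc_configure_link(sw);	/* tell both LCs the link is up */
	if (ret)
		return ret;

	/* ... continue with normal switch registration ... */
	return 0;
}

static void my_switch_suspend(struct tb_switch *sw)
{
	if (tb_lc_set_sleep(sw))	/* set SLP in every link controller */
		tb_sw_warn(sw, "failed to set sleep\n");
}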
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 9aa44f9762a3..cac1ead5e302 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -27,8 +27,7 @@
27 * use this ring for anything else. 27 * use this ring for anything else.
28 */ 28 */
29#define RING_E2E_UNUSED_HOPID 2 29#define RING_E2E_UNUSED_HOPID 2
30/* HopIDs 0-7 are reserved by the Thunderbolt protocol */ 30#define RING_FIRST_USABLE_HOPID TB_PATH_MIN_HOPID
31#define RING_FIRST_USABLE_HOPID 8
32 31
33/* 32/*
34 * Minimal number of vectors when we use MSI-X. Two for control channel 33 * Minimal number of vectors when we use MSI-X. Two for control channel
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index a11956522bac..afe5f8391ebf 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -1,62 +1,330 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Thunderbolt Cactus Ridge driver - path/tunnel functionality 3 * Thunderbolt driver - path/tunnel functionality
4 * 4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2019, Intel Corporation
6 */ 7 */
7 8
8#include <linux/slab.h> 9#include <linux/slab.h>
9#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/delay.h>
12#include <linux/ktime.h>
10 13
11#include "tb.h" 14#include "tb.h"
12 15
13 16static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop *regs)
14static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop)
15{ 17{
16 tb_port_dbg(port, " Hop through port %d to hop %d (%s)\n", 18 const struct tb_port *port = hop->in_port;
17 hop->out_port, hop->next_hop, 19
18 hop->enable ? "enabled" : "disabled"); 20 tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n",
21 hop->in_hop_index, regs->out_port, regs->next_hop);
19 tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n", 22 tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
20 hop->weight, hop->priority, 23 regs->weight, regs->priority,
21 hop->initial_credits, hop->drop_packages); 24 regs->initial_credits, regs->drop_packages);
22 tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n", 25 tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
23 hop->counter_enable, hop->counter); 26 regs->counter_enable, regs->counter);
24 tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n", 27 tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
25 hop->ingress_fc, hop->egress_fc, 28 regs->ingress_fc, regs->egress_fc,
26 hop->ingress_shared_buffer, hop->egress_shared_buffer); 29 regs->ingress_shared_buffer, regs->egress_shared_buffer);
27 tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n", 30 tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
28 hop->unknown1, hop->unknown2, hop->unknown3); 31 regs->unknown1, regs->unknown2, regs->unknown3);
32}
33
34static struct tb_port *tb_path_find_dst_port(struct tb_port *src, int src_hopid,
35 int dst_hopid)
36{
37 struct tb_port *port, *out_port = NULL;
38 struct tb_regs_hop hop;
39 struct tb_switch *sw;
40 int i, ret, hopid;
41
42 hopid = src_hopid;
43 port = src;
44
45 for (i = 0; port && i < TB_PATH_MAX_HOPS; i++) {
46 sw = port->sw;
47
48 ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hopid, 2);
49 if (ret) {
50 tb_port_warn(port, "failed to read path at %d\n", hopid);
51 return NULL;
52 }
53
54 if (!hop.enable)
55 return NULL;
56
57 out_port = &sw->ports[hop.out_port];
58 hopid = hop.next_hop;
59 port = out_port->remote;
60 }
61
62 return out_port && hopid == dst_hopid ? out_port : NULL;
63}
64
65static int tb_path_find_src_hopid(struct tb_port *src,
66 const struct tb_port *dst, int dst_hopid)
67{
68 struct tb_port *out;
69 int i;
70
71 for (i = TB_PATH_MIN_HOPID; i <= src->config.max_in_hop_id; i++) {
72 out = tb_path_find_dst_port(src, i, dst_hopid);
73 if (out == dst)
74 return i;
75 }
76
77 return 0;
78}
79
80/**
81 * tb_path_discover() - Discover a path
82 * @src: First input port of a path
83 * @src_hopid: Starting HopID of a path (%-1 if don't care)
84 * @dst: Expected destination port of the path (%NULL if don't care)
85 * @dst_hopid: HopID to the @dst (%-1 if don't care)
86 * @last: Last port is filled here if not %NULL
87 * @name: Name of the path
88 *
89 * Follows a path starting from @src and @src_hopid to the last output
90 * port of the path. Allocates HopIDs for the visited ports. Call
91 * tb_path_free() to release the path and allocated HopIDs when the path
92 * is not needed anymore.
93 *
 94 * Note that the function also discovers incomplete paths, so the caller
 95 * should check that the @dst port is the expected one. If it is not, the
 96 * path can be cleaned up by calling tb_path_deactivate() before tb_path_free().
97 *
98 * Return: Discovered path on success, %NULL in case of failure
99 */
100struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
101 struct tb_port *dst, int dst_hopid,
102 struct tb_port **last, const char *name)
103{
104 struct tb_port *out_port;
105 struct tb_regs_hop hop;
106 struct tb_path *path;
107 struct tb_switch *sw;
108 struct tb_port *p;
109 size_t num_hops;
110 int ret, i, h;
111
112 if (src_hopid < 0 && dst) {
113 /*
114 * For incomplete paths the intermediate HopID can be
115 * different from the one used by the protocol adapter
116 * so in that case find a path that ends on @dst with
117 * matching @dst_hopid. That should give us the correct
118 * HopID for the @src.
119 */
120 src_hopid = tb_path_find_src_hopid(src, dst, dst_hopid);
121 if (!src_hopid)
122 return NULL;
123 }
124
125 p = src;
126 h = src_hopid;
127 num_hops = 0;
128
129 for (i = 0; p && i < TB_PATH_MAX_HOPS; i++) {
130 sw = p->sw;
131
132 ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
133 if (ret) {
134 tb_port_warn(p, "failed to read path at %d\n", h);
135 return NULL;
136 }
137
138 /* If the hop is not enabled we got an incomplete path */
139 if (!hop.enable)
140 break;
141
142 out_port = &sw->ports[hop.out_port];
143 if (last)
144 *last = out_port;
145
146 h = hop.next_hop;
147 p = out_port->remote;
148 num_hops++;
149 }
150
151 path = kzalloc(sizeof(*path), GFP_KERNEL);
152 if (!path)
153 return NULL;
154
155 path->name = name;
156 path->tb = src->sw->tb;
157 path->path_length = num_hops;
158 path->activated = true;
159
160 path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
161 if (!path->hops) {
162 kfree(path);
163 return NULL;
164 }
165
166 p = src;
167 h = src_hopid;
168
169 for (i = 0; i < num_hops; i++) {
170 int next_hop;
171
172 sw = p->sw;
173
174 ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
175 if (ret) {
176 tb_port_warn(p, "failed to read path at %d\n", h);
177 goto err;
178 }
179
180 if (tb_port_alloc_in_hopid(p, h, h) < 0)
181 goto err;
182
183 out_port = &sw->ports[hop.out_port];
184 next_hop = hop.next_hop;
185
186 if (tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
187 tb_port_release_in_hopid(p, h);
188 goto err;
189 }
190
191 path->hops[i].in_port = p;
192 path->hops[i].in_hop_index = h;
193 path->hops[i].in_counter_index = -1;
194 path->hops[i].out_port = out_port;
195 path->hops[i].next_hop_index = next_hop;
196
197 h = next_hop;
198 p = out_port->remote;
199 }
200
201 return path;
202
203err:
204 tb_port_warn(src, "failed to discover path starting at HopID %d\n",
205 src_hopid);
206 tb_path_free(path);
207 return NULL;
29} 208}
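
To make the discovery contract concrete, a hypothetical caller might look like the sketch below; the ports, the HopID and the path name are illustrative, not taken from this patch:

    struct tb_port *last = NULL;
    struct tb_path *path;

    path = tb_path_discover(in_port, -1, out_port, 8, &last, "PCIe Down");
    if (!path)
            return NULL;

    /* Incomplete path: clean up as the kerneldoc above recommends */
    if (last != out_port) {
            tb_path_deactivate(path);
            tb_path_free(path);
            return NULL;
    }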
30 209
31/** 210/**
32 * tb_path_alloc() - allocate a thunderbolt path 211 * tb_path_alloc() - allocate a thunderbolt path between two ports
212 * @tb: Domain pointer
213 * @src: Source port of the path
214 * @src_hopid: HopID used for the first ingress port in the path
215 * @dst: Destination port of the path
216 * @dst_hopid: HopID used for the last egress port in the path
217 * @link_nr: Preferred link if there are dual links on the path
218 * @name: Name of the path
219 *
 220 * Creates a path between two ports starting with the given @src_hopid.
 221 * Reserves HopIDs for each port (these can differ from @src_hopid depending
 222 * on how many HopIDs each port has already reserved). If there are dual
223 * links on the path, prioritizes using @link_nr.
33 * 224 *
34 * Return: Returns a tb_path on success or NULL on failure. 225 * Return: Returns a tb_path on success or NULL on failure.
35 */ 226 */
36struct tb_path *tb_path_alloc(struct tb *tb, int num_hops) 227struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
228 struct tb_port *dst, int dst_hopid, int link_nr,
229 const char *name)
37{ 230{
38 struct tb_path *path = kzalloc(sizeof(*path), GFP_KERNEL); 231 struct tb_port *in_port, *out_port;
232 int in_hopid, out_hopid;
233 struct tb_path *path;
234 size_t num_hops;
235 int i, ret;
236
237 path = kzalloc(sizeof(*path), GFP_KERNEL);
39 if (!path) 238 if (!path)
40 return NULL; 239 return NULL;
240
241 /*
242 * Number of hops on a path is the distance between the two
243 * switches plus the source adapter port.
244 */
245 num_hops = abs(tb_route_length(tb_route(src->sw)) -
246 tb_route_length(tb_route(dst->sw))) + 1;
247
41 path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL); 248 path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
42 if (!path->hops) { 249 if (!path->hops) {
43 kfree(path); 250 kfree(path);
44 return NULL; 251 return NULL;
45 } 252 }
253
254 in_hopid = src_hopid;
255 out_port = NULL;
256
257 for (i = 0; i < num_hops; i++) {
258 in_port = tb_next_port_on_path(src, dst, out_port);
259 if (!in_port)
260 goto err;
261
262 if (in_port->dual_link_port && in_port->link_nr != link_nr)
263 in_port = in_port->dual_link_port;
264
265 ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
266 if (ret < 0)
267 goto err;
268 in_hopid = ret;
269
270 out_port = tb_next_port_on_path(src, dst, in_port);
271 if (!out_port)
272 goto err;
273
274 if (out_port->dual_link_port && out_port->link_nr != link_nr)
275 out_port = out_port->dual_link_port;
276
277 if (i == num_hops - 1)
278 ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
279 dst_hopid);
280 else
281 ret = tb_port_alloc_out_hopid(out_port, -1, -1);
282
283 if (ret < 0)
284 goto err;
285 out_hopid = ret;
286
287 path->hops[i].in_hop_index = in_hopid;
288 path->hops[i].in_port = in_port;
289 path->hops[i].in_counter_index = -1;
290 path->hops[i].out_port = out_port;
291 path->hops[i].next_hop_index = out_hopid;
292
293 in_hopid = out_hopid;
294 }
295
46 path->tb = tb; 296 path->tb = tb;
47 path->path_length = num_hops; 297 path->path_length = num_hops;
298 path->name = name;
299
48 return path; 300 return path;
301
302err:
303 tb_path_free(path);
304 return NULL;
49} 305}
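
The num_hops formula above is easy to sanity-check with a small worked example (topology hypothetical):

    /*
     * Host router at route length 0, target device two links down at
     * route length 2:
     *
     *    num_hops = abs(0 - 2) + 1 = 3
     *
     * one hop entry in the host, one in the intermediate switch and
     * one in the target switch, matching the comment in the code.
     */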
50 306
51/** 307/**
52 * tb_path_free() - free a deactivated path 308 * tb_path_free() - free a path
309 * @path: Path to free
310 *
311 * Frees a path. The path does not need to be deactivated.
53 */ 312 */
54void tb_path_free(struct tb_path *path) 313void tb_path_free(struct tb_path *path)
55{ 314{
56 if (path->activated) { 315 int i;
57 tb_WARN(path->tb, "trying to free an activated path\n") 316
58 return; 317 for (i = 0; i < path->path_length; i++) {
318 const struct tb_path_hop *hop = &path->hops[i];
319
320 if (hop->in_port)
321 tb_port_release_in_hopid(hop->in_port,
322 hop->in_hop_index);
323 if (hop->out_port)
324 tb_port_release_out_hopid(hop->out_port,
325 hop->next_hop_index);
59 } 326 }
327
60 kfree(path->hops); 328 kfree(path->hops);
61 kfree(path); 329 kfree(path);
62} 330}
@@ -74,14 +342,65 @@ static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
74 } 342 }
75} 343}
76 344
345static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
346 bool clear_fc)
347{
348 struct tb_regs_hop hop;
349 ktime_t timeout;
350 int ret;
351
352 /* Disable the path */
353 ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
354 if (ret)
355 return ret;
356
357 /* Already disabled */
358 if (!hop.enable)
359 return 0;
360
361 hop.enable = 0;
362
363 ret = tb_port_write(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
364 if (ret)
365 return ret;
366
367 /* Wait until it is drained */
368 timeout = ktime_add_ms(ktime_get(), 500);
369 do {
370 ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
371 if (ret)
372 return ret;
373
374 if (!hop.pending) {
375 if (clear_fc) {
376 /* Clear flow control */
377 hop.ingress_fc = 0;
378 hop.egress_fc = 0;
379 hop.ingress_shared_buffer = 0;
380 hop.egress_shared_buffer = 0;
381
382 return tb_port_write(port, &hop, TB_CFG_HOPS,
383 2 * hop_index, 2);
384 }
385
386 return 0;
387 }
388
389 usleep_range(10, 20);
390 } while (ktime_before(ktime_get(), timeout));
391
392 return -ETIMEDOUT;
393}
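
The drain wait above is the usual bounded-poll idiom; stripped to its skeleton (condition_cleared() is a hypothetical stand-in for re-reading the hop entry and testing hop.pending):

    ktime_t timeout = ktime_add_ms(ktime_get(), 500);

    do {
            if (condition_cleared())
                    return 0;
            usleep_range(10, 20);   /* back off between reads */
    } while (ktime_before(ktime_get(), timeout));

    return -ETIMEDOUT;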
394
77static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop) 395static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
78{ 396{
79 int i, res; 397 int i, res;
80 struct tb_regs_hop hop = { }; 398
81 for (i = first_hop; i < path->path_length; i++) { 399 for (i = first_hop; i < path->path_length; i++) {
82 res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS, 400 res = __tb_path_deactivate_hop(path->hops[i].in_port,
83 2 * path->hops[i].in_hop_index, 2); 401 path->hops[i].in_hop_index,
84 if (res) 402 path->clear_fc);
403 if (res && res != -ENODEV)
85 tb_port_warn(path->hops[i].in_port, 404 tb_port_warn(path->hops[i].in_port,
86 "hop deactivation failed for hop %d, index %d\n", 405 "hop deactivation failed for hop %d, index %d\n",
87 i, path->hops[i].in_hop_index); 406 i, path->hops[i].in_hop_index);
@@ -94,12 +413,12 @@ void tb_path_deactivate(struct tb_path *path)
94 tb_WARN(path->tb, "trying to deactivate an inactive path\n"); 413 tb_WARN(path->tb, "trying to deactivate an inactive path\n");
95 return; 414 return;
96 } 415 }
97 tb_info(path->tb, 416 tb_dbg(path->tb,
98 "deactivating path from %llx:%x to %llx:%x\n", 417 "deactivating %s path from %llx:%x to %llx:%x\n",
99 tb_route(path->hops[0].in_port->sw), 418 path->name, tb_route(path->hops[0].in_port->sw),
100 path->hops[0].in_port->port, 419 path->hops[0].in_port->port,
101 tb_route(path->hops[path->path_length - 1].out_port->sw), 420 tb_route(path->hops[path->path_length - 1].out_port->sw),
102 path->hops[path->path_length - 1].out_port->port); 421 path->hops[path->path_length - 1].out_port->port);
103 __tb_path_deactivate_hops(path, 0); 422 __tb_path_deactivate_hops(path, 0);
104 __tb_path_deallocate_nfc(path, 0); 423 __tb_path_deallocate_nfc(path, 0);
105 path->activated = false; 424 path->activated = false;
@@ -122,12 +441,12 @@ int tb_path_activate(struct tb_path *path)
122 return -EINVAL; 441 return -EINVAL;
123 } 442 }
124 443
125 tb_info(path->tb, 444 tb_dbg(path->tb,
126 "activating path from %llx:%x to %llx:%x\n", 445 "activating %s path from %llx:%x to %llx:%x\n",
127 tb_route(path->hops[0].in_port->sw), 446 path->name, tb_route(path->hops[0].in_port->sw),
128 path->hops[0].in_port->port, 447 path->hops[0].in_port->port,
129 tb_route(path->hops[path->path_length - 1].out_port->sw), 448 tb_route(path->hops[path->path_length - 1].out_port->sw),
130 path->hops[path->path_length - 1].out_port->port); 449 path->hops[path->path_length - 1].out_port->port);
131 450
132 /* Clear counters. */ 451 /* Clear counters. */
133 for (i = path->path_length - 1; i >= 0; i--) { 452 for (i = path->path_length - 1; i >= 0; i--) {
@@ -153,30 +472,14 @@ int tb_path_activate(struct tb_path *path)
153 for (i = path->path_length - 1; i >= 0; i--) { 472 for (i = path->path_length - 1; i >= 0; i--) {
154 struct tb_regs_hop hop = { 0 }; 473 struct tb_regs_hop hop = { 0 };
155 474
156 /* 475 /* If it is left active, deactivate it first */
157 * We do (currently) not tear down paths set up by the firmware. 476 __tb_path_deactivate_hop(path->hops[i].in_port,
158 * If a firmware device is unplugged and plugged in again then 477 path->hops[i].in_hop_index, path->clear_fc);
159 * it can happen that we reuse some of the hops from the (now
160 * defunct) firmware path. This causes the hotplug operation to
161 * fail (the pci device does not show up). Clearing the hop
162 * before overwriting it fixes the problem.
163 *
164 * Should be removed once we discover and tear down firmware
165 * paths.
166 */
167 res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
168 2 * path->hops[i].in_hop_index, 2);
169 if (res) {
170 __tb_path_deactivate_hops(path, i);
171 __tb_path_deallocate_nfc(path, 0);
172 goto err;
173 }
174 478
175 /* dword 0 */ 479 /* dword 0 */
176 hop.next_hop = path->hops[i].next_hop_index; 480 hop.next_hop = path->hops[i].next_hop_index;
177 hop.out_port = path->hops[i].out_port->port; 481 hop.out_port = path->hops[i].out_port->port;
178 /* TODO: figure out why these are good values */ 482 hop.initial_credits = path->hops[i].initial_credits;
179 hop.initial_credits = (i == path->path_length - 1) ? 16 : 7;
180 hop.unknown1 = 0; 483 hop.unknown1 = 0;
181 hop.enable = 1; 484 hop.enable = 1;
182 485
@@ -198,9 +501,8 @@ int tb_path_activate(struct tb_path *path)
198 & out_mask; 501 & out_mask;
199 hop.unknown3 = 0; 502 hop.unknown3 = 0;
200 503
201 tb_port_info(path->hops[i].in_port, "Writing hop %d, index %d", 504 tb_port_dbg(path->hops[i].in_port, "Writing hop %d\n", i);
202 i, path->hops[i].in_hop_index); 505 tb_dump_hop(&path->hops[i], &hop);
203 tb_dump_hop(path->hops[i].in_port, &hop);
204 res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS, 506 res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
205 2 * path->hops[i].in_hop_index, 2); 507 2 * path->hops[i].in_hop_index, 2);
206 if (res) { 508 if (res) {
@@ -210,7 +512,7 @@ int tb_path_activate(struct tb_path *path)
210 } 512 }
211 } 513 }
212 path->activated = true; 514 path->activated = true;
213 tb_info(path->tb, "path activation complete\n"); 515 tb_dbg(path->tb, "path activation complete\n");
214 return 0; 516 return 0;
215err: 517err:
216 tb_WARN(path->tb, "path activation failed\n"); 518 tb_WARN(path->tb, "path activation failed\n");
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
index b2f0d6386cee..d5b0cdb8f0b1 100644
--- a/drivers/thunderbolt/property.c
+++ b/drivers/thunderbolt/property.c
@@ -176,6 +176,10 @@ static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
176 } else { 176 } else {
177 dir->uuid = kmemdup(&block[dir_offset], sizeof(*dir->uuid), 177 dir->uuid = kmemdup(&block[dir_offset], sizeof(*dir->uuid),
178 GFP_KERNEL); 178 GFP_KERNEL);
179 if (!dir->uuid) {
180 tb_property_free_dir(dir);
181 return NULL;
182 }
179 content_offset = dir_offset + 4; 183 content_offset = dir_offset + 4;
180 content_len = dir_len - 4; /* Length includes UUID */ 184 content_len = dir_len - 4; /* Length includes UUID */
181 } 185 }
@@ -548,6 +552,11 @@ int tb_property_add_data(struct tb_property_dir *parent, const char *key,
548 552
549 property->length = size / 4; 553 property->length = size / 4;
550 property->value.data = kzalloc(size, GFP_KERNEL); 554 property->value.data = kzalloc(size, GFP_KERNEL);
555 if (!property->value.data) {
556 kfree(property);
557 return -ENOMEM;
558 }
559
551 memcpy(property->value.data, buf, buflen); 560 memcpy(property->value.data, buf, buflen);
552 561
553 list_add_tail(&property->list, &parent->properties); 562 list_add_tail(&property->list, &parent->properties);
@@ -578,7 +587,12 @@ int tb_property_add_text(struct tb_property_dir *parent, const char *key,
578 return -ENOMEM; 587 return -ENOMEM;
579 588
580 property->length = size / 4; 589 property->length = size / 4;
581 property->value.data = kzalloc(size, GFP_KERNEL); 590 property->value.text = kzalloc(size, GFP_KERNEL);
591 if (!property->value.text) {
592 kfree(property);
593 return -ENOMEM;
594 }
595
582 strcpy(property->value.text, text); 596 strcpy(property->value.text, text);
583 597
584 list_add_tail(&property->list, &parent->properties); 598 list_add_tail(&property->list, &parent->properties);
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index cd96994dc094..c1b016574fb4 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -10,15 +10,13 @@
10#include <linux/idr.h> 10#include <linux/idr.h>
11#include <linux/nvmem-provider.h> 11#include <linux/nvmem-provider.h>
12#include <linux/pm_runtime.h> 12#include <linux/pm_runtime.h>
13#include <linux/sched/signal.h>
13#include <linux/sizes.h> 14#include <linux/sizes.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
16 17
17#include "tb.h" 18#include "tb.h"
18 19
19/* Switch authorization from userspace is serialized by this lock */
20static DEFINE_MUTEX(switch_lock);
21
22/* Switch NVM support */ 20/* Switch NVM support */
23 21
24#define NVM_DEVID 0x05 22#define NVM_DEVID 0x05
@@ -254,8 +252,8 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
254 struct tb_switch *sw = priv; 252 struct tb_switch *sw = priv;
255 int ret = 0; 253 int ret = 0;
256 254
257 if (mutex_lock_interruptible(&switch_lock)) 255 if (!mutex_trylock(&sw->tb->lock))
258 return -ERESTARTSYS; 256 return restart_syscall();
259 257
260 /* 258 /*
261 * Since writing the NVM image might require some special steps, 259 * Since writing the NVM image might require some special steps,
@@ -275,7 +273,7 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
275 memcpy(sw->nvm->buf + offset, val, bytes); 273 memcpy(sw->nvm->buf + offset, val, bytes);
276 274
277unlock: 275unlock:
278 mutex_unlock(&switch_lock); 276 mutex_unlock(&sw->tb->lock);
279 277
280 return ret; 278 return ret;
281} 279}
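
The switch.c hunks from here on replace the global switch_lock with the per-domain tb->lock. In the sysfs and NVMem callbacks the lock is taken with mutex_trylock() and the syscall restarted on contention, presumably so a userspace access cannot sleep on a lock the connection manager may hold across a long operation. The pattern in isolation:

    if (!mutex_trylock(&sw->tb->lock))
            return restart_syscall();   /* transparently retried by the core */

    /* ... critical section touching sw->nvm and friends ... */

    mutex_unlock(&sw->tb->lock);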
@@ -364,10 +362,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
364 } 362 }
365 nvm->non_active = nvm_dev; 363 nvm->non_active = nvm_dev;
366 364
367 mutex_lock(&switch_lock);
368 sw->nvm = nvm; 365 sw->nvm = nvm;
369 mutex_unlock(&switch_lock);
370
371 return 0; 366 return 0;
372 367
373err_nvm_active: 368err_nvm_active:
@@ -384,10 +379,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw)
384{ 379{
385 struct tb_switch_nvm *nvm; 380 struct tb_switch_nvm *nvm;
386 381
387 mutex_lock(&switch_lock);
388 nvm = sw->nvm; 382 nvm = sw->nvm;
389 sw->nvm = NULL; 383 sw->nvm = NULL;
390 mutex_unlock(&switch_lock);
391 384
392 if (!nvm) 385 if (!nvm)
393 return; 386 return;
@@ -500,23 +493,22 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
500 if (state < 0) 493 if (state < 0)
501 return state; 494 return state;
502 if (state == TB_PORT_DISABLED) { 495 if (state == TB_PORT_DISABLED) {
503 tb_port_info(port, "is disabled (state: 0)\n"); 496 tb_port_dbg(port, "is disabled (state: 0)\n");
504 return 0; 497 return 0;
505 } 498 }
506 if (state == TB_PORT_UNPLUGGED) { 499 if (state == TB_PORT_UNPLUGGED) {
507 if (wait_if_unplugged) { 500 if (wait_if_unplugged) {
508 /* used during resume */ 501 /* used during resume */
509 tb_port_info(port, 502 tb_port_dbg(port,
510 "is unplugged (state: 7), retrying...\n"); 503 "is unplugged (state: 7), retrying...\n");
511 msleep(100); 504 msleep(100);
512 continue; 505 continue;
513 } 506 }
514 tb_port_info(port, "is unplugged (state: 7)\n"); 507 tb_port_dbg(port, "is unplugged (state: 7)\n");
515 return 0; 508 return 0;
516 } 509 }
517 if (state == TB_PORT_UP) { 510 if (state == TB_PORT_UP) {
518 tb_port_info(port, 511 tb_port_dbg(port, "is connected, link is up (state: 2)\n");
519 "is connected, link is up (state: 2)\n");
520 return 1; 512 return 1;
521 } 513 }
522 514
@@ -524,9 +516,9 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
524 * After plug-in the state is TB_PORT_CONNECTING. Give it some 516 * After plug-in the state is TB_PORT_CONNECTING. Give it some
525 * time. 517 * time.
526 */ 518 */
527 tb_port_info(port, 519 tb_port_dbg(port,
528 "is connected, link is not up (state: %d), retrying...\n", 520 "is connected, link is not up (state: %d), retrying...\n",
529 state); 521 state);
530 msleep(100); 522 msleep(100);
531 } 523 }
532 tb_port_warn(port, 524 tb_port_warn(port,
@@ -544,19 +536,47 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
544 */ 536 */
545int tb_port_add_nfc_credits(struct tb_port *port, int credits) 537int tb_port_add_nfc_credits(struct tb_port *port, int credits)
546{ 538{
547 if (credits == 0) 539 u32 nfc_credits;
540
541 if (credits == 0 || port->sw->is_unplugged)
548 return 0; 542 return 0;
549 tb_port_info(port, 543
550 "adding %#x NFC credits (%#x -> %#x)", 544 nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK;
551 credits, 545 nfc_credits += credits;
552 port->config.nfc_credits, 546
553 port->config.nfc_credits + credits); 547 tb_port_dbg(port, "adding %d NFC credits to %lu",
554 port->config.nfc_credits += credits; 548 credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK);
549
550 port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK;
551 port->config.nfc_credits |= nfc_credits;
552
555 return tb_port_write(port, &port->config.nfc_credits, 553 return tb_port_write(port, &port->config.nfc_credits,
556 TB_CFG_PORT, 4, 1); 554 TB_CFG_PORT, 4, 1);
557} 555}
558 556
559/** 557/**
558 * tb_port_set_initial_credits() - Set initial port link credits allocated
559 * @port: Port to set the initial credits
 560 * @credits: Number of credits to allocate
561 *
562 * Set initial credits value to be used for ingress shared buffering.
563 */
564int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
565{
566 u32 data;
567 int ret;
568
569 ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1);
570 if (ret)
571 return ret;
572
573 data &= ~TB_PORT_LCA_MASK;
574 data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK;
575
576 return tb_port_write(port, &data, TB_CFG_PORT, 5, 1);
577}
578
579/**
560 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER 580 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
561 * 581 *
562 * Return: Returns 0 on success or an error code on failure. 582 * Return: Returns 0 on success or an error code on failure.
@@ -564,7 +584,7 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
564int tb_port_clear_counter(struct tb_port *port, int counter) 584int tb_port_clear_counter(struct tb_port *port, int counter)
565{ 585{
566 u32 zero[3] = { 0, 0, 0 }; 586 u32 zero[3] = { 0, 0, 0 };
567 tb_port_info(port, "clearing counter %d\n", counter); 587 tb_port_dbg(port, "clearing counter %d\n", counter);
568 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3); 588 return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
569} 589}
570 590
@@ -593,15 +613,304 @@ static int tb_init_port(struct tb_port *port)
593 port->cap_phy = cap; 613 port->cap_phy = cap;
594 else 614 else
595 tb_port_WARN(port, "non switch port without a PHY\n"); 615 tb_port_WARN(port, "non switch port without a PHY\n");
616 } else if (port->port != 0) {
617 cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
618 if (cap > 0)
619 port->cap_adap = cap;
596 } 620 }
597 621
598 tb_dump_port(port->sw->tb, &port->config); 622 tb_dump_port(port->sw->tb, &port->config);
599 623
600 /* TODO: Read dual link port, DP port and more from EEPROM. */ 624 /* Control port does not need HopID allocation */
625 if (port->port) {
626 ida_init(&port->in_hopids);
627 ida_init(&port->out_hopids);
628 }
629
601 return 0; 630 return 0;
602 631
603} 632}
604 633
634static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
635 int max_hopid)
636{
637 int port_max_hopid;
638 struct ida *ida;
639
640 if (in) {
641 port_max_hopid = port->config.max_in_hop_id;
642 ida = &port->in_hopids;
643 } else {
644 port_max_hopid = port->config.max_out_hop_id;
645 ida = &port->out_hopids;
646 }
647
648 /* HopIDs 0-7 are reserved */
649 if (min_hopid < TB_PATH_MIN_HOPID)
650 min_hopid = TB_PATH_MIN_HOPID;
651
652 if (max_hopid < 0 || max_hopid > port_max_hopid)
653 max_hopid = port_max_hopid;
654
655 return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
656}
657
658/**
659 * tb_port_alloc_in_hopid() - Allocate input HopID from port
660 * @port: Port to allocate HopID for
661 * @min_hopid: Minimum acceptable input HopID
662 * @max_hopid: Maximum acceptable input HopID
663 *
664 * Return: HopID between @min_hopid and @max_hopid or negative errno in
665 * case of error.
666 */
667int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
668{
669 return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
670}
671
672/**
673 * tb_port_alloc_out_hopid() - Allocate output HopID from port
674 * @port: Port to allocate HopID for
675 * @min_hopid: Minimum acceptable output HopID
676 * @max_hopid: Maximum acceptable output HopID
677 *
678 * Return: HopID between @min_hopid and @max_hopid or negative errno in
679 * case of error.
680 */
681int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
682{
683 return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
684}
685
686/**
687 * tb_port_release_in_hopid() - Release allocated input HopID from port
688 * @port: Port whose HopID to release
689 * @hopid: HopID to release
690 */
691void tb_port_release_in_hopid(struct tb_port *port, int hopid)
692{
693 ida_simple_remove(&port->in_hopids, hopid);
694}
695
696/**
697 * tb_port_release_out_hopid() - Release allocated output HopID from port
698 * @port: Port whose HopID to release
699 * @hopid: HopID to release
700 */
701void tb_port_release_out_hopid(struct tb_port *port, int hopid)
702{
703 ida_simple_remove(&port->out_hopids, hopid);
704}
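
The four helpers above pair an IDA allocation with a matching release; a hypothetical user asking for any input HopID from the reserved boundary upwards:

    int hopid;

    hopid = tb_port_alloc_in_hopid(port, 8, -1);    /* -1: up to the port maximum */
    if (hopid < 0)
            return hopid;

    /* ... program a hop entry that uses hopid ... */

    tb_port_release_in_hopid(port, hopid);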
705
706/**
707 * tb_next_port_on_path() - Return next port for given port on a path
708 * @start: Start port of the walk
709 * @end: End port of the walk
710 * @prev: Previous port (%NULL if this is the first)
711 *
712 * This function can be used to walk from one port to another if they
 713 * are connected through zero or more switches. If @prev is a dual
 714 * link port, the function follows that link and returns the other
 715 * end of that same link.
716 *
717 * If the @end port has been reached, return %NULL.
718 *
719 * Domain tb->lock must be held when this function is called.
720 */
721struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
722 struct tb_port *prev)
723{
724 struct tb_port *next;
725
726 if (!prev)
727 return start;
728
729 if (prev->sw == end->sw) {
730 if (prev == end)
731 return NULL;
732 return end;
733 }
734
735 if (start->sw->config.depth < end->sw->config.depth) {
736 if (prev->remote &&
737 prev->remote->sw->config.depth > prev->sw->config.depth)
738 next = prev->remote;
739 else
740 next = tb_port_at(tb_route(end->sw), prev->sw);
741 } else {
742 if (tb_is_upstream_port(prev)) {
743 next = prev->remote;
744 } else {
745 next = tb_upstream_port(prev->sw);
746 /*
747 * Keep the same link if prev and next are both
748 * dual link ports.
749 */
750 if (next->dual_link_port &&
751 next->link_nr != prev->link_nr) {
752 next = next->dual_link_port;
753 }
754 }
755 }
756
757 return next;
758}
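
The %NULL conventions above make the helper usable as a plain iterator; a sketch walking every port between two hypothetical endpoints:

    struct tb_port *p = NULL;

    /* The first call returns @start; the walk ends with NULL after @end */
    while ((p = tb_next_port_on_path(start, end, p)) != NULL) {
            /* visit p */
    }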
759
760/**
761 * tb_port_is_enabled() - Is the adapter port enabled
762 * @port: Port to check
763 */
764bool tb_port_is_enabled(struct tb_port *port)
765{
766 switch (port->config.type) {
767 case TB_TYPE_PCIE_UP:
768 case TB_TYPE_PCIE_DOWN:
769 return tb_pci_port_is_enabled(port);
770
771 case TB_TYPE_DP_HDMI_IN:
772 case TB_TYPE_DP_HDMI_OUT:
773 return tb_dp_port_is_enabled(port);
774
775 default:
776 return false;
777 }
778}
779
780/**
781 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
782 * @port: PCIe port to check
783 */
784bool tb_pci_port_is_enabled(struct tb_port *port)
785{
786 u32 data;
787
788 if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
789 return false;
790
791 return !!(data & TB_PCI_EN);
792}
793
794/**
795 * tb_pci_port_enable() - Enable PCIe adapter port
796 * @port: PCIe port to enable
797 * @enable: Enable/disable the PCIe adapter
798 */
799int tb_pci_port_enable(struct tb_port *port, bool enable)
800{
801 u32 word = enable ? TB_PCI_EN : 0x0;
802 if (!port->cap_adap)
803 return -ENXIO;
804 return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1);
805}
806
807/**
808 * tb_dp_port_hpd_is_active() - Is HPD already active
809 * @port: DP out port to check
810 *
811 * Checks if the DP OUT adapter port has HDP bit already set.
812 */
813int tb_dp_port_hpd_is_active(struct tb_port *port)
814{
815 u32 data;
816 int ret;
817
818 ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1);
819 if (ret)
820 return ret;
821
822 return !!(data & TB_DP_HDP);
823}
824
825/**
826 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
827 * @port: Port to clear HPD
828 *
829 * If the DP IN port has HDP set, this function can be used to clear it.
830 */
831int tb_dp_port_hpd_clear(struct tb_port *port)
832{
833 u32 data;
834 int ret;
835
836 ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
837 if (ret)
838 return ret;
839
840 data |= TB_DP_HPDC;
841 return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1);
842}
843
844/**
845 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
846 * @port: DP IN/OUT port to set hops
847 * @video: Video Hop ID
848 * @aux_tx: AUX TX Hop ID
849 * @aux_rx: AUX RX Hop ID
850 *
 851 * Programs the specified Hop IDs for the DP IN/OUT port.
852 */
853int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
854 unsigned int aux_tx, unsigned int aux_rx)
855{
856 u32 data[2];
857 int ret;
858
859 ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap,
860 ARRAY_SIZE(data));
861 if (ret)
862 return ret;
863
864 data[0] &= ~TB_DP_VIDEO_HOPID_MASK;
865 data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK);
866
867 data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK;
868 data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK;
869 data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK;
870
871 return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap,
872 ARRAY_SIZE(data));
873}
874
875/**
876 * tb_dp_port_is_enabled() - Is DP adapter port enabled
877 * @port: DP adapter port to check
878 */
879bool tb_dp_port_is_enabled(struct tb_port *port)
880{
881 u32 data;
882
883 if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1))
884 return false;
885
886 return !!(data & (TB_DP_VIDEO_EN | TB_DP_AUX_EN));
887}
888
889/**
890 * tb_dp_port_enable() - Enables/disables DP paths of a port
891 * @port: DP IN/OUT port
892 * @enable: Enable/disable DP path
893 *
 894 * Once the Hop IDs are programmed, DP paths can be enabled or disabled by
895 * calling this function.
896 */
897int tb_dp_port_enable(struct tb_port *port, bool enable)
898{
899 u32 data;
900 int ret;
901
902 ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1);
903 if (ret)
904 return ret;
905
906 if (enable)
907 data |= TB_DP_VIDEO_EN | TB_DP_AUX_EN;
908 else
909 data &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN);
910
911 return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap, 1);
912}
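
Per the kerneldoc above, the Hop IDs must be programmed before the paths are enabled; a hypothetical DP adapter bring-up (Hop ID values illustrative):

    int ret;

    ret = tb_dp_port_set_hops(port, 8, 9, 10);  /* video, AUX TX, AUX RX */
    if (ret)
            return ret;

    return tb_dp_port_enable(port, true);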
913
605/* switch utility functions */ 914/* switch utility functions */
606 915
607static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw) 916static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
@@ -644,24 +953,6 @@ int tb_switch_reset(struct tb *tb, u64 route)
644 return res.err; 953 return res.err;
645} 954}
646 955
647struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
648{
649 u8 next_port = route; /*
650 * Routes use a stride of 8 bits,
 651 * even though a port index has 6 bits at most.
652 * */
653 if (route == 0)
654 return sw;
655 if (next_port > sw->config.max_port_number)
656 return NULL;
657 if (tb_is_upstream_port(&sw->ports[next_port]))
658 return NULL;
659 if (!sw->ports[next_port].remote)
660 return NULL;
661 return get_switch_at_route(sw->ports[next_port].remote->sw,
662 route >> TB_ROUTE_SHIFT);
663}
664
665/** 956/**
666 * tb_plug_events_active() - enable/disable plug events on a switch 957 * tb_plug_events_active() - enable/disable plug events on a switch
667 * 958 *
@@ -716,8 +1007,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
716{ 1007{
717 int ret = -EINVAL; 1008 int ret = -EINVAL;
718 1009
719 if (mutex_lock_interruptible(&switch_lock)) 1010 if (!mutex_trylock(&sw->tb->lock))
720 return -ERESTARTSYS; 1011 return restart_syscall();
721 1012
722 if (sw->authorized) 1013 if (sw->authorized)
723 goto unlock; 1014 goto unlock;
@@ -760,7 +1051,7 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
760 } 1051 }
761 1052
762unlock: 1053unlock:
763 mutex_unlock(&switch_lock); 1054 mutex_unlock(&sw->tb->lock);
764 return ret; 1055 return ret;
765} 1056}
766 1057
@@ -817,15 +1108,15 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
817 struct tb_switch *sw = tb_to_switch(dev); 1108 struct tb_switch *sw = tb_to_switch(dev);
818 ssize_t ret; 1109 ssize_t ret;
819 1110
820 if (mutex_lock_interruptible(&switch_lock)) 1111 if (!mutex_trylock(&sw->tb->lock))
821 return -ERESTARTSYS; 1112 return restart_syscall();
822 1113
823 if (sw->key) 1114 if (sw->key)
824 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); 1115 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
825 else 1116 else
826 ret = sprintf(buf, "\n"); 1117 ret = sprintf(buf, "\n");
827 1118
828 mutex_unlock(&switch_lock); 1119 mutex_unlock(&sw->tb->lock);
829 return ret; 1120 return ret;
830} 1121}
831 1122
@@ -842,8 +1133,8 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
842 else if (hex2bin(key, buf, sizeof(key))) 1133 else if (hex2bin(key, buf, sizeof(key)))
843 return -EINVAL; 1134 return -EINVAL;
844 1135
845 if (mutex_lock_interruptible(&switch_lock)) 1136 if (!mutex_trylock(&sw->tb->lock))
846 return -ERESTARTSYS; 1137 return restart_syscall();
847 1138
848 if (sw->authorized) { 1139 if (sw->authorized) {
849 ret = -EBUSY; 1140 ret = -EBUSY;
@@ -858,7 +1149,7 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
858 } 1149 }
859 } 1150 }
860 1151
861 mutex_unlock(&switch_lock); 1152 mutex_unlock(&sw->tb->lock);
862 return ret; 1153 return ret;
863} 1154}
864static DEVICE_ATTR(key, 0600, key_show, key_store); 1155static DEVICE_ATTR(key, 0600, key_show, key_store);
@@ -904,8 +1195,8 @@ static ssize_t nvm_authenticate_store(struct device *dev,
904 bool val; 1195 bool val;
905 int ret; 1196 int ret;
906 1197
907 if (mutex_lock_interruptible(&switch_lock)) 1198 if (!mutex_trylock(&sw->tb->lock))
908 return -ERESTARTSYS; 1199 return restart_syscall();
909 1200
910 /* If NVMem devices are not yet added */ 1201 /* If NVMem devices are not yet added */
911 if (!sw->nvm) { 1202 if (!sw->nvm) {
@@ -953,7 +1244,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
953 } 1244 }
954 1245
955exit_unlock: 1246exit_unlock:
956 mutex_unlock(&switch_lock); 1247 mutex_unlock(&sw->tb->lock);
957 1248
958 if (ret) 1249 if (ret)
959 return ret; 1250 return ret;
@@ -967,8 +1258,8 @@ static ssize_t nvm_version_show(struct device *dev,
967 struct tb_switch *sw = tb_to_switch(dev); 1258 struct tb_switch *sw = tb_to_switch(dev);
968 int ret; 1259 int ret;
969 1260
970 if (mutex_lock_interruptible(&switch_lock)) 1261 if (!mutex_trylock(&sw->tb->lock))
971 return -ERESTARTSYS; 1262 return restart_syscall();
972 1263
973 if (sw->safe_mode) 1264 if (sw->safe_mode)
974 ret = -ENODATA; 1265 ret = -ENODATA;
@@ -977,7 +1268,7 @@ static ssize_t nvm_version_show(struct device *dev,
977 else 1268 else
978 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); 1269 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
979 1270
980 mutex_unlock(&switch_lock); 1271 mutex_unlock(&sw->tb->lock);
981 1272
982 return ret; 1273 return ret;
983} 1274}
@@ -1063,9 +1354,17 @@ static const struct attribute_group *switch_groups[] = {
1063static void tb_switch_release(struct device *dev) 1354static void tb_switch_release(struct device *dev)
1064{ 1355{
1065 struct tb_switch *sw = tb_to_switch(dev); 1356 struct tb_switch *sw = tb_to_switch(dev);
1357 int i;
1066 1358
1067 dma_port_free(sw->dma_port); 1359 dma_port_free(sw->dma_port);
1068 1360
1361 for (i = 1; i <= sw->config.max_port_number; i++) {
1362 if (!sw->ports[i].disabled) {
1363 ida_destroy(&sw->ports[i].in_hopids);
1364 ida_destroy(&sw->ports[i].out_hopids);
1365 }
1366 }
1367
1069 kfree(sw->uuid); 1368 kfree(sw->uuid);
1070 kfree(sw->device_name); 1369 kfree(sw->device_name);
1071 kfree(sw->vendor_name); 1370 kfree(sw->vendor_name);
@@ -1150,24 +1449,32 @@ static int tb_switch_get_generation(struct tb_switch *sw)
1150 * separately. The returned switch should be released by calling 1449 * separately. The returned switch should be released by calling
1151 * tb_switch_put(). 1450 * tb_switch_put().
1152 * 1451 *
1153 * Return: Pointer to the allocated switch or %NULL in case of failure 1452 * Return: Pointer to the allocated switch or ERR_PTR() in case of
1453 * failure.
1154 */ 1454 */
1155struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, 1455struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
1156 u64 route) 1456 u64 route)
1157{ 1457{
1158 int i;
1159 int cap;
1160 struct tb_switch *sw; 1458 struct tb_switch *sw;
1161 int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); 1459 int upstream_port;
1460 int i, ret, depth;
1461
1462 /* Make sure we do not exceed maximum topology limit */
1463 depth = tb_route_length(route);
1464 if (depth > TB_SWITCH_MAX_DEPTH)
1465 return ERR_PTR(-EADDRNOTAVAIL);
1466
1467 upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
1162 if (upstream_port < 0) 1468 if (upstream_port < 0)
1163 return NULL; 1469 return ERR_PTR(upstream_port);
1164 1470
1165 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 1471 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1166 if (!sw) 1472 if (!sw)
1167 return NULL; 1473 return ERR_PTR(-ENOMEM);
1168 1474
1169 sw->tb = tb; 1475 sw->tb = tb;
1170 if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5)) 1476 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
1477 if (ret)
1171 goto err_free_sw_ports; 1478 goto err_free_sw_ports;
1172 1479
1173 tb_dbg(tb, "current switch config:\n"); 1480 tb_dbg(tb, "current switch config:\n");
@@ -1175,16 +1482,18 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
1175 1482
1176 /* configure switch */ 1483 /* configure switch */
1177 sw->config.upstream_port_number = upstream_port; 1484 sw->config.upstream_port_number = upstream_port;
1178 sw->config.depth = tb_route_length(route); 1485 sw->config.depth = depth;
1179 sw->config.route_lo = route; 1486 sw->config.route_hi = upper_32_bits(route);
1180 sw->config.route_hi = route >> 32; 1487 sw->config.route_lo = lower_32_bits(route);
1181 sw->config.enabled = 0; 1488 sw->config.enabled = 0;
1182 1489
1183 /* initialize ports */ 1490 /* initialize ports */
1184 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), 1491 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
1185 GFP_KERNEL); 1492 GFP_KERNEL);
1186 if (!sw->ports) 1493 if (!sw->ports) {
1494 ret = -ENOMEM;
1187 goto err_free_sw_ports; 1495 goto err_free_sw_ports;
1496 }
1188 1497
1189 for (i = 0; i <= sw->config.max_port_number; i++) { 1498 for (i = 0; i <= sw->config.max_port_number; i++) {
1190 /* minimum setup for tb_find_cap and tb_drom_read to work */ 1499 /* minimum setup for tb_find_cap and tb_drom_read to work */
@@ -1194,12 +1503,16 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
1194 1503
1195 sw->generation = tb_switch_get_generation(sw); 1504 sw->generation = tb_switch_get_generation(sw);
1196 1505
1197 cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); 1506 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
1198 if (cap < 0) { 1507 if (ret < 0) {
1199 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); 1508 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
1200 goto err_free_sw_ports; 1509 goto err_free_sw_ports;
1201 } 1510 }
1202 sw->cap_plug_events = cap; 1511 sw->cap_plug_events = ret;
1512
1513 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
1514 if (ret > 0)
1515 sw->cap_lc = ret;
1203 1516
1204 /* Root switch is always authorized */ 1517 /* Root switch is always authorized */
1205 if (!route) 1518 if (!route)
@@ -1218,7 +1531,7 @@ err_free_sw_ports:
1218 kfree(sw->ports); 1531 kfree(sw->ports);
1219 kfree(sw); 1532 kfree(sw);
1220 1533
1221 return NULL; 1534 return ERR_PTR(ret);
1222} 1535}
1223 1536
1224/** 1537/**
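
Since tb_switch_alloc() now returns ERR_PTR() instead of %NULL, callers can react to the specific error code; the tb.c hunk later in this patch does roughly this:

    sw = tb_switch_alloc(tb, parent, route);
    if (IS_ERR(sw)) {
            /* -EIO/-EADDRNOTAVAIL may mean another domain is connected */
            if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
                    tb_scan_xdomain(port);
            return;
    }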
@@ -1233,7 +1546,7 @@ err_free_sw_ports:
1233 * 1546 *
1234 * The returned switch must be released by calling tb_switch_put(). 1547 * The returned switch must be released by calling tb_switch_put().
1235 * 1548 *
1236 * Return: Pointer to the allocated switch or %NULL in case of failure 1549 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
1237 */ 1550 */
1238struct tb_switch * 1551struct tb_switch *
1239tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) 1552tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
@@ -1242,7 +1555,7 @@ tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
1242 1555
1243 sw = kzalloc(sizeof(*sw), GFP_KERNEL); 1556 sw = kzalloc(sizeof(*sw), GFP_KERNEL);
1244 if (!sw) 1557 if (!sw)
1245 return NULL; 1558 return ERR_PTR(-ENOMEM);
1246 1559
1247 sw->tb = tb; 1560 sw->tb = tb;
1248 sw->config.depth = tb_route_length(route); 1561 sw->config.depth = tb_route_length(route);
@@ -1291,25 +1604,27 @@ int tb_switch_configure(struct tb_switch *sw)
1291 if (ret) 1604 if (ret)
1292 return ret; 1605 return ret;
1293 1606
1607 ret = tb_lc_configure_link(sw);
1608 if (ret)
1609 return ret;
1610
1294 return tb_plug_events_active(sw, true); 1611 return tb_plug_events_active(sw, true);
1295} 1612}
1296 1613
1297static void tb_switch_set_uuid(struct tb_switch *sw) 1614static int tb_switch_set_uuid(struct tb_switch *sw)
1298{ 1615{
1299 u32 uuid[4]; 1616 u32 uuid[4];
1300 int cap; 1617 int ret;
1301 1618
1302 if (sw->uuid) 1619 if (sw->uuid)
1303 return; 1620 return 0;
1304 1621
1305 /* 1622 /*
1306 * The newer controllers include fused UUID as part of link 1623 * The newer controllers include fused UUID as part of link
1307 * controller specific registers 1624 * controller specific registers
1308 */ 1625 */
1309 cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); 1626 ret = tb_lc_read_uuid(sw, uuid);
1310 if (cap > 0) { 1627 if (ret) {
1311 tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
1312 } else {
1313 /* 1628 /*
1314 * ICM generates UUID based on UID and fills the upper 1629 * ICM generates UUID based on UID and fills the upper
1315 * two words with ones. This is not strictly following 1630 * two words with ones. This is not strictly following
@@ -1323,6 +1638,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
1323 } 1638 }
1324 1639
1325 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); 1640 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
1641 if (!sw->uuid)
1642 return -ENOMEM;
1643 return 0;
1326} 1644}
1327 1645
1328static int tb_switch_add_dma_port(struct tb_switch *sw) 1646static int tb_switch_add_dma_port(struct tb_switch *sw)
@@ -1372,7 +1690,9 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
1372 1690
1373 if (status) { 1691 if (status) {
1374 tb_sw_info(sw, "switch flash authentication failed\n"); 1692 tb_sw_info(sw, "switch flash authentication failed\n");
1375 tb_switch_set_uuid(sw); 1693 ret = tb_switch_set_uuid(sw);
1694 if (ret)
1695 return ret;
1376 nvm_set_auth_status(sw, status); 1696 nvm_set_auth_status(sw, status);
1377 } 1697 }
1378 1698
@@ -1422,7 +1742,9 @@ int tb_switch_add(struct tb_switch *sw)
1422 } 1742 }
1423 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); 1743 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
1424 1744
1425 tb_switch_set_uuid(sw); 1745 ret = tb_switch_set_uuid(sw);
1746 if (ret)
1747 return ret;
1426 1748
1427 for (i = 0; i <= sw->config.max_port_number; i++) { 1749 for (i = 0; i <= sw->config.max_port_number; i++) {
1428 if (sw->ports[i].disabled) { 1750 if (sw->ports[i].disabled) {
@@ -1484,18 +1806,18 @@ void tb_switch_remove(struct tb_switch *sw)
1484 1806
1485 /* port 0 is the switch itself and never has a remote */ 1807 /* port 0 is the switch itself and never has a remote */
1486 for (i = 1; i <= sw->config.max_port_number; i++) { 1808 for (i = 1; i <= sw->config.max_port_number; i++) {
1487 if (tb_is_upstream_port(&sw->ports[i])) 1809 if (tb_port_has_remote(&sw->ports[i])) {
1488 continue;
1489 if (sw->ports[i].remote)
1490 tb_switch_remove(sw->ports[i].remote->sw); 1810 tb_switch_remove(sw->ports[i].remote->sw);
1491 sw->ports[i].remote = NULL; 1811 sw->ports[i].remote = NULL;
1492 if (sw->ports[i].xdomain) 1812 } else if (sw->ports[i].xdomain) {
1493 tb_xdomain_remove(sw->ports[i].xdomain); 1813 tb_xdomain_remove(sw->ports[i].xdomain);
1494 sw->ports[i].xdomain = NULL; 1814 sw->ports[i].xdomain = NULL;
1815 }
1495 } 1816 }
1496 1817
1497 if (!sw->is_unplugged) 1818 if (!sw->is_unplugged)
1498 tb_plug_events_active(sw, false); 1819 tb_plug_events_active(sw, false);
1820 tb_lc_unconfigure_link(sw);
1499 1821
1500 tb_switch_nvm_remove(sw); 1822 tb_switch_nvm_remove(sw);
1501 1823
@@ -1520,8 +1842,10 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
1520 } 1842 }
1521 sw->is_unplugged = true; 1843 sw->is_unplugged = true;
1522 for (i = 0; i <= sw->config.max_port_number; i++) { 1844 for (i = 0; i <= sw->config.max_port_number; i++) {
1523 if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote) 1845 if (tb_port_has_remote(&sw->ports[i]))
1524 tb_sw_set_unplugged(sw->ports[i].remote->sw); 1846 tb_sw_set_unplugged(sw->ports[i].remote->sw);
1847 else if (sw->ports[i].xdomain)
1848 sw->ports[i].xdomain->is_unplugged = true;
1525 } 1849 }
1526} 1850}
1527 1851
@@ -1537,6 +1861,17 @@ int tb_switch_resume(struct tb_switch *sw)
1537 if (tb_route(sw)) { 1861 if (tb_route(sw)) {
1538 u64 uid; 1862 u64 uid;
1539 1863
1864 /*
1865 * Check first that we can still read the switch config
1866 * space. It may be that there is now another domain
1867 * connected.
1868 */
1869 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
1870 if (err < 0) {
1871 tb_sw_info(sw, "switch not present anymore\n");
1872 return err;
1873 }
1874
1540 err = tb_drom_read_uid_only(sw, &uid); 1875 err = tb_drom_read_uid_only(sw, &uid);
1541 if (err) { 1876 if (err) {
1542 tb_sw_warn(sw, "uid read failed\n"); 1877 tb_sw_warn(sw, "uid read failed\n");
@@ -1555,6 +1890,10 @@ int tb_switch_resume(struct tb_switch *sw)
1555 if (err) 1890 if (err)
1556 return err; 1891 return err;
1557 1892
1893 err = tb_lc_configure_link(sw);
1894 if (err)
1895 return err;
1896
1558 err = tb_plug_events_active(sw, true); 1897 err = tb_plug_events_active(sw, true);
1559 if (err) 1898 if (err)
1560 return err; 1899 return err;
@@ -1562,15 +1901,23 @@ int tb_switch_resume(struct tb_switch *sw)
1562 /* check for surviving downstream switches */ 1901 /* check for surviving downstream switches */
1563 for (i = 1; i <= sw->config.max_port_number; i++) { 1902 for (i = 1; i <= sw->config.max_port_number; i++) {
1564 struct tb_port *port = &sw->ports[i]; 1903 struct tb_port *port = &sw->ports[i];
1565 if (tb_is_upstream_port(port)) 1904
1566 continue; 1905 if (!tb_port_has_remote(port) && !port->xdomain)
1567 if (!port->remote)
1568 continue; 1906 continue;
1569 if (tb_wait_for_port(port, true) <= 0 1907
1570 || tb_switch_resume(port->remote->sw)) { 1908 if (tb_wait_for_port(port, true) <= 0) {
1571 tb_port_warn(port, 1909 tb_port_warn(port,
1572 "lost during suspend, disconnecting\n"); 1910 "lost during suspend, disconnecting\n");
1573 tb_sw_set_unplugged(port->remote->sw); 1911 if (tb_port_has_remote(port))
1912 tb_sw_set_unplugged(port->remote->sw);
1913 else if (port->xdomain)
1914 port->xdomain->is_unplugged = true;
1915 } else if (tb_port_has_remote(port)) {
1916 if (tb_switch_resume(port->remote->sw)) {
1917 tb_port_warn(port,
1918 "lost during suspend, disconnecting\n");
1919 tb_sw_set_unplugged(port->remote->sw);
1920 }
1574 } 1921 }
1575 } 1922 }
1576 return 0; 1923 return 0;
@@ -1584,13 +1931,11 @@ void tb_switch_suspend(struct tb_switch *sw)
1584 return; 1931 return;
1585 1932
1586 for (i = 1; i <= sw->config.max_port_number; i++) { 1933 for (i = 1; i <= sw->config.max_port_number; i++) {
1587 if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote) 1934 if (tb_port_has_remote(&sw->ports[i]))
1588 tb_switch_suspend(sw->ports[i].remote->sw); 1935 tb_switch_suspend(sw->ports[i].remote->sw);
1589 } 1936 }
1590 /* 1937
1591 * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any 1938 tb_lc_set_sleep(sw);
1592 * effect?
1593 */
1594} 1939}
1595 1940
1596struct tb_sw_lookup { 1941struct tb_sw_lookup {
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 30e02c716f6c..1f7a9e1cc09c 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1,8 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Thunderbolt Cactus Ridge driver - bus logic (NHI independent) 3 * Thunderbolt driver - bus logic (NHI independent)
4 * 4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> 5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2019, Intel Corporation
6 */ 7 */
7 8
8#include <linux/slab.h> 9#include <linux/slab.h>
@@ -12,7 +13,7 @@
12 13
13#include "tb.h" 14#include "tb.h"
14#include "tb_regs.h" 15#include "tb_regs.h"
15#include "tunnel_pci.h" 16#include "tunnel.h"
16 17
17/** 18/**
18 * struct tb_cm - Simple Thunderbolt connection manager 19 * struct tb_cm - Simple Thunderbolt connection manager
@@ -27,8 +28,100 @@ struct tb_cm {
27 bool hotplug_active; 28 bool hotplug_active;
28}; 29};
29 30
31struct tb_hotplug_event {
32 struct work_struct work;
33 struct tb *tb;
34 u64 route;
35 u8 port;
36 bool unplug;
37};
38
39static void tb_handle_hotplug(struct work_struct *work);
40
41static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
42{
43 struct tb_hotplug_event *ev;
44
45 ev = kmalloc(sizeof(*ev), GFP_KERNEL);
46 if (!ev)
47 return;
48
49 ev->tb = tb;
50 ev->route = route;
51 ev->port = port;
52 ev->unplug = unplug;
53 INIT_WORK(&ev->work, tb_handle_hotplug);
54 queue_work(tb->wq, &ev->work);
55}
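
tb_handle_hotplug() itself is outside this excerpt; schematically the worker recovers the event with container_of() and frees it when done. A sketch, not the patch's actual implementation:

    static void tb_handle_hotplug(struct work_struct *work)
    {
            struct tb_hotplug_event *ev =
                    container_of(work, struct tb_hotplug_event, work);

            /* ... look up the switch from ev->route, then handle
             * plug/unplug of ev->port according to ev->unplug ... */

            kfree(ev);
    }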
56
30/* enumeration & hot plug handling */ 57/* enumeration & hot plug handling */
31 58
59static void tb_discover_tunnels(struct tb_switch *sw)
60{
61 struct tb *tb = sw->tb;
62 struct tb_cm *tcm = tb_priv(tb);
63 struct tb_port *port;
64 int i;
65
66 for (i = 1; i <= sw->config.max_port_number; i++) {
67 struct tb_tunnel *tunnel = NULL;
68
69 port = &sw->ports[i];
70 switch (port->config.type) {
71 case TB_TYPE_DP_HDMI_IN:
72 tunnel = tb_tunnel_discover_dp(tb, port);
73 break;
74
75 case TB_TYPE_PCIE_DOWN:
76 tunnel = tb_tunnel_discover_pci(tb, port);
77 break;
78
79 default:
80 break;
81 }
82
83 if (!tunnel)
84 continue;
85
86 if (tb_tunnel_is_pci(tunnel)) {
87 struct tb_switch *parent = tunnel->dst_port->sw;
88
89 while (parent != tunnel->src_port->sw) {
90 parent->boot = true;
91 parent = tb_switch_parent(parent);
92 }
93 }
94
95 list_add_tail(&tunnel->list, &tcm->tunnel_list);
96 }
97
98 for (i = 1; i <= sw->config.max_port_number; i++) {
99 if (tb_port_has_remote(&sw->ports[i]))
100 tb_discover_tunnels(sw->ports[i].remote->sw);
101 }
102}
103
104static void tb_scan_xdomain(struct tb_port *port)
105{
106 struct tb_switch *sw = port->sw;
107 struct tb *tb = sw->tb;
108 struct tb_xdomain *xd;
109 u64 route;
110
111 route = tb_downstream_route(port);
112 xd = tb_xdomain_find_by_route(tb, route);
113 if (xd) {
114 tb_xdomain_put(xd);
115 return;
116 }
117
118 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
119 NULL);
120 if (xd) {
121 tb_port_at(route, sw)->xdomain = xd;
122 tb_xdomain_add(xd);
123 }
124}
32 125
33static void tb_scan_port(struct tb_port *port); 126static void tb_scan_port(struct tb_port *port);
34 127
@@ -47,9 +140,21 @@ static void tb_scan_switch(struct tb_switch *sw)
47 */ 140 */
48static void tb_scan_port(struct tb_port *port) 141static void tb_scan_port(struct tb_port *port)
49{ 142{
143 struct tb_cm *tcm = tb_priv(port->sw->tb);
144 struct tb_port *upstream_port;
50 struct tb_switch *sw; 145 struct tb_switch *sw;
146
51 if (tb_is_upstream_port(port)) 147 if (tb_is_upstream_port(port))
52 return; 148 return;
149
150 if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
151 !tb_dp_port_is_enabled(port)) {
152 tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
153 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
154 false);
155 return;
156 }
157
53 if (port->config.type != TB_TYPE_PORT) 158 if (port->config.type != TB_TYPE_PORT)
54 return; 159 return;
55 if (port->dual_link_port && port->link_nr) 160 if (port->dual_link_port && port->link_nr)
@@ -60,45 +165,95 @@ static void tb_scan_port(struct tb_port *port)
60 if (tb_wait_for_port(port, false) <= 0) 165 if (tb_wait_for_port(port, false) <= 0)
61 return; 166 return;
62 if (port->remote) { 167 if (port->remote) {
63 tb_port_WARN(port, "port already has a remote!\n"); 168 tb_port_dbg(port, "port already has a remote\n");
64 return; 169 return;
65 } 170 }
66 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, 171 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
67 tb_downstream_route(port)); 172 tb_downstream_route(port));
68 if (!sw) 173 if (IS_ERR(sw)) {
174 /*
175 * If there is an error accessing the connected switch
176 * it may be connected to another domain. Also we allow
177 * the other domain to be connected to a max depth switch.
178 */
179 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
180 tb_scan_xdomain(port);
69 return; 181 return;
182 }
70 183
71 if (tb_switch_configure(sw)) { 184 if (tb_switch_configure(sw)) {
72 tb_switch_put(sw); 185 tb_switch_put(sw);
73 return; 186 return;
74 } 187 }
75 188
76 sw->authorized = true; 189 /*
190 * If there was previously another domain connected remove it
191 * first.
192 */
193 if (port->xdomain) {
194 tb_xdomain_remove(port->xdomain);
195 port->xdomain = NULL;
196 }
197
198 /*
199 * Do not send uevents until we have discovered all existing
200 * tunnels and know which switches were authorized already by
201 * the boot firmware.
202 */
203 if (!tcm->hotplug_active)
204 dev_set_uevent_suppress(&sw->dev, true);
77 205
78 if (tb_switch_add(sw)) { 206 if (tb_switch_add(sw)) {
79 tb_switch_put(sw); 207 tb_switch_put(sw);
80 return; 208 return;
81 } 209 }
82 210
83 port->remote = tb_upstream_port(sw); 211 /* Link the switches using both links if available */
84 tb_upstream_port(sw)->remote = port; 212 upstream_port = tb_upstream_port(sw);
213 port->remote = upstream_port;
214 upstream_port->remote = port;
215 if (port->dual_link_port && upstream_port->dual_link_port) {
216 port->dual_link_port->remote = upstream_port->dual_link_port;
217 upstream_port->dual_link_port->remote = port->dual_link_port;
218 }
219
85 tb_scan_switch(sw); 220 tb_scan_switch(sw);
86} 221}
87 222
223static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type,
224 struct tb_port *src_port, struct tb_port *dst_port)
225{
226 struct tb_cm *tcm = tb_priv(tb);
227 struct tb_tunnel *tunnel;
228
229 list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
230 if (tunnel->type == type &&
231 ((src_port && src_port == tunnel->src_port) ||
232 (dst_port && dst_port == tunnel->dst_port))) {
233 tb_tunnel_deactivate(tunnel);
234 list_del(&tunnel->list);
235 tb_tunnel_free(tunnel);
236 return 0;
237 }
238 }
239
240 return -ENODEV;
241}
242
88/** 243/**
89 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away 244 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
90 */ 245 */
91static void tb_free_invalid_tunnels(struct tb *tb) 246static void tb_free_invalid_tunnels(struct tb *tb)
92{ 247{
93 struct tb_cm *tcm = tb_priv(tb); 248 struct tb_cm *tcm = tb_priv(tb);
94 struct tb_pci_tunnel *tunnel; 249 struct tb_tunnel *tunnel;
95 struct tb_pci_tunnel *n; 250 struct tb_tunnel *n;
96 251
97 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { 252 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
98 if (tb_pci_is_invalid(tunnel)) { 253 if (tb_tunnel_is_invalid(tunnel)) {
99 tb_pci_deactivate(tunnel); 254 tb_tunnel_deactivate(tunnel);
100 list_del(&tunnel->list); 255 list_del(&tunnel->list);
101 tb_pci_free(tunnel); 256 tb_tunnel_free(tunnel);
102 } 257 }
103 } 258 }
104} 259}
@@ -111,136 +266,232 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
111 int i; 266 int i;
112 for (i = 1; i <= sw->config.max_port_number; i++) { 267 for (i = 1; i <= sw->config.max_port_number; i++) {
113 struct tb_port *port = &sw->ports[i]; 268 struct tb_port *port = &sw->ports[i];
114 if (tb_is_upstream_port(port)) 269
115 continue; 270 if (!tb_port_has_remote(port))
116 if (!port->remote)
117 continue; 271 continue;
272
118 if (port->remote->sw->is_unplugged) { 273 if (port->remote->sw->is_unplugged) {
119 tb_switch_remove(port->remote->sw); 274 tb_switch_remove(port->remote->sw);
120 port->remote = NULL; 275 port->remote = NULL;
276 if (port->dual_link_port)
277 port->dual_link_port->remote = NULL;
121 } else { 278 } else {
122 tb_free_unplugged_children(port->remote->sw); 279 tb_free_unplugged_children(port->remote->sw);
123 } 280 }
124 } 281 }
125} 282}
126 283
127
128/** 284/**
129 * find_pci_up_port() - return the first PCIe up port on @sw or NULL 285 * tb_find_port() - return the first port of @type on @sw or NULL
286 * @sw: Switch to find the port from
287 * @type: Port type to look for
130 */ 288 */
131static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw) 289static struct tb_port *tb_find_port(struct tb_switch *sw,
290 enum tb_port_type type)
132{ 291{
133 int i; 292 int i;
134 for (i = 1; i <= sw->config.max_port_number; i++) 293 for (i = 1; i <= sw->config.max_port_number; i++)
135 if (sw->ports[i].config.type == TB_TYPE_PCIE_UP) 294 if (sw->ports[i].config.type == type)
136 return &sw->ports[i]; 295 return &sw->ports[i];
137 return NULL; 296 return NULL;
138} 297}
139 298
140/** 299/**
141 * find_unused_down_port() - return the first inactive PCIe down port on @sw 300 * tb_find_unused_port() - return the first inactive port on @sw
301 * @sw: Switch to find the port on
302 * @type: Port type to look for
142 */ 303 */
143static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw) 304static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
305 enum tb_port_type type)
144{ 306{
145 int i; 307 int i;
146 int cap; 308
147 int res;
148 int data;
149 for (i = 1; i <= sw->config.max_port_number; i++) { 309 for (i = 1; i <= sw->config.max_port_number; i++) {
150 if (tb_is_upstream_port(&sw->ports[i])) 310 if (tb_is_upstream_port(&sw->ports[i]))
151 continue; 311 continue;
152 if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN) 312 if (sw->ports[i].config.type != type)
153 continue;
154 cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
155 if (cap < 0)
156 continue; 313 continue;
157 res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1); 314 if (!sw->ports[i].cap_adap)
158 if (res < 0)
159 continue; 315 continue;
160 if (data & 0x80000000) 316 if (tb_port_is_enabled(&sw->ports[i]))
161 continue; 317 continue;
162 return &sw->ports[i]; 318 return &sw->ports[i];
163 } 319 }
164 return NULL; 320 return NULL;
165} 321}
166 322
167/** 323static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
168 * tb_activate_pcie_devices() - scan for and activate PCIe devices 324 const struct tb_port *port)
169 * 325{
170 * This method is somewhat ad hoc. For now it only supports one device 326 /*
171 * per port and only devices at depth 1. 327 * To keep plugging devices consistently in the same PCIe
172 */ 328 * hierarchy, do mapping here for root switch downstream PCIe
173static void tb_activate_pcie_devices(struct tb *tb) 329 * ports.
330 */
331 if (!tb_route(sw)) {
332 int phy_port = tb_phy_port_from_link(port->port);
333 int index;
334
335 /*
336 * Hard-coded Thunderbolt port to PCIe down port mapping
337 * per controller.
338 */
339 if (tb_switch_is_cr(sw))
340 index = !phy_port ? 6 : 7;
341 else if (tb_switch_is_fr(sw))
342 index = !phy_port ? 6 : 8;
343 else
344 goto out;
345
346 /* Validate the hard-coding */
347 if (WARN_ON(index > sw->config.max_port_number))
348 goto out;
349 if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index])))
350 goto out;
351 if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index])))
352 goto out;
353
354 return &sw->ports[index];
355 }
356
357out:
358 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
359}
360
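The mapping above relies on tb_phy_port_from_link(), which turns a link number into a physical connector index; its body is outside this hunk. A minimal sketch, assuming two links per physical port as the dual-link handling elsewhere in this patch implies:

static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
        /* Links are 1-based; assume each physical connector carries two */
        return (link - 1) / 2;
}

Under that assumption links 1 and 2 resolve to phy_port 0 and pick PCIe down port 6 on both controllers, while links 3 and 4 resolve to phy_port 1 and pick port 7 on Cactus Ridge or port 8 on Falcon Ridge.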
361static int tb_tunnel_dp(struct tb *tb, struct tb_port *out)
174{ 362{
175 int i;
176 int cap;
177 u32 data;
178 struct tb_switch *sw;
179 struct tb_port *up_port;
180 struct tb_port *down_port;
181 struct tb_pci_tunnel *tunnel;
182 struct tb_cm *tcm = tb_priv(tb); 363 struct tb_cm *tcm = tb_priv(tb);
364 struct tb_switch *sw = out->sw;
365 struct tb_tunnel *tunnel;
366 struct tb_port *in;
367
368 if (tb_port_is_enabled(out))
369 return 0;
370
371 do {
372 sw = tb_to_switch(sw->dev.parent);
373 if (!sw)
374 return 0;
375 in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN);
376 } while (!in);
377
378 tunnel = tb_tunnel_alloc_dp(tb, in, out);
379 if (!tunnel) {
380 tb_port_dbg(out, "DP tunnel allocation failed\n");
381 return -ENOMEM;
382 }
183 383
184 /* scan for pcie devices at depth 1*/ 384 if (tb_tunnel_activate(tunnel)) {
185 for (i = 1; i <= tb->root_switch->config.max_port_number; i++) { 385 tb_port_info(out, "DP tunnel activation failed, aborting\n");
186 if (tb_is_upstream_port(&tb->root_switch->ports[i])) 386 tb_tunnel_free(tunnel);
187 continue; 387 return -EIO;
188 if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT) 388 }
189 continue;
190 if (!tb->root_switch->ports[i].remote)
191 continue;
192 sw = tb->root_switch->ports[i].remote->sw;
193 up_port = tb_find_pci_up_port(sw);
194 if (!up_port) {
195 tb_sw_info(sw, "no PCIe devices found, aborting\n");
196 continue;
197 }
198 389
199 /* check whether port is already activated */ 390 list_add_tail(&tunnel->list, &tcm->tunnel_list);
200 cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP); 391 return 0;
201 if (cap < 0) 392}
202 continue;
203 if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
204 continue;
205 if (data & 0x80000000) {
206 tb_port_info(up_port,
207 "PCIe port already activated, aborting\n");
208 continue;
209 }
210 393
211 down_port = tb_find_unused_down_port(tb->root_switch); 394static void tb_teardown_dp(struct tb *tb, struct tb_port *out)
212 if (!down_port) { 395{
213 tb_port_info(up_port, 396 tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out);
214 "All PCIe down ports are occupied, aborting\n"); 397}
215 continue;
216 }
217 tunnel = tb_pci_alloc(tb, up_port, down_port);
218 if (!tunnel) {
219 tb_port_info(up_port,
220 "PCIe tunnel allocation failed, aborting\n");
221 continue;
222 }
223 398
224 if (tb_pci_activate(tunnel)) { 399static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
225 tb_port_info(up_port, 400{
226 "PCIe tunnel activation failed, aborting\n"); 401 struct tb_port *up, *down, *port;
227 tb_pci_free(tunnel); 402 struct tb_cm *tcm = tb_priv(tb);
228 continue; 403 struct tb_switch *parent_sw;
229 } 404 struct tb_tunnel *tunnel;
405
406 up = tb_find_port(sw, TB_TYPE_PCIE_UP);
407 if (!up)
408 return 0;
230 409
231 list_add(&tunnel->list, &tcm->tunnel_list); 410 /*
411	 * Look up an available down port. Since we are chaining, it should
412 * be found right above this switch.
413 */
414 parent_sw = tb_to_switch(sw->dev.parent);
415 port = tb_port_at(tb_route(sw), parent_sw);
416 down = tb_find_pcie_down(parent_sw, port);
417 if (!down)
418 return 0;
419
420 tunnel = tb_tunnel_alloc_pci(tb, up, down);
421 if (!tunnel)
422 return -ENOMEM;
423
424 if (tb_tunnel_activate(tunnel)) {
425 tb_port_info(up,
426 "PCIe tunnel activation failed, aborting\n");
427 tb_tunnel_free(tunnel);
428 return -EIO;
232 } 429 }
430
431 list_add_tail(&tunnel->list, &tcm->tunnel_list);
432 return 0;
233} 433}
234 434
235/* hotplug handling */ 435static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
436{
437 struct tb_cm *tcm = tb_priv(tb);
438 struct tb_port *nhi_port, *dst_port;
439 struct tb_tunnel *tunnel;
440 struct tb_switch *sw;
236 441
237struct tb_hotplug_event { 442 sw = tb_to_switch(xd->dev.parent);
238 struct work_struct work; 443 dst_port = tb_port_at(xd->route, sw);
239 struct tb *tb; 444 nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
240 u64 route; 445
241 u8 port; 446 mutex_lock(&tb->lock);
242 bool unplug; 447 tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
243}; 448 xd->transmit_path, xd->receive_ring,
449 xd->receive_path);
450 if (!tunnel) {
451 mutex_unlock(&tb->lock);
452 return -ENOMEM;
453 }
454
455 if (tb_tunnel_activate(tunnel)) {
456 tb_port_info(nhi_port,
457 "DMA tunnel activation failed, aborting\n");
458 tb_tunnel_free(tunnel);
459 mutex_unlock(&tb->lock);
460 return -EIO;
461 }
462
463 list_add_tail(&tunnel->list, &tcm->tunnel_list);
464 mutex_unlock(&tb->lock);
465 return 0;
466}
467
468static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
469{
470 struct tb_port *dst_port;
471 struct tb_switch *sw;
472
473 sw = tb_to_switch(xd->dev.parent);
474 dst_port = tb_port_at(xd->route, sw);
475
476 /*
477	 * It is possible that the tunnel was already torn down (in
478	 * case of cable disconnect), so it is fine if we cannot find it
479 * here anymore.
480 */
481 tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
482}
483
484static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
485{
486 if (!xd->is_unplugged) {
487 mutex_lock(&tb->lock);
488 __tb_disconnect_xdomain_paths(tb, xd);
489 mutex_unlock(&tb->lock);
490 }
491 return 0;
492}
493
494/* hotplug handling */
244 495
245/** 496/**
246 * tb_handle_hotplug() - handle hotplug event 497 * tb_handle_hotplug() - handle hotplug event
@@ -258,7 +509,7 @@ static void tb_handle_hotplug(struct work_struct *work)
258 if (!tcm->hotplug_active) 509 if (!tcm->hotplug_active)
259 goto out; /* during init, suspend or shutdown */ 510 goto out; /* during init, suspend or shutdown */
260 511
261 sw = get_switch_at_route(tb->root_switch, ev->route); 512 sw = tb_switch_find_by_route(tb, ev->route);
262 if (!sw) { 513 if (!sw) {
263 tb_warn(tb, 514 tb_warn(tb,
264 "hotplug event from non existent switch %llx:%x (unplug: %d)\n", 515 "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
@@ -269,43 +520,60 @@ static void tb_handle_hotplug(struct work_struct *work)
269 tb_warn(tb, 520 tb_warn(tb,
270 "hotplug event from non existent port %llx:%x (unplug: %d)\n", 521 "hotplug event from non existent port %llx:%x (unplug: %d)\n",
271 ev->route, ev->port, ev->unplug); 522 ev->route, ev->port, ev->unplug);
272 goto out; 523 goto put_sw;
273 } 524 }
274 port = &sw->ports[ev->port]; 525 port = &sw->ports[ev->port];
275 if (tb_is_upstream_port(port)) { 526 if (tb_is_upstream_port(port)) {
276 tb_warn(tb, 527 tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
277 "hotplug event for upstream port %llx:%x (unplug: %d)\n", 528 ev->route, ev->port, ev->unplug);
278 ev->route, ev->port, ev->unplug); 529 goto put_sw;
279 goto out;
280 } 530 }
281 if (ev->unplug) { 531 if (ev->unplug) {
282 if (port->remote) { 532 if (tb_port_has_remote(port)) {
283 tb_port_info(port, "unplugged\n"); 533 tb_port_dbg(port, "switch unplugged\n");
284 tb_sw_set_unplugged(port->remote->sw); 534 tb_sw_set_unplugged(port->remote->sw);
285 tb_free_invalid_tunnels(tb); 535 tb_free_invalid_tunnels(tb);
286 tb_switch_remove(port->remote->sw); 536 tb_switch_remove(port->remote->sw);
287 port->remote = NULL; 537 port->remote = NULL;
538 if (port->dual_link_port)
539 port->dual_link_port->remote = NULL;
540 } else if (port->xdomain) {
541 struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
542
543 tb_port_dbg(port, "xdomain unplugged\n");
544 /*
545 * Service drivers are unbound during
546			 * tb_xdomain_remove(), so marking the XDomain as
547			 * unplugged here prevents a deadlock if they call
548 * tb_xdomain_disable_paths(). We will tear down
549 * the path below.
550 */
551 xd->is_unplugged = true;
552 tb_xdomain_remove(xd);
553 port->xdomain = NULL;
554 __tb_disconnect_xdomain_paths(tb, xd);
555 tb_xdomain_put(xd);
556 } else if (tb_port_is_dpout(port)) {
557 tb_teardown_dp(tb, port);
288 } else { 558 } else {
289 tb_port_info(port, 559 tb_port_dbg(port,
290 "got unplug event for disconnected port, ignoring\n"); 560 "got unplug event for disconnected port, ignoring\n");
291 } 561 }
292 } else if (port->remote) { 562 } else if (port->remote) {
293 tb_port_info(port, 563 tb_port_dbg(port, "got plug event for connected port, ignoring\n");
294 "got plug event for connected port, ignoring\n");
295 } else { 564 } else {
296 tb_port_info(port, "hotplug: scanning\n"); 565 if (tb_port_is_null(port)) {
297 tb_scan_port(port); 566 tb_port_dbg(port, "hotplug: scanning\n");
298 if (!port->remote) { 567 tb_scan_port(port);
299 tb_port_info(port, "hotplug: no switch found\n"); 568 if (!port->remote)
300 } else if (port->remote->sw->config.depth > 1) { 569 tb_port_dbg(port, "hotplug: no switch found\n");
301 tb_sw_warn(port->remote->sw, 570 } else if (tb_port_is_dpout(port)) {
302 "hotplug: chaining not supported\n"); 571 tb_tunnel_dp(tb, port);
303 } else {
304 tb_sw_info(port->remote->sw,
305 "hotplug: activating pcie devices\n");
306 tb_activate_pcie_devices(tb);
307 } 572 }
308 } 573 }
574
575put_sw:
576 tb_switch_put(sw);
309out: 577out:
310 mutex_unlock(&tb->lock); 578 mutex_unlock(&tb->lock);
311 kfree(ev); 579 kfree(ev);
@@ -320,7 +588,6 @@ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
320 const void *buf, size_t size) 588 const void *buf, size_t size)
321{ 589{
322 const struct cfg_event_pkg *pkg = buf; 590 const struct cfg_event_pkg *pkg = buf;
323 struct tb_hotplug_event *ev;
324 u64 route; 591 u64 route;
325 592
326 if (type != TB_CFG_PKG_EVENT) { 593 if (type != TB_CFG_PKG_EVENT) {
@@ -336,40 +603,59 @@ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
336 pkg->port); 603 pkg->port);
337 } 604 }
338 605
339 ev = kmalloc(sizeof(*ev), GFP_KERNEL); 606 tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
340 if (!ev)
341 return;
342 INIT_WORK(&ev->work, tb_handle_hotplug);
343 ev->tb = tb;
344 ev->route = route;
345 ev->port = pkg->port;
346 ev->unplug = pkg->unplug;
347 queue_work(tb->wq, &ev->work);
348} 607}
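tb_queue_hotplug(), called here and from tb_scan_port() above, replaces the removed inline allocation. Its definition sits earlier in tb.c outside this hunk; reconstructed from the removed lines, it presumably looks like this:

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
        struct tb_hotplug_event *ev;

        ev = kmalloc(sizeof(*ev), GFP_KERNEL);
        if (!ev)
                return;

        INIT_WORK(&ev->work, tb_handle_hotplug);
        ev->tb = tb;
        ev->route = route;
        ev->port = port;
        ev->unplug = unplug;
        queue_work(tb->wq, &ev->work);
}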
349 608
350static void tb_stop(struct tb *tb) 609static void tb_stop(struct tb *tb)
351{ 610{
352 struct tb_cm *tcm = tb_priv(tb); 611 struct tb_cm *tcm = tb_priv(tb);
353 struct tb_pci_tunnel *tunnel; 612 struct tb_tunnel *tunnel;
354 struct tb_pci_tunnel *n; 613 struct tb_tunnel *n;
355 614
356 /* tunnels are only present after everything has been initialized */ 615 /* tunnels are only present after everything has been initialized */
357 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { 616 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
358 tb_pci_deactivate(tunnel); 617 /*
359 tb_pci_free(tunnel); 618 * DMA tunnels require the driver to be functional so we
619 * tear them down. Other protocol tunnels can be left
620 * intact.
621 */
622 if (tb_tunnel_is_dma(tunnel))
623 tb_tunnel_deactivate(tunnel);
624 tb_tunnel_free(tunnel);
360 } 625 }
361 tb_switch_remove(tb->root_switch); 626 tb_switch_remove(tb->root_switch);
362 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ 627 tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
363} 628}
364 629
630static int tb_scan_finalize_switch(struct device *dev, void *data)
631{
632 if (tb_is_switch(dev)) {
633 struct tb_switch *sw = tb_to_switch(dev);
634
635 /*
636		 * If we found that the switch was already set up by the
637		 * boot firmware, mark it as authorized now before we
638		 * send the uevent to userspace.
639 */
640 if (sw->boot)
641 sw->authorized = 1;
642
643 dev_set_uevent_suppress(dev, false);
644 kobject_uevent(&dev->kobj, KOBJ_ADD);
645 device_for_each_child(dev, NULL, tb_scan_finalize_switch);
646 }
647
648 return 0;
649}
650
365static int tb_start(struct tb *tb) 651static int tb_start(struct tb *tb)
366{ 652{
367 struct tb_cm *tcm = tb_priv(tb); 653 struct tb_cm *tcm = tb_priv(tb);
368 int ret; 654 int ret;
369 655
370 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); 656 tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
371 if (!tb->root_switch) 657 if (IS_ERR(tb->root_switch))
372 return -ENOMEM; 658 return PTR_ERR(tb->root_switch);
373 659
374 /* 660 /*
375 * ICM firmware upgrade needs running firmware and in native 661 * ICM firmware upgrade needs running firmware and in native
@@ -393,7 +679,11 @@ static int tb_start(struct tb *tb)
393 679
394 /* Full scan to discover devices added before the driver was loaded. */ 680 /* Full scan to discover devices added before the driver was loaded. */
395 tb_scan_switch(tb->root_switch); 681 tb_scan_switch(tb->root_switch);
396	tb_activate_pcie_devices(tb); 682	/* Discover tunnels created by the boot firmware */
683 tb_discover_tunnels(tb->root_switch);
684	/* Make the discovered switches available to userspace */
685 device_for_each_child(&tb->root_switch->dev, NULL,
686 tb_scan_finalize_switch);
397 687
398 /* Allow tb_handle_hotplug to progress events */ 688 /* Allow tb_handle_hotplug to progress events */
399 tcm->hotplug_active = true; 689 tcm->hotplug_active = true;
@@ -415,7 +705,7 @@ static int tb_suspend_noirq(struct tb *tb)
415static int tb_resume_noirq(struct tb *tb) 705static int tb_resume_noirq(struct tb *tb)
416{ 706{
417 struct tb_cm *tcm = tb_priv(tb); 707 struct tb_cm *tcm = tb_priv(tb);
418 struct tb_pci_tunnel *tunnel, *n; 708 struct tb_tunnel *tunnel, *n;
419 709
420 tb_dbg(tb, "resuming...\n"); 710 tb_dbg(tb, "resuming...\n");
421 711
@@ -426,7 +716,7 @@ static int tb_resume_noirq(struct tb *tb)
426 tb_free_invalid_tunnels(tb); 716 tb_free_invalid_tunnels(tb);
427 tb_free_unplugged_children(tb->root_switch); 717 tb_free_unplugged_children(tb->root_switch);
428 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) 718 list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
429 tb_pci_restart(tunnel); 719 tb_tunnel_restart(tunnel);
430 if (!list_empty(&tcm->tunnel_list)) { 720 if (!list_empty(&tcm->tunnel_list)) {
431 /* 721 /*
432 * the pcie links need some time to get going. 722 * the pcie links need some time to get going.
@@ -442,12 +732,50 @@ static int tb_resume_noirq(struct tb *tb)
442 return 0; 732 return 0;
443} 733}
444 734
735static int tb_free_unplugged_xdomains(struct tb_switch *sw)
736{
737 int i, ret = 0;
738
739 for (i = 1; i <= sw->config.max_port_number; i++) {
740 struct tb_port *port = &sw->ports[i];
741
742 if (tb_is_upstream_port(port))
743 continue;
744 if (port->xdomain && port->xdomain->is_unplugged) {
745 tb_xdomain_remove(port->xdomain);
746 port->xdomain = NULL;
747 ret++;
748 } else if (port->remote) {
749 ret += tb_free_unplugged_xdomains(port->remote->sw);
750 }
751 }
752
753 return ret;
754}
755
756static void tb_complete(struct tb *tb)
757{
758 /*
759	 * Release any unplugged XDomains; if another domain was
760	 * swapped in place of an unplugged XDomain we need to run
761	 * another rescan.
762 */
763 mutex_lock(&tb->lock);
764 if (tb_free_unplugged_xdomains(tb->root_switch))
765 tb_scan_switch(tb->root_switch);
766 mutex_unlock(&tb->lock);
767}
768
445static const struct tb_cm_ops tb_cm_ops = { 769static const struct tb_cm_ops tb_cm_ops = {
446 .start = tb_start, 770 .start = tb_start,
447 .stop = tb_stop, 771 .stop = tb_stop,
448 .suspend_noirq = tb_suspend_noirq, 772 .suspend_noirq = tb_suspend_noirq,
449 .resume_noirq = tb_resume_noirq, 773 .resume_noirq = tb_resume_noirq,
774 .complete = tb_complete,
450 .handle_event = tb_handle_event, 775 .handle_event = tb_handle_event,
776 .approve_switch = tb_tunnel_pci,
777 .approve_xdomain_paths = tb_approve_xdomain_paths,
778 .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
451}; 779};
452 780
453struct tb *tb_probe(struct tb_nhi *nhi) 781struct tb *tb_probe(struct tb_nhi *nhi)
@@ -462,7 +790,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
462 if (!tb) 790 if (!tb)
463 return NULL; 791 return NULL;
464 792
465 tb->security_level = TB_SECURITY_NONE; 793 tb->security_level = TB_SECURITY_USER;
466 tb->cm_ops = &tb_cm_ops; 794 tb->cm_ops = &tb_cm_ops;
467 795
468 tcm = tb_priv(tb); 796 tcm = tb_priv(tb);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 52584c4003e3..b12c8f33d89c 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -43,6 +43,7 @@ struct tb_switch_nvm {
43}; 43};
44 44
45#define TB_SWITCH_KEY_SIZE 32 45#define TB_SWITCH_KEY_SIZE 32
46#define TB_SWITCH_MAX_DEPTH 6
46 47
47/** 48/**
48 * struct tb_switch - a thunderbolt switch 49 * struct tb_switch - a thunderbolt switch
@@ -62,6 +63,7 @@ struct tb_switch_nvm {
62 * @device_name: Name of the device (or %NULL if not known) 63 * @device_name: Name of the device (or %NULL if not known)
63 * @generation: Switch Thunderbolt generation 64 * @generation: Switch Thunderbolt generation
64 * @cap_plug_events: Offset to the plug events capability (%0 if not found) 65 * @cap_plug_events: Offset to the plug events capability (%0 if not found)
66 * @cap_lc: Offset to the link controller capability (%0 if not found)
65 * @is_unplugged: The switch is going away 67 * @is_unplugged: The switch is going away
66 * @drom: DROM of the switch (%NULL if not found) 68 * @drom: DROM of the switch (%NULL if not found)
67 * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise) 69 * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
@@ -70,7 +72,6 @@ struct tb_switch_nvm {
70 * @boot: Whether the switch was already authorized on boot or not 72 * @boot: Whether the switch was already authorized on boot or not
71 * @rpm: The switch supports runtime PM 73 * @rpm: The switch supports runtime PM
72 * @authorized: Whether the switch is authorized by user or policy 74 * @authorized: Whether the switch is authorized by user or policy
73 * @work: Work used to automatically authorize a switch
74 * @security_level: Switch supported security level 75 * @security_level: Switch supported security level
75 * @key: Contains the key used to challenge the device or %NULL if not 76 * @key: Contains the key used to challenge the device or %NULL if not
76 * supported. Size of the key is %TB_SWITCH_KEY_SIZE. 77 * supported. Size of the key is %TB_SWITCH_KEY_SIZE.
@@ -80,8 +81,7 @@ struct tb_switch_nvm {
80 * @depth: Depth in the chain this switch is connected (ICM only) 81 * @depth: Depth in the chain this switch is connected (ICM only)
81 * 82 *
82 * When the switch is being added or removed to the domain (other 83 * When the switch is being added or removed to the domain (other
83 * switches) you need to have domain lock held. For switch authorization 84 * switches) you need to have domain lock held.
84 * internal switch_lock is enough.
85 */ 85 */
86struct tb_switch { 86struct tb_switch {
87 struct device dev; 87 struct device dev;
@@ -97,6 +97,7 @@ struct tb_switch {
97 const char *device_name; 97 const char *device_name;
98 unsigned int generation; 98 unsigned int generation;
99 int cap_plug_events; 99 int cap_plug_events;
100 int cap_lc;
100 bool is_unplugged; 101 bool is_unplugged;
101 u8 *drom; 102 u8 *drom;
102 struct tb_switch_nvm *nvm; 103 struct tb_switch_nvm *nvm;
@@ -105,7 +106,6 @@ struct tb_switch {
105 bool boot; 106 bool boot;
106 bool rpm; 107 bool rpm;
107 unsigned int authorized; 108 unsigned int authorized;
108 struct work_struct work;
109 enum tb_security_level security_level; 109 enum tb_security_level security_level;
110 u8 *key; 110 u8 *key;
111 u8 connection_id; 111 u8 connection_id;
@@ -121,11 +121,14 @@ struct tb_switch {
121 * @remote: Remote port (%NULL if not connected) 121 * @remote: Remote port (%NULL if not connected)
122 * @xdomain: Remote host (%NULL if not connected) 122 * @xdomain: Remote host (%NULL if not connected)
123 * @cap_phy: Offset, zero if not found 123 * @cap_phy: Offset, zero if not found
124 * @cap_adap: Offset of the adapter specific capability (%0 if not present)
124 * @port: Port number on switch 125 * @port: Port number on switch
125 * @disabled: Disabled by eeprom 126 * @disabled: Disabled by eeprom
126 * @dual_link_port: If the switch is connected using two ports, points 127 * @dual_link_port: If the switch is connected using two ports, points
127 * to the other port. 128 * to the other port.
128 * @link_nr: Is this primary or secondary port on the dual_link. 129 * @link_nr: Is this primary or secondary port on the dual_link.
130 * @in_hopids: Currently allocated input HopIDs
131 * @out_hopids: Currently allocated output HopIDs
129 */ 132 */
130struct tb_port { 133struct tb_port {
131 struct tb_regs_port_header config; 134 struct tb_regs_port_header config;
@@ -133,19 +136,35 @@ struct tb_port {
133 struct tb_port *remote; 136 struct tb_port *remote;
134 struct tb_xdomain *xdomain; 137 struct tb_xdomain *xdomain;
135 int cap_phy; 138 int cap_phy;
139 int cap_adap;
136 u8 port; 140 u8 port;
137 bool disabled; 141 bool disabled;
138 struct tb_port *dual_link_port; 142 struct tb_port *dual_link_port;
139 u8 link_nr:1; 143 u8 link_nr:1;
144 struct ida in_hopids;
145 struct ida out_hopids;
140}; 146};
141 147
142/** 148/**
143 * struct tb_path_hop - routing information for a tb_path 149 * struct tb_path_hop - routing information for a tb_path
150 * @in_port: Ingress port of a switch
151 * @out_port: Egress port of a switch where the packet is routed out
152 * (must be on the same switch as @in_port)
153 * @in_hop_index: HopID where the path configuration entry is placed in
154 * the path config space of @in_port.
155 * @in_counter_index: Counter index to use (not used by the driver
156 * currently; %-1 to disable)
157 * @next_hop_index: HopID of the packet when it is routed out from @out_port
158 * @initial_credits: Number of initial flow control credits allocated for
159 * the path
144 * 160 *
145 * Hop configuration is always done on the IN port of a switch. 161 * Hop configuration is always done on the IN port of a switch.
146 * in_port and out_port have to be on the same switch. Packets arriving on 162 * in_port and out_port have to be on the same switch. Packets arriving on
147 * in_port with "hop" = in_hop_index will get routed to through out_port. The 163 * in_port with "hop" = in_hop_index will get routed to through out_port. The
148 * next hop to take (on out_port->remote) is determined by next_hop_index. 164 * next hop to take (on out_port->remote) is determined by
165 * next_hop_index. When routing a packet to another switch (out->remote is
166 * set) the @next_hop_index must match the @in_hop_index of that next
167 * hop to make routing possible.
149 * 168 *
150 * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in 169 * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
151 * port. 170 * port.
@@ -154,44 +173,71 @@ struct tb_path_hop {
154 struct tb_port *in_port; 173 struct tb_port *in_port;
155 struct tb_port *out_port; 174 struct tb_port *out_port;
156 int in_hop_index; 175 int in_hop_index;
157 int in_counter_index; /* write -1 to disable counters for this hop. */ 176 int in_counter_index;
158 int next_hop_index; 177 int next_hop_index;
178 unsigned int initial_credits;
159}; 179};
160 180
161/** 181/**
162 * enum tb_path_port - path options mask 182 * enum tb_path_port - path options mask
183 * @TB_PATH_NONE: Do not activate on any hop on path
184 * @TB_PATH_SOURCE: Activate on the first hop (out of src)
185 * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last)
186 * @TB_PATH_DESTINATION: Activate on the last hop (into dst)
187 * @TB_PATH_ALL: Activate on all hops on the path
163 */ 188 */
164enum tb_path_port { 189enum tb_path_port {
165 TB_PATH_NONE = 0, 190 TB_PATH_NONE = 0,
166 TB_PATH_SOURCE = 1, /* activate on the first hop (out of src) */ 191 TB_PATH_SOURCE = 1,
167 TB_PATH_INTERNAL = 2, /* activate on other hops (not the first/last) */ 192 TB_PATH_INTERNAL = 2,
168 TB_PATH_DESTINATION = 4, /* activate on the last hop (into dst) */ 193 TB_PATH_DESTINATION = 4,
169 TB_PATH_ALL = 7, 194 TB_PATH_ALL = 7,
170}; 195};
171 196
172/** 197/**
173 * struct tb_path - a unidirectional path between two ports 198 * struct tb_path - a unidirectional path between two ports
199 * @tb: Pointer to the domain structure
200 * @name: Name of the path (used for debugging)
201 * @nfc_credits: Number of non flow controlled credits allocated for the path
202 * @ingress_shared_buffer: Shared buffering used for ingress ports on the path
203 * @egress_shared_buffer: Shared buffering used for egress ports on the path
204 * @ingress_fc_enable: Flow control for ingress ports on the path
205 * @egress_fc_enable: Flow control for egress ports on the path
206 * @priority: Priority group of the path
207 * @weight: Weight of the path inside the priority group
208 * @drop_packages: Drop packages from queue tail or head
209 * @activated: Is the path active
210 * @clear_fc: Clear all flow control from the path config space entries
211 * when deactivating this path
212 * @hops: Path hops
213 * @path_length: How many hops the path uses
174 * 214 *
175 * A path consists of a number of hops (see tb_path_hop). To establish a PCIe 215 * A path consists of a number of hops (see &struct tb_path_hop). To
176 * tunnel two paths have to be created between the two PCIe ports. 216 * establish a PCIe tunnel two paths have to be created between the two
177 * 217 * PCIe ports.
178 */ 218 */
179struct tb_path { 219struct tb_path {
180 struct tb *tb; 220 struct tb *tb;
181 int nfc_credits; /* non flow controlled credits */ 221 const char *name;
222 int nfc_credits;
182 enum tb_path_port ingress_shared_buffer; 223 enum tb_path_port ingress_shared_buffer;
183 enum tb_path_port egress_shared_buffer; 224 enum tb_path_port egress_shared_buffer;
184 enum tb_path_port ingress_fc_enable; 225 enum tb_path_port ingress_fc_enable;
185 enum tb_path_port egress_fc_enable; 226 enum tb_path_port egress_fc_enable;
186 227
187 int priority:3; 228 unsigned int priority:3;
188 int weight:4; 229 int weight:4;
189 bool drop_packages; 230 bool drop_packages;
190 bool activated; 231 bool activated;
232 bool clear_fc;
191 struct tb_path_hop *hops; 233 struct tb_path_hop *hops;
192 int path_length; /* number of hops */ 234 int path_length;
193}; 235};
194 236
237/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
238#define TB_PATH_MIN_HOPID 8
239#define TB_PATH_MAX_HOPS 7
240
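HopIDs select path configuration entries per port, and the first eight are reserved by the protocol. The new in_hopids/out_hopids IDAs in struct tb_port back the tb_port_alloc_*_hopid() helpers declared later in this header; the bodies live in switch.c, so this is only a plausible sketch of the input side:

int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid)
{
        /* Never hand out the protocol-reserved HopIDs 0-7 */
        if (hopid < TB_PATH_MIN_HOPID)
                hopid = TB_PATH_MIN_HOPID;
        /* ida_simple_get() treats the end of the range as exclusive */
        return ida_simple_get(&port->in_hopids, hopid, max_hopid + 1,
                              GFP_KERNEL);
}

void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
        ida_simple_remove(&port->in_hopids, hopid);
}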
195/** 241/**
196 * struct tb_cm_ops - Connection manager specific operations vector 242 * struct tb_cm_ops - Connection manager specific operations vector
197 * @driver_ready: Called right after control channel is started. Used by 243 * @driver_ready: Called right after control channel is started. Used by
@@ -261,7 +307,20 @@ static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
261 return &sw->ports[sw->config.upstream_port_number]; 307 return &sw->ports[sw->config.upstream_port_number];
262} 308}
263 309
264static inline u64 tb_route(struct tb_switch *sw) 310/**
311 * tb_is_upstream_port() - Is the port upstream facing
312 * @port: Port to check
313 *
314 * Returns true if @port is an upstream facing port. In case of dual link
315 * ports, both return true.
316 */
317static inline bool tb_is_upstream_port(const struct tb_port *port)
318{
319 const struct tb_port *upstream_port = tb_upstream_port(port->sw);
320 return port == upstream_port || port->dual_link_port == upstream_port;
321}
322
323static inline u64 tb_route(const struct tb_switch *sw)
265{ 324{
266 return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo; 325 return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
267} 326}
@@ -276,9 +335,54 @@ static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
276 return &sw->ports[port]; 335 return &sw->ports[port];
277} 336}
278 337
338/**
339 * tb_port_has_remote() - Does the port have a switch connected downstream
340 * @port: Port to check
341 *
342 * Returns true only when the port is a primary port and has a remote set.
343 */
344static inline bool tb_port_has_remote(const struct tb_port *port)
345{
346 if (tb_is_upstream_port(port))
347 return false;
348 if (!port->remote)
349 return false;
350 if (port->dual_link_port && port->link_nr)
351 return false;
352
353 return true;
354}
355
356static inline bool tb_port_is_null(const struct tb_port *port)
357{
358 return port && port->port && port->config.type == TB_TYPE_PORT;
359}
360
361static inline bool tb_port_is_pcie_down(const struct tb_port *port)
362{
363 return port && port->config.type == TB_TYPE_PCIE_DOWN;
364}
365
366static inline bool tb_port_is_pcie_up(const struct tb_port *port)
367{
368 return port && port->config.type == TB_TYPE_PCIE_UP;
369}
370
371static inline bool tb_port_is_dpin(const struct tb_port *port)
372{
373 return port && port->config.type == TB_TYPE_DP_HDMI_IN;
374}
375
376static inline bool tb_port_is_dpout(const struct tb_port *port)
377{
378 return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
379}
380
279static inline int tb_sw_read(struct tb_switch *sw, void *buffer, 381static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
280 enum tb_cfg_space space, u32 offset, u32 length) 382 enum tb_cfg_space space, u32 offset, u32 length)
281{ 383{
384 if (sw->is_unplugged)
385 return -ENODEV;
282 return tb_cfg_read(sw->tb->ctl, 386 return tb_cfg_read(sw->tb->ctl,
283 buffer, 387 buffer,
284 tb_route(sw), 388 tb_route(sw),
@@ -291,6 +395,8 @@ static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
291static inline int tb_sw_write(struct tb_switch *sw, void *buffer, 395static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
292 enum tb_cfg_space space, u32 offset, u32 length) 396 enum tb_cfg_space space, u32 offset, u32 length)
293{ 397{
398 if (sw->is_unplugged)
399 return -ENODEV;
294 return tb_cfg_write(sw->tb->ctl, 400 return tb_cfg_write(sw->tb->ctl,
295 buffer, 401 buffer,
296 tb_route(sw), 402 tb_route(sw),
@@ -303,6 +409,8 @@ static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
303static inline int tb_port_read(struct tb_port *port, void *buffer, 409static inline int tb_port_read(struct tb_port *port, void *buffer,
304 enum tb_cfg_space space, u32 offset, u32 length) 410 enum tb_cfg_space space, u32 offset, u32 length)
305{ 411{
412 if (port->sw->is_unplugged)
413 return -ENODEV;
306 return tb_cfg_read(port->sw->tb->ctl, 414 return tb_cfg_read(port->sw->tb->ctl,
307 buffer, 415 buffer,
308 tb_route(port->sw), 416 tb_route(port->sw),
@@ -315,6 +423,8 @@ static inline int tb_port_read(struct tb_port *port, void *buffer,
315static inline int tb_port_write(struct tb_port *port, const void *buffer, 423static inline int tb_port_write(struct tb_port *port, const void *buffer,
316 enum tb_cfg_space space, u32 offset, u32 length) 424 enum tb_cfg_space space, u32 offset, u32 length)
317{ 425{
426 if (port->sw->is_unplugged)
427 return -ENODEV;
318 return tb_cfg_write(port->sw->tb->ctl, 428 return tb_cfg_write(port->sw->tb->ctl,
319 buffer, 429 buffer,
320 tb_route(port->sw), 430 tb_route(port->sw),
@@ -332,7 +442,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
332 442
333#define __TB_SW_PRINT(level, sw, fmt, arg...) \ 443#define __TB_SW_PRINT(level, sw, fmt, arg...) \
334 do { \ 444 do { \
335 struct tb_switch *__sw = (sw); \ 445 const struct tb_switch *__sw = (sw); \
336 level(__sw->tb, "%llx: " fmt, \ 446 level(__sw->tb, "%llx: " fmt, \
337 tb_route(__sw), ## arg); \ 447 tb_route(__sw), ## arg); \
338 } while (0) 448 } while (0)
@@ -343,7 +453,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
343 453
344#define __TB_PORT_PRINT(level, _port, fmt, arg...) \ 454#define __TB_PORT_PRINT(level, _port, fmt, arg...) \
345 do { \ 455 do { \
346 struct tb_port *__port = (_port); \ 456 const struct tb_port *__port = (_port); \
347 level(__port->sw->tb, "%llx:%x: " fmt, \ 457 level(__port->sw->tb, "%llx:%x: " fmt, \
348 tb_route(__port->sw), __port->port, ## arg); \ 458 tb_route(__port->sw), __port->port, ## arg); \
349 } while (0) 459 } while (0)
@@ -385,6 +495,13 @@ int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
385int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd); 495int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
386int tb_domain_disconnect_all_paths(struct tb *tb); 496int tb_domain_disconnect_all_paths(struct tb *tb);
387 497
498static inline struct tb *tb_domain_get(struct tb *tb)
499{
500 if (tb)
501 get_device(&tb->dev);
502 return tb;
503}
504
388static inline void tb_domain_put(struct tb *tb) 505static inline void tb_domain_put(struct tb *tb)
389{ 506{
390 put_device(&tb->dev); 507 put_device(&tb->dev);
@@ -401,7 +518,6 @@ void tb_switch_suspend(struct tb_switch *sw);
401int tb_switch_resume(struct tb_switch *sw); 518int tb_switch_resume(struct tb_switch *sw);
402int tb_switch_reset(struct tb *tb, u64 route); 519int tb_switch_reset(struct tb *tb, u64 route);
403void tb_sw_set_unplugged(struct tb_switch *sw); 520void tb_sw_set_unplugged(struct tb_switch *sw);
404struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
405struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, 521struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
406 u8 depth); 522 u8 depth);
407struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid); 523struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
@@ -431,14 +547,74 @@ static inline struct tb_switch *tb_to_switch(struct device *dev)
431 return NULL; 547 return NULL;
432} 548}
433 549
550static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
551{
552 return tb_to_switch(sw->dev.parent);
553}
554
555static inline bool tb_switch_is_lr(const struct tb_switch *sw)
556{
557 return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
558}
559
560static inline bool tb_switch_is_er(const struct tb_switch *sw)
561{
562 return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
563}
564
565static inline bool tb_switch_is_cr(const struct tb_switch *sw)
566{
567 switch (sw->config.device_id) {
568 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
569 case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
570 return true;
571 default:
572 return false;
573 }
574}
575
576static inline bool tb_switch_is_fr(const struct tb_switch *sw)
577{
578 switch (sw->config.device_id) {
579 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
580 case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
581 return true;
582 default:
583 return false;
584 }
585}
586
434int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); 587int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
435int tb_port_add_nfc_credits(struct tb_port *port, int credits); 588int tb_port_add_nfc_credits(struct tb_port *port, int credits);
589int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
436int tb_port_clear_counter(struct tb_port *port, int counter); 590int tb_port_clear_counter(struct tb_port *port, int counter);
591int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
592void tb_port_release_in_hopid(struct tb_port *port, int hopid);
593int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
594void tb_port_release_out_hopid(struct tb_port *port, int hopid);
595struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
596 struct tb_port *prev);
437 597
438int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); 598int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
439int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap); 599int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
440 600bool tb_port_is_enabled(struct tb_port *port);
441struct tb_path *tb_path_alloc(struct tb *tb, int num_hops); 601
602bool tb_pci_port_is_enabled(struct tb_port *port);
603int tb_pci_port_enable(struct tb_port *port, bool enable);
604
605int tb_dp_port_hpd_is_active(struct tb_port *port);
606int tb_dp_port_hpd_clear(struct tb_port *port);
607int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
608 unsigned int aux_tx, unsigned int aux_rx);
609bool tb_dp_port_is_enabled(struct tb_port *port);
610int tb_dp_port_enable(struct tb_port *port, bool enable);
611
612struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
613 struct tb_port *dst, int dst_hopid,
614 struct tb_port **last, const char *name);
615struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
616 struct tb_port *dst, int dst_hopid, int link_nr,
617 const char *name);
442void tb_path_free(struct tb_path *path); 618void tb_path_free(struct tb_path *path);
443int tb_path_activate(struct tb_path *path); 619int tb_path_activate(struct tb_path *path);
444void tb_path_deactivate(struct tb_path *path); 620void tb_path_deactivate(struct tb_path *path);
@@ -447,17 +623,16 @@ bool tb_path_is_invalid(struct tb_path *path);
447int tb_drom_read(struct tb_switch *sw); 623int tb_drom_read(struct tb_switch *sw);
448int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); 624int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
449 625
626int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
627int tb_lc_configure_link(struct tb_switch *sw);
628void tb_lc_unconfigure_link(struct tb_switch *sw);
629int tb_lc_set_sleep(struct tb_switch *sw);
450 630
451static inline int tb_route_length(u64 route) 631static inline int tb_route_length(u64 route)
452{ 632{
453 return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT; 633 return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
454} 634}
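A route string packs one port number per hop starting from the least significant bits; assuming TB_ROUTE_SHIFT is 8, which fits the new TB_SWITCH_MAX_DEPTH of 6 into 64 bits with room to spare, a short worked example:

/*
 * Example, assuming TB_ROUTE_SHIFT == 8: route 0x0102 means "leave the
 * root switch through port 2, then the next switch through port 1".
 * fls64(0x0102) == 9, so (9 + 8 - 1) / 8 == 2 hops.
 */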
455 635
456static inline bool tb_is_upstream_port(struct tb_port *port)
457{
458 return port == tb_upstream_port(port->sw);
459}
460
461/** 636/**
462 * tb_downstream_route() - get route to downstream switch 637 * tb_downstream_route() - get route to downstream switch
463 * 638 *
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 02c84aa3d018..afbe1d29bb03 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -492,6 +492,17 @@ struct tb_xdp_header {
492 u32 type; 492 u32 type;
493}; 493};
494 494
495struct tb_xdp_uuid {
496 struct tb_xdp_header hdr;
497};
498
499struct tb_xdp_uuid_response {
500 struct tb_xdp_header hdr;
501 uuid_t src_uuid;
502 u32 src_route_hi;
503 u32 src_route_lo;
504};
505
495struct tb_xdp_properties { 506struct tb_xdp_properties {
496 struct tb_xdp_header hdr; 507 struct tb_xdp_header hdr;
497 uuid_t src_uuid; 508 uuid_t src_uuid;
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 6f1ff04ee195..deb9d4a977b9 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -211,6 +211,38 @@ struct tb_regs_port_header {
211 211
212} __packed; 212} __packed;
213 213
214/* DWORD 4 */
215#define TB_PORT_NFC_CREDITS_MASK GENMASK(19, 0)
216#define TB_PORT_MAX_CREDITS_SHIFT 20
217#define TB_PORT_MAX_CREDITS_MASK GENMASK(26, 20)
218/* DWORD 5 */
219#define TB_PORT_LCA_SHIFT 22
220#define TB_PORT_LCA_MASK GENMASK(28, 22)
221
222/* Display Port adapter registers */
223
224/* DWORD 0 */
225#define TB_DP_VIDEO_HOPID_SHIFT 16
226#define TB_DP_VIDEO_HOPID_MASK GENMASK(26, 16)
227#define TB_DP_AUX_EN BIT(30)
228#define TB_DP_VIDEO_EN BIT(31)
229/* DWORD 1 */
230#define TB_DP_AUX_TX_HOPID_MASK GENMASK(10, 0)
231#define TB_DP_AUX_RX_HOPID_SHIFT 11
232#define TB_DP_AUX_RX_HOPID_MASK GENMASK(21, 11)
233/* DWORD 2 */
234#define TB_DP_HDP BIT(6)
235/* DWORD 3 */
236#define TB_DP_HPDC BIT(9)
237/* DWORD 4 */
238#define TB_DP_LOCAL_CAP 0x4
239/* DWORD 5 */
240#define TB_DP_REMOTE_CAP 0x5
241
242/* PCIe adapter registers */
243
244#define TB_PCI_EN BIT(31)
245
214/* Hop register from TB_CFG_HOPS. 8 byte per entry. */ 246/* Hop register from TB_CFG_HOPS. 8 byte per entry. */
215struct tb_regs_hop { 247struct tb_regs_hop {
216 /* DWORD 0 */ 248 /* DWORD 0 */
@@ -234,8 +266,24 @@ struct tb_regs_hop {
234 bool egress_fc:1; 266 bool egress_fc:1;
235 bool ingress_shared_buffer:1; 267 bool ingress_shared_buffer:1;
236 bool egress_shared_buffer:1; 268 bool egress_shared_buffer:1;
237 u32 unknown3:4; /* set to zero */ 269 bool pending:1;
270 u32 unknown3:3; /* set to zero */
238} __packed; 271} __packed;
239 272
273/* Common link controller registers */
274#define TB_LC_DESC 0x02
275#define TB_LC_DESC_NLC_MASK GENMASK(3, 0)
276#define TB_LC_DESC_SIZE_SHIFT 8
277#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8)
278#define TB_LC_DESC_PORT_SIZE_SHIFT 16
279#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16)
280#define TB_LC_FUSE 0x03
281
282/* Link controller registers */
283#define TB_LC_SX_CTRL 0x96
284#define TB_LC_SX_CTRL_L1C BIT(16)
285#define TB_LC_SX_CTRL_L2C BIT(20)
286#define TB_LC_SX_CTRL_UPSTREAM BIT(30)
287#define TB_LC_SX_CTRL_SLP BIT(31)
240 288
241#endif 289#endif
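These TB_LC_* registers back the tb_lc_*() helpers declared in tb.h above; their implementations are in the new lc.c, which this section does not show. A hedged sketch of tb_lc_set_sleep(), assuming TB_LC_DESC describes the count, start and stride of the per-switch link controller register blocks:

int tb_lc_set_sleep(struct tb_switch *sw)
{
        int start, size, nlc, ret, i;
        u32 desc;

        /* Find out how many link controllers the switch has */
        ret = tb_sw_read(sw, &desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
        if (ret)
                return ret;

        nlc = desc & TB_LC_DESC_NLC_MASK;
        start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
        size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

        /* Set the sleep bit in each link controller */
        for (i = 0; i < nlc; i++) {
                unsigned int offset = sw->cap_lc + start + i * size;
                u32 ctrl;

                ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
                                 offset + TB_LC_SX_CTRL, 1);
                if (ret)
                        return ret;

                ctrl |= TB_LC_SX_CTRL_SLP;
                ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
                                  offset + TB_LC_SX_CTRL, 1);
                if (ret)
                        return ret;
        }

        return 0;
}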
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
new file mode 100644
index 000000000000..31d0234837e4
--- /dev/null
+++ b/drivers/thunderbolt/tunnel.c
@@ -0,0 +1,691 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Thunderbolt driver - Tunneling support
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2019, Intel Corporation
7 */
8
9#include <linux/slab.h>
10#include <linux/list.h>
11
12#include "tunnel.h"
13#include "tb.h"
14
15/* PCIe adapters always use HopID 8 for both directions */
16#define TB_PCI_HOPID 8
17
18#define TB_PCI_PATH_DOWN 0
19#define TB_PCI_PATH_UP 1
20
21/* DP adapters use HopID 8 for AUX and 9 for Video */
22#define TB_DP_AUX_TX_HOPID 8
23#define TB_DP_AUX_RX_HOPID 8
24#define TB_DP_VIDEO_HOPID 9
25
26#define TB_DP_VIDEO_PATH_OUT 0
27#define TB_DP_AUX_PATH_OUT 1
28#define TB_DP_AUX_PATH_IN 2
29
30#define TB_DMA_PATH_OUT 0
31#define TB_DMA_PATH_IN 1
32
33static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" };
34
35#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
36 do { \
37 struct tb_tunnel *__tunnel = (tunnel); \
38 level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt, \
39 tb_route(__tunnel->src_port->sw), \
40 __tunnel->src_port->port, \
41 tb_route(__tunnel->dst_port->sw), \
42 __tunnel->dst_port->port, \
43 tb_tunnel_names[__tunnel->type], \
44 ## arg); \
45 } while (0)
46
47#define tb_tunnel_WARN(tunnel, fmt, arg...) \
48 __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
49#define tb_tunnel_warn(tunnel, fmt, arg...) \
50 __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
51#define tb_tunnel_info(tunnel, fmt, arg...) \
52 __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
53#define tb_tunnel_dbg(tunnel, fmt, arg...) \
54 __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
55
56static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
57 enum tb_tunnel_type type)
58{
59 struct tb_tunnel *tunnel;
60
61 tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
62 if (!tunnel)
63 return NULL;
64
65 tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
66 if (!tunnel->paths) {
67 tb_tunnel_free(tunnel);
68 return NULL;
69 }
70
71 INIT_LIST_HEAD(&tunnel->list);
72 tunnel->tb = tb;
73 tunnel->npaths = npaths;
74 tunnel->type = type;
75
76 return tunnel;
77}
78
79static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
80{
81 int res;
82
83 res = tb_pci_port_enable(tunnel->src_port, activate);
84 if (res)
85 return res;
86
87 if (tb_port_is_pcie_up(tunnel->dst_port))
88 return tb_pci_port_enable(tunnel->dst_port, activate);
89
90 return 0;
91}
92
93static void tb_pci_init_path(struct tb_path *path)
94{
95 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
96 path->egress_shared_buffer = TB_PATH_NONE;
97 path->ingress_fc_enable = TB_PATH_ALL;
98 path->ingress_shared_buffer = TB_PATH_NONE;
99 path->priority = 3;
100 path->weight = 1;
101 path->drop_packages = 0;
102 path->nfc_credits = 0;
103 path->hops[0].initial_credits = 7;
104 path->hops[1].initial_credits = 16;
105}
106
107/**
108 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
109 * @tb: Pointer to the domain structure
110 * @down: PCIe downstream adapter
111 *
112 * If the @down adapter is active, follows the tunnel to the PCIe upstream
113 * adapter and back. Returns the discovered tunnel or %NULL if there was
114 * no tunnel.
115 */
116struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
117{
118 struct tb_tunnel *tunnel;
119 struct tb_path *path;
120
121 if (!tb_pci_port_is_enabled(down))
122 return NULL;
123
124 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
125 if (!tunnel)
126 return NULL;
127
128 tunnel->activate = tb_pci_activate;
129 tunnel->src_port = down;
130
131 /*
132 * Discover both paths even if they are not complete. We will
133 * clean them up by calling tb_tunnel_deactivate() below in that
134 * case.
135 */
136 path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
137 &tunnel->dst_port, "PCIe Up");
138 if (!path) {
139 /* Just disable the downstream port */
140 tb_pci_port_enable(down, false);
141 goto err_free;
142 }
143 tunnel->paths[TB_PCI_PATH_UP] = path;
144 tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);
145
146 path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
147 "PCIe Down");
148 if (!path)
149 goto err_deactivate;
150 tunnel->paths[TB_PCI_PATH_DOWN] = path;
151 tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);
152
153 /* Validate that the tunnel is complete */
154 if (!tb_port_is_pcie_up(tunnel->dst_port)) {
155 tb_port_warn(tunnel->dst_port,
156 "path does not end on a PCIe adapter, cleaning up\n");
157 goto err_deactivate;
158 }
159
160 if (down != tunnel->src_port) {
161 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
162 goto err_deactivate;
163 }
164
165 if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
166 tb_tunnel_warn(tunnel,
167 "tunnel is not fully activated, cleaning up\n");
168 goto err_deactivate;
169 }
170
171 tb_tunnel_dbg(tunnel, "discovered\n");
172 return tunnel;
173
174err_deactivate:
175 tb_tunnel_deactivate(tunnel);
176err_free:
177 tb_tunnel_free(tunnel);
178
179 return NULL;
180}
181
182/**
183 * tb_tunnel_alloc_pci() - allocate a pci tunnel
184 * @tb: Pointer to the domain structure
185 * @up: PCIe upstream adapter port
186 * @down: PCIe downstream adapter port
187 *
188 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
189 * TB_TYPE_PCIE_DOWN.
190 *
191 * Return: A tb_tunnel on success or %NULL on failure.
192 */
193struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
194 struct tb_port *down)
195{
196 struct tb_tunnel *tunnel;
197 struct tb_path *path;
198
199 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
200 if (!tunnel)
201 return NULL;
202
203 tunnel->activate = tb_pci_activate;
204 tunnel->src_port = down;
205 tunnel->dst_port = up;
206
207 path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
208 "PCIe Down");
209 if (!path) {
210 tb_tunnel_free(tunnel);
211 return NULL;
212 }
213 tb_pci_init_path(path);
214 tunnel->paths[TB_PCI_PATH_UP] = path;
215
216 path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
217 "PCIe Up");
218 if (!path) {
219 tb_tunnel_free(tunnel);
220 return NULL;
221 }
222 tb_pci_init_path(path);
223 tunnel->paths[TB_PCI_PATH_DOWN] = path;
224
225 return tunnel;
226}
227
228static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
229{
230 struct tb_port *out = tunnel->dst_port;
231 struct tb_port *in = tunnel->src_port;
232 u32 in_dp_cap, out_dp_cap;
233 int ret;
234
235 /*
236 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
237 * newer generation hardware.
238 */
239 if (in->sw->generation < 2 || out->sw->generation < 2)
240 return 0;
241
242 /* Read both DP_LOCAL_CAP registers */
243 ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
244 in->cap_adap + TB_DP_LOCAL_CAP, 1);
245 if (ret)
246 return ret;
247
248 ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
249 out->cap_adap + TB_DP_LOCAL_CAP, 1);
250 if (ret)
251 return ret;
252
253 /* Write IN local caps to OUT remote caps */
254 ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
255 out->cap_adap + TB_DP_REMOTE_CAP, 1);
256 if (ret)
257 return ret;
258
259 return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
260 in->cap_adap + TB_DP_REMOTE_CAP, 1);
261}
262
263static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
264{
265 int ret;
266
267 if (active) {
268 struct tb_path **paths;
269 int last;
270
271 paths = tunnel->paths;
272 last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
273
274 tb_dp_port_set_hops(tunnel->src_port,
275 paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
276 paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
277 paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
278
279 tb_dp_port_set_hops(tunnel->dst_port,
280 paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
281 paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
282 paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
283 } else {
284 tb_dp_port_hpd_clear(tunnel->src_port);
285 tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
286 if (tb_port_is_dpout(tunnel->dst_port))
287 tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
288 }
289
290 ret = tb_dp_port_enable(tunnel->src_port, active);
291 if (ret)
292 return ret;
293
294 if (tb_port_is_dpout(tunnel->dst_port))
295 return tb_dp_port_enable(tunnel->dst_port, active);
296
297 return 0;
298}
299
300static void tb_dp_init_aux_path(struct tb_path *path)
301{
302 int i;
303
304 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
305 path->egress_shared_buffer = TB_PATH_NONE;
306 path->ingress_fc_enable = TB_PATH_ALL;
307 path->ingress_shared_buffer = TB_PATH_NONE;
308 path->priority = 2;
309 path->weight = 1;
310
311 for (i = 0; i < path->path_length; i++)
312 path->hops[i].initial_credits = 1;
313}
314
315static void tb_dp_init_video_path(struct tb_path *path, bool discover)
316{
317 u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;
318
319 path->egress_fc_enable = TB_PATH_NONE;
320 path->egress_shared_buffer = TB_PATH_NONE;
321 path->ingress_fc_enable = TB_PATH_NONE;
322 path->ingress_shared_buffer = TB_PATH_NONE;
323 path->priority = 1;
324 path->weight = 1;
325
326 if (discover) {
327 path->nfc_credits = nfc_credits & TB_PORT_NFC_CREDITS_MASK;
328 } else {
329 u32 max_credits;
330
331 max_credits = (nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
332 TB_PORT_MAX_CREDITS_SHIFT;
333 /* Leave some credits for AUX path */
334 path->nfc_credits = min(max_credits - 2, 12U);
335 }
336}
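One worked case of that credit split, assuming a port whose maximum credits field reads 16:

/*
 * Example: max_credits == 16 gives min(16 - 2, 12U) == 12 non flow
 * controlled buffers for the video path, leaving the rest free for
 * the AUX paths that share the link.
 */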
337
338/**
339 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
340 * @tb: Pointer to the domain structure
341 * @in: DP in adapter
342 *
343 * If the @in adapter is active, follows the tunnel to the DP out adapter
344 * and back. Returns the discovered tunnel or %NULL if there was no
345 * tunnel.
346 *
347 * Return: DP tunnel or %NULL if no tunnel found.
348 */
349struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
350{
351 struct tb_tunnel *tunnel;
352 struct tb_port *port;
353 struct tb_path *path;
354
355 if (!tb_dp_port_is_enabled(in))
356 return NULL;
357
358 tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
359 if (!tunnel)
360 return NULL;
361
362 tunnel->init = tb_dp_xchg_caps;
363 tunnel->activate = tb_dp_activate;
364 tunnel->src_port = in;
365
366 path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
367 &tunnel->dst_port, "Video");
368 if (!path) {
369 /* Just disable the DP IN port */
370 tb_dp_port_enable(in, false);
371 goto err_free;
372 }
373 tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
374 tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);
375
376 path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
377 if (!path)
378 goto err_deactivate;
379 tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
380 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
381
382 path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
383 &port, "AUX RX");
384 if (!path)
385 goto err_deactivate;
386 tunnel->paths[TB_DP_AUX_PATH_IN] = path;
387 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
388
389 /* Validate that the tunnel is complete */
390 if (!tb_port_is_dpout(tunnel->dst_port)) {
391 tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
392 goto err_deactivate;
393 }
394
395 if (!tb_dp_port_is_enabled(tunnel->dst_port))
396 goto err_deactivate;
397
398 if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
399 goto err_deactivate;
400
401 if (port != tunnel->src_port) {
402 tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
403 goto err_deactivate;
404 }
405
406 tb_tunnel_dbg(tunnel, "discovered\n");
407 return tunnel;
408
409err_deactivate:
410 tb_tunnel_deactivate(tunnel);
411err_free:
412 tb_tunnel_free(tunnel);
413
414 return NULL;
415}
416
417/**
418 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
419 * @tb: Pointer to the domain structure
420 * @in: DP in adapter port
421 * @out: DP out adapter port
422 *
423 * Allocates a tunnel between @in and @out that is capable of tunneling
424 * Display Port traffic.
425 *
426 * Return: A tb_tunnel on success or %NULL on failure.
427 */
428struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
429 struct tb_port *out)
430{
431 struct tb_tunnel *tunnel;
432 struct tb_path **paths;
433 struct tb_path *path;
434
435 if (WARN_ON(!in->cap_adap || !out->cap_adap))
436 return NULL;
437
438 tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
439 if (!tunnel)
440 return NULL;
441
442 tunnel->init = tb_dp_xchg_caps;
443 tunnel->activate = tb_dp_activate;
444 tunnel->src_port = in;
445 tunnel->dst_port = out;
446
447 paths = tunnel->paths;
448
449 path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
450 1, "Video");
451 if (!path)
452 goto err_free;
453 tb_dp_init_video_path(path, false);
454 paths[TB_DP_VIDEO_PATH_OUT] = path;
455
456 path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
457 TB_DP_AUX_TX_HOPID, 1, "AUX TX");
458 if (!path)
459 goto err_free;
460 tb_dp_init_aux_path(path);
461 paths[TB_DP_AUX_PATH_OUT] = path;
462
463 path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
464 TB_DP_AUX_RX_HOPID, 1, "AUX RX");
465 if (!path)
466 goto err_free;
467 tb_dp_init_aux_path(path);
468 paths[TB_DP_AUX_PATH_IN] = path;
469
470 return tunnel;
471
472err_free:
473 tb_tunnel_free(tunnel);
474 return NULL;
475}
476
477static u32 tb_dma_credits(struct tb_port *nhi)
478{
479 u32 max_credits;
480
481 max_credits = (nhi->config.nfc_credits & TB_PORT_MAX_CREDITS_MASK) >>
482 TB_PORT_MAX_CREDITS_SHIFT;
483 return min(max_credits, 13U);
484}
485
486static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
487{
488 struct tb_port *nhi = tunnel->src_port;
489 u32 credits;
490
491 credits = active ? tb_dma_credits(nhi) : 0;
492 return tb_port_set_initial_credits(nhi, credits);
493}
494
495static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
496 unsigned int efc, u32 credits)
497{
498 int i;
499
500 path->egress_fc_enable = efc;
501 path->ingress_fc_enable = TB_PATH_ALL;
502 path->egress_shared_buffer = TB_PATH_NONE;
503 path->ingress_shared_buffer = isb;
504 path->priority = 5;
505 path->weight = 1;
506 path->clear_fc = true;
507
508 for (i = 0; i < path->path_length; i++)
509 path->hops[i].initial_credits = credits;
510}
511
512/**
513 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
514 * @tb: Pointer to the domain structure
515 * @nhi: Host controller port
516 * @dst: Destination null port which the other domain is connected to
517 * @transmit_ring: NHI ring number used to send packets towards the
518 * other domain
519 * @transmit_path: HopID used for transmitting packets
520 * @receive_ring: NHI ring number used to receive packets from the
521 * other domain
522 * @receive_path: HopID used for receiving packets
523 *
524 * Return: A tb_tunnel on success or %NULL on failure.
525 */
526struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
527 struct tb_port *dst, int transmit_ring,
528 int transmit_path, int receive_ring,
529 int receive_path)
530{
531 struct tb_tunnel *tunnel;
532 struct tb_path *path;
533 u32 credits;
534
535 tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
536 if (!tunnel)
537 return NULL;
538
539 tunnel->activate = tb_dma_activate;
540 tunnel->src_port = nhi;
541 tunnel->dst_port = dst;
542
543 credits = tb_dma_credits(nhi);
544
545 path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
546 if (!path) {
547 tb_tunnel_free(tunnel);
548 return NULL;
549 }
550 tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
551 credits);
552 tunnel->paths[TB_DMA_PATH_IN] = path;
553
554 path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
555 if (!path) {
556 tb_tunnel_free(tunnel);
557 return NULL;
558 }
559 tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
560 tunnel->paths[TB_DMA_PATH_OUT] = path;
561
562 return tunnel;
563}
564
565/**
566 * tb_tunnel_free() - free a tunnel
567 * @tunnel: Tunnel to be freed
568 *
569 * Frees a tunnel. The tunnel does not need to be deactivated.
570 */
571void tb_tunnel_free(struct tb_tunnel *tunnel)
572{
573 int i;
574
575 if (!tunnel)
576 return;
577
578 for (i = 0; i < tunnel->npaths; i++) {
579 if (tunnel->paths[i])
580 tb_path_free(tunnel->paths[i]);
581 }
582
583 kfree(tunnel->paths);
584 kfree(tunnel);
585}
586
587/**
588 * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid
589 * @tunnel: Tunnel to check
590 */
591bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
592{
593 int i;
594
595 for (i = 0; i < tunnel->npaths; i++) {
596 WARN_ON(!tunnel->paths[i]->activated);
597 if (tb_path_is_invalid(tunnel->paths[i]))
598 return true;
599 }
600
601 return false;
602}
603
604/**
605 * tb_tunnel_restart() - activate a tunnel after a hardware reset
606 * @tunnel: Tunnel to restart
607 *
608 * Return: 0 on success and negative errno in case of failure
609 */
610int tb_tunnel_restart(struct tb_tunnel *tunnel)
611{
612 int res, i;
613
614 tb_tunnel_dbg(tunnel, "activating\n");
615
616 /*
617 * Make sure all paths are properly disabled before enabling
618 * them again.
619 */
620 for (i = 0; i < tunnel->npaths; i++) {
621 if (tunnel->paths[i]->activated) {
622 tb_path_deactivate(tunnel->paths[i]);
623 tunnel->paths[i]->activated = false;
624 }
625 }
626
627 if (tunnel->init) {
628 res = tunnel->init(tunnel);
629 if (res)
630 return res;
631 }
632
633 for (i = 0; i < tunnel->npaths; i++) {
634 res = tb_path_activate(tunnel->paths[i]);
635 if (res)
636 goto err;
637 }
638
639 if (tunnel->activate) {
640 res = tunnel->activate(tunnel, true);
641 if (res)
642 goto err;
643 }
644
645 return 0;
646
647err:
648 tb_tunnel_warn(tunnel, "activation failed\n");
649 tb_tunnel_deactivate(tunnel);
650 return res;
651}
652
653/**
654 * tb_tunnel_activate() - activate a tunnel
655 * @tunnel: Tunnel to activate
656 *
657 * Return: 0 on success or an error code on failure.
658 */
659int tb_tunnel_activate(struct tb_tunnel *tunnel)
660{
661 int i;
662
663 for (i = 0; i < tunnel->npaths; i++) {
664 if (tunnel->paths[i]->activated) {
665 tb_tunnel_WARN(tunnel,
666 "trying to activate an already activated tunnel\n");
667 return -EINVAL;
668 }
669 }
670
671 return tb_tunnel_restart(tunnel);
672}
673
674/**
675 * tb_tunnel_deactivate() - deactivate a tunnel
676 * @tunnel: Tunnel to deactivate
677 */
678void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
679{
680 int i;
681
682 tb_tunnel_dbg(tunnel, "deactivating\n");
683
684 if (tunnel->activate)
685 tunnel->activate(tunnel, false);
686
687 for (i = 0; i < tunnel->npaths; i++) {
688 if (tunnel->paths[i] && tunnel->paths[i]->activated)
689 tb_path_deactivate(tunnel->paths[i]);
690 }
691}
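
Taken together, the DP helpers above follow the same lifecycle as the rest of
the tunnel API: allocate, activate, and on failure or unplug deactivate and
free. A minimal caller sketch (hypothetical, not part of this patch; the DP
IN/OUT adapters are assumed to have been found already):

static int example_tunnel_dp(struct tb *tb, struct tb_port *in,
			     struct tb_port *out)
{
	struct tb_tunnel *tunnel;
	int ret;

	tunnel = tb_tunnel_alloc_dp(tb, in, out);
	if (!tunnel)
		return -ENOMEM;

	/* Runs tunnel->init (tb_dp_xchg_caps), activates each path and
	 * finally calls tunnel->activate (tb_dp_activate). */
	ret = tb_tunnel_activate(tunnel);
	if (ret) {
		tb_tunnel_free(tunnel);	/* freeing needs no deactivation */
		return ret;
	}

	/* ... later, on hot-unplug ... */
	tb_tunnel_deactivate(tunnel);
	tb_tunnel_free(tunnel);
	return 0;
}
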
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
new file mode 100644
index 000000000000..c68bbcd3a62c
--- /dev/null
+++ b/drivers/thunderbolt/tunnel.h
@@ -0,0 +1,78 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Thunderbolt driver - Tunneling support
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 * Copyright (C) 2019, Intel Corporation
7 */
8
9#ifndef TB_TUNNEL_H_
10#define TB_TUNNEL_H_
11
12#include "tb.h"
13
14enum tb_tunnel_type {
15 TB_TUNNEL_PCI,
16 TB_TUNNEL_DP,
17 TB_TUNNEL_DMA,
18};
19
20/**
21 * struct tb_tunnel - Tunnel between two ports
22 * @tb: Pointer to the domain
23 * @src_port: Source port of the tunnel
24 * @dst_port: Destination port of the tunnel. For discovered, incomplete
25 * tunnels this may be %NULL or a null adapter port instead.
26 * @paths: All paths required by the tunnel
27 * @npaths: Number of paths in @paths
28 * @init: Optional tunnel specific initialization
29 * @activate: Optional tunnel specific activation/deactivation
30 * @list: Tunnels are linked using this field
31 * @type: Type of the tunnel
32 */
33struct tb_tunnel {
34 struct tb *tb;
35 struct tb_port *src_port;
36 struct tb_port *dst_port;
37 struct tb_path **paths;
38 size_t npaths;
39 int (*init)(struct tb_tunnel *tunnel);
40 int (*activate)(struct tb_tunnel *tunnel, bool activate);
41 struct list_head list;
42 enum tb_tunnel_type type;
43};
44
45struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
46struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
47 struct tb_port *down);
48struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
49struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
50 struct tb_port *out);
51struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
52 struct tb_port *dst, int transmit_ring,
53 int transmit_path, int receive_ring,
54 int receive_path);
55
56void tb_tunnel_free(struct tb_tunnel *tunnel);
57int tb_tunnel_activate(struct tb_tunnel *tunnel);
58int tb_tunnel_restart(struct tb_tunnel *tunnel);
59void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
60bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
61
62static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
63{
64 return tunnel->type == TB_TUNNEL_PCI;
65}
66
67static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel)
68{
69 return tunnel->type == TB_TUNNEL_DP;
70}
71
72static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
73{
74 return tunnel->type == TB_TUNNEL_DMA;
75}
76
77#endif
78
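
Because struct tb_tunnel records its type, a connection manager can keep
PCIe, DP and DMA tunnels on one list and filter with the inline predicates.
A hedged sketch (tunnel_list is a hypothetical caller-side list head):

static void example_teardown_dp(struct list_head *tunnel_list)
{
	struct tb_tunnel *tunnel, *n;

	list_for_each_entry_safe(tunnel, n, tunnel_list, list) {
		if (!tb_tunnel_is_dp(tunnel))
			continue;
		tb_tunnel_deactivate(tunnel);
		list_del(&tunnel->list);
		tb_tunnel_free(tunnel);
	}
}
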
diff --git a/drivers/thunderbolt/tunnel_pci.c b/drivers/thunderbolt/tunnel_pci.c
deleted file mode 100644
index 0637537ea53f..000000000000
--- a/drivers/thunderbolt/tunnel_pci.c
+++ /dev/null
@@ -1,226 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Thunderbolt Cactus Ridge driver - PCIe tunnel
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 */
7
8#include <linux/slab.h>
9#include <linux/list.h>
10
11#include "tunnel_pci.h"
12#include "tb.h"
13
14#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
15 do { \
16 struct tb_pci_tunnel *__tunnel = (tunnel); \
17 level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt, \
18 tb_route(__tunnel->down_port->sw), \
19 __tunnel->down_port->port, \
20 tb_route(__tunnel->up_port->sw), \
21 __tunnel->up_port->port, \
22 ## arg); \
23 } while (0)
24
25#define tb_tunnel_WARN(tunnel, fmt, arg...) \
26 __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
27#define tb_tunnel_warn(tunnel, fmt, arg...) \
28 __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
29#define tb_tunnel_info(tunnel, fmt, arg...) \
30 __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
31
32static void tb_pci_init_path(struct tb_path *path)
33{
34 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
35 path->egress_shared_buffer = TB_PATH_NONE;
36 path->ingress_fc_enable = TB_PATH_ALL;
37 path->ingress_shared_buffer = TB_PATH_NONE;
38 path->priority = 3;
39 path->weight = 1;
40 path->drop_packages = 0;
41 path->nfc_credits = 0;
42}
43
44/**
45 * tb_pci_alloc() - allocate a pci tunnel
46 *
47 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
48 * TB_TYPE_PCIE_DOWN.
49 *
50 * Currently only paths consisting of two hops are supported (that is the
51 * ports must be on "adjacent" switches).
52 *
53 * The paths are hard-coded to use hop 8 (the only working hop id available on
54 * my thunderbolt devices). Therefore at most ONE path per device may be
55 * activated.
56 *
57 * Return: Returns a tb_pci_tunnel on success or NULL on failure.
58 */
59struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
60 struct tb_port *down)
61{
62 struct tb_pci_tunnel *tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
63 if (!tunnel)
64 goto err;
65 tunnel->tb = tb;
66 tunnel->down_port = down;
67 tunnel->up_port = up;
68 INIT_LIST_HEAD(&tunnel->list);
69 tunnel->path_to_up = tb_path_alloc(up->sw->tb, 2);
70 if (!tunnel->path_to_up)
71 goto err;
72 tunnel->path_to_down = tb_path_alloc(up->sw->tb, 2);
73 if (!tunnel->path_to_down)
74 goto err;
75 tb_pci_init_path(tunnel->path_to_up);
76 tb_pci_init_path(tunnel->path_to_down);
77
78 tunnel->path_to_up->hops[0].in_port = down;
79 tunnel->path_to_up->hops[0].in_hop_index = 8;
80 tunnel->path_to_up->hops[0].in_counter_index = -1;
81 tunnel->path_to_up->hops[0].out_port = tb_upstream_port(up->sw)->remote;
82 tunnel->path_to_up->hops[0].next_hop_index = 8;
83
84 tunnel->path_to_up->hops[1].in_port = tb_upstream_port(up->sw);
85 tunnel->path_to_up->hops[1].in_hop_index = 8;
86 tunnel->path_to_up->hops[1].in_counter_index = -1;
87 tunnel->path_to_up->hops[1].out_port = up;
88 tunnel->path_to_up->hops[1].next_hop_index = 8;
89
90 tunnel->path_to_down->hops[0].in_port = up;
91 tunnel->path_to_down->hops[0].in_hop_index = 8;
92 tunnel->path_to_down->hops[0].in_counter_index = -1;
93 tunnel->path_to_down->hops[0].out_port = tb_upstream_port(up->sw);
94 tunnel->path_to_down->hops[0].next_hop_index = 8;
95
96 tunnel->path_to_down->hops[1].in_port =
97 tb_upstream_port(up->sw)->remote;
98 tunnel->path_to_down->hops[1].in_hop_index = 8;
99 tunnel->path_to_down->hops[1].in_counter_index = -1;
100 tunnel->path_to_down->hops[1].out_port = down;
101 tunnel->path_to_down->hops[1].next_hop_index = 8;
102 return tunnel;
103
104err:
105 if (tunnel) {
106 if (tunnel->path_to_down)
107 tb_path_free(tunnel->path_to_down);
108 if (tunnel->path_to_up)
109 tb_path_free(tunnel->path_to_up);
110 kfree(tunnel);
111 }
112 return NULL;
113}
114
115/**
116 * tb_pci_free() - free a tunnel
117 *
118 * The tunnel must have been deactivated.
119 */
120void tb_pci_free(struct tb_pci_tunnel *tunnel)
121{
122 if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
123 tb_tunnel_WARN(tunnel, "trying to free an activated tunnel\n");
124 return;
125 }
126 tb_path_free(tunnel->path_to_up);
127 tb_path_free(tunnel->path_to_down);
128 kfree(tunnel);
129}
130
131/**
132 * tb_pci_is_invalid - check whether an activated path is still valid
133 */
134bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel)
135{
136 WARN_ON(!tunnel->path_to_up->activated);
137 WARN_ON(!tunnel->path_to_down->activated);
138
139 return tb_path_is_invalid(tunnel->path_to_up)
140 || tb_path_is_invalid(tunnel->path_to_down);
141}
142
143/**
144 * tb_pci_port_active() - activate/deactivate PCI capability
145 *
146 * Return: Returns 0 on success or an error code on failure.
147 */
148static int tb_pci_port_active(struct tb_port *port, bool active)
149{
150 u32 word = active ? 0x80000000 : 0x0;
151 int cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
152 if (cap < 0) {
153 tb_port_warn(port, "TB_PORT_CAP_ADAP not found: %d\n", cap);
154 return cap;
155 }
156 return tb_port_write(port, &word, TB_CFG_PORT, cap, 1);
157}
158
159/**
160 * tb_pci_restart() - activate a tunnel after a hardware reset
161 */
162int tb_pci_restart(struct tb_pci_tunnel *tunnel)
163{
164 int res;
165 tunnel->path_to_up->activated = false;
166 tunnel->path_to_down->activated = false;
167
168 tb_tunnel_info(tunnel, "activating\n");
169
170 res = tb_path_activate(tunnel->path_to_up);
171 if (res)
172 goto err;
173 res = tb_path_activate(tunnel->path_to_down);
174 if (res)
175 goto err;
176
177 res = tb_pci_port_active(tunnel->down_port, true);
178 if (res)
179 goto err;
180
181 res = tb_pci_port_active(tunnel->up_port, true);
182 if (res)
183 goto err;
184 return 0;
185err:
186 tb_tunnel_warn(tunnel, "activation failed\n");
187 tb_pci_deactivate(tunnel);
188 return res;
189}
190
191/**
192 * tb_pci_activate() - activate a tunnel
193 *
194 * Return: Returns 0 on success or an error code on failure.
195 */
196int tb_pci_activate(struct tb_pci_tunnel *tunnel)
197{
198 if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
199 tb_tunnel_WARN(tunnel,
200 "trying to activate an already activated tunnel\n");
201 return -EINVAL;
202 }
203
204 return tb_pci_restart(tunnel);
205}
206
207
208
209/**
210 * tb_pci_deactivate() - deactivate a tunnel
211 */
212void tb_pci_deactivate(struct tb_pci_tunnel *tunnel)
213{
214 tb_tunnel_info(tunnel, "deactivating\n");
215 /*
216 * TODO: enable reset by writing 0x04000000 to TB_CAP_PCIE + 1 on up
217 * port. Seems to have no effect?
218 */
219 tb_pci_port_active(tunnel->up_port, false);
220 tb_pci_port_active(tunnel->down_port, false);
221 if (tunnel->path_to_down->activated)
222 tb_path_deactivate(tunnel->path_to_down);
223 if (tunnel->path_to_up->activated)
224 tb_path_deactivate(tunnel->path_to_up);
225}
226
diff --git a/drivers/thunderbolt/tunnel_pci.h b/drivers/thunderbolt/tunnel_pci.h
deleted file mode 100644
index f9b65fa1fd4d..000000000000
--- a/drivers/thunderbolt/tunnel_pci.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Thunderbolt Cactus Ridge driver - PCIe tunnel
4 *
5 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6 */
7
8#ifndef TB_PCI_H_
9#define TB_PCI_H_
10
11#include "tb.h"
12
13struct tb_pci_tunnel {
14 struct tb *tb;
15 struct tb_port *up_port;
16 struct tb_port *down_port;
17 struct tb_path *path_to_up;
18 struct tb_path *path_to_down;
19 struct list_head list;
20};
21
22struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
23 struct tb_port *down);
24void tb_pci_free(struct tb_pci_tunnel *tunnel);
25int tb_pci_activate(struct tb_pci_tunnel *tunnel);
26int tb_pci_restart(struct tb_pci_tunnel *tunnel);
27void tb_pci_deactivate(struct tb_pci_tunnel *tunnel);
28bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel);
29
30#endif
31
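
For reference, the removed PCIe-only entry points map one-to-one onto the
generic API declared in tunnel.h above; the only behavioral difference is
that tb_tunnel_free(), unlike tb_pci_free(), does not require the tunnel to
be deactivated first:

/* tb_pci_alloc(tb, up, down)  ->  tb_tunnel_alloc_pci(tb, up, down) */
/* tb_pci_activate(tunnel)     ->  tb_tunnel_activate(tunnel)        */
/* tb_pci_restart(tunnel)      ->  tb_tunnel_restart(tunnel)         */
/* tb_pci_deactivate(tunnel)   ->  tb_tunnel_deactivate(tunnel)      */
/* tb_pci_free(tunnel)         ->  tb_tunnel_free(tunnel)            */
/* tb_pci_is_invalid(tunnel)   ->  tb_tunnel_is_invalid(tunnel)      */
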
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index e27dd8beb94b..5118d46702d5 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -18,6 +18,7 @@
18#include "tb.h" 18#include "tb.h"
19 19
20#define XDOMAIN_DEFAULT_TIMEOUT 5000 /* ms */ 20#define XDOMAIN_DEFAULT_TIMEOUT 5000 /* ms */
21#define XDOMAIN_UUID_RETRIES 10
21#define XDOMAIN_PROPERTIES_RETRIES 60 22#define XDOMAIN_PROPERTIES_RETRIES 60
22#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10 23#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10
23 24
@@ -222,6 +223,50 @@ static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
222 return 0; 223 return 0;
223} 224}
224 225
226static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
227 uuid_t *uuid)
228{
229 struct tb_xdp_uuid_response res;
230 struct tb_xdp_uuid req;
231 int ret;
232
233 memset(&req, 0, sizeof(req));
234 tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
235 sizeof(req));
236
237 memset(&res, 0, sizeof(res));
238 ret = __tb_xdomain_request(ctl, &req, sizeof(req),
239 TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
240 TB_CFG_PKG_XDOMAIN_RESP,
241 XDOMAIN_DEFAULT_TIMEOUT);
242 if (ret)
243 return ret;
244
245 ret = tb_xdp_handle_error(&res.hdr);
246 if (ret)
247 return ret;
248
249 uuid_copy(uuid, &res.src_uuid);
250 return 0;
251}
252
253static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
254 const uuid_t *uuid)
255{
256 struct tb_xdp_uuid_response res;
257
258 memset(&res, 0, sizeof(res));
259 tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
260 sizeof(res));
261
262 uuid_copy(&res.src_uuid, uuid);
263 res.src_route_hi = upper_32_bits(route);
264 res.src_route_lo = lower_32_bits(route);
265
266 return __tb_xdomain_response(ctl, &res, sizeof(res),
267 TB_CFG_PKG_XDOMAIN_RESP);
268}
269
225static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence, 270static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
226 enum tb_xdp_error error) 271 enum tb_xdp_error error)
227{ 272{
@@ -512,7 +557,14 @@ static void tb_xdp_handle_request(struct work_struct *work)
512 break; 557 break;
513 } 558 }
514 559
560 case UUID_REQUEST_OLD:
561 case UUID_REQUEST:
562 ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
563 break;
564
515 default: 565 default:
566 tb_xdp_error_response(ctl, route, sequence,
567 ERROR_NOT_SUPPORTED);
516 break; 568 break;
517 } 569 }
518 570
@@ -524,9 +576,11 @@ static void tb_xdp_handle_request(struct work_struct *work)
524out: 576out:
525 kfree(xw->pkg); 577 kfree(xw->pkg);
526 kfree(xw); 578 kfree(xw);
579
580 tb_domain_put(tb);
527} 581}
528 582
529static void 583static bool
530tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr, 584tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
531 size_t size) 585 size_t size)
532{ 586{
@@ -534,13 +588,18 @@ tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
534 588
535 xw = kmalloc(sizeof(*xw), GFP_KERNEL); 589 xw = kmalloc(sizeof(*xw), GFP_KERNEL);
536 if (!xw) 590 if (!xw)
537 return; 591 return false;
538 592
539 INIT_WORK(&xw->work, tb_xdp_handle_request); 593 INIT_WORK(&xw->work, tb_xdp_handle_request);
540 xw->pkg = kmemdup(hdr, size, GFP_KERNEL); 594 xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
541 xw->tb = tb; 595 if (!xw->pkg) {
596 kfree(xw);
597 return false;
598 }
599 xw->tb = tb_domain_get(tb);
542 600
543 queue_work(tb->wq, &xw->work); 601 schedule_work(&xw->work);
602 return true;
544} 603}
545 604
546/** 605/**
@@ -740,6 +799,7 @@ static void enumerate_services(struct tb_xdomain *xd)
740 struct tb_service *svc; 799 struct tb_service *svc;
741 struct tb_property *p; 800 struct tb_property *p;
742 struct device *dev; 801 struct device *dev;
802 int id;
743 803
744 /* 804 /*
745 * First remove all services that are not available anymore in 805 * First remove all services that are not available anymore in
@@ -768,7 +828,12 @@ static void enumerate_services(struct tb_xdomain *xd)
768 break; 828 break;
769 } 829 }
770 830
771 svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL); 831 id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
832 if (id < 0) {
833 kfree(svc);
834 break;
835 }
836 svc->id = id;
772 svc->dev.bus = &tb_bus_type; 837 svc->dev.bus = &tb_bus_type;
773 svc->dev.type = &tb_service_type; 838 svc->dev.type = &tb_service_type;
774 svc->dev.parent = &xd->dev; 839 svc->dev.parent = &xd->dev;
@@ -826,6 +891,55 @@ static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
826 } 891 }
827} 892}
828 893
894static void tb_xdomain_get_uuid(struct work_struct *work)
895{
896 struct tb_xdomain *xd = container_of(work, typeof(*xd),
897 get_uuid_work.work);
898 struct tb *tb = xd->tb;
899 uuid_t uuid;
900 int ret;
901
902 ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
903 if (ret < 0) {
904 if (xd->uuid_retries-- > 0) {
905 queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
906 msecs_to_jiffies(100));
907 } else {
908 dev_dbg(&xd->dev, "failed to read remote UUID\n");
909 }
910 return;
911 }
912
913 if (uuid_equal(&uuid, xd->local_uuid)) {
914 dev_dbg(&xd->dev, "intra-domain loop detected\n");
915 return;
916 }
917
918 /*
919 * If the UUID is different, there is another domain connected
920 * so mark this one unplugged and wait for the connection
921 * manager to replace it.
922 */
923 if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
924 dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
925 xd->is_unplugged = true;
926 return;
927 }
928
929 /* First time fill in the missing UUID */
930 if (!xd->remote_uuid) {
931 xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
932 if (!xd->remote_uuid)
933 return;
934 }
935
936 /* Now we can start the normal properties exchange */
937 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
938 msecs_to_jiffies(100));
939 queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
940 msecs_to_jiffies(1000));
941}
942
829static void tb_xdomain_get_properties(struct work_struct *work) 943static void tb_xdomain_get_properties(struct work_struct *work)
830{ 944{
831 struct tb_xdomain *xd = container_of(work, typeof(*xd), 945 struct tb_xdomain *xd = container_of(work, typeof(*xd),
@@ -1032,21 +1146,29 @@ static void tb_xdomain_release(struct device *dev)
1032 1146
1033static void start_handshake(struct tb_xdomain *xd) 1147static void start_handshake(struct tb_xdomain *xd)
1034{ 1148{
1149 xd->uuid_retries = XDOMAIN_UUID_RETRIES;
1035 xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES; 1150 xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
1036 xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES; 1151 xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
1037 1152
1038 /* Start exchanging properties with the other host */ 1153 if (xd->needs_uuid) {
1039 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, 1154 queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
1040 msecs_to_jiffies(100)); 1155 msecs_to_jiffies(100));
1041 queue_delayed_work(xd->tb->wq, &xd->get_properties_work, 1156 } else {
1042 msecs_to_jiffies(1000)); 1157 /* Start exchanging properties with the other host */
1158 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
1159 msecs_to_jiffies(100));
1160 queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
1161 msecs_to_jiffies(1000));
1162 }
1043} 1163}
1044 1164
1045static void stop_handshake(struct tb_xdomain *xd) 1165static void stop_handshake(struct tb_xdomain *xd)
1046{ 1166{
1167 xd->uuid_retries = 0;
1047 xd->properties_retries = 0; 1168 xd->properties_retries = 0;
1048 xd->properties_changed_retries = 0; 1169 xd->properties_changed_retries = 0;
1049 1170
1171 cancel_delayed_work_sync(&xd->get_uuid_work);
1050 cancel_delayed_work_sync(&xd->get_properties_work); 1172 cancel_delayed_work_sync(&xd->get_properties_work);
1051 cancel_delayed_work_sync(&xd->properties_changed_work); 1173 cancel_delayed_work_sync(&xd->properties_changed_work);
1052} 1174}
@@ -1089,7 +1211,7 @@ EXPORT_SYMBOL_GPL(tb_xdomain_type);
1089 * other domain is reached). 1211 * other domain is reached).
1090 * @route: Route string used to reach the other domain 1212 * @route: Route string used to reach the other domain
1091 * @local_uuid: Our local domain UUID 1213 * @local_uuid: Our local domain UUID
1092 * @remote_uuid: UUID of the other domain 1214 * @remote_uuid: UUID of the other domain (optional)
1093 * 1215 *
1094 * Allocates new XDomain structure and returns pointer to that. The 1216 * Allocates new XDomain structure and returns pointer to that. The
1095 * object must be released by calling tb_xdomain_put(). 1217 * object must be released by calling tb_xdomain_put().
@@ -1108,6 +1230,7 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
1108 xd->route = route; 1230 xd->route = route;
1109 ida_init(&xd->service_ids); 1231 ida_init(&xd->service_ids);
1110 mutex_init(&xd->lock); 1232 mutex_init(&xd->lock);
1233 INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
1111 INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties); 1234 INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
1112 INIT_DELAYED_WORK(&xd->properties_changed_work, 1235 INIT_DELAYED_WORK(&xd->properties_changed_work,
1113 tb_xdomain_properties_changed); 1236 tb_xdomain_properties_changed);
@@ -1116,9 +1239,14 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
1116 if (!xd->local_uuid) 1239 if (!xd->local_uuid)
1117 goto err_free; 1240 goto err_free;
1118 1241
1119 xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL); 1242 if (remote_uuid) {
1120 if (!xd->remote_uuid) 1243 xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
1121 goto err_free_local_uuid; 1244 GFP_KERNEL);
1245 if (!xd->remote_uuid)
1246 goto err_free_local_uuid;
1247 } else {
1248 xd->needs_uuid = true;
1249 }
1122 1250
1123 device_initialize(&xd->dev); 1251 device_initialize(&xd->dev);
1124 xd->dev.parent = get_device(parent); 1252 xd->dev.parent = get_device(parent);
@@ -1282,14 +1410,12 @@ static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
1282 struct tb_port *port = &sw->ports[i]; 1410 struct tb_port *port = &sw->ports[i];
1283 struct tb_xdomain *xd; 1411 struct tb_xdomain *xd;
1284 1412
1285 if (tb_is_upstream_port(port))
1286 continue;
1287
1288 if (port->xdomain) { 1413 if (port->xdomain) {
1289 xd = port->xdomain; 1414 xd = port->xdomain;
1290 1415
1291 if (lookup->uuid) { 1416 if (lookup->uuid) {
1292 if (uuid_equal(xd->remote_uuid, lookup->uuid)) 1417 if (xd->remote_uuid &&
1418 uuid_equal(xd->remote_uuid, lookup->uuid))
1293 return xd; 1419 return xd;
1294 } else if (lookup->link && 1420 } else if (lookup->link &&
1295 lookup->link == xd->link && 1421 lookup->link == xd->link &&
@@ -1299,7 +1425,7 @@ static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
1299 lookup->route == xd->route) { 1425 lookup->route == xd->route) {
1300 return xd; 1426 return xd;
1301 } 1427 }
1302 } else if (port->remote) { 1428 } else if (tb_port_has_remote(port)) {
1303 xd = switch_find_xdomain(port->remote->sw, lookup); 1429 xd = switch_find_xdomain(port->remote->sw, lookup);
1304 if (xd) 1430 if (xd)
1305 return xd; 1431 return xd;
@@ -1416,10 +1542,8 @@ bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
1416 * handlers in turn. 1542 * handlers in turn.
1417 */ 1543 */
1418 if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) { 1544 if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
1419 if (type == TB_CFG_PKG_XDOMAIN_REQ) { 1545 if (type == TB_CFG_PKG_XDOMAIN_REQ)
1420 tb_xdp_schedule_request(tb, hdr, size); 1546 return tb_xdp_schedule_request(tb, hdr, size);
1421 return true;
1422 }
1423 return false; 1547 return false;
1424 } 1548 }
1425 1549
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index 0ee3cd3c25ee..450e2f5c9b43 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -68,8 +68,8 @@ static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
68static ssize_t reg_store(struct device *dev, struct device_attribute *attr, 68static ssize_t reg_store(struct device *dev, struct device_attribute *attr,
69 const char *buf, size_t count); 69 const char *buf, size_t count);
70 70
71DEVICE_ATTR(reg_br, S_IRUGO|S_IWUSR|S_IWGRP, reg_show, reg_store); 71static DEVICE_ATTR(reg_br, 0664, reg_show, reg_store);
72DEVICE_ATTR(reg_or, S_IRUGO|S_IWUSR|S_IWGRP, reg_show, reg_store); 72static DEVICE_ATTR(reg_or, 0664, reg_show, reg_store);
73 73
74static ssize_t reg_show(struct device *dev, struct device_attribute *attr, 74static ssize_t reg_show(struct device *dev, struct device_attribute *attr,
75 char *buf) 75 char *buf)
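
The uio_fsl_elbc_gpcm change is cosmetic plus a linkage fix: the attributes
become static, and the symbolic mode macros are replaced with the octal form
checkpatch prefers. The two spellings denote exactly the same mode bits
(illustrative check only, static_assert() from <linux/build_bug.h>):

/* S_IRUGO = 0444, S_IWUSR = 0200, S_IWGRP = 0020 */
static_assert((S_IRUGO | S_IWUSR | S_IWGRP) == 0664);
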
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 8ca333f21292..2307b0329aec 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1298,6 +1298,20 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
1298 return ret; 1298 return ret;
1299} 1299}
1300 1300
1301static bool vbg_param_valid(enum vmmdev_hgcm_function_parameter_type type)
1302{
1303 switch (type) {
1304 case VMMDEV_HGCM_PARM_TYPE_32BIT:
1305 case VMMDEV_HGCM_PARM_TYPE_64BIT:
1306 case VMMDEV_HGCM_PARM_TYPE_LINADDR:
1307 case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
1308 case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
1309 return true;
1310 default:
1311 return false;
1312 }
1313}
1314
1301static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev, 1315static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
1302 struct vbg_session *session, bool f32bit, 1316 struct vbg_session *session, bool f32bit,
1303 struct vbg_ioctl_hgcm_call *call) 1317 struct vbg_ioctl_hgcm_call *call)
@@ -1333,6 +1347,23 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
1333 } 1347 }
1334 call->hdr.size_out = actual_size; 1348 call->hdr.size_out = actual_size;
1335 1349
1350 /* Validate parameter types */
1351 if (f32bit) {
1352 struct vmmdev_hgcm_function_parameter32 *parm =
1353 VBG_IOCTL_HGCM_CALL_PARMS32(call);
1354
1355 for (i = 0; i < call->parm_count; i++)
1356 if (!vbg_param_valid(parm[i].type))
1357 return -EINVAL;
1358 } else {
1359 struct vmmdev_hgcm_function_parameter *parm =
1360 VBG_IOCTL_HGCM_CALL_PARMS(call);
1361
1362 for (i = 0; i < call->parm_count; i++)
1363 if (!vbg_param_valid(parm[i].type))
1364 return -EINVAL;
1365 }
1366
1336 /* 1367 /*
1337 * Validate the client id. 1368 * Validate the client id.
1338 */ 1369 */
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 8b5e598ffdb3..8f2b25f1614c 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -37,6 +37,11 @@ module_param_named(active_pullup, ds2482_active_pullup, int, 0644);
37MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \ 37MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
38 "0-disable, 1-enable (default)"); 38 "0-disable, 1-enable (default)");
39 39
40/* extra configurations - e.g. 1WS */
41static int extra_config;
42module_param(extra_config, int, S_IRUGO | S_IWUSR);
43MODULE_PARM_DESC(extra_config, "Extra Configuration settings 1=APU,2=PPM,4=SPU,8=1WS");
44
40/** 45/**
41 * The DS2482 registers - there are 3 registers that are addressed by a read 46 * The DS2482 registers - there are 3 registers that are addressed by a read
42 * pointer. The read pointer is set by the last command executed. 47 * pointer. The read pointer is set by the last command executed.
@@ -70,8 +75,6 @@ MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
70#define DS2482_REG_CFG_PPM 0x02 /* presence pulse masking */ 75#define DS2482_REG_CFG_PPM 0x02 /* presence pulse masking */
71#define DS2482_REG_CFG_APU 0x01 /* active pull-up */ 76#define DS2482_REG_CFG_APU 0x01 /* active pull-up */
72 77
73/* extra configurations - e.g. 1WS */
74static int extra_config;
75 78
76/** 79/**
77 * Write and verify codes for the CHANNEL_SELECT command (DS2482-800 only). 80 * Write and verify codes for the CHANNEL_SELECT command (DS2482-800 only).
@@ -130,6 +133,8 @@ struct ds2482_data {
130 */ 133 */
131static inline u8 ds2482_calculate_config(u8 conf) 134static inline u8 ds2482_calculate_config(u8 conf)
132{ 135{
136 conf |= extra_config;
137
133 if (ds2482_active_pullup) 138 if (ds2482_active_pullup)
134 conf |= DS2482_REG_CFG_APU; 139 conf |= DS2482_REG_CFG_APU;
135 140
@@ -405,7 +410,7 @@ static u8 ds2482_w1_reset_bus(void *data)
405 /* If the chip did reset since detect, re-config it */ 410 /* If the chip did reset since detect, re-config it */
406 if (err & DS2482_REG_STS_RST) 411 if (err & DS2482_REG_STS_RST)
407 ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG, 412 ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
408 ds2482_calculate_config(extra_config)); 413 ds2482_calculate_config(0x00));
409 } 414 }
410 415
411 mutex_unlock(&pdev->access_lock); 416 mutex_unlock(&pdev->access_lock);
@@ -431,7 +436,8 @@ static u8 ds2482_w1_set_pullup(void *data, int delay)
431 ds2482_wait_1wire_idle(pdev); 436 ds2482_wait_1wire_idle(pdev);
432 /* note: it seems like both SPU and APU have to be set! */ 437 /* note: it seems like both SPU and APU have to be set! */
433 retval = ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG, 438 retval = ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
434 ds2482_calculate_config(extra_config|DS2482_REG_CFG_SPU|DS2482_REG_CFG_APU)); 439 ds2482_calculate_config(DS2482_REG_CFG_SPU |
440 DS2482_REG_CFG_APU));
435 ds2482_wait_1wire_idle(pdev); 441 ds2482_wait_1wire_idle(pdev);
436 } 442 }
437 443
@@ -484,7 +490,7 @@ static int ds2482_probe(struct i2c_client *client,
484 490
485 /* Set all config items to 0 (off) */ 491 /* Set all config items to 0 (off) */
486 ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG, 492 ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG,
487 ds2482_calculate_config(extra_config)); 493 ds2482_calculate_config(0x00));
488 494
489 mutex_init(&data->access_lock); 495 mutex_init(&data->access_lock);
490 496
@@ -559,7 +565,5 @@ module_i2c_driver(ds2482_driver);
559 565
560MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>"); 566MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
561MODULE_DESCRIPTION("DS2482 driver"); 567MODULE_DESCRIPTION("DS2482 driver");
562module_param(extra_config, int, S_IRUGO | S_IWUSR);
563MODULE_PARM_DESC(extra_config, "Extra Configuration settings 1=APU,2=PPM,3=SPU,8=1WS");
564 568
565MODULE_LICENSE("GPL"); 569MODULE_LICENSE("GPL");
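
With the ds2482 rework, extra_config is folded in at a single place
(ds2482_calculate_config()) and call sites pass only the bits specific to
that operation, which is why the reset and probe paths now pass 0x00 while
the strong-pullup path passes SPU|APU. Sketch of the composition, with the
real function's upper-nibble handling elided:

static u8 example_effective_config(u8 callsite_bits)
{
	u8 conf = callsite_bits;	/* e.g. DS2482_REG_CFG_SPU|APU */

	conf |= extra_config;		/* module parameter, e.g. 8 = 1WS */
	if (ds2482_active_pullup)
		conf |= DS2482_REG_CFG_APU;
	return conf;
}
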
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index b535d5ec35b6..92e8f0755b9a 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -138,14 +138,37 @@ static ssize_t status_control_read(struct file *filp, struct kobject *kobj,
138 W1_F29_REG_CONTROL_AND_STATUS, buf); 138 W1_F29_REG_CONTROL_AND_STATUS, buf);
139} 139}
140 140
141#ifdef CONFIG_W1_SLAVE_DS2408_READBACK
142static bool optional_read_back_valid(struct w1_slave *sl, u8 expected)
143{
144 u8 w1_buf[3];
145
146 if (w1_reset_resume_command(sl->master))
147 return false;
148
149 w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
150 w1_buf[1] = W1_F29_REG_OUTPUT_LATCH_STATE;
151 w1_buf[2] = 0;
152
153 w1_write_block(sl->master, w1_buf, 3);
154
155 return (w1_read_8(sl->master) == expected);
156}
157#else
158static bool optional_read_back_valid(struct w1_slave *sl, u8 expected)
159{
160 return true;
161}
162#endif
163
141static ssize_t output_write(struct file *filp, struct kobject *kobj, 164static ssize_t output_write(struct file *filp, struct kobject *kobj,
142 struct bin_attribute *bin_attr, char *buf, 165 struct bin_attribute *bin_attr, char *buf,
143 loff_t off, size_t count) 166 loff_t off, size_t count)
144{ 167{
145 struct w1_slave *sl = kobj_to_w1_slave(kobj); 168 struct w1_slave *sl = kobj_to_w1_slave(kobj);
146 u8 w1_buf[3]; 169 u8 w1_buf[3];
147 u8 readBack;
148 unsigned int retries = W1_F29_RETRIES; 170 unsigned int retries = W1_F29_RETRIES;
171 ssize_t bytes_written = -EIO;
149 172
150 if (count != 1 || off != 0) 173 if (count != 1 || off != 0)
151 return -EFAULT; 174 return -EFAULT;
@@ -155,54 +178,33 @@ static ssize_t output_write(struct file *filp, struct kobject *kobj,
155 dev_dbg(&sl->dev, "mutex locked"); 178 dev_dbg(&sl->dev, "mutex locked");
156 179
157 if (w1_reset_select_slave(sl)) 180 if (w1_reset_select_slave(sl))
158 goto error; 181 goto out;
159 182
160 while (retries--) { 183 do {
161 w1_buf[0] = W1_F29_FUNC_CHANN_ACCESS_WRITE; 184 w1_buf[0] = W1_F29_FUNC_CHANN_ACCESS_WRITE;
162 w1_buf[1] = *buf; 185 w1_buf[1] = *buf;
163 w1_buf[2] = ~(*buf); 186 w1_buf[2] = ~(*buf);
164 w1_write_block(sl->master, w1_buf, 3);
165 187
166 readBack = w1_read_8(sl->master); 188 w1_write_block(sl->master, w1_buf, 3);
167 189
168 if (readBack != W1_F29_SUCCESS_CONFIRM_BYTE) { 190 if (w1_read_8(sl->master) == W1_F29_SUCCESS_CONFIRM_BYTE &&
169 if (w1_reset_resume_command(sl->master)) 191 optional_read_back_valid(sl, *buf)) {
170 goto error; 192 bytes_written = 1;
171 /* try again, the slave is ready for a command */ 193 goto out;
172 continue;
173 } 194 }
174 195
175#ifdef CONFIG_W1_SLAVE_DS2408_READBACK
176 /* here the master could read another byte which
177 would be the PIO reg (the actual pin logic state)
178 since in this driver we don't know which pins are
179 in and outs, there's no value to read the state and
180 compare. with (*buf) so end this command abruptly: */
181 if (w1_reset_resume_command(sl->master)) 196 if (w1_reset_resume_command(sl->master))
182 goto error; 197 goto out; /* unrecoverable error */
198 /* try again, the slave is ready for a command */
199 } while (--retries);
183 200
184 /* go read back the output latches */ 201out:
185 /* (the direct effect of the write above) */
186 w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
187 w1_buf[1] = W1_F29_REG_OUTPUT_LATCH_STATE;
188 w1_buf[2] = 0;
189 w1_write_block(sl->master, w1_buf, 3);
190 /* read the result of the READ_PIO_REGS command */
191 if (w1_read_8(sl->master) == *buf)
192#endif
193 {
194 /* success! */
195 mutex_unlock(&sl->master->bus_mutex);
196 dev_dbg(&sl->dev,
197 "mutex unlocked, retries:%d", retries);
198 return 1;
199 }
200 }
201error:
202 mutex_unlock(&sl->master->bus_mutex); 202 mutex_unlock(&sl->master->bus_mutex);
203 dev_dbg(&sl->dev, "mutex unlocked in error, retries:%d", retries);
204 203
205 return -EIO; 204 dev_dbg(&sl->dev, "%s, mutex unlocked retries:%d\n",
205 (bytes_written > 0) ? "succeeded" : "error", retries);
206
207 return bytes_written;
206} 208}
207 209
208 210
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 0364d3329c52..3516ce6718d9 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -432,8 +432,7 @@ int w1_reset_resume_command(struct w1_master *dev)
432 if (w1_reset_bus(dev)) 432 if (w1_reset_bus(dev))
433 return -1; 433 return -1;
434 434
435 /* This will make only the last matched slave perform a skip ROM. */ 435 w1_write_8(dev, dev->slave_count > 1 ? W1_RESUME_CMD : W1_SKIP_ROM);
436 w1_write_8(dev, W1_RESUME_CMD);
437 return 0; 436 return 0;
438} 437}
439EXPORT_SYMBOL_GPL(w1_reset_resume_command); 438EXPORT_SYMBOL_GPL(w1_reset_resume_command);
diff --git a/fs/char_dev.c b/fs/char_dev.c
index a279c58fe360..d18cad28c1c3 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -88,22 +88,31 @@ static int find_dynamic_major(void)
88/* 88/*
89 * Register a single major with a specified minor range. 89 * Register a single major with a specified minor range.
90 * 90 *
91 * If major == 0 this functions will dynamically allocate a major and return 91 * If major == 0 this function will dynamically allocate an unused major.
92 * its number. 92 * If major > 0 this function will attempt to reserve the range of minors
93 * 93 * with given major.
94 * If major > 0 this function will attempt to reserve the passed range of
95 * minors and will return zero on success.
96 * 94 *
97 * Returns a -ve errno on failure.
98 */ 95 */
99static struct char_device_struct * 96static struct char_device_struct *
100__register_chrdev_region(unsigned int major, unsigned int baseminor, 97__register_chrdev_region(unsigned int major, unsigned int baseminor,
101 int minorct, const char *name) 98 int minorct, const char *name)
102{ 99{
103 struct char_device_struct *cd, **cp; 100 struct char_device_struct *cd, *curr, *prev = NULL;
104 int ret = 0; 101 int ret = -EBUSY;
105 int i; 102 int i;
106 103
104 if (major >= CHRDEV_MAJOR_MAX) {
105 pr_err("CHRDEV \"%s\" major requested (%u) is greater than the maximum (%u)\n",
106 name, major, CHRDEV_MAJOR_MAX-1);
107 return ERR_PTR(-EINVAL);
108 }
109
110 if (minorct > MINORMASK + 1 - baseminor) {
111 pr_err("CHRDEV \"%s\" minor range requested (%u-%u) is out of the allowed range (%u-%u) for a single major\n",
112 name, baseminor, baseminor + minorct - 1, 0, MINORMASK);
113 return ERR_PTR(-EINVAL);
114 }
115
107 cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL); 116 cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
108 if (cd == NULL) 117 if (cd == NULL)
109 return ERR_PTR(-ENOMEM); 118 return ERR_PTR(-ENOMEM);
@@ -120,10 +129,20 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
120 major = ret; 129 major = ret;
121 } 130 }
122 131
123 if (major >= CHRDEV_MAJOR_MAX) { 132 i = major_to_index(major);
124 pr_err("CHRDEV \"%s\" major requested (%u) is greater than the maximum (%u)\n", 133 for (curr = chrdevs[i]; curr; prev = curr, curr = curr->next) {
125 name, major, CHRDEV_MAJOR_MAX-1); 134 if (curr->major < major)
126 ret = -EINVAL; 135 continue;
136
137 if (curr->major > major)
138 break;
139
140 if (curr->baseminor + curr->minorct <= baseminor)
141 continue;
142
143 if (curr->baseminor >= baseminor + minorct)
144 break;
145
127 goto out; 146 goto out;
128 } 147 }
129 148
@@ -132,37 +151,14 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
132 cd->minorct = minorct; 151 cd->minorct = minorct;
133 strlcpy(cd->name, name, sizeof(cd->name)); 152 strlcpy(cd->name, name, sizeof(cd->name));
134 153
135 i = major_to_index(major); 154 if (!prev) {
136 155 cd->next = curr;
137 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next) 156 chrdevs[i] = cd;
138 if ((*cp)->major > major || 157 } else {
139 ((*cp)->major == major && 158 cd->next = prev->next;
140 (((*cp)->baseminor >= baseminor) || 159 prev->next = cd;
141 ((*cp)->baseminor + (*cp)->minorct > baseminor))))
142 break;
143
144 /* Check for overlapping minor ranges. */
145 if (*cp && (*cp)->major == major) {
146 int old_min = (*cp)->baseminor;
147 int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
148 int new_min = baseminor;
149 int new_max = baseminor + minorct - 1;
150
151 /* New driver overlaps from the left. */
152 if (new_max >= old_min && new_max <= old_max) {
153 ret = -EBUSY;
154 goto out;
155 }
156
157 /* New driver overlaps from the right. */
158 if (new_min <= old_max && new_min >= old_min) {
159 ret = -EBUSY;
160 goto out;
161 }
162 } 160 }
163 161
164 cd->next = *cp;
165 *cp = cd;
166 mutex_unlock(&chrdevs_lock); 162 mutex_unlock(&chrdevs_lock);
167 return cd; 163 return cd;
168out: 164out:
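
The rewritten __register_chrdev_region() keeps each chrdevs[] chain sorted
by major and fails with -EBUSY as soon as it finds an entry whose half-open
minor range intersects the requested one. The loop's continue and break
conditions are exactly the negation of the standard interval-overlap test
(sketch):

/* Half-open ranges [a, a + n) and [b, b + m) overlap iff: */
static inline bool minor_ranges_overlap(unsigned int a, unsigned int n,
					unsigned int b, unsigned int m)
{
	return a < b + m && b < a + n;
}
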
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index a1a959ba24ff..b0e35eec6499 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -12,11 +12,13 @@
12 12
13/* ETMv3.5/PTM's ETMCR config bit */ 13/* ETMv3.5/PTM's ETMCR config bit */
14#define ETM_OPT_CYCACC 12 14#define ETM_OPT_CYCACC 12
15#define ETM_OPT_CTXTID 14
15#define ETM_OPT_TS 28 16#define ETM_OPT_TS 28
16#define ETM_OPT_RETSTK 29 17#define ETM_OPT_RETSTK 29
17 18
18/* ETMv4 CONFIGR programming bits for the ETM OPTs */ 19/* ETMv4 CONFIGR programming bits for the ETM OPTs */
19#define ETM4_CFG_BIT_CYCACC 4 20#define ETM4_CFG_BIT_CYCACC 4
21#define ETM4_CFG_BIT_CTXTID 6
20#define ETM4_CFG_BIT_TS 11 22#define ETM4_CFG_BIT_TS 11
21#define ETM4_CFG_BIT_RETSTK 12 23#define ETM4_CFG_BIT_RETSTK 12
22 24
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 7b87965f7a65..62a520df8add 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -192,9 +192,10 @@ struct coresight_device {
192 */ 192 */
193struct coresight_ops_sink { 193struct coresight_ops_sink {
194 int (*enable)(struct coresight_device *csdev, u32 mode, void *data); 194 int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
195 void (*disable)(struct coresight_device *csdev); 195 int (*disable)(struct coresight_device *csdev);
196 void *(*alloc_buffer)(struct coresight_device *csdev, int cpu, 196 void *(*alloc_buffer)(struct coresight_device *csdev,
197 void **pages, int nr_pages, bool overwrite); 197 struct perf_event *event, void **pages,
198 int nr_pages, bool overwrite);
198 void (*free_buffer)(void *config); 199 void (*free_buffer)(void *config);
199 unsigned long (*update_buffer)(struct coresight_device *csdev, 200 unsigned long (*update_buffer)(struct coresight_device *csdev,
200 struct perf_output_handle *handle, 201 struct perf_output_handle *handle,
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 03b6ba2a63f8..52aa4821093a 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -1,4 +1,7 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2013-2016, Intel Corporation. All rights reserved.
4 */
2#ifndef _LINUX_MEI_CL_BUS_H 5#ifndef _LINUX_MEI_CL_BUS_H
3#define _LINUX_MEI_CL_BUS_H 6#define _LINUX_MEI_CL_BUS_H
4 7
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 312bfa5efd80..8f8be5b00060 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -61,6 +61,7 @@ void nvmem_cell_put(struct nvmem_cell *cell);
61void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell); 61void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
62void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len); 62void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
63int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len); 63int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
64int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val);
64int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val); 65int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val);
65 66
66/* direct nvmem device read/write interface */ 67/* direct nvmem device read/write interface */
@@ -122,6 +123,12 @@ static inline int nvmem_cell_write(struct nvmem_cell *cell,
122 return -EOPNOTSUPP; 123 return -EOPNOTSUPP;
123} 124}
124 125
126static inline int nvmem_cell_read_u16(struct device *dev,
127 const char *cell_id, u16 *val)
128{
129 return -EOPNOTSUPP;
130}
131
125static inline int nvmem_cell_read_u32(struct device *dev, 132static inline int nvmem_cell_read_u32(struct device *dev,
126 const char *cell_id, u32 *val) 133 const char *cell_id, u32 *val)
127{ 134{
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index df313913e856..35662d9c2c62 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2// Copyright(c) 2015-17 Intel Corporation. 2/* Copyright(c) 2015-17 Intel Corporation. */
3 3
4#ifndef __SOUNDWIRE_H 4#ifndef __SOUNDWIRE_H
5#define __SOUNDWIRE_H 5#define __SOUNDWIRE_H
@@ -36,7 +36,7 @@ struct sdw_slave;
36#define SDW_FRAME_CTRL_BITS 48 36#define SDW_FRAME_CTRL_BITS 48
37#define SDW_MAX_DEVICES 11 37#define SDW_MAX_DEVICES 11
38 38
39#define SDW_VALID_PORT_RANGE(n) (n <= 14 && n >= 1) 39#define SDW_VALID_PORT_RANGE(n) ((n) <= 14 && (n) >= 1)
40 40
41#define SDW_DAI_ID_RANGE_START 100 41#define SDW_DAI_ID_RANGE_START 100
42#define SDW_DAI_ID_RANGE_END 200 42#define SDW_DAI_ID_RANGE_END 200
@@ -470,14 +470,14 @@ struct sdw_bus_params {
470struct sdw_slave_ops { 470struct sdw_slave_ops {
471 int (*read_prop)(struct sdw_slave *sdw); 471 int (*read_prop)(struct sdw_slave *sdw);
472 int (*interrupt_callback)(struct sdw_slave *slave, 472 int (*interrupt_callback)(struct sdw_slave *slave,
473 struct sdw_slave_intr_status *status); 473 struct sdw_slave_intr_status *status);
474 int (*update_status)(struct sdw_slave *slave, 474 int (*update_status)(struct sdw_slave *slave,
475 enum sdw_slave_status status); 475 enum sdw_slave_status status);
476 int (*bus_config)(struct sdw_slave *slave, 476 int (*bus_config)(struct sdw_slave *slave,
477 struct sdw_bus_params *params); 477 struct sdw_bus_params *params);
478 int (*port_prep)(struct sdw_slave *slave, 478 int (*port_prep)(struct sdw_slave *slave,
479 struct sdw_prepare_ch *prepare_ch, 479 struct sdw_prepare_ch *prepare_ch,
480 enum sdw_port_prep_ops pre_ops); 480 enum sdw_port_prep_ops pre_ops);
481}; 481};
482 482
483/** 483/**
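
The SDW_VALID_PORT_RANGE() change is standard macro hygiene: without
parentheses around the argument, operator precedence can split an expression
argument. A hypothetical caller shows the old definition's failure mode:

/* Old: #define SDW_VALID_PORT_RANGE(n) (n <= 14 && n >= 1)          */
/* SDW_VALID_PORT_RANGE(x & 0xf) used to expand to                   */
/*   (x & 0xf <= 14 && x & 0xf >= 1)                                 */
/* and because <= and >= bind tighter than &, that is                */
/*   ((x & (0xf <= 14)) && (x & (0xf >= 1))) == ((x & 1) && (x & 1)) */
/* i.e. a test of bit 0, not a range check. The parenthesized macro  */
/* expands to ((x & 0xf) <= 14 && (x & 0xf) >= 1) as intended.       */
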
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 2b9573b8aedd..4d70da45363d 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2// Copyright(c) 2015-17 Intel Corporation. 2/* Copyright(c) 2015-17 Intel Corporation. */
3 3
4#ifndef __SDW_INTEL_H 4#ifndef __SDW_INTEL_H
5#define __SDW_INTEL_H 5#define __SDW_INTEL_H
@@ -11,7 +11,7 @@
11 */ 11 */
12struct sdw_intel_ops { 12struct sdw_intel_ops {
13 int (*config_stream)(void *arg, void *substream, 13 int (*config_stream)(void *arg, void *substream,
14 void *dai, void *hw_params, int stream_num); 14 void *dai, void *hw_params, int stream_num);
15}; 15};
16 16
17/** 17/**
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h
index df472b1ab410..a686f7988156 100644
--- a/include/linux/soundwire/sdw_registers.h
+++ b/include/linux/soundwire/sdw_registers.h
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) 1/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2// Copyright(c) 2015-17 Intel Corporation. 2/* Copyright(c) 2015-17 Intel Corporation. */
3 3
4#ifndef __SDW_REGISTERS_H 4#ifndef __SDW_REGISTERS_H
5#define __SDW_REGISTERS_H 5#define __SDW_REGISTERS_H
@@ -73,7 +73,6 @@
73#define SDW_SCP_INTSTAT2_SCP3_CASCADE BIT(7) 73#define SDW_SCP_INTSTAT2_SCP3_CASCADE BIT(7)
74#define SDW_SCP_INTSTAT2_PORT4_10 GENMASK(6, 0) 74#define SDW_SCP_INTSTAT2_PORT4_10 GENMASK(6, 0)
75 75
76
77#define SDW_SCP_INTSTAT3 0x43 76#define SDW_SCP_INTSTAT3 0x43
78#define SDW_SCP_INTSTAT3_PORT11_14 GENMASK(3, 0) 77#define SDW_SCP_INTSTAT3_PORT11_14 GENMASK(3, 0)
79 78
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
index 9fd553e553e9..9c756b5a0dfe 100644
--- a/include/linux/soundwire/sdw_type.h
+++ b/include/linux/soundwire/sdw_type.h
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2// Copyright(c) 2015-17 Intel Corporation. 2/* Copyright(c) 2015-17 Intel Corporation. */
3 3
4#ifndef __SOUNDWIRE_TYPES_H 4#ifndef __SOUNDWIRE_TYPES_H
5#define __SOUNDWIRE_TYPES_H 5#define __SOUNDWIRE_TYPES_H
@@ -11,7 +11,7 @@ extern struct bus_type sdw_bus_type;
11#define sdw_register_driver(drv) \ 11#define sdw_register_driver(drv) \
12 __sdw_register_driver(drv, THIS_MODULE) 12 __sdw_register_driver(drv, THIS_MODULE)
13 13
14int __sdw_register_driver(struct sdw_driver *drv, struct module *); 14int __sdw_register_driver(struct sdw_driver *drv, struct module *owner);
15void sdw_unregister_driver(struct sdw_driver *drv); 15void sdw_unregister_driver(struct sdw_driver *drv);
16 16
17int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size); 17int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size);
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index bf6ec83e60ee..2d7e012db03f 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -181,6 +181,8 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
181 * @device_name: Name of the device (or %NULL if not known) 181 * @device_name: Name of the device (or %NULL if not known)
182 * @is_unplugged: The XDomain is unplugged 182 * @is_unplugged: The XDomain is unplugged
183 * @resume: The XDomain is being resumed 183 * @resume: The XDomain is being resumed
184 * @needs_uuid: If the XDomain does not have @remote_uuid it will be
185 * queried first
184 * @transmit_path: HopID which the remote end expects us to transmit 186 * @transmit_path: HopID which the remote end expects us to transmit
185 * @transmit_ring: Local ring (hop) where outgoing packets are pushed 187 * @transmit_ring: Local ring (hop) where outgoing packets are pushed
186 * @receive_path: HopID which we expect the remote end to transmit 188 * @receive_path: HopID which we expect the remote end to transmit
@@ -189,6 +191,9 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
189 * @properties: Properties exported by the remote domain 191 * @properties: Properties exported by the remote domain
190 * @property_block_gen: Generation of @properties 192 * @property_block_gen: Generation of @properties
191 * @properties_lock: Lock protecting @properties. 193 * @properties_lock: Lock protecting @properties.
194 * @get_uuid_work: Work used to retrieve @remote_uuid
195 * @uuid_retries: Number of times left to request @remote_uuid before
196 * giving up
192 * @get_properties_work: Work used to get remote domain properties 197 * @get_properties_work: Work used to get remote domain properties
193 * @properties_retries: Number of times left to read properties 198 * @properties_retries: Number of times left to read properties
194 * @properties_changed_work: Work used to notify the remote domain that 199 * @properties_changed_work: Work used to notify the remote domain that
@@ -220,6 +225,7 @@ struct tb_xdomain {
 	const char *device_name;
 	bool is_unplugged;
 	bool resume;
+	bool needs_uuid;
 	u16 transmit_path;
 	u16 transmit_ring;
 	u16 receive_path;
@@ -227,6 +233,8 @@ struct tb_xdomain {
 	struct ida service_ids;
 	struct tb_property_dir *properties;
 	u32 property_block_gen;
+	struct delayed_work get_uuid_work;
+	int uuid_retries;
 	struct delayed_work get_properties_work;
 	int properties_retries;
 	struct delayed_work properties_changed_work;
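
The new needs_uuid, get_uuid_work and uuid_retries fields follow the same bounded-retry pattern the structure already uses for get_properties_work/properties_retries: a delayed work re-queues itself until the remote UUID arrives or the retry budget is exhausted. A sketch of that pattern, illustrative only; the helper and timing below are assumptions, not the driver's actual internals:

#include <linux/thunderbolt.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void tb_xdomain_get_uuid(struct work_struct *work)
{
	struct tb_xdomain *xd = container_of(work, struct tb_xdomain,
					     get_uuid_work.work);

	if (try_query_remote_uuid(xd) == 0) {	/* hypothetical helper */
		xd->needs_uuid = false;
		return;
	}
	/* Not answered yet: retry until uuid_retries runs out. */
	if (xd->uuid_retries-- > 0)
		queue_delayed_work(system_wq, &xd->get_uuid_work,
				   msecs_to_jiffies(100));
}
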
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index eaa1e762bf06..0c06178e4985 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -17,6 +17,7 @@
 #define _VMW_VMCI_DEF_H_
 
 #include <linux/atomic.h>
+#include <linux/bits.h>
 
 /* Register offsets. */
 #define VMCI_STATUS_ADDR	0x00
@@ -33,27 +34,27 @@
 #define VMCI_MAX_DEVICES 1
 
 /* Status register bits. */
-#define VMCI_STATUS_INT_ON	0x1
+#define VMCI_STATUS_INT_ON	BIT(0)
 
 /* Control register bits. */
-#define VMCI_CONTROL_RESET	 0x1
-#define VMCI_CONTROL_INT_ENABLE	 0x2
-#define VMCI_CONTROL_INT_DISABLE 0x4
+#define VMCI_CONTROL_RESET	 BIT(0)
+#define VMCI_CONTROL_INT_ENABLE	 BIT(1)
+#define VMCI_CONTROL_INT_DISABLE BIT(2)
 
 /* Capabilities register bits. */
-#define VMCI_CAPS_HYPERCALL	0x1
-#define VMCI_CAPS_GUESTCALL	0x2
-#define VMCI_CAPS_DATAGRAM	0x4
-#define VMCI_CAPS_NOTIFICATIONS	0x8
-#define VMCI_CAPS_PPN64		0x10
+#define VMCI_CAPS_HYPERCALL	BIT(0)
+#define VMCI_CAPS_GUESTCALL	BIT(1)
+#define VMCI_CAPS_DATAGRAM	BIT(2)
+#define VMCI_CAPS_NOTIFICATIONS	BIT(3)
+#define VMCI_CAPS_PPN64		BIT(4)
 
 /* Interrupt Cause register bits. */
-#define VMCI_ICR_DATAGRAM	0x1
-#define VMCI_ICR_NOTIFICATION	0x2
+#define VMCI_ICR_DATAGRAM	BIT(0)
+#define VMCI_ICR_NOTIFICATION	BIT(1)
 
 /* Interrupt Mask register bits. */
-#define VMCI_IMR_DATAGRAM	0x1
-#define VMCI_IMR_NOTIFICATION	0x2
+#define VMCI_IMR_DATAGRAM	BIT(0)
+#define VMCI_IMR_NOTIFICATION	BIT(1)
 
 /* Maximum MSI/MSI-X interrupt vectors in the device. */
 #define VMCI_MAX_INTRS 2
@@ -463,9 +464,9 @@ struct vmci_datagram {
  * datagram callback is invoked in a delayed context (not interrupt context).
  */
 #define VMCI_FLAG_DG_NONE	   0
-#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
-#define VMCI_FLAG_ANYCID_DG_HND	   0x2
-#define VMCI_FLAG_DG_DELAYED_CB	   0x4
+#define VMCI_FLAG_WELLKNOWN_DG_HND BIT(0)
+#define VMCI_FLAG_ANYCID_DG_HND	   BIT(1)
+#define VMCI_FLAG_DG_DELAYED_CB	   BIT(2)
 
 /*
  * Maximum supported size of a VMCI datagram for routable datagrams.
@@ -694,7 +695,7 @@ struct vmci_qp_detach_msg {
 };
 
 /* VMCI Doorbell API. */
-#define VMCI_FLAG_DELAYED_CB 0x01
+#define VMCI_FLAG_DELAYED_CB BIT(0)
 
 typedef void (*vmci_callback) (void *client_data);
 
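
The switch from hex literals to BIT() throughout this header is value-preserving: BIT(n) expands to (1UL << (n)), which is why the <linux/bits.h> include is added above. A compile-time spot check of that equivalence, assuming nothing beyond the macro itself:

#include <linux/bits.h>

_Static_assert(BIT(0) == 0x1,  "VMCI_STATUS_INT_ON value unchanged");
_Static_assert(BIT(2) == 0x4,  "VMCI_CONTROL_INT_DISABLE value unchanged");
_Static_assert(BIT(4) == 0x10, "VMCI_CAPS_PPN64 value unchanged");
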
diff --git a/include/uapi/linux/aspeed-p2a-ctrl.h b/include/uapi/linux/aspeed-p2a-ctrl.h
new file mode 100644
index 000000000000..033355552a6e
--- /dev/null
+++ b/include/uapi/linux/aspeed-p2a-ctrl.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright 2019 Google Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Provides a simple driver to control the ASPEED P2A interface which allows
+ * the host to read and write to various regions of the BMC's memory.
+ */
+
+#ifndef _UAPI_LINUX_ASPEED_P2A_CTRL_H
+#define _UAPI_LINUX_ASPEED_P2A_CTRL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define ASPEED_P2A_CTRL_READ_ONLY 0
+#define ASPEED_P2A_CTRL_READWRITE 1
+
+/*
+ * This driver provides a mechanism for enabling or disabling the read-write
+ * property of specific windows into the ASPEED BMC's memory.
+ *
+ * A user can map a region of the BMC's memory as read-only or read-write, with
+ * the caveat that once any region is mapped, all regions are unlocked for
+ * reading.
+ */
+
+/*
+ * Unlock a region of BMC physical memory for access from the host.
+ *
+ * Also used to read back the optional memory-region configuration for the
+ * driver.
+ */
+struct aspeed_p2a_ctrl_mapping {
+	__u64 addr;
+	__u32 length;
+	__u32 flags;
+};
+
+#define __ASPEED_P2A_CTRL_IOCTL_MAGIC 0xb3
+
+/*
+ * This IOCTL is meant to configure a region or regions of memory given a
+ * starting address and length to be readable by the host, or
+ * readable-writeable.
+ */
+#define ASPEED_P2A_CTRL_IOCTL_SET_WINDOW _IOW(__ASPEED_P2A_CTRL_IOCTL_MAGIC, \
+		0x00, struct aspeed_p2a_ctrl_mapping)
+
+/*
+ * This IOCTL is meant to read back to the user the base address and length of
+ * the memory-region specified to the driver for use with mmap.
+ */
+#define ASPEED_P2A_CTRL_IOCTL_GET_MEMORY_CONFIG \
+	_IOWR(__ASPEED_P2A_CTRL_IOCTL_MAGIC, \
+		0x01, struct aspeed_p2a_ctrl_mapping)
+
+#endif /* _UAPI_LINUX_ASPEED_P2A_CTRL_H */
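
Taken together, the struct and the two ioctls in this new header suggest a straightforward host-side flow: request a window, read back what the driver actually configured, then mmap through the device. A hedged userspace sketch; the device node path and the example BMC address are assumptions, not part of this header:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/aspeed-p2a-ctrl.h>

int main(void)
{
	struct aspeed_p2a_ctrl_mapping map = {
		.addr = 0x80000000ULL,	/* example BMC physical address */
		.length = 0x1000,
		.flags = ASPEED_P2A_CTRL_READ_ONLY,
	};
	int ret = 1;
	int fd = open("/dev/aspeed-p2a-ctrl", O_RDWR); /* assumed node name */

	if (fd < 0)
		return 1;
	if (ioctl(fd, ASPEED_P2A_CTRL_IOCTL_SET_WINDOW, &map) < 0)
		goto out;
	/* Read back the region the driver actually configured... */
	if (ioctl(fd, ASPEED_P2A_CTRL_IOCTL_GET_MEMORY_CONFIG, &map) < 0)
		goto out;
	/* ...then map it for access from the host side. */
	void *p = mmap(NULL, map.length, PROT_READ, MAP_SHARED, fd, 0);
	if (p != MAP_FAILED) {
		munmap(p, map.length);
		ret = 0;
	}
out:
	close(fd);
	return ret;
}
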
diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h
index 0f681cbd38d3..c6aec86cc5de 100644
--- a/include/uapi/linux/mei.h
+++ b/include/uapi/linux/mei.h
@@ -1,70 +1,9 @@
 /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
-/******************************************************************************
+/*
+ * Copyright(c) 2003-2015 Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
  * Intel MEI Interface Header
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- *	Intel Corporation.
- *	linux-mei@linux.intel.com
- *	http://www.intel.com
- *
- * BSD LICENSE
- *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
+ */
 #ifndef _LINUX_MEI_H
 #define _LINUX_MEI_H
 
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 7fd6f633534c..8ac292cf4d00 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -20,8 +20,8 @@
 /*
  * Queue Numbering
  *
- * The external queues (DMA channels + CPU) MUST be before the internal queues
- * and each group (DMA channels + CPU and internal) must be contiguous inside
+ * The external queues (PCI DMA channels) MUST be before the internal queues
+ * and each group (PCI DMA channels and internal) must be contiguous inside
  * itself but there can be a gap between the two groups (although not
  * recommended)
  */
@@ -33,7 +33,7 @@ enum goya_queue_id {
 	GOYA_QUEUE_ID_DMA_3,
 	GOYA_QUEUE_ID_DMA_4,
 	GOYA_QUEUE_ID_CPU_PQ,
-	GOYA_QUEUE_ID_MME,
+	GOYA_QUEUE_ID_MME,	/* Internal queues start here */
 	GOYA_QUEUE_ID_TPC0,
 	GOYA_QUEUE_ID_TPC1,
 	GOYA_QUEUE_ID_TPC2,
@@ -45,11 +45,18 @@ enum goya_queue_id {
 	GOYA_QUEUE_ID_SIZE
 };
 
+enum hl_device_status {
+	HL_DEVICE_STATUS_OPERATIONAL,
+	HL_DEVICE_STATUS_IN_RESET,
+	HL_DEVICE_STATUS_MALFUNCTION
+};
+
 /* Opcode for management ioctl */
 #define HL_INFO_HW_IP_INFO	0
 #define HL_INFO_HW_EVENTS	1
 #define HL_INFO_DRAM_USAGE	2
 #define HL_INFO_HW_IDLE		3
+#define HL_INFO_DEVICE_STATUS	4
 
 #define HL_INFO_VERSION_MAX_LEN	128
 
@@ -82,6 +89,11 @@ struct hl_info_hw_idle {
 	__u32 pad;
 };
 
+struct hl_info_device_status {
+	__u32 status;
+	__u32 pad;
+};
+
 struct hl_info_args {
 	/* Location of relevant struct in userspace */
 	__u64 return_pointer;
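
With the new opcode and hl_info_device_status in place, userspace can poll device health through the existing INFO ioctl. A sketch under the assumption that HL_IOCTL_INFO and the remaining hl_info_args field names (return_size, op) match the rest of this header:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int hl_device_operational(int fd)
{
	struct hl_info_device_status status;
	struct hl_info_args args;

	memset(&args, 0, sizeof(args));
	memset(&status, 0, sizeof(status));
	args.op = HL_INFO_DEVICE_STATUS;
	args.return_pointer = (__u64)(uintptr_t)&status;
	args.return_size = sizeof(status);	/* assumed field name */

	if (ioctl(fd, HL_IOCTL_INFO, &args) < 0)
		return 0;
	return status.status == HL_DEVICE_STATUS_OPERATIONAL;
}
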
@@ -181,7 +193,10 @@ struct hl_cs_in {
 };
 
 struct hl_cs_out {
-	/* this holds the sequence number of the CS to pass to wait ioctl */
+	/*
+	 * seq holds the sequence number of the CS to pass to wait ioctl. All
+	 * values are valid except for 0 and ULLONG_MAX
+	 */
 	__u64 seq;
 	/* HL_CS_STATUS_* */
 	__u32 status;
@@ -320,6 +335,110 @@ union hl_mem_args {
 	struct hl_mem_out out;
 };
 
+#define HL_DEBUG_MAX_AUX_VALUES		10
+
+struct hl_debug_params_etr {
+	/* Address in memory to allocate buffer */
+	__u64 buffer_address;
+
+	/* Size of buffer to allocate */
+	__u64 buffer_size;
+
+	/* Sink operation mode: SW fifo, HW fifo, Circular buffer */
+	__u32 sink_mode;
+	__u32 pad;
+};
+
+struct hl_debug_params_etf {
+	/* Address in memory to allocate buffer */
+	__u64 buffer_address;
+
+	/* Size of buffer to allocate */
+	__u64 buffer_size;
+
+	/* Sink operation mode: SW fifo, HW fifo, Circular buffer */
+	__u32 sink_mode;
+	__u32 pad;
+};
+
+struct hl_debug_params_stm {
+	/* Two bit masks for HW event and Stimulus Port */
+	__u64 he_mask;
+	__u64 sp_mask;
+
+	/* Trace source ID */
+	__u32 id;
+
+	/* Frequency for the timestamp register */
+	__u32 frequency;
+};
+
+struct hl_debug_params_bmon {
+	/* Two address ranges that the user can request to filter */
+	__u64 start_addr0;
+	__u64 addr_mask0;
+
+	__u64 start_addr1;
+	__u64 addr_mask1;
+
+	/* Capture window configuration */
+	__u32 bw_win;
+	__u32 win_capture;
+
+	/* Trace source ID */
+	__u32 id;
+	__u32 pad;
+};
+
+struct hl_debug_params_spmu {
+	/* Event types selection */
+	__u64 event_types[HL_DEBUG_MAX_AUX_VALUES];
+
+	/* Number of event types selection */
+	__u32 event_types_num;
+	__u32 pad;
+};
+
+/* Opcode for ETR component */
+#define HL_DEBUG_OP_ETR		0
+/* Opcode for ETF component */
+#define HL_DEBUG_OP_ETF		1
+/* Opcode for STM component */
+#define HL_DEBUG_OP_STM		2
+/* Opcode for FUNNEL component */
+#define HL_DEBUG_OP_FUNNEL	3
+/* Opcode for BMON component */
+#define HL_DEBUG_OP_BMON	4
+/* Opcode for SPMU component */
+#define HL_DEBUG_OP_SPMU	5
+/* Opcode for timestamp */
+#define HL_DEBUG_OP_TIMESTAMP	6
+
+struct hl_debug_args {
+	/*
+	 * Pointer to user input structure.
+	 * This field is relevant to specific opcodes.
+	 */
+	__u64 input_ptr;
+	/* Pointer to user output structure */
+	__u64 output_ptr;
+	/* Size of user input structure */
+	__u32 input_size;
+	/* Size of user output structure */
+	__u32 output_size;
+	/* HL_DEBUG_OP_* */
+	__u32 op;
+	/*
+	 * Register index in the component, taken from the debug_regs_index enum
+	 * in the various ASIC header files
+	 */
+	__u32 reg_idx;
+	/* Enable/disable */
+	__u32 enable;
+	/* Context ID - Currently not in use */
+	__u32 ctx_id;
+};
+
 /*
  * Various information operations such as:
  * - H/W IP information
@@ -361,6 +480,12 @@ union hl_mem_args {
  * Each JOB will be enqueued on a specific queue, according to the user's input.
  * There can be more than one JOB per queue.
  *
+ * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
+ * a second set is for "execution" phase and a third set is for "store" phase.
+ * The JOBS on the "restore" phase are enqueued only after a context switch
+ * (or if it's the first CS for this context). The user can also order the
+ * driver to run the "restore" phase explicitly.
+ *
  * There are two types of queues - external and internal. External queues
  * are DMA queues which transfer data from/to the Host. All other queues are
  * internal. The driver will get completion notifications from the device only
@@ -377,19 +502,18 @@ union hl_mem_args {
  * relevant queues. Therefore, the user mustn't assume the CS has been completed
  * or has even started to execute.
  *
- * Upon successful enqueue, the IOCTL returns an opaque handle which the user
+ * Upon successful enqueue, the IOCTL returns a sequence number which the user
  * can use with the "Wait for CS" IOCTL to check whether the handle's CS
  * external JOBS have been completed. Note that if the CS has internal JOBS
  * which can execute AFTER the external JOBS have finished, the driver might
  * report that the CS has finished executing BEFORE the internal JOBS have
  * actually finished executing.
  *
- * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
- * a second set is for "execution" phase and a third set is for "store" phase.
- * The JOBS on the "restore" phase are enqueued only after context-switch
- * (or if its the first CS for this context). The user can also order the
- * driver to run the "restore" phase explicitly
- *
+ * Even though the sequence number increments per CS, the user can NOT
+ * automatically assume that if CS with sequence number N finished, then CS
+ * with sequence number N-1 also finished. The user can make this assumption if
+ * and only if CS N and CS N-1 are exactly the same (same CBs for the same
+ * queues).
  */
 #define HL_IOCTL_CS \
 		_IOWR('H', 0x03, union hl_cs_args)
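
A sketch of the submit-then-wait flow this comment describes, using the returned sequence number; the wait ioctl's argument field names (in.seq, in.timeout_us) are assumptions based on the surrounding header:

#include <limits.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int submit_and_wait(int fd, union hl_cs_args *cs)
{
	union hl_wait_cs_args wait;

	if (ioctl(fd, HL_IOCTL_CS, cs) < 0)
		return -1;
	/* 0 and ULLONG_MAX are the only invalid sequence numbers. */
	if (cs->out.seq == 0 || cs->out.seq == ULLONG_MAX)
		return -1;

	memset(&wait, 0, sizeof(wait));
	wait.in.seq = cs->out.seq;	/* assumed field name */
	wait.in.timeout_us = 1000000;	/* 1 second, arbitrary */
	return ioctl(fd, HL_IOCTL_WAIT_CS, &wait);
}

Remember the caveat above: a successful wait on CS N says nothing about the external JOBS of CS N-1 unless the two submissions were identical.
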
@@ -444,7 +568,20 @@ union hl_mem_args {
 #define HL_IOCTL_MEMORY	\
 		_IOWR('H', 0x05, union hl_mem_args)
 
+/*
+ * Debug
+ * - Enable/disable the ETR/ETF/FUNNEL/STM/BMON/SPMU debug traces
+ *
+ * This IOCTL allows the user to get debug traces from the chip.
+ *
+ * The user needs to provide the register index and essential data such as
+ * buffer address and size.
+ *
+ */
+#define HL_IOCTL_DEBUG \
+		_IOWR('H', 0x06, struct hl_debug_args)
+
 #define HL_COMMAND_START	0x01
-#define HL_COMMAND_END		0x06
+#define HL_COMMAND_END		0x07
 
 #endif /* HABANALABS_H_ */
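
A hedged sketch of driving the new debug IOCTL to enable an ETR sink. The buffer values are placeholders, reg_idx would come from the ASIC's debug_regs_index enum, and the sink_mode encoding is ASIC-specific:

#include <stdint.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int enable_etr(int fd, __u64 buf_addr, __u64 buf_size, __u32 reg_idx)
{
	struct hl_debug_params_etr etr = {
		.buffer_address = buf_addr,
		.buffer_size = buf_size,
		.sink_mode = 0,		/* e.g. SW fifo; encoding is ASIC-specific */
	};
	struct hl_debug_args args = {
		.input_ptr = (__u64)(uintptr_t)&etr,
		.input_size = sizeof(etr),
		.op = HL_DEBUG_OP_ETR,
		.reg_idx = reg_idx,	/* from the ASIC's debug_regs_index enum */
		.enable = 1,
	};

	return ioctl(fd, HL_IOCTL_DEBUG, &args);
}
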
diff --git a/lib/siphash.c b/lib/siphash.c
index 3ae58b4edad6..c47bb6ff2149 100644
--- a/lib/siphash.c
+++ b/lib/siphash.c
@@ -68,11 +68,11 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
 					  bytemask_from_count(left)));
 #else
 	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
+	case 7: b |= ((u64)end[6]) << 48; /* fall through */
+	case 6: b |= ((u64)end[5]) << 40; /* fall through */
+	case 5: b |= ((u64)end[4]) << 32; /* fall through */
 	case 4: b |= le32_to_cpup(data); break;
-	case 3: b |= ((u64)end[2]) << 16;
+	case 3: b |= ((u64)end[2]) << 16; /* fall through */
 	case 2: b |= le16_to_cpup(data); break;
 	case 1: b |= end[0];
 	}
@@ -101,11 +101,11 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
 					  bytemask_from_count(left)));
 #else
 	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
+	case 7: b |= ((u64)end[6]) << 48; /* fall through */
+	case 6: b |= ((u64)end[5]) << 40; /* fall through */
+	case 5: b |= ((u64)end[4]) << 32; /* fall through */
 	case 4: b |= get_unaligned_le32(end); break;
-	case 3: b |= ((u64)end[2]) << 16;
+	case 3: b |= ((u64)end[2]) << 16; /* fall through */
 	case 2: b |= get_unaligned_le16(end); break;
 	case 1: b |= end[0];
 	}
@@ -268,11 +268,11 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 					  bytemask_from_count(left)));
 #else
 	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
+	case 7: b |= ((u64)end[6]) << 48; /* fall through */
+	case 6: b |= ((u64)end[5]) << 40; /* fall through */
+	case 5: b |= ((u64)end[4]) << 32; /* fall through */
 	case 4: b |= le32_to_cpup(data); break;
-	case 3: b |= ((u64)end[2]) << 16;
+	case 3: b |= ((u64)end[2]) << 16; /* fall through */
 	case 2: b |= le16_to_cpup(data); break;
 	case 1: b |= end[0];
 	}
@@ -301,11 +301,11 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
 					  bytemask_from_count(left)));
 #else
 	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
+	case 7: b |= ((u64)end[6]) << 48; /* fall through */
+	case 6: b |= ((u64)end[5]) << 40; /* fall through */
+	case 5: b |= ((u64)end[4]) << 32; /* fall through */
 	case 4: b |= get_unaligned_le32(end); break;
-	case 3: b |= ((u64)end[2]) << 16;
+	case 3: b |= ((u64)end[2]) << 16; /* fall through */
 	case 2: b |= get_unaligned_le16(end); break;
 	case 1: b |= end[0];
 	}
@@ -431,7 +431,7 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 		v0 ^= m;
 	}
 	switch (left) {
-	case 3: b |= ((u32)end[2]) << 16;
+	case 3: b |= ((u32)end[2]) << 16; /* fall through */
 	case 2: b |= le16_to_cpup(data); break;
 	case 1: b |= end[0];
 	}
@@ -454,7 +454,7 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
 		v0 ^= m;
 	}
 	switch (left) {
-	case 3: b |= ((u32)end[2]) << 16;
+	case 3: b |= ((u32)end[2]) << 16; /* fall through */
 	case 2: b |= get_unaligned_le16(end); break;
 	case 1: b |= end[0];
 	}
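
The annotations above only silence -Wimplicit-fallthrough; the cascading switch itself is the interesting idiom. Each case ORs one more trailing byte into the accumulator and then deliberately falls into the next case, so a single switch handles every possible tail length. A standalone illustration (not kernel code) of the same technique:

#include <stddef.h>
#include <stdint.h>

/* Load the 0..7 trailing bytes at `end` into a little-endian u64. */
static uint64_t load_tail(const unsigned char *end, size_t left)
{
	uint64_t b = 0;

	switch (left) {
	case 7: b |= (uint64_t)end[6] << 48; /* fall through */
	case 6: b |= (uint64_t)end[5] << 40; /* fall through */
	case 5: b |= (uint64_t)end[4] << 32; /* fall through */
	case 4: b |= (uint64_t)end[3] << 24; /* fall through */
	case 3: b |= (uint64_t)end[2] << 16; /* fall through */
	case 2: b |= (uint64_t)end[1] << 8;  /* fall through */
	case 1: b |= end[0];
	}
	return b;
}
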
diff --git a/tools/include/linux/coresight-pmu.h b/tools/include/linux/coresight-pmu.h
index a1a959ba24ff..b0e35eec6499 100644
--- a/tools/include/linux/coresight-pmu.h
+++ b/tools/include/linux/coresight-pmu.h
@@ -12,11 +12,13 @@
 
 /* ETMv3.5/PTM's ETMCR config bit */
 #define ETM_OPT_CYCACC		12
+#define ETM_OPT_CTXTID		14
 #define ETM_OPT_TS		28
 #define ETM_OPT_RETSTK		29
 
 /* ETMv4 CONFIGR programming bits for the ETM OPTs */
 #define ETM4_CFG_BIT_CYCACC	4
+#define ETM4_CFG_BIT_CTXTID	6
 #define ETM4_CFG_BIT_TS		11
 #define ETM4_CFG_BIT_RETSTK	12
 
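
The CTXTID additions mirror the existing pairs in this header: perf passes ETM_OPT_x bits in the event config, and the tooling translates each one into the corresponding ETMv4 CONFIGR bit. A sketch of that translation (illustrative only, not the actual perf code):

#include <linux/coresight-pmu.h>

static unsigned long long etm4_config_from_perf(unsigned long long pconfig)
{
	unsigned long long config = 0;

	if (pconfig & (1ULL << ETM_OPT_CTXTID))
		config |= 1ULL << ETM4_CFG_BIT_CTXTID;
	if (pconfig & (1ULL << ETM_OPT_TS))
		config |= 1ULL << ETM4_CFG_BIT_TS;
	if (pconfig & (1ULL << ETM_OPT_RETSTK))
		config |= 1ULL << ETM4_CFG_BIT_RETSTK;
	return config;
}
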